simulate.py: Run tests in parallel
colluca committed Sep 22, 2023
1 parent 4581ed9 commit 1273cc2
Showing 4 changed files with 97 additions and 31 deletions.
6 changes: 4 additions & 2 deletions .github/workflows/ci.yml
@@ -43,7 +43,8 @@ jobs:
- name: Run Tests
working-directory: target/snitch_cluster
run: |-
../../util/sim/simulate.py sw/run.yaml --simulator verilator
../../util/sim/simulate.py sw/run.yaml --simulator verilator -j \
--verbose
############################################
# Build SW on Snitch Cluster w/ Banshee #
@@ -66,4 +67,5 @@ jobs:
SNITCH_LOG: info
working-directory: target/snitch_cluster
run: |-
../../util/sim/simulate.py sw/run.yaml --simulator banshee
../../util/sim/simulate.py sw/run.yaml --simulator banshee -j \
--verbose
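
For reference, the bare -j in these commands means "use all CPU cores". A minimal sketch of how simulate.py interprets the flag, assuming the argparse definition added further down in this diff:

import argparse
import os

parser = argparse.ArgumentParser()
# Same option shape as the '-j' added to simulate.py below:
# omitted -> 1, bare '-j' -> os.cpu_count(), '-j 4' -> 4.
parser.add_argument('-j', action='store', dest='n_procs', nargs='?',
                    type=int, default=1, const=os.cpu_count())

print(parser.parse_args([]).n_procs)           # 1
print(parser.parse_args(['-j']).n_procs)       # number of CPU cores
print(parser.parse_args(['-j', '4']).n_procs)  # 4
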
9 changes: 5 additions & 4 deletions .gitlab-ci.yml
@@ -93,7 +93,7 @@ snitch-cluster-vlt:
script:
- cd target/snitch_cluster
- $VERILATOR make bin/snitch_cluster.vlt
- $VERILATOR ../../util/sim/simulate.py sw/run.yaml --simulator verilator
- $VERILATOR ../../util/sim/simulate.py sw/run.yaml --simulator verilator -j --verbose
# yamllint enable rule:line-length

# VCS
@@ -102,15 +102,16 @@ snitch-cluster-vcs:
script:
- cd target/snitch_cluster
- $VCS make bin/snitch_cluster.vcs
- $VCS ../../util/sim/simulate.py sw/run.yaml --simulator vcs
- $VCS ../../util/sim/simulate.py sw/run.yaml --simulator vcs -j --verbose

# Questa
snitch-cluster-vsim:
needs: [snitch-cluster-sw]
script:
- cd target/snitch_cluster
- $QUESTA make bin/snitch_cluster.vsim
- $QUESTA ../../util/sim/simulate.py sw/run.yaml --simulator vsim
- $QUESTA ../../util/sim/simulate.py sw/run.yaml --simulator vsim -j
--verbose

# Banshee
snitch-cluster-banshee:
@@ -126,4 +127,4 @@ snitch-cluster-banshee:
- cd banshee
- cargo install --debug --path .
- cd ../target/snitch_cluster
- ../../util/sim/simulate.py sw/run.yaml --simulator banshee
- ../../util/sim/simulate.py sw/run.yaml --simulator banshee -j --verbose
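
The --verbose flag only has an effect once several tests run concurrently: each worker buffers its simulator output, and the parent prints a test's whole log when that test finishes, so output from parallel runs does not interleave; with a single process the log is still streamed line by line. A rough sketch of that policy, using a hypothetical emit_log helper:

# Hypothetical helper, not part of the commit: the log policy behind --verbose.
def emit_log(log, verbose, n_procs):
    if n_procs == 1:
        return  # already streamed line by line while the simulation ran
    if verbose:
        print(log)  # print the buffered log in one piece to avoid interleaving
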
2 changes: 1 addition & 1 deletion target/common/common.mk
@@ -154,7 +154,7 @@ define QUESTASIM
@echo 'binary=$$(realpath --relative-to=${MKFILE_DIR} $$1)' >> $@
@echo 'cd ${MKFILE_DIR}' >> $@
@echo 'echo $$binary > $(LOGS_DIR)/.rtlbinary' >> $@
@echo '${VSIM} +permissive ${VSIM_FLAGS} -work ${MKFILE_DIR}/${VSIM_BUILDDIR} -c \
@echo '${VSIM} +permissive ${VSIM_FLAGS} $$3 -work ${MKFILE_DIR}/${VSIM_BUILDDIR} -c \
-ldflags "-Wl,-rpath,${FESVR}/lib -L${FESVR}/lib -lfesvr -lutil" \
$1 +permissive-off ++$$binary ++$$2' >> $@
@chmod +x $@
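
The extra $$3 in the QUESTASIM recipe pairs with the updated 'vsim' command template in simulate.py ('{sim_bin} {elf} "" -batch'): the generated wrapper script receives the ELF as $1, the plusargs string as $2, and any additional vsim flags as $3. A sketch of the expansion, with hypothetical paths:

# Sketch of the command the new 'vsim' template produces (paths are made up).
cmd_template = '{sim_bin} {elf} "" -batch'
cmd = cmd_template.format(sim_bin='bin/snitch_cluster.vsim',
                          elf='sw/build/hello.elf')
print(cmd)
# bin/snitch_cluster.vsim sw/build/hello.elf "" -batch
# In the wrapper generated by common.mk, the ELF maps to $1, "" to $2 and
# -batch to $3; the new $$3 forwards that third argument to vsim,
# presumably so unattended parallel runs stay strictly non-interactive.
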
111 changes: 87 additions & 24 deletions util/sim/simulate.py
@@ -8,11 +8,14 @@
# TODO colluca: timeout feature

import argparse
import multiprocessing
from pathlib import Path
import subprocess
from termcolor import colored, cprint
import os
import re
import sys
import time
import yaml


@@ -28,7 +31,7 @@
'vcs': 'bin/snitch_cluster.vcs'
}
SIMULATOR_CMDS = {
'vsim': '{sim_bin} {elf}',
'vsim': '{sim_bin} {elf} "" -batch',
'banshee': ('{{sim_bin}} --no-opt-llvm --no-opt-jit --configuration {cfg}'
' --trace {{elf}} > /dev/null').format(cfg=BANSHEE_CFG),
'verilator': '{sim_bin} {elf}',
@@ -62,6 +65,22 @@ def parse_args():
'--early-exit',
action='store_true',
help='Exit as soon as any test fails')
parser.add_argument(
'-j',
action='store',
dest='n_procs',
nargs='?',
type=int,
default=1,
const=os.cpu_count(),
help=('Maximum number of tests to run in parallel. '
'One if the option is not present. Equal to the number of CPU cores '
'if the option is present but not followed by an argument.'))
parser.add_argument(
'--verbose',
action='store_true',
help=('Option to print simulation logs when multiple tests are run in parallel.'
'Logs are always printed when n_procs == 1'))
args = parser.parse_args()
return args

@@ -81,17 +100,25 @@ def check_exit_code(test, exit_code):
return exit_code


def run_simulation(cmd, simulator, test):
def multiple_processes(args):
return args.n_procs != 1


def run_simulation(cmd, simulator, test, quiet=False):
# Defaults
result = 1
log = ''

# Spawn simulation subprocess
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)

# Poll simulation subprocess and log its output
while p.poll() is None:
line = p.stdout.readline()
print(line, end='', flush=True)
log += line
if not quiet:
print(line, end='', flush=True)

# When simulating with vsim or vcs, we need to parse the simulation
# log to catch the application's return code
@@ -123,7 +150,7 @@ def run_simulation(cmd, simulator, test):
if exit_code != 0:
result = exit_code

return result
return result, log


@@ -132,11 +159,12 @@ def run_test(test, args):
sim_bin = args.sim_bin if args.sim_bin else SIMULATOR_BINS[simulator]
dry_run = args.dry_run
testlist = args.testlist
quiet = multiple_processes(args)

# Check if simulator is supported for this test
if 'simulators' in test:
if simulator not in test['simulators']:
return 0
return (0, '')

# Construct path to executable
elf = Path(test['elf'])
@@ -152,52 +180,87 @@
else:
cmd = SIMULATOR_CMDS[simulator]
cmd = cmd.format(sim_bin=sim_bin, elf=elf)
print(f'$ {cmd}', flush=True)
if not quiet:
print(f'$ {cmd}', flush=True)

# Run simulation
result = 0
log = ''
if not dry_run:
result = run_simulation(cmd, simulator, test)
result, log = run_simulation(cmd, simulator, test, quiet)

# Report failure or success
if result != 0:
cprint(f'{elf} test failed', 'red', attrs=['bold'], flush=True)
else:
cprint(f'{elf} test passed', 'green', attrs=['bold'], flush=True)

return result
return (result, log)


def print_failed_test(test):
print(f'{colored(test["elf"], "cyan")} test {colored("failed", "red")}')


def print_test_summary(failed_tests, dry_run=False):
if not dry_run:
print('\n==== Test summary ====')
def print_test_summary(failed_tests, args):
if not args.dry_run:
header = f'\n==== Test summary {"(early exit)" if args.early_exit else ""} ===='
cprint(header, attrs=['bold'])
if failed_tests:
for failed_test in failed_tests:
print_failed_test(failed_test)
return 1
else:
print(f'{colored("All tests passed!", "green")}')
return 0
return 0


def run_tests(args):
# Iterate tests

# Get tests from testlist
tests = get_tests(args.testlist)

# Create a process Pool
with multiprocessing.Pool(args.n_procs) as pool:

# Create a shared object which parent and child processes can access
# concurrently to terminate the pool early as soon as one process fails
exit_early = multiprocessing.Value('B')
exit_early.value = 0

# Define callback for early exit
def completion_callback(return_value):
result = return_value[0]
log = return_value[1]
if args.early_exit and result != 0:
exit_early.value = 1
# Printing the log all at once here, rather than line-by-line
# in run_simulation, ensures that the logs of different processes
# are not interleaved in stdout.
# However, as we prefer line-by-line printing when a single process
# is used, we have to make sure we don't print twice.
if args.verbose and multiple_processes(args):
print(log)

# Queue tests to process pool
results = []
for test in tests:
result = pool.apply_async(run_test, args=(test, args), callback=completion_callback)
results.append(result)

# Wait for all tests to complete
running = range(len(tests))
while len(running) != 0 and not exit_early.value:
time.sleep(1)
running = [i for i in running if not results[i].ready()]

# Query test results
failed_tests = []
for test in tests:
# Run test
result = run_test(test, args)
if result != 0:
for test, result in zip(tests, results):
if result.ready() and result.get()[0] != 0:
failed_tests.append(test)
# End program if requested on first test failure
if args.early_exit:
break
return print_test_summary(failed_tests, args.dry_run)

print_test_summary(failed_tests, args)

return len(failed_tests)


def main():
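
Taken together, run_tests() now follows a standard multiprocessing.Pool pattern: tests are queued with apply_async, a completion callback prints the buffered log and raises a shared flag on failure, and the parent polls the results so it can stop waiting as soon as one test fails when --early-exit is given. A condensed, self-contained sketch of the same pattern (the worker function, test count and timings are made up):

import multiprocessing
import time


def run_one(i):
    # Stand-in for run_test(): returns (result, log) like the real function.
    time.sleep(0.1 * i)
    return (1 if i == 3 else 0, f'log of test {i}\n')


def main():
    early_exit = True
    n_tests = 6
    with multiprocessing.Pool(4) as pool:
        # Shared flag the callback can raise to make the parent stop waiting.
        stop = multiprocessing.Value('B')
        stop.value = 0

        def on_done(ret):
            result, log = ret
            if early_exit and result != 0:
                stop.value = 1
            print(log, end='')  # whole log at once, so outputs don't interleave

        handles = [pool.apply_async(run_one, (i,), callback=on_done)
                   for i in range(n_tests)]

        # Poll instead of joining, so pending tests can be abandoned early.
        pending = list(range(n_tests))
        while pending and not stop.value:
            time.sleep(0.1)
            pending = [i for i in pending if not handles[i].ready()]

    failed = [i for i, h in enumerate(handles)
              if h.ready() and h.get()[0] != 0]
    print(f'{len(failed)} test(s) failed')


if __name__ == '__main__':
    main()
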
