-
Notifications
You must be signed in to change notification settings - Fork 4
/
run-svd-test-cori.sh
58 lines (51 loc) · 2.12 KB
/
run-svd-test-cori.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
#!/bin/bash
#SBATCH -p debug
#SBATCH -N 30
#SBATCH -t 00:30:00
#SBATCH -e mysparkjob_%j.err
#SBATCH -o mysparkjob_%j.out
#SBATCH -C haswell
#
# Runs a rank-k SVD test (amplab.alchemist.BasicSuite) of Alchemist+Spark
# on NERSC Cori. Submit with: sbatch run-svd-test-cori.sh
# Requires: $SCRATCH set (NERSC default), setup/cori-start-alchemist.sh,
# and the assembled test jar under test/target/.
#
#module load collectl
#start-collectl.sh
# NB on choosing parameters: from
# echo | gcc -E -xc -include 'stddef.h' - | grep size_t
# the max array size on Cori is 2^32, which means can only
# have each chunk of the Elemental matrices be of size ~<4GB
# Example calculation: I want to use as few nodes as possible to hold a 400GB dataset
# say I give 4 cores per alchemist process, then 8 alchemist processes fit on one Cori node
# then to get less than 4GB/process, that means I'll need 100 processes at least, so
# will need 100/8 = 13 nodes. Note this is very memory inefficient, since will only be using
# 32 GB per node.
# Alternatively, doing flat MPI, can fit 32 processes per node. To get less than 4GB/process,
# need at least 100 processes, so need at least 100/32 = 4 nodes.
# To give Alchemist some memory room for intermediate operations, lets double the node count to 8 (and double the number of
# cores to 2 per process to ensure we actually get more memory per process)
#
# As for Spark, idk how to choose the number of executors. Take more than you should need, i guess.
# Spark is horrible w.r.t. using memory efficiently.

# darshan's MPI I/O instrumentation interferes with the Alchemist launch
module unload darshan

# Start Alchemist: 8 nodes, 2 cores per process (see sizing discussion above)
source setup/cori-start-alchemist.sh 8 2

method=SVD
# 2.5M by 10K double matrix is 200 GB
m=10000000
n=10000
k=20
# seems like if the partitions are too large, Spark will hang, so go for 2GB/partition
# 0 tells Spark to use default parallelism
#partitions=200
partitions=0

# Quote $SCRATCH-derived paths so word-splitting/globbing can't mangle them.
spark-submit --verbose \
  --driver-memory 120G \
  --executor-memory 120G \
  --executor-cores 32 \
  --driver-cores 32 \
  --num-executors 21 \
  --conf "spark.driver.extraLibraryPath=$SCRATCH/alchemistSHELL/alchemist/lib" \
  --conf "spark.executor.extraLibraryPath=$SCRATCH/alchemistSHELL/alchemist/lib" \
  --conf spark.eventLog.enabled=true \
  --conf "spark.eventLog.dir=$SCRATCH/spark/event_logs" \
  --class amplab.alchemist.BasicSuite \
  test/target/scala-2.11/alchemist-tests-assembly-0.0.2.jar $method $m $n $k $partitions 2>&1 | tee test.log
# tee always succeeds, so grab spark-submit's own status from PIPESTATUS
# before cleanup, and report it as the job's exit code.
rc=${PIPESTATUS[0]}
stop-all.sh
exit "$rc"
#stop-collectl.sh