docker_entry_point.sh
forked from TurkuNLP/Turku-neural-parser-pipeline
#!/bin/bash
hw_environment=$tnpp_hw   # set via the tnpp_hw environment variable; should be "cpu" or "gpu"
mode=$1                   # should be "stream", "server", or "cluster"
modelname=$2              # one of the models installed into this Docker image, e.g. fi_tdt
pipeline=$3               # name of a pipeline defined for that model, e.g. parse_plaintext
SERVER_PORT=7689          # Docker-internal port; map it to any host port when publishing
echo "DOCKER ENTRY HW $tnpp_hw" >&2
echo "DOCKER ENTRY ARGS $*" >&2
if [[ "$hw_environment" == "gpu" ]]
then
echo "Running in GPU"
gpu_arg=" "
else
echo "Running in CPU"
gpu_arg="--gpu -1"
fi
if [[ "$mode" == "stream" ]]
then
python3 full_pipeline_stream.py $gpu_arg --conf-yaml models_${modelname}/pipelines.yaml $pipeline
elif [[ "$mode" == "server" ]]
then
python3 full_pipeline_server.py $gpu_arg --host 0.0.0.0 --port $SERVER_PORT --conf-yaml models_${modelname}/pipelines.yaml $pipeline
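    # Example query once the server is up (a sketch; the request format is assumed
    # from the upstream pipeline's documentation, not defined in this script):
    #   curl --request POST --header 'Content-Type: text/plain; charset=utf-8' \
    #        --data-binary @input.txt http://localhost:7689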
elif [[ "$mode" == "cluster" ]]
then
ip=$(hostname -i | tr '.' '\n')
ip=($ip)
id=${ip[3]}
id=$(($id - 2))
echo "ID: "$id
python3 full_pipeline_server.py $gpu_arg --host 0.0.0.0 --port $SERVER_PORT --conf-yaml models_${modelname}_${id}/pipelines.yaml $pipeline
fi
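
# Sketch of a cluster-mode deployment (an assumption for illustration; no compose file
# ships with this repository). On a default user-defined network the gateway usually
# takes x.x.x.1 and replicas get x.x.x.2 upward, which the id arithmetic above maps
# to 0, 1, 2, ... Scaling a hypothetical "parser" service, e.g.
#   docker compose up --scale parser=3
# would then start workers with ids 0..2, each serving models_${modelname}_<id>.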