#!/bin/bash
# Make sure to install Ksonnet first
# It will be required later
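# A minimal sketch of one way to install ksonnet on Linux (the version and
# release-asset name below are assumptions; adjust for your platform):
# KS_VERSION=0.13.1
# wget https://github.com/ksonnet/ksonnet/releases/download/v${KS_VERSION}/ks_${KS_VERSION}_linux_amd64.tar.gz
# tar xzf ks_${KS_VERSION}_linux_amd64.tar.gz
# sudo mv ks_${KS_VERSION}_linux_amd64/ks /usr/local/bin/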
NAMESPACE=kubeflow
oc new-project ${NAMESPACE}
oc project ${NAMESPACE}
# Set permissions for service accounts
for sa in ambassador argo argo-ui builder centraldashboard default deployer \
          jupyter jupyter-notebook jupyter-web-app katib-ui meta-controller-service \
          metrics-collector ml-pipeline ml-pipeline-persistenceagent \
          ml-pipeline-scheduledworkflow ml-pipeline-ui ml-pipeline-viewer-crd-service-account \
          notebook-controller pipeline-runner profiles pytorch-operator spartakus \
          studyjob-controller tf-job-dashboard tf-job-operator vizier-core; do
  oc adm policy add-scc-to-user anyuid -z ${sa} -n ${NAMESPACE}
done
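# Optional sanity check that the grants took effect (on OpenShift 3.x the
# granted users are listed directly on the SCC object):
# oc get scc anyuid -o yaml | grep ${NAMESPACE}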
export KUBEFLOW_SRC=kubeflow
export KUBEFLOW_TAG=v0.5-branch
mkdir ${KUBEFLOW_SRC}
cd ${KUBEFLOW_SRC}
curl https://raw.githubusercontent.com/kubeflow/kubeflow/${KUBEFLOW_TAG}/scripts/download.sh | bash
export KFAPP=openshift
scripts/kfctl.sh init ${KFAPP} --platform none
pushd openshift
../scripts/kfctl.sh generate k8s
../scripts/kfctl.sh apply k8s
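# Optional: watch the pods come up, then expose the Kubeflow UI (assuming the
# ambassador service fronts the central dashboard, as in upstream v0.5):
# oc get pods -n ${NAMESPACE} -w
# oc expose service ambassador -n ${NAMESPACE}
# oc get route ambassador -n ${NAMESPACE}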
#NOTE!!!!
# This step is manual at the moment, which is very annoying
# There is probably a way to do this with the oc CLI (a possible approach is sketched after this note)
###################
# To enable the argo-ui, edit the argo-ui deployment as shown below,
# then create a route to it once the pod restarts
#  - env:
#    - name: ARGO_NAMESPACE
#      valueFrom:
#        fieldRef:
#          apiVersion: v1
#          fieldPath: metadata.namespace
#    - name: IN_CLUSTER
#      value: 'true'
#    - name: ENABLE_WEB_CONSOLE  # Change starts here
#      value: 'false'            #
#    - name: BASE_HREF           #
#      value: /                  # Change ends here
###################
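# A possible scripted alternative, assuming the deployment is named argo-ui and
# accepts these env vars (untested sketch; verify against your deployment):
# oc set env deployment/argo-ui ENABLE_WEB_CONSOLE=false BASE_HREF=/ -n ${NAMESPACE}
# oc expose service argo-ui -n ${NAMESPACE}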
# Need RBAC roles for Argo to work
oc apply -f argo-role.yaml -n kubeflow
# It is also necessary to give additional (privileged) permissions to the argo service account:
oc adm policy add-scc-to-user privileged -z argo -n kubeflow
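# Optional check that the argo service account can now manage workflows
# (assumes the workflows.argoproj.io CRD name used by upstream Argo):
# oc auth can-i create workflows.argoproj.io --as=system:serviceaccount:kubeflow:argo -n kubeflow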
# Download the argo client
#wget https://github.com/argoproj/argo/releases/download/v2.3.0/argo-linux-amd64
# Download the MinIO client (mc)
#wget https://dl.min.io/client/mc/release/linux-amd64/mc
# Configure the minio CLI
#mc config host add minio http://[your-minio-service-URL] minio minio123
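# One way to reach the in-cluster MinIO service locally (assumes a service named
# minio listening on port 9000, as in the upstream pipelines manifests):
# oc port-forward svc/minio 9000:9000 -n kubeflow &
# mc config host add minio http://localhost:9000 minio minio123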
# Kubeflow is typically tested on GKE, which is significantly less strict than OpenShift,
# so additional RBAC roles are needed for TFJobs and StudyJobs
oc apply -f tfjobs-role.yaml -n kubeflow
oc apply -f studyjobs-role.yaml -n kubeflow
# Study job for Katib example
# oc create -f https://raw.githubusercontent.com/kubeflow/katib/master/examples/random-example.yaml
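# If you run the example, its progress can be checked with (assumes the StudyJob
# CRD installed by Katib v0.5):
# oc get studyjobs -n kubeflow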
# If you want to serve ML models from S3, you need to provide a secret to access AWS
# oc apply -f aws_access.yaml -n kubeflow
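# A generic sketch of creating such a secret with oc; the secret name and key
# names here are hypothetical and may differ from what aws_access.yaml expects:
# oc create secret generic aws-access \
#   --from-literal=AWS_ACCESS_KEY_ID=<your-key-id> \
#   --from-literal=AWS_SECRET_ACCESS_KEY=<your-secret-key> -n kubeflow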
# Or you can use minio
# mc cp saved_model.pb minio/serving/mnist/1/saved_model.pb
# The pipeline-runner service account also needs the privileged SCC
oc adm policy add-scc-to-user privileged -z pipeline-runner -n kubeflow
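# Optional: expose the pipelines UI through a route (assumes the service is
# named ml-pipeline-ui, matching its service account above):
# oc expose service ml-pipeline-ui -n kubeflow
# oc get route ml-pipeline-ui -n kubeflow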
# ModelDB is not part of the standard Kubeflow install, but it can be installed with ksonnet:
# ks generate modeldb modeldb
# ks apply default -c modeldb