From 425c10c405a400f018de3778eb2716209c125664 Mon Sep 17 00:00:00 2001 From: Alexander Litvinenko Date: Thu, 26 Nov 2015 16:09:19 +0200 Subject: [PATCH 1/4] Bump package versions --- roles/docker/defaults/main.yml | 2 +- roles/etcd/defaults/main.yml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index a850ff2..e779068 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -1,2 +1,2 @@ --- -docker_version: 1.7.1 +docker_version: 1.8.2 diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 67bb8df..0e708e8 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -1,5 +1,5 @@ --- -ectd_version: 2.0.13 +ectd_version: 2.1.1 etcd_client_port: 2379 etcd_peer_port: 2380 etcd_url_scheme: http @@ -22,4 +22,3 @@ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ ansible_hostname }}:{{ e etcd_listen_client_urls: "{{ etcd_url_scheme }}://0.0.0.0:{{ etcd_client_port }}" etcd_data_dir: /var/lib/etcd - From 5bd8350b3b84e3ec025ac292f67d1dbb32586f8c Mon Sep 17 00:00:00 2001 From: Alexander Litvinenko Date: Thu, 26 Nov 2015 16:23:58 +0200 Subject: [PATCH 2/4] Update addons --- roles/addons/files/fluentd-es.yaml | 29 +++++++++++++ roles/addons/files/grafana-service.yaml | 7 ++- roles/addons/files/heapster-controller.yaml | 14 +++--- roles/addons/files/heapster-service.yaml | 2 +- .../files/influxdb-grafana-controller.yaml | 43 +++++++++++++------ roles/addons/files/kube-ui-rc.yaml | 10 ++--- roles/addons/tasks/kube-ui.yml | 4 +- roles/addons/tasks/logging.yml | 2 +- roles/addons/tasks/main.yml | 43 ------------------- roles/addons/tasks/monitoring.yml | 7 ++- roles/kubernetes-master/files/fluentd-es.yaml | 29 +++++++++++++ 11 files changed, 113 insertions(+), 77 deletions(-) create mode 100644 roles/addons/files/fluentd-es.yaml create mode 100644 roles/kubernetes-master/files/fluentd-es.yaml diff --git a/roles/addons/files/fluentd-es.yaml b/roles/addons/files/fluentd-es.yaml new file mode 100644 index 0000000..caf9052 --- /dev/null +++ b/roles/addons/files/fluentd-es.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: fluentd-elasticsearch + namespace: kube-system +spec: + containers: + - name: fluentd-elasticsearch + image: gcr.io/google_containers/fluentd-elasticsearch:1.11 + resources: + limits: + cpu: 100m + args: + - -qq + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + diff --git a/roles/addons/files/grafana-service.yaml b/roles/addons/files/grafana-service.yaml index f1cfb8a..9140e8b 100644 --- a/roles/addons/files/grafana-service.yaml +++ b/roles/addons/files/grafana-service.yaml @@ -6,10 +6,13 @@ metadata: labels: kubernetes.io/cluster-service: "true" kubernetes.io/name: "Grafana" -spec: +spec: + # On production clusters, consider setting up auth for grafana, and + # exposing Grafana either using a LoadBalancer or a public IP. 
+ # type: LoadBalancer ports: - port: 80 - targetPort: 8080 + targetPort: 3000 selector: k8s-app: influxGrafana diff --git a/roles/addons/files/heapster-controller.yaml b/roles/addons/files/heapster-controller.yaml index fbc3bad..3cc0921 100644 --- a/roles/addons/files/heapster-controller.yaml +++ b/roles/addons/files/heapster-controller.yaml @@ -1,32 +1,34 @@ apiVersion: v1 kind: ReplicationController metadata: - name: monitoring-heapster-v8 + name: heapster-v10 namespace: kube-system labels: k8s-app: heapster - version: v8 + version: v10 kubernetes.io/cluster-service: "true" spec: replicas: 1 selector: k8s-app: heapster - version: v8 + version: v10 template: metadata: labels: k8s-app: heapster - version: v8 + version: v10 kubernetes.io/cluster-service: "true" spec: containers: - - image: gcr.io/google_containers/heapster:v0.17.0 + - image: gcr.io/google_containers/heapster:v0.18.2 name: heapster resources: limits: cpu: 100m - memory: 300Mi + memory: 500Mi command: - /heapster - --source=kubernetes:'' - --sink=influxdb:http://monitoring-influxdb:8086 + - --stats_resolution=10s + - --sink_frequency=10s diff --git a/roles/addons/files/heapster-service.yaml b/roles/addons/files/heapster-service.yaml index 88c1139..e406d69 100644 --- a/roles/addons/files/heapster-service.yaml +++ b/roles/addons/files/heapster-service.yaml @@ -1,7 +1,7 @@ kind: Service apiVersion: v1 metadata: - name: monitoring-heapster + name: heapster namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/addons/files/influxdb-grafana-controller.yaml b/roles/addons/files/influxdb-grafana-controller.yaml index 8a27630..be23f4c 100644 --- a/roles/addons/files/influxdb-grafana-controller.yaml +++ b/roles/addons/files/influxdb-grafana-controller.yaml @@ -1,26 +1,26 @@ apiVersion: v1 kind: ReplicationController metadata: - name: monitoring-influx-grafana-v1 + name: monitoring-influxdb-grafana-v2 namespace: kube-system labels: k8s-app: influxGrafana - version: v1 + version: v2 kubernetes.io/cluster-service: "true" spec: replicas: 1 selector: k8s-app: influxGrafana - version: v1 + version: v2 template: metadata: labels: k8s-app: influxGrafana - version: v1 + version: v2 kubernetes.io/cluster-service: "true" spec: containers: - - image: gcr.io/google_containers/heapster_influxdb:v0.3 + - image: gcr.io/google_containers/heapster_influxdb:v0.4 name: influxdb resources: limits: @@ -34,20 +34,37 @@ spec: volumeMounts: - name: influxdb-persistent-storage mountPath: /data - - image: gcr.io/google_containers/heapster_grafana:v0.7 + - image: gcr.io/google_containers/heapster_grafana:v2.1.1 name: grafana + env: resources: limits: cpu: 100m memory: 100Mi - env: - - name: INFLUXDB_EXTERNAL_URL - value: /api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api/db/ - - name: INFLUXDB_HOST - value: monitoring-influxdb - - name: INFLUXDB_PORT - value: "8086" + env: + # This variable is required to setup templates in Grafana. + - name: INFLUXDB_SERVICE_URL + value: http://monitoring-influxdb:8086 + # The following env variables are required to make Grafana accessible via + # the kubernetes api-server proxy. On production clusters, we recommend + # removing these env variables, setup auth for grafana, and expose the grafana + # service using a LoadBalancer or a public IP. 
+ - name: GF_AUTH_BASIC_ENABLED + value: "false" + - name: GF_AUTH_ANONYMOUS_ENABLED + value: "true" + - name: GF_AUTH_ANONYMOUS_ORG_ROLE + value: Admin + - name: GF_SERVER_ROOT_URL + value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/ + volumeMounts: + - name: grafana-persistent-storage + mountPath: /var + volumes: - name: influxdb-persistent-storage emptyDir: {} + - name: grafana-persistent-storage + emptyDir: {} + diff --git a/roles/addons/files/kube-ui-rc.yaml b/roles/addons/files/kube-ui-rc.yaml index e853a21..4bfdf38 100644 --- a/roles/addons/files/kube-ui-rc.yaml +++ b/roles/addons/files/kube-ui-rc.yaml @@ -1,27 +1,27 @@ apiVersion: v1 kind: ReplicationController metadata: - name: kube-ui-v1 + name: kube-ui-v3 namespace: kube-system labels: k8s-app: kube-ui - version: v1 + version: v3 kubernetes.io/cluster-service: "true" spec: replicas: 1 selector: k8s-app: kube-ui - version: v1 + version: v3 template: metadata: labels: k8s-app: kube-ui - version: v1 + version: v3 kubernetes.io/cluster-service: "true" spec: containers: - name: kube-ui - image: gcr.io/google_containers/kube-ui:v1.1 + image: gcr.io/google_containers/kube-ui:v3 resources: limits: cpu: 100m diff --git a/roles/addons/tasks/kube-ui.yml b/roles/addons/tasks/kube-ui.yml index ea12cc0..a1b8ff9 100644 --- a/roles/addons/tasks/kube-ui.yml +++ b/roles/addons/tasks/kube-ui.yml @@ -26,7 +26,7 @@ kube: namespace: kube-system resource: rc - name: kube-ui-v1 + name: kube-ui-v3 filename: "{{ kube_manifest_dir }}/kube-ui-rc.yaml" state: "{{ kube_ui_rc_def.changed | ternary('latest','present') }}" when: enable_ui @@ -45,4 +45,4 @@ when: enable_ui tags: - addons - - kube-ui \ No newline at end of file + - kube-ui diff --git a/roles/addons/tasks/logging.yml b/roles/addons/tasks/logging.yml index 19be1eb..309a189 100644 --- a/roles/addons/tasks/logging.yml +++ b/roles/addons/tasks/logging.yml @@ -93,4 +93,4 @@ when: enable_logging tags: - addons - - logging \ No newline at end of file + - logging diff --git a/roles/addons/tasks/main.yml b/roles/addons/tasks/main.yml index 4e86436..66cc4d5 100644 --- a/roles/addons/tasks/main.yml +++ b/roles/addons/tasks/main.yml @@ -1,47 +1,4 @@ --- -- name: Write kube-system namespace manifest - sudo: yes - copy: - src=kube-system.yaml - dest={{ kube_manifest_dir }}/kube-system.yaml - -- name: Create kube-system namespace - sudo: yes - kube: - resource: namespace - name: kube-system - filename: "{{ kube_manifest_dir }}/kube-system.yaml" - state: present - when: "'first_master' in group_names" - tags: - - addons - -- name: tokens | generate tokens for addons - local_action: command - bash -c "{{ playbook_dir }}/{{ cert_syncdir.path }}/kube-gen-token.sh {{ item }}" - environment: - TOKEN_DIR: "{{ tokens_syncdir.path }}" - with_items: - - "system:dns" - - "system:monitoring" - - "system:logging" - register: gentoken - run_once: true - changed_when: "'Added' in gentoken.stdout" - -- name: tokens | upload known_tokens to master - sudo: yes - copy: - src: "{{ tokens_syncdir.path }}/known_tokens.csv" - dest: "{{ kube_token_dir }}" - group: "{{ kube_cert_group }}" - owner: kube - mode: 0440 - notify: - - restart apiserver - tags: - - addons - - include: skydns.yml when: dns_setup and 'first_master' in group_names diff --git a/roles/addons/tasks/monitoring.yml b/roles/addons/tasks/monitoring.yml index a9c1a69..0deea1d 100644 --- a/roles/addons/tasks/monitoring.yml +++ b/roles/addons/tasks/monitoring.yml @@ -59,7 +59,7 @@ kube: namespace: kube-system resource: rc - name: 
monitoring-influx-grafana-v1 + name: monitoring-influxdb-grafana-v2 filename: "{{ kube_manifest_dir }}/influxdb-grafana-controller.yaml" state: "{{ influxdb_rc_def.changed | ternary('latest','present') }}" when: enable_monitoring @@ -98,7 +98,7 @@ kube: namespace: kube-system resource: rc - name: monitoring-heapster-v8 + name: heapster-v10 filename: "{{ kube_manifest_dir }}/heapster-controller.yaml" state: "{{ heapster_rc_def.changed | ternary('latest','present') }}" when: enable_monitoring @@ -111,11 +111,10 @@ kube: namespace: kube-system resource: svc - name: monitoring-heapster + name: heapster filename: "{{ kube_manifest_dir }}/heapster-service.yaml" state: "{{ heapster_svc_def.changed | ternary('latest','present') }}" when: enable_monitoring tags: - addons - monitoring - diff --git a/roles/kubernetes-master/files/fluentd-es.yaml b/roles/kubernetes-master/files/fluentd-es.yaml new file mode 100644 index 0000000..caf9052 --- /dev/null +++ b/roles/kubernetes-master/files/fluentd-es.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: fluentd-elasticsearch + namespace: kube-system +spec: + containers: + - name: fluentd-elasticsearch + image: gcr.io/google_containers/fluentd-elasticsearch:1.11 + resources: + limits: + cpu: 100m + args: + - -qq + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + From 8a2f3a7fe9f0a56c2994548a62fe6831aff6fd72 Mon Sep 17 00:00:00 2001 From: Alexander Litvinenko Date: Thu, 26 Nov 2015 16:25:29 +0200 Subject: [PATCH 3/4] Remove secrets role, now it's really small all tasks now in kubernetes role and tagged as secrets --- roles/secrets/files/kube-gen-token.sh | 33 ---------- roles/secrets/files/make-ca-cert.sh | 87 --------------------------- roles/secrets/tasks/gen_certs.yml | 23 ------- roles/secrets/tasks/gen_tokens.yml | 46 -------------- roles/secrets/tasks/main.yml | 8 --- 5 files changed, 197 deletions(-) delete mode 100644 roles/secrets/files/kube-gen-token.sh delete mode 100644 roles/secrets/files/make-ca-cert.sh delete mode 100644 roles/secrets/tasks/gen_certs.yml delete mode 100644 roles/secrets/tasks/gen_tokens.yml delete mode 100644 roles/secrets/tasks/main.yml diff --git a/roles/secrets/files/kube-gen-token.sh b/roles/secrets/files/kube-gen-token.sh deleted file mode 100644 index 7bb3c67..0000000 --- a/roles/secrets/files/kube-gen-token.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -token_dir=${TOKEN_DIR:-/var/srv/kubernetes} -token_file="${token_dir}/known_tokens.csv" - -mkdir -p "${token_dir}" - -create_accounts=($@) - -touch "${token_file}" -for account in "${create_accounts[@]}"; do - if grep ",${account}," "${token_file}" ; then - continue - fi - token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - echo "${token},${account},${account}" >> "${token_file}" - echo "${token}" > "${token_dir}/${account}.token" - echo "Added ${account}" -done diff --git a/roles/secrets/files/make-ca-cert.sh b/roles/secrets/files/make-ca-cert.sh deleted file mode 100644 index d133497..0000000 --- a/roles/secrets/files/make-ca-cert.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# Caller should set in the ev: -# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_" -# DNS_DOMAIN - which will be passed to minions in --cluster_domain -# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated -# MASTER_NAME - I'm not sure what it is... - -# Also the following will be respected -# CERT_DIR - where to place the finished certs -# CERT_GROUP - who the group owner of the cert files should be - -service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}" -dns_domain="${DNS_DOMAIN:="cluster.local"}" -cert_dir="${CERT_DIR:-"/tmp/certs"}" -cert_group="${CERT_GROUP:="kube-cert"}" -masters="${MASTERS}" - -# The following certificate pairs are created: -# -# - ca (the cluster's certificate authority) -# - server -# - kubelet -# - kubecfg (for kubectl) - -tmpdir=$(mktemp -d -t kubernetes_cacert.XXXXXX) -trap 'rm -rf "${tmpdir}"' EXIT -cd "${tmpdir}" - -# Calculate the first ip address in the service range -octects=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g')) -((octects[3]+=1)) -service_ip=$(echo "${octects[*]}" | sed 's/ /./g') - -# Determine appropriete subject alt names -sans="IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain}" -hosts=$(for host in ${masters}; do echo DNS:${host}; done|tr [[:space:]] ,) -sans="$sans,${hosts%?}" - -curl -L -O https://github.com/OpenVPN/easy-rsa/releases/download/3.0.0/EasyRSA-3.0.0.tgz > /dev/null 2>&1 -tar xzf EasyRSA-3.0.0.tgz > /dev/null -cd EasyRSA-3.0.0 - -(./easyrsa init-pki > /dev/null 2>&1 - ./easyrsa --batch "--req-cn=kubernetes@$(date +%s)" build-ca nopass > /dev/null 2>&1 - ./easyrsa --subject-alt-name="${sans}" build-server-full server nopass > /dev/null 2>&1 - ./easyrsa build-client-full kubelet nopass > /dev/null 2>&1 - ./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || { - # If there was an error in the subshell, just die. 
- # TODO(roberthbailey): add better error handling here - echo "=== Failed to generate certificates: Aborting ===" - exit 2 - } - -mkdir -p "$cert_dir" - -cp -p pki/ca.crt "${cert_dir}/ca.crt" -cp -p pki/issued/server.crt "${cert_dir}/server.crt" -cp -p pki/private/server.key "${cert_dir}/server.key" -cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt" -cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key" -cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt" -cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key" - -CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt") -for cert in "${CERTS[@]}"; do -# chgrp "${cert_group}" "${cert_dir}/${cert}" - chmod 666 "${cert_dir}/${cert}" -done diff --git a/roles/secrets/tasks/gen_certs.yml b/roles/secrets/tasks/gen_certs.yml deleted file mode 100644 index 63cffec..0000000 --- a/roles/secrets/tasks/gen_certs.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: create temp directory for certs syncing - local_action: file path=".syncdir" state=directory recurse=yes - run_once: true - register: cert_syncdir - -- name: certs | install cert generation script - local_action: copy src=make-ca-cert.sh dest="{{ cert_syncdir.path }}/make-ca-cert.sh" mode=u+x - run_once: true - changed_when: false - -- name: certs | run cert generation script - local_action: command - "{{ playbook_dir }}/{{ cert_syncdir.path }}/make-ca-cert.sh" - args: - creates: "{{ cert_syncdir.path }}/certs/server.crt" - environment: - #MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" - MASTERS: "{{ groups['master'] | join(' ') }}" - DNS_DOMAIN: "{{ dns_domain }}" - SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}" - CERT_DIR: "{{ playbook_dir }}/{{ cert_syncdir.path }}/certs" - run_once: true diff --git a/roles/secrets/tasks/gen_tokens.yml b/roles/secrets/tasks/gen_tokens.yml deleted file mode 100644 index 316e107..0000000 --- a/roles/secrets/tasks/gen_tokens.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: create temp directory for tokens syncing - local_action: file path="{{ cert_syncdir.path }}/tokens" state=directory recurse=yes - run_once: true - register: tokens_syncdir - -- name: certs | install token generation script - local_action: copy src=kube-gen-token.sh dest="{{ cert_syncdir.path }}/kube-gen-token.sh" mode=u+x - run_once: true - changed_when: false - -- name: tokens | generate token for kubectl - local_action: command - bash -c "{{ playbook_dir }}/{{ cert_syncdir.path }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" - environment: - TOKEN_DIR: "{{ tokens_syncdir.path }}" - with_nested: - - [ "system:kubectl" ] - - [ "kubecfg"] - register: gentoken - run_once: true - changed_when: "'Added' in gentoken.stdout" - -- name: tokens | generate tokens for master components - local_action: command - bash -c "{{ playbook_dir }}/{{ cert_syncdir.path }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" - environment: - TOKEN_DIR: "{{ tokens_syncdir.path }}" - with_nested: - - [ "system:controller_manager", "system:scheduler", "system:proxy" ] - - "{{ groups['master'] }}" - register: gentoken - run_once: true - changed_when: "'Added' in gentoken.stdout" - -- name: tokens | generate tokens for node components - local_action: command - bash -c "{{ playbook_dir }}/{{ cert_syncdir.path }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" - environment: - TOKEN_DIR: "{{ tokens_syncdir.path }}" - with_nested: - - [ 'system:kubelet', 'system:proxy' ] - - "{{ groups['node'] }}" - register: gentoken - run_once: 
true - changed_when: "'Added' in gentoken.stdout" diff --git a/roles/secrets/tasks/main.yml b/roles/secrets/tasks/main.yml deleted file mode 100644 index fa9cbef..0000000 --- a/roles/secrets/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- include: gen_certs.yml - tags: secrets - run_once: true - -- include: gen_tokens.yml - tags: secrets - run_once: true From de4a909129275d530eb61726176b9137aeed37f7 Mon Sep 17 00:00:00 2001 From: Alexander Litvinenko Date: Thu, 26 Nov 2015 16:32:30 +0200 Subject: [PATCH 4/4] Main part of rework: - Role names changed to kubernetes- - Flannel and docker is now installing on master - Token generation and usage are removed - Changed kubernetes packaging to upstream binaries and containers - Kubernetes components are running in pods via standalone kubelet - Multi-master is now fully functional with leader election --- group_vars/all.yml | 21 ++- roles/common/tasks/main.yml | 31 +++- roles/flannel/handlers/main.yml | 1 - roles/flannel/tasks/main.yml | 4 +- roles/kubernetes-master/handlers/main.yml | 10 ++ roles/kubernetes-master/tasks/main.yml | 90 ++++++++++++ roles/kubernetes-master/tasks/secrets.yml | 26 ++++ .../templates/kube-apiserver.yml | 44 ++++++ .../templates/kube-controller-manager.yml | 31 ++++ .../templates/kube-podmaster.yml | 48 ++++++ .../templates/kube-proxy.yml | 16 ++ .../templates/kube-scheduler.yml | 21 +++ .../templates/kube-system.yml | 4 + .../templates/kubelet.service.j2 | 19 +++ .../files/fluentd-es.yaml | 0 roles/kubernetes-node/handlers/main.yml | 10 ++ roles/kubernetes-node/tasks/main.yml | 73 ++++++++++ roles/kubernetes-node/tasks/secrets.yml | 14 ++ .../kubernetes-node/templates/kube-proxy.yml | 27 ++++ .../templates/kubelet.service.j2 | 23 +++ .../templates/node.kubeconfig.j2 | 17 +++ roles/kubernetes/defaults/main.yml | 13 +- roles/kubernetes/files/kube-gen-token.sh | 31 ---- roles/kubernetes/files/make-ca-cert.sh | 2 +- roles/kubernetes/tasks/main.yml | 33 +++-- roles/kubernetes/tasks/secrets.yml | 43 ++++++ roles/kubernetes/templates/config.j2 | 26 ---- roles/master/handlers/main.yml | 34 ----- roles/master/tasks/main.yml | 137 ------------------ roles/master/tasks/secrets.yml | 58 -------- roles/master/tasks/stable.yml | 9 -- roles/master/tasks/testing.yml | 10 -- roles/master/templates/apiserver.j2 | 26 ---- roles/master/templates/controller-manager.j2 | 7 - .../controller-manager.kubeconfig.j2 | 18 --- roles/master/templates/kubectl.kubeconfig.j2 | 18 --- roles/master/templates/proxy.j2 | 7 - roles/master/templates/proxy.kubeconfig.j2 | 18 --- roles/master/templates/scheduler.j2 | 7 - .../master/templates/scheduler.kubeconfig.j2 | 18 --- roles/minion/handlers/main.yml | 19 --- roles/minion/tasks/main.yml | 83 ----------- roles/minion/tasks/secrets.yml | 37 ----- roles/minion/tasks/stable.yml | 9 -- roles/minion/tasks/testing.yml | 10 -- roles/minion/templates/kubelet.j2 | 21 --- roles/minion/templates/kubelet.kubeconfig.j2 | 18 --- roles/minion/templates/proxy.j2 | 7 - roles/minion/templates/proxy.kubeconfig.j2 | 18 --- setup.yml | 9 +- 50 files changed, 597 insertions(+), 679 deletions(-) create mode 100644 roles/kubernetes-master/handlers/main.yml create mode 100644 roles/kubernetes-master/tasks/main.yml create mode 100644 roles/kubernetes-master/tasks/secrets.yml create mode 100644 roles/kubernetes-master/templates/kube-apiserver.yml create mode 100644 roles/kubernetes-master/templates/kube-controller-manager.yml create mode 100644 roles/kubernetes-master/templates/kube-podmaster.yml create mode 100644 
roles/kubernetes-master/templates/kube-proxy.yml create mode 100644 roles/kubernetes-master/templates/kube-scheduler.yml create mode 100644 roles/kubernetes-master/templates/kube-system.yml create mode 100644 roles/kubernetes-master/templates/kubelet.service.j2 rename roles/{minion => kubernetes-node}/files/fluentd-es.yaml (100%) create mode 100644 roles/kubernetes-node/handlers/main.yml create mode 100644 roles/kubernetes-node/tasks/main.yml create mode 100644 roles/kubernetes-node/tasks/secrets.yml create mode 100644 roles/kubernetes-node/templates/kube-proxy.yml create mode 100644 roles/kubernetes-node/templates/kubelet.service.j2 create mode 100644 roles/kubernetes-node/templates/node.kubeconfig.j2 delete mode 100644 roles/kubernetes/files/kube-gen-token.sh mode change 100755 => 100644 roles/kubernetes/files/make-ca-cert.sh create mode 100644 roles/kubernetes/tasks/secrets.yml delete mode 100644 roles/kubernetes/templates/config.j2 delete mode 100644 roles/master/handlers/main.yml delete mode 100644 roles/master/tasks/main.yml delete mode 100644 roles/master/tasks/secrets.yml delete mode 100644 roles/master/tasks/stable.yml delete mode 100644 roles/master/tasks/testing.yml delete mode 100644 roles/master/templates/apiserver.j2 delete mode 100644 roles/master/templates/controller-manager.j2 delete mode 100644 roles/master/templates/controller-manager.kubeconfig.j2 delete mode 100644 roles/master/templates/kubectl.kubeconfig.j2 delete mode 100644 roles/master/templates/proxy.j2 delete mode 100644 roles/master/templates/proxy.kubeconfig.j2 delete mode 100644 roles/master/templates/scheduler.j2 delete mode 100644 roles/master/templates/scheduler.kubeconfig.j2 delete mode 100644 roles/minion/handlers/main.yml delete mode 100644 roles/minion/tasks/main.yml delete mode 100644 roles/minion/tasks/secrets.yml delete mode 100644 roles/minion/tasks/stable.yml delete mode 100644 roles/minion/tasks/testing.yml delete mode 100644 roles/minion/templates/kubelet.j2 delete mode 100644 roles/minion/templates/kubelet.kubeconfig.j2 delete mode 100644 roles/minion/templates/proxy.j2 delete mode 100644 roles/minion/templates/proxy.kubeconfig.j2 diff --git a/group_vars/all.yml b/group_vars/all.yml index b6d8a78..b5dce3d 100644 --- a/group_vars/all.yml +++ b/group_vars/all.yml @@ -1,9 +1,11 @@ # ansible remote user account # ansible_ssh_user: centos -# Kubernetes build to use, stable is used by default. -# Place "testing" here to use latest build avialable. -kube_build: stable +# Which type of packages should be used for deployment: stable, testing +package_channel: stable + +# The version of software to install for Kubernetes. 
+kube_version: v1.1.2 # Users to create for basic auth in Kubernetes API via HTTP kube_users: @@ -57,3 +59,16 @@ enable_logging: true # Set to "false' to disable default Monitoring (cAdvisor + heapster + influxdb + grafana) enable_monitoring: true + +# etcd specific variables +# TCP port used for client communications +etcd_client_port: 2379 + +# TCP port used for intra-cluster communications +etcd_peer_port: 2380 + +# Client communication protocol (http/https) +etcd_url_scheme: http + +# Intra-cluster communication protocol (http/https) +etcd_peer_url_scheme: http diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml index 93c0a22..c6e44c0 100644 --- a/roles/common/tasks/main.yml +++ b/roles/common/tasks/main.yml @@ -24,6 +24,21 @@ tags: - common +- name: check if selinux enforcing + sudo: yes + command: getenforce + register: selinux + changed_when: false + tags: + - common + +- name: set selinux permissive + sudo: yes + selinux: state=permissive policy=targeted + when: "'Enforcing' in selinux.stdout" + tags: + - common + # add hosts to /etc/hosts - name: populate inventory into hosts file sudo: yes @@ -49,6 +64,12 @@ tags: - common +- name: upgrade all packages + sudo: yes + yum: name=* state=latest + tags: + - common + - name: enable EPEL repo sudo: yes yum: @@ -62,6 +83,14 @@ copy: src=virt7-docker-common-candidate.repo dest=/etc/yum.repos.d/virt7-docker-common-candidate.repo - when: kube_build == "testing" + when: package_channel == "testing" + tags: + - common + +- name: evaluate first_master + add_host: + name: "{{ groups['master'][0] }}" + groups: first_master + when: "master in groups and groups['master'] | length > 1" tags: - common diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index bbd6628..a1a0676 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -6,7 +6,6 @@ - stop docker - delete docker0 - start docker - when: inventory_hostname in groups['node'] - name: restart flannel sudo: yes diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index 478da68..e2bb066 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml @@ -9,10 +9,10 @@ - flannel - include: stable.yml - when: kube_build == "stable" + when: package_channel == "stable" - include: testing.yml - when: kube_build == "testing" + when: package_channel == "testing" - name: install flannel sysconfig file sudo: yes diff --git a/roles/kubernetes-master/handlers/main.yml b/roles/kubernetes-master/handlers/main.yml new file mode 100644 index 0000000..7472108 --- /dev/null +++ b/roles/kubernetes-master/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: reload systemd + sudo: yes + command: systemctl --system daemon-reload + +- name: restart kubelet + sudo: yes + service: + name: kubelet + state: restarted diff --git a/roles/kubernetes-master/tasks/main.yml b/roles/kubernetes-master/tasks/main.yml new file mode 100644 index 0000000..9938e5a --- /dev/null +++ b/roles/kubernetes-master/tasks/main.yml @@ -0,0 +1,90 @@ +--- +- include: secrets.yml + tags: + - secrets + +- name: download kubernetes binaries + sudo: yes + get_url: url={{ kube_url }}/{{ item }} dest=/usr/bin mode=0753 + with_items: + - kubelet + - kubectl + tags: + - master + +- name: generate kublet systemd unit + sudo: yes + template: + src: kubelet.service.j2 + dest: /usr/lib/systemd/system/kubelet.service + owner: root + group: root + mode: 0644 + notify: + - reload systemd + - restart kubelet + tags: + - master + +- name: enable and start 
kubelet service + sudo: yes + service: + name: kubelet + enabled: yes + state: started + tags: + - master + +- name: generate kubernetes manifests + sudo: yes + template: + src: "{{ item }}.yml" + dest: "{{ kube_manifest_dir }}/{{ item }}.yml" + owner: root + group: root + mode: 0644 + with_items: + - kube-apiserver + - kube-proxy + - kube-podmaster + - kube-system + tags: + - master + +- name: generate kubernetes podmaster manifests + sudo: yes + template: + src: "{{ item }}.yml" + dest: "{{ kube_script_dir }}/manifests/{{ item }}.yml" + owner: root + group: root + mode: 0644 + with_items: + - kube-scheduler + - kube-controller-manager + tags: + - master + +- name: wait for apiserver is up + wait_for: port=8080 + +- name: Create kube-system namespace + sudo: yes + kube: + resource: namespace + name: kube-system + filename: "{{ kube_manifest_dir }}/kube-system.yml" + state: present + run_once: true + tags: + - master + +- name: addons | logging | create fluentd pod + sudo: yes + copy: + src: fluentd-es.yaml + dest: "{{ kube_manifest_dir }}/fluentd-es.yaml" + when: enable_logging + tags: + - addons + - logging diff --git a/roles/kubernetes-master/tasks/secrets.yml b/roles/kubernetes-master/tasks/secrets.yml new file mode 100644 index 0000000..47f16ca --- /dev/null +++ b/roles/kubernetes-master/tasks/secrets.yml @@ -0,0 +1,26 @@ +--- +- name: certs | upload certificates to masters + sudo: yes + copy: + src="{{ cert_syncdir.path }}/certs/{{ item }}" + dest="{{ kube_cert_dir }}/{{ item }}" + group="{{ kube_cert_group }}" + mode=0440 + with_items: + - "ca.crt" + - "server.crt" + - "server.key" + - "kubecfg.crt" + - "kubecfg.key" + notify: + - restart kubelet + +- name: populate users for basic auth in API + sudo: yes + lineinfile: + dest: "{{ kube_users_dir }}/known_users.csv" + create: yes + line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}' + with_dict: "{{ kube_users }}" + notify: + - restart kubelet diff --git a/roles/kubernetes-master/templates/kube-apiserver.yml b/roles/kubernetes-master/templates/kube-apiserver.yml new file mode 100644 index 0000000..1f69248 --- /dev/null +++ b/roles/kubernetes-master/templates/kube-apiserver.yml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-apiserver + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-apiserver + image: gcr.io/google_containers/hyperkube:{{ kube_version }} + command: + - /hyperkube + - apiserver + - --bind-address=0.0.0.0 + - --etcd-servers={% for node in groups['master'] %}{{ etcd_url_scheme }}://{{ node }}:{{ etcd_client_port }}{% if not loop.last %},{% endif %}{% endfor %} + + - --allow-privileged=true + - --service-cluster-ip-range={{ kube_service_addresses }} + - --secure_port={{ kube_master_port }} + - --advertise-address={{ ansible_default_ipv4.address }} + - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota + - --tls-cert-file={{ kube_cert_dir }}/server.crt + - --tls-private-key-file={{ kube_cert_dir }}/server.key + - --client-ca-file={{ kube_cert_dir }}/ca.crt + - --service-account-key-file={{ kube_cert_dir }}/server.key + - --basic-auth-file={{ kube_users_dir }}/known_users.csv + - --v=3 + ports: + - containerPort: 443 + hostPort: 443 + protocol: TCP + name: https + - containerPort: 8080 + hostPort: 8080 + protocol: TCP + name: local + volumeMounts: + - mountPath: {{ kube_config_dir }} + name: etc-kubernetes + readOnly: true + volumes: + - hostPath: + path: {{ kube_config_dir }} + name: 
etc-kubernetes diff --git a/roles/kubernetes-master/templates/kube-controller-manager.yml b/roles/kubernetes-master/templates/kube-controller-manager.yml new file mode 100644 index 0000000..fa30b17 --- /dev/null +++ b/roles/kubernetes-master/templates/kube-controller-manager.yml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-controller-manager + image: gcr.io/google_containers/hyperkube:{{ kube_version }} + command: + - /hyperkube + - controller-manager + - --master=http://127.0.0.1:8080 + - --service-account-private-key-file={{ kube_cert_dir }}/server.key + - --root-ca-file={{ kube_cert_dir }}/ca.crt + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10252 + initialDelaySeconds: 15 + timeoutSeconds: 1 + volumeMounts: + - mountPath: {{ kube_config_dir }} + name: etc-kubernetes + readOnly: true + volumes: + - hostPath: + path: {{ kube_config_dir }} + name: etc-kubernetes diff --git a/roles/kubernetes-master/templates/kube-podmaster.yml b/roles/kubernetes-master/templates/kube-podmaster.yml new file mode 100644 index 0000000..f134461 --- /dev/null +++ b/roles/kubernetes-master/templates/kube-podmaster.yml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-podmaster + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: scheduler-elector + image: gcr.io/google_containers/podmaster:1.1 + command: + - /podmaster + - --etcd-servers={% for node in groups['master'] %}{{ etcd_url_scheme }}://{{ node }}:{{ etcd_client_port }}{% if not loop.last %},{% endif %}{% endfor %} + + - --key=scheduler + - --whoami={{ ansible_default_ipv4.address }} + - --source-file=/src/manifests/kube-scheduler.yml + - --dest-file=/dst/manifests/kube-scheduler.yml + volumeMounts: + - mountPath: /src/manifests + name: manifest-src + readOnly: true + - mountPath: /dst/manifests + name: manifest-dst + - name: controller-manager-elector + image: gcr.io/google_containers/podmaster:1.1 + command: + - /podmaster + - --etcd-servers={% for node in groups['master'] %}{{ etcd_url_scheme }}://{{ node }}:{{ etcd_client_port }}{% if not loop.last %},{% endif %}{% endfor %} + + - --key=controller + - --whoami={{ ansible_default_ipv4.address }} + - --source-file=/src/manifests/kube-controller-manager.yml + - --dest-file=/dst/manifests/kube-controller-manager.yml + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /src/manifests + name: manifest-src + readOnly: true + - mountPath: /dst/manifests + name: manifest-dst + volumes: + - hostPath: + path: /srv/kubernetes/manifests + name: manifest-src + - hostPath: + path: /etc/kubernetes/manifests + name: manifest-dst diff --git a/roles/kubernetes-master/templates/kube-proxy.yml b/roles/kubernetes-master/templates/kube-proxy.yml new file mode 100644 index 0000000..7794b0d --- /dev/null +++ b/roles/kubernetes-master/templates/kube-proxy.yml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-proxy + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-proxy + image: gcr.io/google_containers/hyperkube:{{ kube_version }} + command: + - /hyperkube + - proxy + - --master=http://127.0.0.1:8080 + securityContext: + privileged: true diff --git a/roles/kubernetes-master/templates/kube-scheduler.yml b/roles/kubernetes-master/templates/kube-scheduler.yml new file mode 100644 index 0000000..6887ea2 --- /dev/null +++ 
b/roles/kubernetes-master/templates/kube-scheduler.yml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-scheduler + image: gcr.io/google_containers/hyperkube:{{ kube_version }} + command: + - /hyperkube + - scheduler + - --master=http://127.0.0.1:8080 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10251 + initialDelaySeconds: 15 + timeoutSeconds: 1 diff --git a/roles/kubernetes-master/templates/kube-system.yml b/roles/kubernetes-master/templates/kube-system.yml new file mode 100644 index 0000000..986f4b4 --- /dev/null +++ b/roles/kubernetes-master/templates/kube-system.yml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system diff --git a/roles/kubernetes-master/templates/kubelet.service.j2 b/roles/kubernetes-master/templates/kubelet.service.j2 new file mode 100644 index 0000000..4257c6a --- /dev/null +++ b/roles/kubernetes-master/templates/kubelet.service.j2 @@ -0,0 +1,19 @@ +[Unit] +Description=Kubernetes Kubelet +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=docker.service +Requires=docker.service + +[Service] +ExecStart=/usr/bin/kubelet \ + --api-servers=http://localhost:8080 \ + --register-node=false \ + --allow-privileged=true \ + --cluster-dns={{ dns_server }} \ + --cluster-domain={{ dns_domain }} \ + --config={{ kube_manifest_dir }} +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/roles/minion/files/fluentd-es.yaml b/roles/kubernetes-node/files/fluentd-es.yaml similarity index 100% rename from roles/minion/files/fluentd-es.yaml rename to roles/kubernetes-node/files/fluentd-es.yaml diff --git a/roles/kubernetes-node/handlers/main.yml b/roles/kubernetes-node/handlers/main.yml new file mode 100644 index 0000000..7472108 --- /dev/null +++ b/roles/kubernetes-node/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: reload systemd + sudo: yes + command: systemctl --system daemon-reload + +- name: restart kubelet + sudo: yes + service: + name: kubelet + state: restarted diff --git a/roles/kubernetes-node/tasks/main.yml b/roles/kubernetes-node/tasks/main.yml new file mode 100644 index 0000000..5178fd6 --- /dev/null +++ b/roles/kubernetes-node/tasks/main.yml @@ -0,0 +1,73 @@ +--- +- include: secrets.yml + tags: + - secrets + +- name: download kubernetes binaries + sudo: yes + get_url: url={{ kube_url }}/{{ item }} dest=/usr/bin mode=0753 + with_items: + - kubelet + - kubectl + tags: + - node + +- name: generate kublet systemd unit + sudo: yes + template: + src: kubelet.service.j2 + dest: /usr/lib/systemd/system/kubelet.service + owner: root + group: root + mode: 0644 + notify: + - reload systemd + - restart kubelet + tags: + - node + +- name: generate kublet kubeconfig + sudo: yes + template: + src: node.kubeconfig.j2 + dest: "{{ kube_config_dir }}/node.kubeconfig" + owner: root + group: root + mode: 0644 + notify: + - reload systemd + - restart kubelet + tags: + - node + +- name: enable and start kubelet service + sudo: yes + service: + name: kubelet + enabled: yes + state: started + tags: + - node + +- name: generate kubernetes manifests + sudo: yes + template: + src: "{{ item }}.yml" + dest: "{{ kube_manifest_dir }}/{{ item }}.yml" + owner: root + group: root + mode: 0644 + with_items: + - kube-proxy + tags: + - node + +- name: addons | logging | create fluentd pod + sudo: yes + copy: + src: fluentd-es.yaml + dest: "{{ kube_manifest_dir }}/fluentd-es.yaml" + when: 
enable_logging + tags: + - addons + - logging diff --git a/roles/kubernetes-node/tasks/secrets.yml b/roles/kubernetes-node/tasks/secrets.yml new file mode 100644 index 0000000..a22287a --- /dev/null +++ b/roles/kubernetes-node/tasks/secrets.yml @@ -0,0 +1,14 @@ +--- +- name: certs | upload certificates to nodes + sudo: yes + copy: + src="{{ cert_syncdir.path }}/certs/{{ item }}" + dest="{{ kube_cert_dir }}/{{ item }}" + group="{{ kube_cert_group }}" + mode=0440 + with_items: + - "ca.crt" + - "kubelet.crt" + - "kubelet.key" + # notify: + # - restart daemons diff --git a/roles/kubernetes-node/templates/kube-proxy.yml b/roles/kubernetes-node/templates/kube-proxy.yml new file mode 100644 index 0000000..f877075 --- /dev/null +++ b/roles/kubernetes-node/templates/kube-proxy.yml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-proxy + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-proxy + image: gcr.io/google_containers/hyperkube:{{ kube_version }} + command: + - /hyperkube + - proxy + #- --master={% for node in groups['master'] %}https://{{ node }}{% if not loop.last %},{% endif %}{% endfor %} + + - --master=https://{{ groups['master'][0] }}:{{ kube_master_port }} + - --kubeconfig={{ kube_config_dir }}/node.kubeconfig + securityContext: + privileged: true + volumeMounts: + - mountPath: {{ kube_config_dir }} + name: etc-kubernetes + readOnly: true + volumes: + - hostPath: + path: {{ kube_config_dir }} + name: etc-kubernetes diff --git a/roles/kubernetes-node/templates/kubelet.service.j2 b/roles/kubernetes-node/templates/kubelet.service.j2 new file mode 100644 index 0000000..83212f1 --- /dev/null +++ b/roles/kubernetes-node/templates/kubelet.service.j2 @@ -0,0 +1,23 @@ +[Unit] +Description=Kubernetes Kubelet +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=docker.service +Requires=docker.service + +[Service] +ExecStart=/usr/bin/kubelet \ + --api_servers={% for node in groups['master'] %}https://{{ node }}{% if not loop.last %},{% endif %}{% endfor %} \ + --allow-privileged=true \ + --config={{ kube_manifest_dir }} \ + --hostname-override={{ ansible_hostname }} \ + --cluster-dns={{ dns_server }} \ + --cluster-domain={{ dns_domain }} \ + --kubeconfig={{ kube_config_dir }}/node.kubeconfig \ + --tls-cert-file={{ kube_cert_dir }}/kubelet.crt \ + --tls-private-key-file={{ kube_cert_dir }}/kubelet.key \ + --v=2 +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/roles/kubernetes-node/templates/node.kubeconfig.j2 b/roles/kubernetes-node/templates/node.kubeconfig.j2 new file mode 100644 index 0000000..a3f0cd3 --- /dev/null +++ b/roles/kubernetes-node/templates/node.kubeconfig.j2 @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.crt +users: +- name: kubelet + user: + client-certificate: {{ kube_cert_dir }}/kubelet.crt + client-key: {{ kube_cert_dir }}/kubelet.key +contexts: +- context: + cluster: local + user: kubelet + name: kubelet-context +current-context: kubelet-context diff --git a/roles/kubernetes/defaults/main.yml b/roles/kubernetes/defaults/main.yml index abe2b09..1fed06f 100644 --- a/roles/kubernetes/defaults/main.yml +++ b/roles/kubernetes/defaults/main.yml @@ -1,6 +1,4 @@ -# The version of software to install for Kubernetes. -# When testing repo is used, state is "latest", so this has no effect. 
-kube_version: 1.0.0 +kube_url: https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64 # Kubernetes services logging level, integer from 0 to 4, where 4 is highest # Applys to all services globally. Be carefull with highest log level, @@ -10,7 +8,7 @@ kube_log_level: 0 # This directory is where all the additional scripts go # that Kubernetes normally puts in /srv/kubernetes. # This puts them in a sane location -kube_script_dir: /usr/libexec/kubernetes +kube_script_dir: /srv/kubernetes # This directory is where all the additional config stuff goes # the kubernetes normally puts in /srv/kubernets. @@ -24,7 +22,7 @@ kube_config_dir: /etc/kubernetes kube_master_port: 443 # This is where all the cert scripts and certs will be located -kube_cert_dir: "{{ kube_config_dir }}/certs" +kube_cert_dir: "{{ kube_config_dir }}/ssl" # This is where all of the bearer tokens will be stored kube_token_dir: "{{ kube_config_dir }}/tokens" @@ -36,9 +34,12 @@ kube_users_dir: "{{ kube_config_dir }}/users" # pods on startup kube_manifest_dir: "{{ kube_config_dir }}/manifests" +# This is where manifests for podmaster will be stored +kube_podmaster_dir: "{{ kube_script_dir }}/manifests" + # This is the group that the cert creation scripts chgrp the # cert files to. Not really changable... -kube_cert_group: kube-cert +kube_cert_group: kube dns_domain: "{{ cluster_name }}" diff --git a/roles/kubernetes/files/kube-gen-token.sh b/roles/kubernetes/files/kube-gen-token.sh deleted file mode 100644 index fa6a5dd..0000000 --- a/roles/kubernetes/files/kube-gen-token.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -token_dir=${TOKEN_DIR:-/var/srv/kubernetes} -token_file="${token_dir}/known_tokens.csv" - -create_accounts=($@) - -touch "${token_file}" -for account in "${create_accounts[@]}"; do - if grep ",${account}," "${token_file}" ; then - continue - fi - token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - echo "${token},${account},${account}" >> "${token_file}" - echo "${token}" > "${token_dir}/${account}.token" - echo "Added ${account}" -done diff --git a/roles/kubernetes/files/make-ca-cert.sh b/roles/kubernetes/files/make-ca-cert.sh old mode 100755 new mode 100644 index d133497..abdb9c3 --- a/roles/kubernetes/files/make-ca-cert.sh +++ b/roles/kubernetes/files/make-ca-cert.sh @@ -31,7 +31,7 @@ set -o pipefail service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}" dns_domain="${DNS_DOMAIN:="cluster.local"}" cert_dir="${CERT_DIR:-"/tmp/certs"}" -cert_group="${CERT_GROUP:="kube-cert"}" +cert_group="${CERT_GROUP:="kube"}" masters="${MASTERS}" # The following certificate pairs are created: diff --git a/roles/kubernetes/tasks/main.yml b/roles/kubernetes/tasks/main.yml index f7dbaad..07b4be4 100644 --- a/roles/kubernetes/tasks/main.yml +++ b/roles/kubernetes/tasks/main.yml @@ -1,20 +1,25 @@ --- -- name: create kubernetes config directory +- name: create kubernetes config directories sudo: yes - file: path={{ kube_config_dir }} state=directory - -- name: create kubernetes script directory - sudo: yes - file: path={{ kube_script_dir }} state=directory - -- name: Make sure manifest directory exists - sudo: yes - file: path={{ kube_manifest_dir }} state=directory + file: path={{ item }} state=directory + with_items: + - "{{ kube_config_dir }}" + - "{{ kube_script_dir }}" + - "{{ kube_manifest_dir }}" + - "{{ kube_podmaster_dir }}" + - "{{ kube_cert_dir }}" + - "{{ kube_users_dir }}" + tags: + - kubernetes -- name: write the global config file +- name: pull hyperkube docker image sudo: yes - template: - src: config.j2 - dest: "{{ kube_config_dir }}/config" + command: "docker pull gcr.io/google_containers/hyperkube:{{ kube_version }}" + register: docker_pull_result + changed_when: docker_pull_result.stdout.find('newer') > -1 tags: - kubernetes + +- include: secrets.yml + tags: + - secrets diff --git a/roles/kubernetes/tasks/secrets.yml b/roles/kubernetes/tasks/secrets.yml new file mode 100644 index 0000000..696baaf --- /dev/null +++ b/roles/kubernetes/tasks/secrets.yml @@ -0,0 +1,43 @@ +--- +- name: certs | create temp directory for certs syncing + local_action: file path=".syncdir" state=directory recurse=yes + run_once: true + register: cert_syncdir + tags: + - secrets + +- name: certs | install cert generation script + local_action: copy src=make-ca-cert.sh dest="{{ cert_syncdir.path }}/make-ca-cert.sh" mode=u+x + run_once: true + changed_when: false + tags: + - secrets + +- name: certs | run cert generation script + local_action: command + "{{ playbook_dir }}/{{ cert_syncdir.path }}/make-ca-cert.sh" + args: + creates: "{{ cert_syncdir.path }}/certs/server.crt" + environment: + MASTERS: "{{ groups['master'] | join(' ') }}" + DNS_DOMAIN: "{{ dns_domain }}" + SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}" + CERT_DIR: "{{ playbook_dir }}/{{ cert_syncdir.path }}/certs" + run_once: true + tags: + - secrets + +- name: certs | create system kubernetes groups + sudo: yes + group: name={{ kube_cert_group }} state=present system=yes + tags: + - secrets + +- name: add default user to kubernetes group + sudo: yes + user: + name={{ 
ansible_ssh_user }} + system=yes + groups={{ kube_cert_group }} + tags: + - secrets diff --git a/roles/kubernetes/templates/config.j2 b/roles/kubernetes/templates/config.j2 deleted file mode 100644 index 7ed96e7..0000000 --- a/roles/kubernetes/templates/config.j2 +++ /dev/null @@ -1,26 +0,0 @@ -### -# kubernetes system config -# -# The following values are used to configure various aspects of all -# kubernetes services, including -# -# kube-apiserver.service -# kube-controller-manager.service -# kube-scheduler.service -# kubelet.service -# kube-proxy.service - -# Comma separated list of nodes in the etcd cluster -KUBE_ETCD_SERVERS="--etcd_servers={{ etcd_url_scheme }}://{{ groups['master'][0] }}:{{ etcd_client_port }}" - -# logging to stderr means we get it in the systemd journal -KUBE_LOGTOSTDERR="--logtostderr=true" - -# journal message level, 0 is debug -KUBE_LOG_LEVEL="--v={{ kube_log_level }}" - -# Should this cluster be allowed to run privileged docker containers -KUBE_ALLOW_PRIV="--allow_privileged=true" - -# How the replication controller, scheduler, and proxy -KUBE_MASTER="--master=https://{{ groups['master'][0] }}:{{ kube_master_port }}" diff --git a/roles/master/handlers/main.yml b/roles/master/handlers/main.yml deleted file mode 100644 index f929369..0000000 --- a/roles/master/handlers/main.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: restart daemons - sudo: yes - command: /bin/true - notify: - - restart apiserver - - restart controller-manager - - restart scheduler - - restart proxy - -- name: restart apiserver - sudo: yes - service: - name: kube-apiserver - state: restarted - failed_when: false - -- name: restart controller-manager - sudo: yes - service: - name: kube-controller-manager - state: restarted - -- name: restart scheduler - sudo: yes - service: - name: kube-scheduler - state: restarted - -- name: restart proxy - sudo: yes - service: - name: kube-proxy - state: restarted diff --git a/roles/master/tasks/main.yml b/roles/master/tasks/main.yml deleted file mode 100644 index 1ddda2e..0000000 --- a/roles/master/tasks/main.yml +++ /dev/null @@ -1,137 +0,0 @@ ---- -- include: stable.yml - when: kube_build == "stable" - -- include: testing.yml - when: kube_build == "testing" - -- include: secrets.yml - -- name: evaluate first_master - add_host: - name: "{{ groups['master'][0] }}" - groups: first_master - when: "master in groups and groups['master'] | length > 1" - -- name: enable capability for kube-apiserver to bind to port below 1024 - sudo: yes - capabilities: - capability: cap_net_bind_service+ep - path: /usr/bin/kube-apiserver - state: present - when: "{{ kube_master_port | int }} < 1024" - notify: - - restart daemons - tags: - - master - -- name: write the config files for api server - sudo: yes - template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver - notify: - - restart daemons - tags: - - master - -- name: write config file for controller-manager - sudo: yes - template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager - notify: - - restart controller-manager - tags: - - master - -- name: write the kubecfg (auth) file for controller-manager - sudo: yes - template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig - notify: - - restart controller-manager - tags: - - master - -- name: write the config file for scheduler - sudo: yes - template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler - notify: - - restart scheduler - tags: - - master - -- name: write the kubecfg (auth) file 
for scheduler
-  sudo: yes
-  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
-  notify:
-    - restart scheduler
-  tags:
-    - master
-
-- name: write the kubecfg (auth) file for kubectl
-  sudo: yes
-  template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig
-  tags:
-    - master
-
-- name: write the config files for proxy
-  sudo: yes
-  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
-  notify:
-    - restart daemons
-  tags:
-    - master
-
-- name: write the kubecfg (auth) file for proxy
-  sudo: yes
-  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
-  tags:
-    - master
-
-- name: populate users for basic auth in API
-  sudo: yes
-  lineinfile:
-    dest: "{{ kube_users_dir }}/known_users.csv"
-    create: yes
-    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
-  with_dict: "{{ kube_users }}"
-  notify:
-    - restart apiserver
-  tags:
-    - master
-
-- name: Enable apiserver
-  sudo: yes
-  service:
-    name: kube-apiserver
-    enabled: yes
-    state: started
-  failed_when: false
-  tags:
-    - master
-
-- name: Enable controller-manager
-  sudo: yes
-  service:
-    name: kube-controller-manager
-    enabled: yes
-    state: started
-  when: "'first_master' in group_names"
-  tags:
-    - master
-
-- name: Enable scheduler
-  sudo: yes
-  service:
-    name: kube-scheduler
-    enabled: yes
-    state: started
-  when: "'first_master' in group_names"
-  tags:
-    - master
-
-- name: Enable kube-proxy
-  sudo: yes
-  service:
-    name: kube-proxy
-    enabled: yes
-    state: started
-  tags:
-    - master
diff --git a/roles/master/tasks/secrets.yml b/roles/master/tasks/secrets.yml
deleted file mode 100644
index a826e10..0000000
--- a/roles/master/tasks/secrets.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- name: certs | create system kube-cert groups
-  sudo: yes
-  group: name={{ kube_cert_group }} state=present system=yes
-
-- name: create system kube user
-  sudo: yes
-  user:
-    name=kube
-    comment="Kubernetes user"
-    shell=/sbin/nologin
-    state=present
-    system=yes
-    groups={{ kube_cert_group }}
-
-- name: certs | make sure the certificate directory exits
-  sudo: yes
-  file:
-    path={{ kube_cert_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
-
-- name: certs | upload certificates to masters
-  sudo: yes
-  copy:
-    src="{{ cert_syncdir.path }}/certs/{{ item }}"
-    dest="{{ kube_cert_dir }}/{{ item }}"
-    group="{{ kube_cert_group }}"
-    owner=kube
-    mode=0440
-  with_items:
-    - "ca.crt"
-    - "server.crt"
-    - "server.key"
-    - "kubecfg.crt"
-    - "kubecfg.key"
-  notify:
-    - restart daemons
-
-- name: tokens | make sure the tokens directory exits
-  sudo: yes
-  file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
-
-- name: tokens | upload known_tokens to master
-  sudo: yes
-  copy:
-    src: "{{ tokens_syncdir.path }}/known_tokens.csv"
-    dest: "{{ kube_token_dir }}"
-    group: "{{ kube_cert_group }}"
-    owner: kube
-    mode: 0440
-  notify:
-    - restart daemons
diff --git a/roles/master/tasks/stable.yml b/roles/master/tasks/stable.yml
deleted file mode 100644
index aa16ff3..0000000
--- a/roles/master/tasks/stable.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-- name: install kubernetes master
-  sudo: yes
-  yum:
-    pkg="kubernetes-{{ kube_version }}"
-    state=present
-  notify:
-    - restart daemons
-  tags:
-    - master
\ No newline at end of file
diff --git a/roles/master/tasks/testing.yml b/roles/master/tasks/testing.yml
deleted file mode 100644
index 7df1906..0000000
--- a/roles/master/tasks/testing.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-- name: install latest kubernetes master
-  sudo: yes
-  yum:
-    pkg="kubernetes"
-    state=latest
-    enablerepo="virt7-docker-common-candidate"
-  notify:
-    - restart daemons
-  tags:
-    - master
\ No newline at end of file
diff --git a/roles/master/templates/apiserver.j2 b/roles/master/templates/apiserver.j2
deleted file mode 100644
index cdf5fdc..0000000
--- a/roles/master/templates/apiserver.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-###
-# kubernetes system config
-#
-# The following values are used to configure the kube-apiserver
-#
-
-# The address on the local server to listen to.
-KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
-
-# The port on the local server to listen on.
-KUBE_API_PORT="--secure-port={{ kube_master_port }}"
-
-# Port minions listen on
-# KUBELET_PORT="--kubelet_port=10250"
-
-# Address range to use for services
-KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
-
-# Location of the etcd cluster
-KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['master'] %}{{ etcd_url_scheme }}://{{ node }}:{{ etcd_client_port }}{% if not loop.last %},{% endif %}{% endfor %}"
-
-# default admission control policies
-KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
-
-# Add you own!
-KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"
diff --git a/roles/master/templates/controller-manager.j2 b/roles/master/templates/controller-manager.j2
deleted file mode 100644
index 5eddbfe..0000000
--- a/roles/master/templates/controller-manager.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-###
-# The following values are used to configure the kubernetes controller-manager
-
-# defaults from config and apiserver should be adequate
-
-# Add you own!
-KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"
diff --git a/roles/master/templates/controller-manager.kubeconfig.j2 b/roles/master/templates/controller-manager.kubeconfig.j2
deleted file mode 100644
index efe8399..0000000
--- a/roles/master/templates/controller-manager.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: controller-manager-to-{{ cluster_name }}
-preferences: {}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['master'][0] }}:{{ kube_master_port }}
-  name: {{ cluster_name }}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: controller-manager
-  name: controller-manager-to-{{ cluster_name }}
-users:
-- name: controller-manager
-  user:
-    token: "{{ lookup('file', tokens_syncdir.path+'/'+'system:controller_manager-'+inventory_hostname+'.token') }}"
diff --git a/roles/master/templates/kubectl.kubeconfig.j2 b/roles/master/templates/kubectl.kubeconfig.j2
deleted file mode 100644
index 2072344..0000000
--- a/roles/master/templates/kubectl.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: kubectl-to-{{ cluster_name }}
-preferences: {}
-clusters:
-- cluster:
-    certificate-authority-data: "{{ lookup('file', cert_syncdir.path+'/certs/'+'ca.crt') | b64encode }}"
-    server: https://{{ groups['master'][0] }}:{{ kube_master_port }}
-  name: {{ cluster_name }}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: kubectl
-  name: kubectl-to-{{ cluster_name }}
-users:
-- name: kubectl
-  user:
-    token: "{{ lookup('file', tokens_syncdir.path+'/'+'system:kubectl-kubecfg.token') }}"
diff --git a/roles/master/templates/proxy.j2 b/roles/master/templates/proxy.j2
deleted file mode 100644
index 1a1f7b1..0000000
--- a/roles/master/templates/proxy.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-###
-# kubernetes proxy config
-
-# default config should be adequate
-
-# Add your own!
-KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
diff --git a/roles/master/templates/proxy.kubeconfig.j2 b/roles/master/templates/proxy.kubeconfig.j2
deleted file mode 100644
index ef6e45c..0000000
--- a/roles/master/templates/proxy.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: proxy-to-{{ cluster_name }}
-preferences: {}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: proxy
-  name: proxy-to-{{ cluster_name }}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['master'][0] }}:{{ kube_master_port }}
-  name: {{ cluster_name }}
-users:
-- name: proxy
-  user:
-    token: "{{ lookup('file', tokens_syncdir.path+'/'+'system:proxy-'+inventory_hostname+'.token') }}"
diff --git a/roles/master/templates/scheduler.j2 b/roles/master/templates/scheduler.j2
deleted file mode 100644
index 8af898d..0000000
--- a/roles/master/templates/scheduler.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-###
-# kubernetes scheduler config
-
-# default config should be adequate
-
-# Add your own!
-KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"
diff --git a/roles/master/templates/scheduler.kubeconfig.j2 b/roles/master/templates/scheduler.kubeconfig.j2
deleted file mode 100644
index ebdadad..0000000
--- a/roles/master/templates/scheduler.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: scheduler-to-{{ cluster_name }}
-preferences: {}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['master'][0] }}:{{ kube_master_port }}
-  name: {{ cluster_name }}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: scheduler
-  name: scheduler-to-{{ cluster_name }}
-users:
-- name: scheduler
-  user:
-    token: "{{ lookup('file', tokens_syncdir.path+'/'+'system:scheduler-'+inventory_hostname+'.token') }}"
diff --git a/roles/minion/handlers/main.yml b/roles/minion/handlers/main.yml
deleted file mode 100644
index e8237ce..0000000
--- a/roles/minion/handlers/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: restart daemons
-  sudo: yes
-  command: /bin/true
-  notify:
-    - restart kubelet
-    - restart proxy
-
-- name: restart kubelet
-  sudo: yes
-  service:
-    name: kubelet
-    state: restarted
-
-- name: restart proxy
-  sudo: yes
-  service:
-    name: kube-proxy
-    state: restarted
diff --git a/roles/minion/tasks/main.yml b/roles/minion/tasks/main.yml
deleted file mode 100644
index 79e8a4f..0000000
--- a/roles/minion/tasks/main.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-- name: Check if selinux enforcing
-  sudo: yes
-  command: getenforce
-  register: selinux
-  changed_when: false
-  tags:
-    - minion
-
-- name: Set selinux permissive because tokens and selinux don't work together
-  sudo: yes
-  selinux: state=permissive policy=targeted
-  when: "'Enforcing' in selinux.stdout"
-  tags:
-    - minion
-
-- include: stable.yml
-  when: kube_build == "stable"
-
-- include: testing.yml
-  when: kube_build == "testing"
-
-- include: secrets.yml
-
-- name: write the config files for kubelet
-  sudo: yes
-  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet
-  notify:
-    - restart kubelet
-  tags:
-    - minion
-
-- name: write the kubecfg (auth) file for kubelet
-  sudo: yes
-  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
-  notify:
-    - restart kubelet
-  tags:
-    - minion
-
-- name: write the config files for proxy
-  sudo: yes
-  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
-  notify:
-    - restart proxy
-  tags:
-    - minion
-
-- name: write the kubecfg (auth) file for kube-proxy
-  sudo: yes
-  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
-  notify:
-    - restart proxy
-  tags:
-    - minion
-
-- name: Enable kubelet
-  sudo: yes
-  service:
-    name: kubelet
-    enabled: yes
-    state: started
-  tags:
-    - minion
-
-- name: Enable proxy
-  sudo: yes
-  service:
-    name: kube-proxy
-    enabled: yes
-    state: started
-  tags:
-    - minion
-
-- name: addons | Logging | Create Fluentd pod
-  sudo: yes
-  copy:
-    src: fluentd-es.yaml
-    dest: "{{ kube_manifest_dir }}/fluentd-es.yaml"
-  when: enable_logging
-  tags:
-    - addons
-    - logging
diff --git a/roles/minion/tasks/secrets.yml b/roles/minion/tasks/secrets.yml
deleted file mode 100644
index 16b07ca..0000000
--- a/roles/minion/tasks/secrets.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: certs | create system kube-cert groups
-  sudo: yes
-  group: name={{ kube_cert_group }} state=present system=yes
-
-- name: create system kube user
-  sudo: yes
-  user:
-    name=kube
-    comment="Kubernetes user"
-    shell=/sbin/nologin
-    state=present
-    system=yes
-    groups={{ kube_cert_group }}
-
-- name: certs | make sure the certificate directory exits
-  sudo: yes
-  file:
-    path={{ kube_cert_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
-
-- name: certs | upload certificates to nodes
-  sudo: yes
-  copy:
-    src="{{ cert_syncdir.path }}/certs/{{ item }}"
-    dest="{{ kube_cert_dir }}/{{ item }}"
-    group="{{ kube_cert_group }}"
-    owner=kube
-    mode=0440
-  with_items:
-    - "ca.crt"
-    - "kubelet.crt"
-    - "kubelet.key"
-  notify:
-    - restart daemons
diff --git a/roles/minion/tasks/stable.yml b/roles/minion/tasks/stable.yml
deleted file mode 100644
index cf33281..0000000
--- a/roles/minion/tasks/stable.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-- name: install kubernetes node
-  sudo: yes
-  yum:
-    pkg="kubernetes-node-{{ kube_version }}"
-    state=present
-  notify:
-    - restart daemons
-  tags:
-    - minion
\ No newline at end of file
diff --git a/roles/minion/tasks/testing.yml b/roles/minion/tasks/testing.yml
deleted file mode 100644
index 960b522..0000000
--- a/roles/minion/tasks/testing.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-- name: install latest kubernetes node
-  sudo: yes
-  yum:
-    pkg="kubernetes-node"
-    state=latest
-    enablerepo="virt7-docker-common-candidate"
-  notify:
-    - restart daemons
-  tags:
-    - minion
\ No newline at end of file
diff --git a/roles/minion/templates/kubelet.j2 b/roles/minion/templates/kubelet.j2
deleted file mode 100644
index c341968..0000000
--- a/roles/minion/templates/kubelet.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-###
-# kubernetes kubelet (node) config
-
-# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
-KUBELET_ADDRESS="--address=0.0.0.0"
-
-# The port for the info server to serve on
-# KUBELET_PORT="--port=10250"
-
-# You may leave this blank to use the actual hostname
-KUBELET_HOSTNAME="--hostname_override={{ inventory_hostname }}"
-
-# location of the api-server
-KUBELET_API_SERVER="--api_servers=https://{{ groups['master'][0] }}:{{ kube_master_port }}"
-
-# Add your own!
-{% if dns_setup %}
-KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
-{% else %}
-KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
-{% endif %}
diff --git a/roles/minion/templates/kubelet.kubeconfig.j2 b/roles/minion/templates/kubelet.kubeconfig.j2
deleted file mode 100644
index b047fee..0000000
--- a/roles/minion/templates/kubelet.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: kubelet-to-{{ cluster_name }}
-preferences: {}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['master'][0] }}:{{ kube_master_port }}
-  name: {{ cluster_name }}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: kubelet
-  name: kubelet-to-{{ cluster_name }}
-users:
-- name: kubelet
-  user:
-    token: "{{ lookup('file', tokens_syncdir.path+'/'+'system:kubelet-'+inventory_hostname+'.token') }}"
diff --git a/roles/minion/templates/proxy.j2 b/roles/minion/templates/proxy.j2
deleted file mode 100644
index 1a1f7b1..0000000
--- a/roles/minion/templates/proxy.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-###
-# kubernetes proxy config
-
-# default config should be adequate
-
-# Add your own!
-KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
diff --git a/roles/minion/templates/proxy.kubeconfig.j2 b/roles/minion/templates/proxy.kubeconfig.j2
deleted file mode 100644
index ef6e45c..0000000
--- a/roles/minion/templates/proxy.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: proxy-to-{{ cluster_name }}
-preferences: {}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: proxy
-  name: proxy-to-{{ cluster_name }}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['master'][0] }}:{{ kube_master_port }}
-  name: {{ cluster_name }}
-users:
-- name: proxy
-  user:
-    token: "{{ lookup('file', tokens_syncdir.path+'/'+'system:proxy-'+inventory_hostname+'.token') }}"
diff --git a/setup.yml b/setup.yml
index 1e4a33b..484ec11 100644
--- a/setup.yml
+++ b/setup.yml
@@ -5,7 +5,6 @@
   hosts: all
   roles:
     - common
-    - secrets
 
 # provide the control plane
 - name: Master
@@ -14,8 +13,9 @@
   roles:
     - etcd
     - flannel
+    - docker
     - kubernetes
-    - master
+    - kubernetes-master
     - addons
     - dnsmasq
 
@@ -24,9 +24,8 @@
   hosts: node
   gather_facts: no
   roles:
-    - etcd
-    - docker
     - flannel
+    - docker
     - kubernetes
-    - minion
+    - kubernetes-node
     - dnsmasq