# main.yml -- kube_master role tasks (forked from easzlab/kubeasz)
- name: Download kube_master binaries
  copy: src={{ base_dir }}/bin/{{ item }} dest={{ bin_dir }}/{{ item }} mode=0755
  with_items:
  - kube-apiserver
  - kube-controller-manager
  - kube-scheduler
  - kubectl
  tags: upgrade_k8s
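# The copy task above pushes the binaries from {{ base_dir }}/bin on the deploy host
# to {{ bin_dir }} on each master node; the upgrade_k8s tag lets this step be re-run
# on its own during a version upgrade.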
- name: Register variable KUBERNETES_SVC_IP
  shell: echo {{ SERVICE_CIDR }}|cut -d/ -f1|awk -F. '{print $1"."$2"."$3"."$4+1}'
  register: KUBERNETES_SVC_IP
  tags: change_cert

- name: Set variable CLUSTER_KUBERNETES_SVC_IP
  set_fact: CLUSTER_KUBERNETES_SVC_IP={{ KUBERNETES_SVC_IP.stdout }}
  tags: change_cert
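# Illustration (assumed example value): with SERVICE_CIDR=10.68.0.0/16 the shell
# pipeline above yields 10.68.0.1, the first address of the service network, which
# kubernetes-csr.json.j2 presumably includes in the apiserver certificate's SAN list.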
- name: Create kubernetes certificate signing request
  template: src=kubernetes-csr.json.j2 dest={{ cluster_dir }}/ssl/kubernetes-csr.json
  tags: change_cert
  connection: local

- name: Create kubernetes certificate and private key
  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
        -ca=ca.pem \
        -ca-key=ca-key.pem \
        -config=ca-config.json \
        -profile=kubernetes kubernetes-csr.json | {{ base_dir }}/bin/cfssljson -bare kubernetes"
  tags: change_cert
  connection: local
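# cfssl/cfssljson above run on the deploy host (connection: local) and write
# kubernetes.pem / kubernetes-key.pem into {{ cluster_dir }}/ssl, signed by the
# cluster CA under the "kubernetes" profile of ca-config.json.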
# Create aggregator proxy related certificates
- name: Create aggregator proxy certificate signing request
  template: src=aggregator-proxy-csr.json.j2 dest={{ cluster_dir }}/ssl/aggregator-proxy-csr.json
  connection: local

- name: Create aggregator-proxy certificate and private key
  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
        -ca=ca.pem \
        -ca-key=ca-key.pem \
        -config=ca-config.json \
        -profile=kubernetes aggregator-proxy-csr.json | {{ base_dir }}/bin/cfssljson -bare aggregator-proxy"
  connection: local
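# aggregator-proxy.pem / aggregator-proxy-key.pem back the API aggregation layer;
# they are typically wired into kube-apiserver via --proxy-client-cert-file and
# --proxy-client-key-file in kube-apiserver.service.j2 (template not shown here).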
- name: Distribute kubernetes certificates
  copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
  with_items:
  - ca.pem
  - ca-key.pem
  - kubernetes.pem
  - kubernetes-key.pem
  - aggregator-proxy.pem
  - aggregator-proxy-key.pem
- name: Replace apiserver address in kubeconfig files
  lineinfile:
    dest: "{{ item }}"
    regexp: "^    server"
    line: "    server: https://{{ inventory_hostname }}:6443"
  with_items:
  - "/root/.kube/config"
  - "/etc/kubernetes/kube-controller-manager.kubeconfig"
  - "/etc/kubernetes/kube-scheduler.kubeconfig"
  tags: upgrade_k8s, restart_master
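# Rewriting the "server:" line above points kubectl, kube-controller-manager and
# kube-scheduler on each master at its own apiserver endpoint
# (https://{{ inventory_hostname }}:6443) instead of whatever address the
# kubeconfig files were originally generated with.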
- name: Create kube-scheduler config file
  template: src=kube-scheduler-config.yaml.j2 dest=/etc/kubernetes/kube-scheduler-config.yaml
  tags: restart_master, upgrade_k8s
- name: Create systemd unit files for master services
  template: src={{ item }}.j2 dest=/etc/systemd/system/{{ item }}
  with_items:
  - kube-apiserver.service
  - kube-controller-manager.service
  - kube-scheduler.service
  tags: restart_master, upgrade_k8s
- name: Enable master services
  shell: systemctl enable kube-apiserver kube-controller-manager kube-scheduler
  ignore_errors: true

- name: Start master services
  shell: "systemctl daemon-reload && systemctl restart kube-apiserver && \
        systemctl restart kube-controller-manager && systemctl restart kube-scheduler"
  tags: upgrade_k8s, restart_master
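# daemon-reload makes systemd pick up the regenerated unit files before
# kube-apiserver, kube-controller-manager and kube-scheduler are restarted.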
# Poll until kube-apiserver has started
- name: Poll and wait for kube-apiserver to start
  shell: "systemctl status kube-apiserver.service|grep Active"
  register: api_status
  until: '"running" in api_status.stdout'
  retries: 10
  delay: 3
  tags: upgrade_k8s, restart_master

# Poll until kube-controller-manager has started
- name: Poll and wait for kube-controller-manager to start
  shell: "systemctl status kube-controller-manager.service|grep Active"
  register: cm_status
  until: '"running" in cm_status.stdout'
  retries: 8
  delay: 3
  tags: upgrade_k8s, restart_master

# Poll until kube-scheduler has started
- name: Poll and wait for kube-scheduler to start
  shell: "systemctl status kube-scheduler.service|grep Active"
  register: sch_status
  until: '"running" in sch_status.stdout'
  retries: 8
  delay: 3
  tags: upgrade_k8s, restart_master
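# Each wait loop above re-runs "systemctl status ... | grep Active" until the output
# contains "running": up to 10 x 3s (~30s) for kube-apiserver and 8 x 3s (~24s) for
# the controller-manager and scheduler before the task fails.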
- name: Poll and wait for the master services to become ready
  command: "{{ bin_dir }}/kubectl get node"
  register: result
  until: result.rc == 0
  retries: 5
  delay: 6
  tags: upgrade_k8s, restart_master
- name: Check whether user:kubernetes is already bound to the corresponding role
  shell: "{{ bin_dir }}/kubectl get clusterrolebindings|grep kubernetes-crb || echo 'notfound'"
  register: crb_info
  run_once: true

- name: Create the user:kubernetes role binding
  command: "{{ bin_dir }}/kubectl create clusterrolebinding kubernetes-crb --clusterrole=cluster-admin --user=kubernetes"
  run_once: true
  when: "'notfound' in crb_info.stdout"