1 环境说明
ip | 主机名 | 角色 |
---|---|---|
192.168.10.11 | k8s-master01 | deploy、master1、lb1、etcd |
192.168.10.12 | k8s-master02 | master2、lb2、etcd |
192.168.10.13 | k8s-master03 | master3、etcd |
192.168.10.14 | k8s-node01 | node |
192.168.10.15 | k8s-node02 | node |
192.168.10.16 | k8s-node03 | node |
192.168.10.17 | - | vip |
2 准备工作
3 使用kubeasz部署kubernetes
[root@k8s-master01 ansible]# ll playbooks/
总用量 88
-rw-r--r-- 1 root root 443 1月 5 2022 01.prepare.yml
-rw-r--r-- 1 root root 58 1月 5 2022 02.etcd.yml
-rw-r--r-- 1 root root 209 1月 5 2022 03.runtime.yml
-rw-r--r-- 1 root root 482 1月 5 2022 04.kube-master.yml
-rw-r--r-- 1 root root 218 1月 5 2022 05.kube-node.yml
-rw-r--r-- 1 root root 408 1月 5 2022 06.network.yml
-rw-r--r-- 1 root root 77 1月 5 2022 07.cluster-addon.yml
-rw-r--r-- 1 root root 34 1月 5 2022 10.ex-lb.yml
-rw-r--r-- 1 root root 3893 1月 5 2022 11.harbor.yml
-rw-r--r-- 1 root root 1567 1月 5 2022 21.addetcd.yml
-rw-r--r-- 1 root root 1520 1月 5 2022 22.addnode.yml
-rw-r--r-- 1 root root 1050 1月 5 2022 23.addmaster.yml
-rw-r--r-- 1 root root 3344 1月 5 2022 31.deletcd.yml
-rw-r--r-- 1 root root 2018 1月 5 2022 32.delnode.yml
-rw-r--r-- 1 root root 2071 1月 5 2022 33.delmaster.yml
-rw-r--r-- 1 root root 1891 1月 5 2022 90.setup.yml
-rw-r--r-- 1 root root 1054 1月 5 2022 91.start.yml
-rw-r--r-- 1 root root 934 1月 5 2022 92.stop.yml
-rw-r--r-- 1 root root 1042 1月 5 2022 93.upgrade.yml
-rw-r--r-- 1 root root 1786 1月 5 2022 94.backup.yml
-rw-r--r-- 1 root root 999 1月 5 2022 95.restore.yml
-rw-r--r-- 1 root root 337 1月 5 2022 99.clean.yml
3.1 创建证书和安装准备
./ezctl setup k8s-test 01
3.2 安装etcd集群
./ezctl setup k8s-test 02
3.3 安装容器运行时
./ezctl setup k8s-test 03
3.4 安装master节点
./ezctl setup k8s-test 04
3.5 安装node节点
./ezctl setup k8s-test 05
3.6 部署集群网络
./ezctl setup k8s-test 06
3.7 部署集群插件(dns,dashboard)
./ezctl setup k8s-test 07
[root@k8s-master01 ansible]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
dashboard-metrics-scraper ClusterIP 10.68.219.155 <none> 8000/TCP 4m6s
kube-dns ClusterIP 10.68.0.2 <none> 53/UDP,53/TCP,9153/TCP 5m10s
kube-dns-upstream ClusterIP 10.68.183.131 <none> 53/UDP,53/TCP 5m9s
kubernetes-dashboard NodePort 10.68.173.43 <none> 443:33589/TCP 4m6s
metrics-server ClusterIP 10.68.172.244 <none> 443/TCP 4m52s
node-local-dns ClusterIP None <none> 9253/TCP 5m9s
[root@k8s-master01 ansible]# kubectl cluster-info
Kubernetes control plane is running at https://127.0.0.1:6443
CoreDNS is running at https://127.0.0.1:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
KubeDNSUpstream is running at https://127.0.0.1:6443/api/v1/namespaces/kube-system/services/kube-dns-upstream:dns/proxy
kubernetes-dashboard is running at https://127.0.0.1:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
验证集群状态
[root@k8s-master01 ansible]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.10.11 Ready,SchedulingDisabled master 25m v1.23.1
192.168.10.12 Ready,SchedulingDisabled master 25m v1.23.1
192.168.10.13 Ready,SchedulingDisabled master 25m v1.23.1
192.168.10.14 Ready node 19m v1.23.1
192.168.10.15 Ready node 19m v1.23.1
192.168.10.16 Ready node 19m v1.23.1
[root@k8s-master01 ansible]# kubectl get pod -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system calico-kube-controllers-754966f84c-t9mbm 1/1 Running 0 9m36s 192.168.10.14 192.168.10.14
kube-system calico-node-jwd66 1/1 Running 1 (9m29s ago) 9m36s 192.168.10.16 192.168.10.16
kube-system calico-node-md7v7 1/1 Running 0 9m36s 192.168.10.11 192.168.10.11
kube-system calico-node-nrjrl 1/1 Running 0 9m36s 192.168.10.12 192.168.10.12
kube-system calico-node-snfd7 1/1 Running 0 9m36s 192.168.10.15 192.168.10.15
kube-system calico-node-tgxch 1/1 Running 0 9m36s 192.168.10.13 192.168.10.13
kube-system calico-node-zxj7c 1/1 Running 0 9m36s 192.168.10.14 192.168.10.14
- 我的微信
- 这是我的微信扫一扫
- 我的微信公众号
- 我的微信公众号扫一扫