kubernetesV1.16二进制集群ha安装实操

Contents

1 系统设置

1.1 主机系统环境说明

[root@k8s-master01 ~]# cat /etc/redhat-release 
CentOS Linux release 7.7.1908 (Core)
[root@k8s-master01 ~]# uname -r
3.10.0-693.el7.x86_64

1.2 主机名设置

hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02

1.3 服务器角色规划

主机名 ip地址 角色 服务
k8s-master01 192.168.10.11 master etcd、kube-apiserver、kube-controller-manager、kube-scheduler
k8s-master02 192.168.10.12 master etcd、kube-apiserver、kube-controller-manager、kube-scheduler
k8s-master03 192.168.10.13 master etcd、kube-apiserver、kube-controller-manager、kube-scheduler
k8s-node01 192.168.10.14 work kubelet、kube-proxy、docker、dns、calico
k8s-node02 192.168.10.15 work kubelet、kube-proxy、docker、dns、calico

1.4 配置自己的yum源

yum install wget
mv /etc/yum.repos.d/CentOS-Base.repo  /etc/yum.repos.d/CentOS-Base.repo.bak
mv /etc/yum.repos.d/epel-7.repo  /etc/yum.repos.d/epel-7.repo.bak
curl -o /etc/yum.repos.d/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel-7.repo https://mirrors.aliyun.com/repo/epel-7.repo
yum update

1.5 关闭SELinux

sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
setenforce 0

1.6 关闭防火墙、swap

systemctl stop firewalld && systemctl disable firewalld
 swapoff -a
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab

systemctl stop NetworkManager
systemctl disable NetworkManager

1.7 关闭dnsmasq(否则可能导致docker容器无法解析域名)

service dnsmasq stop && systemctl disable dnsmasq

1.8 安装ansible

只在master01上安装
yum install -y epel-release
yum install ansible -y

定义主机组,编辑 /etc/ansible/hosts 文件,追加以下内容:
[k8s-master] #master节点服务器组
192.168.10.11
192.168.10.12
192.168.10.13

[k8s-node]  #node节点服务器组
192.168.10.14
192.168.10.15

[k8s-all:children]  #k8s集群服务器组
k8s-master
k8s-node

[k8s-all:vars]
ansible_ssh_user=root
ansible_ssh_pass="123456"

测试(注意:使用密码认证时需先安装 sshpass:yum install -y sshpass,并在 /etc/ansible/ansible.cfg 中设置 host_key_checking = False,否则首次连接会因交互确认而失败)
ansible k8s-all -m ping

1.9 安装常用软件包

ansible k8s-all -m shell -a "yum install -y   vim openssh-clients ntpdate man lrzsz net-tools"

1.10 配置host主机域名解析

 cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.11 k8s-master01
192.168.10.12 k8s-master02
192.168.10.13 k8s-master03
192.168.10.14 k8s-node01
192.168.10.15 k8s-node02
分发hosts
ansible k8s-all -m copy -a "src=/etc/hosts dest=/etc/hosts"

1.11 时间同步

ansible k8s-all -m yum -a "name=ntpdate state=latest" 
ansible k8s-all -m cron -a "name='k8s cluster crontab' minute=*/30 hour=* day=* month=* weekday=* job='ntpdate time7.aliyun.com >/dev/null 2>&1'"
 ansible k8s-all -m shell -a "ntpdate time7.aliyun.com"

1.12 系统参数设置

cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
EOF
分发到其他服务器上
ansible k8s-all -m copy -a "src=/etc/sysctl.d/k8s.conf dest=/etc/sysctl.d/"
ansible k8s-all -m shell -a 'modprobe br_netfilter'
ansible k8s-all -m shell -a 'sysctl -p /etc/sysctl.d/k8s.conf'

1.13 创建集群目录

所有节点创建:
ansible k8s-all -m file -a 'path=/etc/kubernetes/ssl state=directory'
ansible k8s-all -m file -a 'path=/etc/kubernetes/config state=directory'
本机创建
mkdir /opt/k8s/{certs,cfg,unit} -p

1.14 安装docker(worker节点)

 ansible k8s-node -m shell -a 'yum install -y yum-utils'
 ansible k8s-node -m shell -a 'yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo'
 ansible k8s-node -m shell -a 'yum install docker-ce -y'
 ansible k8s-node -m shell -a 'systemctl start docker && systemctl enable docker'

2 创建 CA 证书和秘钥

2.1 安装及配置CFSSL

生成证书时可在任一节点完成,这里在k8s-master01主机执行,证书只需要创建一次即可,以后在向集群中添加新节点时只要将 /etc/kubernetes/ssl 目录下的证书拷贝到新节点上即可。
mkdir k8s/cfssl -p && cd k8s/cfssl/
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
cp cfssl_linux-amd64 /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
cp cfssljson_linux-amd64 /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
cp cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

2.2 创建根证书(CA)

CA 证书是集群所有节点共享的,只需要创建一个 CA 证书,后续创建的所有证书都由 它签名。
cd  /opt/k8s/certs/
cat > ca-config.json <<EOF
{
    "signing": {
      "default": {
        "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
注释: 1.signing:表示该证书可用于签名其它证书,生成的ca.pem证书中 CA=TRUE 2.server auth:表示client可以用该证书对server提供的证书进行验证; 3.client auth:表示server可以用该证书对client提供的证书进行验证;

2.3 创建证书签名请求文件

cat > ca-csr.json <<EOF

{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
} 
EOF

2.4 生成CA证书、私钥和csr证书签名请求文件

创建
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

查看
[root@k8s-master01 certs]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem
[root@k8s-master01 certs]# 

2.5 分发证书文件

将生成的 CA 证书、密钥文件、配置文件拷贝到所有节点的 /etc/kubernetes/ssl
目录下
ansible k8s-all -m copy -a 'src=ca.csr dest=/etc/kubernetes/ssl/'
ansible k8s-all -m copy -a 'src=ca-key.pem dest=/etc/kubernetes/ssl/'
ansible k8s-all -m copy -a 'src=ca.pem dest=/etc/kubernetes/ssl/'
查看分发情况:
[root@k8s-master01 certs]# ansible k8s-all -m shell -a 'ls /etc/kubernetes/ssl'

192.168.10.11 | CHANGED | rc=0 >>
ca.csr
ca-key.pem
ca.pem

192.168.10.13 | CHANGED | rc=0 >>
ca.csr
ca-key.pem
ca.pem

192.168.10.15 | CHANGED | rc=0 >>
ca.csr
ca-key.pem
ca.pem

192.168.10.14 | CHANGED | rc=0 >>
ca.csr
ca-key.pem
ca.pem

192.168.10.12 | CHANGED | rc=0 >>
ca.csr
ca-key.pem
ca.pem

3 部署etcd集群

etcd 是k8s集群最重要的组件,用来存储k8s的所有服务信息, etcd 挂了,集群就挂了,我们这里把etcd部署在master三台节点上做高可用,etcd集群采用raft算法选举Leader, 由于Raft算法在做决策时需要多数节点的投票,所以etcd一般部署集群推荐奇数个节点,推荐的数量为3、5或者7个节点构成一个集群。

3.1 下载etcd二进制文件

cd k8s
wget https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz
[root@k8s-master01 k8s]# tar -xf etcd-v3.4.3-linux-amd64.tar.gz 
[root@k8s-master01 k8s]# cd etcd-v3.4.3-linux-amd64
[root@k8s-master01 etcd-v3.4.3-linux-amd64]# ansible k8s-master -m copy -a 'src=/root/k8s/etcd-v3.4.3-linux-amd64/etcd dest=/usr/local/bin/ mode=0755'
ansible k8s-master -m copy -a 'src=/root/k8s/etcd-v3.4.3-linux-amd64/etcdctl dest=/usr/local/bin/ mode=0755'

3.2 创建etcd证书请求模板文件

cat > /opt/k8s/certs/etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.10.11",
    "192.168.10.12",
    "192.168.10.13",
    "k8s-master01",
    "k8s-master02",
    "k8s-master03"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

说明:hosts中的IP为各etcd节点IP及本地127地址,在生产环境中hosts列表最好多预留几个IP,这样后续扩展节点或者因故障需要迁移时不需要再重新生成证书。

3.3 生成证书及私钥

[root@k8s-master01 etcd-v3.4.3-linux-amd64]# cd /opt/k8s/certs/
[root@k8s-master01 certs]# cfssl gencert -ca=/opt/k8s/certs/ca.pem      -ca-key=/opt/k8s/certs/ca-key.pem      -config=/opt/k8s/certs/ca-config.json      -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
2019/12/25 18:25:07 [INFO] generate received request
2019/12/25 18:25:07 [INFO] received CSR
2019/12/25 18:25:07 [INFO] generating key: rsa-2048
2019/12/25 18:25:07 [INFO] encoded CSR
2019/12/25 18:25:07 [INFO] signed certificate with serial number 12215464798631919849402827311116750913097688886
2019/12/25 18:25:07 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
查看生成结果
[root@k8s-master01 certs]# ll etcd*
-rw-r--r--. 1 root root 1066 12月 25 18:25 etcd.csr
-rw-r--r--. 1 root root  301 12月 25 18:23 etcd-csr.json
-rw-------. 1 root root 1675 12月 25 18:25 etcd-key.pem
-rw-r--r--. 1 root root 1440 12月 25 18:25 etcd.pem

3.4 etcd证书分发

把生成的etcd证书复制到创建的证书目录并放至另2台etcd节点
ansible k8s-master -m copy -a 'src=/opt/k8s/certs/etcd.pem dest=/etc/kubernetes/ssl/'
ansible k8s-master -m copy -a 'src=/opt/k8s/certs/etcd-key.pem dest=/etc/kubernetes/ssl/'

3.5 修改etcd配置参数

下面为etcd服务创建专用的运行用户,并准备数据目录:

3.5.1 创建etcd用户和组

ansible k8s-master -m group -a 'name=etcd'
ansible k8s-master -m user -a 'name=etcd group=etcd comment="etcd user" shell=/sbin/nologin home=/var/lib/etcd createhome=no'

3.5.2 创建etcd数据存放目录并授权

ansible k8s-master -m file -a 'path=/var/lib/etcd state=directory owner=etcd group=etcd'

3.6 配置etcd启动文件

cat <<'EOF' > /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --data-dir=/var/lib/etcd \
  --name=k8s-master01 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth \
  --listen-peer-urls=https://192.168.10.11:2380 \
  --initial-advertise-peer-urls=https://192.168.10.11:2380 \
  --listen-client-urls=https://192.168.10.11:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.10.11:2379 \
  --initial-cluster-token=etcd-cluster-0 \
  --initial-cluster=k8s-master01=https://192.168.10.11:2380,k8s-master02=https://192.168.10.12:2380,k8s-master03=https://192.168.10.13:2380 \
  --initial-cluster-state=new \
  --auto-compaction-mode=periodic \
  --auto-compaction-retention=1 \
  --max-request-bytes=33554432 \
  --quota-backend-bytes=6442450944 \
  --heartbeat-interval=250 \
  --election-timeout=2000
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

copy到etcd集群的每一台(注意:上面 etcd.service 中的 --name 及各监听/通告地址均是 master01 的值,分发到 master02、master03 后必须改成各自的主机名和IP,否则etcd无法组成集群):
 ansible k8s-master -m copy -a 'src=/etc/systemd/system/etcd.service dest=/etc/systemd/system/etcd.service'
ansible k8s-master -m shell -a 'systemctl daemon-reload'
ansible k8s-master -m shell -a 'systemctl enable etcd'
ansible k8s-master  -m shell -a 'systemctl start etcd'
etcd 3.4注意事项 ETCD3.4版本ETCDCTL_API=3 etcdctl 和 etcd --enable-v2=false 成为了默认配置,如要使用v2版本,执行etcdctl时候需要设置ETCDCTL_API环境变量,例如:ETCDCTL_API=2 etcdctl ETCD3.4版本会自动读取环境变量的参数,所以EnvironmentFile文件中有的参数,不需要再次在ExecStart启动参数中添加,二选一,如同时配置,会触发以下类似报错“etcd: conflicting environment variable "ETCD_NAME" is shadowed by corresponding command-line flag (either unset environment variable or disable flag)” flannel操作etcd使用的是v2的API,而kubernetes操作etcd使用的v3的API

3.7 验证etcd集群状态

[root@k8s-master01 k8s]# etcdctl --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --endpoints="https://192.168.10.11:2379,https://192.168.10.12:2379,https://192.168.10.13:2379" endpoint status
https://192.168.10.11:2379, 7508c5fadccb39e2, 3.4.3, 20 kB, true, false, 4, 14, 14, 
https://192.168.10.12:2379, 1af68d968c7e3f22, 3.4.3, 20 kB, false, false, 4, 14, 14, 
https://192.168.10.13:2379, e8d9a97b17f26476, 3.4.3, 20 kB, false, false, 4, 14, 14, 

查看集群健康状态
[root@k8s-master01 k8s]# etcdctl --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --endpoints="https://192.168.10.11:2379,https://192.168.10.12:2379,https://192.168.10.13:2379" endpoint  health
https://192.168.10.12:2379 is healthy: successfully committed proposal: took = 24.910116ms
https://192.168.10.11:2379 is healthy: successfully committed proposal: took = 27.478493ms
https://192.168.10.13:2379 is healthy: successfully committed proposal: took = 29.586593ms

etcd3.4.3部署成功

4 master节点部署组件

4.1 kubectl命令行工具部署

kubectl 是 kubernetes 集群的命令行管理工具,它默认从 ~/.kube/config 文件读取 kube-apiserver 地址、证书、用户名等信息。

4.1.1 下载kubernetes二进制安装包

wget https://storage.googleapis.com/kubernetes-release/release/v1.16.2/kubernetes-server-linux-amd64.tar.gz
tar -xf kubernetes-server-linux-amd64.tar.gz

4.1.2 分发二进制文件到对应的服务器

把对应组件二进制文件copy到指定节点
master节点组件:kube-apiserver、etcd、kube-controller-manager、kube-scheduler、kubectl
node节点组件:kubelet、kube-proxy、docker、coredns、calico
master二进制命令文件传输
scp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kubeadm} 192.168.10.11:/usr/local/bin/
scp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kubeadm} 192.168.10.12:/usr/local/bin/
scp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kubeadm} 192.168.10.13:/usr/local/bin/
node节点二进制文件传输
scp kubernetes/server/bin/{kube-proxy,kubelet} 192.168.10.14:/usr/local/bin/
scp kubernetes/server/bin/{kube-proxy,kubelet} 192.168.10.15:/usr/local/bin/

4.1.3 创建请求证书

cat > /opt/k8s/certs/admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

4.1.4 生成证书和私钥

cfssl gencert -ca=/opt/k8s/certs/ca.pem \
     -ca-key=/opt/k8s/certs/ca-key.pem \
     -config=/opt/k8s/certs/ca-config.json \
     -profile=kubernetes admin-csr.json | cfssljson -bare admin

4.1.5 分发证书到所有的master节点

ansible k8s-master -m copy -a 'src=/opt/k8s/certs/admin-key.pem dest=/etc/kubernetes/ssl/'
ansible k8s-master -m copy -a 'src=/opt/k8s/certs/admin.pem dest=/etc/kubernetes/ssl/'

4.1.6 生成kubeconfig 配置文件

# 设置集群参数
[root@k8s-master ~]#  kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/ssl/ca.pem \
     --embed-certs=true \
     --server=https://127.0.0.1:6443
Cluster "kubernetes" set.
# 设置客户端认证参数
[root@k8s-master ~]# kubectl config set-credentials admin \
     --client-certificate=/etc/kubernetes/ssl/admin.pem \
     --embed-certs=true \
     --client-key=/etc/kubernetes/ssl/admin-key.pem
User "admin" set.
# 设置上下文参数
 kubectl config set-context admin@kubernetes \
     --cluster=kubernetes \
     --user=admin
Context "admin@kubernetes" created.
# 设置默认上下文
[root@k8s-master ~]#  kubectl config use-context admin@kubernetes
Switched to context "admin@kubernetes".
以上操作会在当前目录下生成.kube/config文件,操作集群时,apiserver需要对该文件进行验证,创建的admin用户对kubernetes集群有所有权限(集群管理员)。

4.1.7 分发kubeconfig配置文件

scp -r  ~/.kube  192.168.10.12:~/
scp -r  ~/.kube  192.168.10.13:~/

4.2 部署kube-apiserver组件

4.2.1 创建kubernetes 证书

cat >kubernetes-csr.json<<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.10.11",
    "192.168.10.12",
    "192.168.10.13",
    "10.254.0.1",
    "localhost",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

4.2.2 生成kubernetes 证书和私钥

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

4.2.3 查看证书生成及分发

查看证书生成
[root@k8s-master01 certs]# ll -lrt |grep ubernetes
-rw-r--r-- 1 root root  498 1月  10 21:48 kubernetes-csr.json
-rw-r--r-- 1 root root 1647 1月  10 21:48 kubernetes.pem
-rw------- 1 root root 1675 1月  10 21:48 kubernetes-key.pem
-rw-r--r-- 1 root root 1277 1月  10 21:48 kubernetes.csr

分发证书
ansible k8s-master -m copy -a 'src=/opt/k8s/certs/kubernetes.pem dest=/etc/kubernetes/ssl'
ansible k8s-master -m copy -a 'src=/opt/k8s/certs/kubernetes-key.pem dest=/etc/kubernetes/ssl'

4.2.4 配置kube-apiserver客户端使用的token文件

创建 TLS Bootstrapping Token
[root@k8s-master01 certs]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
73002899d1c8c60eba90e0bece2b14b3
cat <<EOF > /etc/kubernetes/config/token.csv
73002899d1c8c60eba90e0bece2b14b3,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

4.2.5 分发token文件

ansible k8s-master -m copy -a 'src=/etc/kubernetes/config/token.csv dest=/etc/kubernetes/config/'

4.2.6 创建apiserver配置文件


  • 我的微信
  • 这是我的微信扫一扫
  • weinxin
  • 我的微信公众号
  • 我的微信公众号扫一扫
  • weinxin
avatar

发表评论

:?: :razz: :sad: :evil: :!: :smile: :oops: :grin: :eek: :shock: :???: :cool: :lol: :mad: :twisted: :roll: :wink: :idea: :arrow: :neutral: :cry: :mrgreen: