二进制安装 k8s 1.22.11
参考文档
- https://github.com/kubernetes/kubernetes/releases/tag/v1.22.11
- https://github.com/easzlab/kubeasz/releases/tag/3.1.1
各组件版本说明
- OS: Ubuntu 20.04.4 LTS
- k8s: v1.22.11
- etcd: v3.5.0
- docker: 20.10.8
- calico: v3.19.2
- coredns: 1.8.4
- pause: 3.5
- dashboard: v2.3.1
- metrics-server: v0.5.0
环境说明
- 子网:172.27.176.0/20
| Host Name | IP | VIP | service |
|---|---|---|---|
| k8s-master-1.xiangzheng.vip | 172.27.176.1 | | kubeadm、kubelet、kubectl、docker |
| k8s-master-2.xiangzheng.vip | 172.27.176.2 | | kubeadm、kubelet、kubectl、docker |
| k8s-master-3.xiangzheng.vip | 172.27.176.3 | | kubeadm、kubelet、kubectl、docker |
| haproxy-master.xiangzheng.vip | 172.27.176.66 | 172.27.176.123 | Haproxy + Keepalived |
| haproxy-slave.xiangzheng.vip | 172.27.176.88 | 172.27.176.123 | Haproxy + Keepalived |
| harbor-master.xiangzheng.vip | 172.27.176.100 | | harbor、docker |
| harbor-slave.xiangzheng.vip | 172.27.176.101 | | harbor、docker |
| k8s-node-1.xiangzheng.vip | 172.27.176.11 | | kubelet、kubectl、docker |
| k8s-node-2.xiangzheng.vip | 172.27.176.12 | | kubelet、kubectl、docker |
| k8s-node-3.xiangzheng.vip | 172.27.176.13 | | kubelet、kubectl、docker |
先决条件
禁用 SElinux
- 略
时间同步
- 公有云厂商通常默认已经配置了时间同步
- 私有云如未配置时间同步,则需将NTP服务器指向企业内部 或 公有云厂商提供的公共NTP服务器,推荐使用 chrony
# 安装 chrony
yum -y install chrony #Centos
apt -y install chrony #Ubuntu
# 配置 /etc/chrony/chrony.conf,将 NTP 服务器指向阿里云公共 NTP,示例如下:
root@k8s-master-1:~# cat /etc/chrony/chrony.conf
# Use Alibaba NTP server
# Public NTP
# Alicloud NTP
server ntp.cloud.aliyuncs.com minpoll 4 maxpoll 10 iburst
server ntp.aliyun.com minpoll 4 maxpoll 10 iburst
server ntp1.aliyun.com minpoll 4 maxpoll 10 iburst
server ntp1.cloud.aliyuncs.com minpoll 4 maxpoll 10 iburst
server ntp10.cloud.aliyuncs.com minpoll 4 maxpoll 10 iburst
....
# 重启服务,重启后会自动开启本地 udp 323 端口,并观察时间是否同步成功
systemctl restart chronyd
ssh 免密登录
基于 expect 工具实现
#!/bin/bash
#
#********************************************************************
#Author: xiangzheng
#QQ: 767483070
#Date: 2022-04-20
#FileName: push_ssh_key_expect.sh
#URL: https://www.xiangzheng.vip
#Email: rootroot25@163.com
#Description: Push the local SSH public key to a list of hosts (expect-based)
#Copyright (C): 2022 All rights reserved
#********************************************************************
# Run this script on every host to get mutual password-less ssh login.

# IPs of the remote hosts (including this host).
IP_LIST="
172.27.176.1
172.27.176.2
172.27.176.3
172.27.176.66
172.27.176.88
172.27.176.100
172.27.176.101
172.27.176.11
172.27.176.12
172.27.176.13
"
# root password shared by all hosts above (and this host)
PASS="Abc123456"

# BUG FIX: 'dpkg -L' also exits 0 for removed-but-not-purged packages;
# 'dpkg -s' queries the actual install status.
dpkg -s expect &> /dev/null || apt -y install expect
#rpm -q expect &> /dev/null || yum -y install expect

# Generate the key pair if needed; -P "" creates the private key without a
# passphrase (put one inside the quotes to set a passphrase).
#ssh-keygen -f /root/.ssh/id_rsa -t rsa -P ""

# Abort when the public key is missing.
[ -f ~/.ssh/id_rsa.pub ] || { echo '公钥不存在 请存放在 ~/.ssh/id_rsa.pub下 退出' ; exit 3; }

# Push the public key to every host in IP_LIST.
for IP in ${IP_LIST};do
    # BUG FIX: the original echoed "push succeed" unconditionally; only report
    # success when expect itself exits cleanly (a spawn error makes it fail —
    # note an in-script timeout still exits 0, so this is a partial check).
    if expect <<EOF
set timeout 20
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@${IP}
expect {
    "*yes/no*" { send "yes\n";exp_continue }
    "*password*" { send "${PASS}\n" }
}
expect eof
EOF
    then
        echo "${IP} push succeed"
    else
        echo "${IP} push failed" >&2
    fi
done

# Alternative: read the host list from a file (hosts.txt) with 'while read'.
#while read IP ;do
#expect <<EOF
#set timeout 20
#spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@${IP}
#expect {
#    "*yes/no*" { send "yes\n";exp_continue }
#    "*password*" { send "${PASS}\n" }
#}
#expect eof
#EOF
#echo ${IP} push succeed
#done < hosts.txt
基于 sshpass 工具实现
#!/bin/bash
#
#********************************************************************
#Author: xiangzheng
#QQ: 767483070
#Date: 2022-04-20
#FileName: push_ssh_key_sshpass.sh
#URL: https://www.xiangzheng.vip
#Email: rootroot25@163.com
#Description: Push the local SSH public key to a list of hosts (sshpass-based)
#Copyright (C): 2022 All rights reserved
#********************************************************************
# IPs of the remote hosts (including this host).
IPLIST="
172.27.176.1
172.27.176.2
172.27.176.3
172.27.176.66
172.27.176.88
172.27.176.100
172.27.176.101
172.27.176.11
172.27.176.12
172.27.176.13
"
# BUG FIX: 'dpkg -L' also exits 0 for removed-but-not-purged packages;
# 'dpkg -s' queries the actual install status.
dpkg -s sshpass &> /dev/null || apt -y install sshpass
#rpm -q sshpass &> /dev/null || yum -y install sshpass

# Generate a passphrase-less key pair when none exists yet.
[ -f /root/.ssh/id_rsa ] || ssh-keygen -f /root/.ssh/id_rsa -P ''

# BUG FIX: was 'export SSHPASS=123', which does not match the documented
# root password of these hosts (Abc123456); 'sshpass -e' reads SSHPASS.
export SSHPASS="Abc123456"

# Push the key to every host in parallel; the 'wait' after the loop joins
# all background jobs before the script exits.
for IP in ${IPLIST};do
    {
    sshpass -e ssh-copy-id -o StrictHostKeyChecking=no "${IP}"
    }&
done
wait
禁用 swap
- 否则部署时会报错,或者也可以添加一些参数来允许 swap 的存在,但通常不会使用 swap,因为 swap 会影响性能
# free -h
Swap: 0B 0B 0B #表示已经禁用
开启相关模块
#查看相关模块是否开启,两者都有显示即代表开启
# lsmod | grep -E "^(overlay|br_netfilter)"
br_netfilter 28672 0
overlay 118784 0
#临时加载模块
modprobe br_netfilter
modprobe overlay
#永久加载模块
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
修改相关内核参数
- 下面只是一些部署k8s集群必要的参数,其它内核参数还需根据生产环境按需修改
# 设置所需的 sysctl 参数,参数在重新启动后保持不变
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# 应用 sysctl 参数而不重新启动
sudo sysctl --system
部署 Haproxy + Keepalived
参考文档:https://github.com/kubernetes/kubeadm/blob/main/docs/ha-considerations.md
注意事项
- 也可以选择在 node 节点直接安装 haproxy 再监听本机的 127.0.0.1:6443,进而实现与apiserver的通信,这样实现的好处是减轻了 haproxy 的压力、并且也间接的实现了 haproxy 的高可用,一个node 节点宕机也不会影响整个 k8s 集群的正常使用
Keepalived
- 安装过程省略,生产中通常选择二进制安装
keepalived.conf
MASTER
# /etc/keepalived/keepalived.conf — MASTER node
global_defs {
router_id LVS_DEVEL
}
vrrp_script check_apiserver {
script "/etc/keepalived/check_apiserver.sh" # health-check script for the local apiserver path
interval 3 # run the check every 3 seconds
weight -2 # subtract 2 from priority while the check is considered failed
fall 10 # declare failure only after 10 consecutive failed checks (~30s grace before the -2 applies)
rise 2 # declare healthy again after 2 consecutive successful checks
}
vrrp_instance VI_1 {
state MASTER # initial role: MASTER
interface eth0
virtual_router_id 51 # VRID; must not clash with other VRRP instances on this LAN
priority 100 # MASTER priority must be higher than the BACKUP's
authentication {
auth_type PASS
auth_pass 666
}
virtual_ipaddress {
172.27.176.123/32 dev eth0 label eth0:1 # bind the VIP and label it eth0:1
}
track_script {
check_apiserver # name of the vrrp_script block defined above
}
}
}
BACKUP
# /etc/keepalived/keepalived.conf — BACKUP node
global_defs {
router_id LVS_DEVEL
}
vrrp_script check_apiserver {
script "/etc/keepalived/check_apiserver.sh" # same health-check script as on the MASTER
interval 3 # run the check every 3 seconds
weight -2 # subtract 2 from priority while the check is considered failed
fall 10 # declare failure after 10 consecutive failed checks
rise 2 # declare healthy again after 2 consecutive successes
}
vrrp_instance VI_1 {
state BACKUP # initial role: BACKUP
interface eth0
virtual_router_id 51 # must match the MASTER's VRID
priority 99 # a failed MASTER drops to 100-2=98; 99>98, so the BACKUP takes over the VIP
authentication {
auth_type PASS
auth_pass 666
}
virtual_ipaddress {
172.27.176.123/32 dev eth0 label eth0:1 # same VIP and label as the MASTER
}
track_script {
check_apiserver # name of the vrrp_script block defined above
}
}
}
健康检查脚本
- 在 MASTER 和 BACKUP 上都定义
- /etc/keepalived/check_apiserver.sh
#!/bin/bash
#
#********************************************************************
#Author: xiangzheng
#QQ: 767483070
#Date: 2022-07-03
#FileName: check_apiserver.sh
#URL: https://www.xiangzheng.vip
#Email: rootroot25@163.com
#Description: keepalived health check — probe the apiserver endpoint on
#             localhost; the exit status of the final curl is this
#             script's exit status, which the vrrp_script block uses to
#             decide whether to lower this node's VRRP priority.
#Copyright (C): 2022 All rights reserved
#********************************************************************
# Port that the apiserver (fronted by haproxy on this node) listens on.
APISERVER_DEST_PORT='6443'
curl --silent --max-time 2 --insecure https://localhost:${APISERVER_DEST_PORT}/ -o /dev/null
Haproxy
- 安装过程省略,生产中通常选择二进制安装
haproxy.cfg
# /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
#log 127.0.0.1 local3 info
log /dev/log local1 notice
daemon
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 1
timeout http-request 10s
timeout queue 20s
timeout connect 5s
timeout client 20s
timeout server 20s
timeout http-keep-alive 10s
timeout check 10s
#---------------------------------------------------------------------
# apiserver frontend which proxys to the control plane nodes
#---------------------------------------------------------------------
frontend apiserver
bind *:6443
mode tcp
option tcplog
default_backend apiserver
#---------------------------------------------------------------------
# round robin balancing for apiserver
#---------------------------------------------------------------------
backend apiserver
# NOTE(review): only one health-check method can be active per backend;
# 'option ssl-hello-chk' below supersedes 'option httpchk', so the
# GET /healthz check is effectively unused (plain-HTTP httpchk could not
# probe the TLS-only apiserver anyway) — confirm intent and keep one.
option httpchk GET /healthz
http-check expect status 200
mode tcp
option ssl-hello-chk
balance roundrobin
# NOTE(review): haproxy time values without a unit are milliseconds, so
# 'inter 3' means a check every 3 ms — 'inter 3s' was probably intended.
server 172.27.176.1 172.27.176.1:6443 check inter 3 fall 3 rise 5
server 172.27.176.2 172.27.176.2:6443 check inter 3 fall 3 rise 5
server 172.27.176.3 172.27.176.3:6443 check inter 3 fall 3 rise 5
# Enable the stats page (HTTP, port 5000, path /haproxy-status).
listen stats
mode http
bind 0.0.0.0:5000
stats enable
log global
stats uri /haproxy-status
stats auth haadmin:123456
测试
- 测试 VIP 可否成功飘动
…
部署 Docker
安装 Docker
- 在所有的 k8s-master、k8s-node、harbor 节点执行
#!/bin/bash
#
#********************************************************************
#Author: xiangzheng
#QQ: 767483070
#Date: 2022-06-15
#FileName: init_docker.sh
#URL: https://www.xiangzheng.vip
#Email: rootroot25@163.com
#Description: One-shot docker installation for Ubuntu or CentOS
#Copyright (C): 2022 All rights reserved
#********************************************************************
# Nothing to do when docker is already installed (exit 0: not an error).
docker --version &> /dev/null && { echo "docker已经安装 退出" ; exit; }

# Install docker-ce on Ubuntu from the Aliyun mirror, pinned to 20.10.8.
install_docker_for_ubuntu(){
    apt-get -y install \
        apt-transport-https \
        ca-certificates \
        curl \
        gnupg \
        lsb-release
    # Import the repo signing key and register the Aliyun docker-ce repo.
    curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
    echo \
    "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://mirrors.aliyun.com/docker-ce/linux/ubuntu/ \
    $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
    apt update
    # A specific version can be pinned here.
    apt -y install docker-ce=5:20.10.8~3-0~ubuntu-focal docker-ce-cli=5:20.10.8~3-0~ubuntu-focal
    #apt -y install docker-ce=5:19.03.15~3-0~ubuntu-focal docker-ce-cli=5:19.03.15~3-0~ubuntu-focal
    # BUG FIX: the failure branch used a plain 'exit' (status 0); a failed
    # install must report a non-zero status.
    if systemctl is-active docker &>/dev/null; then
        echo "docker安装成功"
    else
        echo "docker安装失败 退出"
        exit 1
    fi
}

# Install docker-ce on CentOS from the Aliyun mirror.
install_docker_for_centos(){
    . /etc/init.d/functions
    yum install -y yum-utils
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo \
    && action "docker-ce.repo文件下载成功" || { action "docker-ce.repo文件下载失败 退出" false ; exit 1; }
    mkdir -p /etc/docker/
    # Registry mirror for faster image pulls.
    cat > /etc/docker/daemon.json <<EOF
{
"registry-mirrors": ["https://jqm0rnhf.mirror.aliyuncs.com"]
}
EOF
    action "docker加速准备完成"
    # A specific version can be pinned here.
    yum install -y docker-ce-20.10.10 docker-ce-cli-20.10.10 containerd.io
    systemctl enable --now docker
    # BUG FIX: the original line was
    #   ... && action "ok" || action "fail" false ; exit;
    # The trailing '; exit' ran unconditionally and always exited with
    # status 0, even on failure. Use an explicit if/else so only a failed
    # install terminates the script, and with a non-zero status.
    if systemctl is-active docker &>/dev/null; then
        action "docker安装成功"
    else
        action "docker安装失败" false
        exit 1
    fi
}

install_docker_for_ubuntu
#install_docker_for_centos