Building a Highly Available Kubernetes Cluster with kubeadm
How to deploy a vanilla Kubernetes 1.30.0 cluster on Debian 12
1. K8S Cluster Host Preparation
1.1 Host Operating System
No. | Operating System & Version | Notes |
---|---|---|
1 | debian-12.10.0 | |
1.2 Host Hardware Configuration
CPU | Memory | Disk | Role | Hostname |
---|---|---|---|---|
2C | 6G | 120G | master01 | k8s-master01 |
2C | 6G | 120G | master02 | k8s-master02 |
2C | 6G | 120G | master03 | k8s-master03 |
2C | 6G | 120G | node01 | k8s-worker01 |
2C | 6G | 120G | node02 | k8s-worker02 |
No. | Hostname | IP Address | Notes |
---|---|---|---|
1 | master01 | 192.168.75.120 | master |
2 | master02 | 192.168.75.121 | master |
3 | master03 | 192.168.75.122 | master |
4 | worker01 | 192.168.75.123 | node |
5 | worker02 | 192.168.75.124 | node |
6 | (vip) | 192.168.75.100 | VIP, floats between master01 and master02 |
No. | Hostname | Function | Notes |
---|---|---|---|
1 | master01 | haproxy, keepalived | keepalived MASTER node |
2 | master02 | haproxy, keepalived | keepalived BACKUP node |
1.3 Host Configuration
1.3.1 Configure base packages and apt sources
First, configure the Tsinghua (TUNA) HTTP mirror in /etc/apt/sources.list:
# Source (deb-src) mirrors are commented out by default to speed up apt update; uncomment if needed
deb http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm main contrib non-free non-free-firmware
# deb-src http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm main contrib non-free non-free-firmware
deb http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-updates main contrib non-free non-free-firmware
# deb-src http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-updates main contrib non-free non-free-firmware
deb http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-backports main contrib non-free non-free-firmware
# deb-src http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-backports main contrib non-free non-free-firmware
# The security source below includes both official and mirror configurations; switch by adjusting the comments if needed
deb https://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware
# deb-src https://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware
Update the package index and install the HTTPS transport tools:
apt update
apt install -y apt-transport-https ca-certificates
Then switch the apt sources to HTTPS:
# Source (deb-src) mirrors are commented out by default to speed up apt update; uncomment if needed
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm main contrib non-free non-free-firmware
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm main contrib non-free non-free-firmware
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-updates main contrib non-free non-free-firmware
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-updates main contrib non-free non-free-firmware
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-backports main contrib non-free non-free-firmware
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-backports main contrib non-free non-free-firmware
# The security source below includes both official and mirror configurations; switch by adjusting the comments if needed
deb https://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware
# deb-src https://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware
Install the base system packages:
apt update
apt install -y wget sudo curl
# add /sbin and /usr/sbin to PATH
vim /etc/profile
export PATH=$PATH:/sbin:/usr/sbin
source /etc/profile
1.3.2 Hostname Configuration
# on k8s-master01
hostnamectl set-hostname k8s-master01
# on k8s-master02
hostnamectl set-hostname k8s-master02
# on k8s-master03
hostnamectl set-hostname k8s-master03
# on k8s-worker01
hostnamectl set-hostname k8s-worker01
# on k8s-worker02
hostnamectl set-hostname k8s-worker02
1.3.3 Host IP Address Configuration
# k8s-master01: 192.168.75.120
root@k8s-master01:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
source /etc/network/interfaces.d/*
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
#allow-hotplug ens33
auto ens33
iface ens33 inet static
address 192.168.75.120/24
netmask 255.255.255.0
broadcast 192.168.75.255
gateway 192.168.75.2
dns-nameservers 8.8.8.8
# k8s-master02: 192.168.75.121
root@k8s-master02:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
source /etc/network/interfaces.d/*
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
#allow-hotplug ens33
auto ens33
iface ens33 inet static
address 192.168.75.121/24
netmask 255.255.255.0
broadcast 192.168.75.255
gateway 192.168.75.2
dns-nameservers 8.8.8.8
# k8s-master03: 192.168.75.122
root@k8s-master03:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
source /etc/network/interfaces.d/*
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
#allow-hotplug ens33
auto ens33
#iface ens33 inet dhcp
iface ens33 inet static
address 192.168.75.122/24
netmask 255.255.255.0
broadcast 192.168.75.255
gateway 192.168.75.2
dns-nameservers 8.8.8.8
# k8s-worker01: 192.168.75.123
root@k8s-worker01:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
source /etc/network/interfaces.d/*
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
#allow-hotplug ens33
auto ens33
iface ens33 inet static
address 192.168.75.123/24
netmask 255.255.255.0
broadcast 192.168.75.255
gateway 192.168.75.2
dns-nameservers 8.8.8.8
# k8s-worker02: 192.168.75.124
root@k8s-worker02:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
source /etc/network/interfaces.d/*
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
#allow-hotplug ens33
auto ens33
#iface ens33 inet dhcp
iface ens33 inet static
address 192.168.75.124/24
netmask 255.255.255.0
broadcast 192.168.75.255
gateway 192.168.75.2
dns-nameservers 8.8.8.8
# on all nodes, restart networking to apply (the Debian ifupdown service is networking, not network)
systemctl restart networking
1.3.4 Hostname and IP Address Resolution
# on all nodes
cat >> /etc/hosts << EOF
192.168.75.120 k8s-master01
192.168.75.121 k8s-master02
192.168.75.122 k8s-master03
192.168.75.123 k8s-worker01
192.168.75.124 k8s-worker02
EOF
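To confirm name resolution works, a quick check can be run from any node (a minimal sketch; it assumes all five hosts are already online):
for h in k8s-master01 k8s-master02 k8s-master03 k8s-worker01 k8s-worker02; do
  ping -c 1 -W 1 $h >/dev/null && echo "$h ok" || echo "$h unreachable"
done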
1.3.5 Time Synchronization
Install chrony:
apt install -y chrony
Configure the server side on k8s-master01:
vim /etc/chrony/chrony.conf
#pool 2.debian.pool.ntp.org iburst
server ntp.aliyun.com iburst
allow 192.168.75.0/24
local stratum 10
systemctl restart chrony
Configure all remaining master and worker nodes as clients:
vim /etc/chrony/chrony.conf
server 192.168.75.120 iburst
systemctl restart chrony
Check client connections on k8s-master01:
chronyc clients
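On each client node, synchronization can be confirmed with chronyc; once synced, the source list should show 192.168.75.120 marked with an asterisk:
chronyc sources -v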
1.3.6 Enable Kernel Forwarding and Bridge Filtering
Run on all hosts.
Create the kernel module load configuration:
cat << EOF |tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
Load the modules manually for the current session:
modprobe overlay
modprobe br_netfilter
Check the loaded modules:
# lsmod |egrep "overlay"
overlay 163840 21
# lsmod |egrep "br_netfilter"
br_netfilter 36864 0
bridge 311296 1 br_netfilter
Add the bridge filtering and kernel forwarding configuration file:
cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
Apply the kernel parameters:
sysctl --system
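To confirm the parameters are active, query them directly; all three should return 1:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward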
1.3.7 Install ipset and ipvsadm
Run on all hosts.
Install ipset and ipvsadm:
apt install -y ipset ipvsadm
Configure IPVS module loading with the required modules:
cat << EOF | tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
Create a script to load the modules:
cat << EOF | tee ipvs.sh
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
Run the script to load the modules:
sh ipvs.sh
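Confirm the IPVS modules are loaded:
lsmod | grep -e ip_vs -e nf_conntrack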
1.3.8 关闭SWAP分区
修改完后需要重启系统,如不重启,可临时关闭,命令为swapoff -a
永久关闭swap分区,需要重启操作系统
# vim /etc/fstab
……
#UUID=ee95aaa4-9245-499f-831c-d5a98b540340 none swap sw 0 0
Comment out the line containing the swap entry.
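After editing fstab (and running swapoff -a for the current boot), confirm swap is off; the Swap line should show all zeros:
swapoff -a
free -h | grep -i swap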
2. Preparing the Container Runtime (containerd)
Run on all hosts.
2.1 Obtaining the containerd Release
Download the specified containerd version:
wget https://github.com/containerd/containerd/releases/download/v2.0.4/containerd-2.0.4-linux-amd64.tar.gz
Extract and install:
tar -xvf containerd-2.0.4-linux-amd64.tar.gz
mv bin/* /usr/local/bin/
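Confirm the binaries are in place; the version output should mention v2.0.4:
containerd --version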
2.2 Generating and Modifying the containerd Configuration
Create the configuration directory:
mkdir /etc/containerd
Generate the default configuration:
containerd config default > /etc/containerd/config.toml
Edit the configuration:
vim /etc/containerd/config.toml
# around line 67, change the sandbox image
sandbox = 'registry.aliyuncs.com/google_containers/pause:3.9'
# around line 106, add
SystemdCgroup = true
# append the registry mirror accelerators at the end
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
endpoint = ["https://d90275b2.k8sgcr.skillixx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8sreg.com"]
endpoint = ["https://d90275b2.k8sreg.skillixx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
endpoint = ["https://d90275b2.quay.skillixx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."mcr.microsoft.com"]
endpoint = ["https://d90275b2.mcr.skillixx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.elastic.co"]
endpoint = ["https://d90275b2.elastic.skillixx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
endpoint = ["https://d90275b2.gcr.skillixx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://d90275b2.rhub.skillixx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."ghcr.ioo"]
endpoint = ["https://d90275b2.ghcr.skillixx.com"]
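Note: containerd 2.x deprecates these inline registry.mirrors blocks in favor of per-registry hosts.toml files, so the section above may be ignored on 2.0.4. A sketch of the equivalent hosts.d layout for docker.io, reusing the same mirror endpoint (the exact config key location in 2.x is an assumption worth verifying against your generated config.toml):
mkdir -p /etc/containerd/certs.d/docker.io
cat << EOF | tee /etc/containerd/certs.d/docker.io/hosts.toml
server = "https://registry-1.docker.io"

[host."https://d90275b2.rhub.skillixx.com"]
  capabilities = ["pull", "resolve"]
EOF
# then point containerd at the directory in config.toml;
# in 2.x the config_path key sits under the images plugin section
# (in 1.x it was [plugins."io.containerd.grpc.v1.cri".registry]):
#   [plugins.'io.containerd.cri.v1.images'.registry]
#     config_path = '/etc/containerd/certs.d'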
2.3 containerd Startup and Auto-start
Write the systemd unit file; the official one can be downloaded:
wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
cp containerd.service /etc/systemd/system/
Reload systemd and start containerd:
systemctl daemon-reexec
systemctl daemon-reload
systemctl enable containerd
systemctl start containerd
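Confirm the service is active and the client can talk to it:
systemctl is-active containerd
ctr version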
2.4 Installing runc Offline
Visit https://github.com/opencontainers/runc/releases to get the latest binary for your system.
Download the runc package:
wget https://github.com/opencontainers/runc/releases/download/v1.2.6/runc.amd64
cp runc.amd64 /usr/local/sbin/runc
chmod +x /usr/local/sbin/runc
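Confirm runc is installed and on PATH:
runc --version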
3. Deploying HAProxy and Keepalived
3.1 Installing HAProxy and keepalived (run on k8s-master01 and k8s-master02)
apt install -y haproxy keepalived
3.2 HAProxy Configuration and Startup
3.2.1 Configuration on k8s-master01
root@k8s-master01:~# cat /etc/haproxy/haproxy.cfg
global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s
defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s
frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor
frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master
backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server master01 192.168.75.120:6443 check
  server master02 192.168.75.121:6443 check
  server master03 192.168.75.122:6443 check
Start haproxy and enable it at boot:
systemctl start haproxy
systemctl enable haproxy
systemctl status haproxy
Verify haproxy is responding:
root@k8s-master01:/home/k8s# curl 192.168.75.120:33305/monitor
<html><body><h1>200 OK</h1>
Service ready.
</body></html>
3.2.2 Configuration on k8s-master02
root@k8s-master02:/home/k8s# cat /etc/haproxy/haproxy.cfg
global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s
defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s
frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor
frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master
backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server master01 192.168.75.120:6443 check
  server master02 192.168.75.121:6443 check
  server master03 192.168.75.122:6443 check
Start haproxy and enable it at boot:
systemctl start haproxy
systemctl enable haproxy
systemctl status haproxy
Verify haproxy is responding:
root@k8s-master02:/home/k8s# curl 192.168.75.121:33305/monitor
<html><body><h1>200 OK</h1>
Service ready.
</body></html>
3.3 Keepalived Configuration and Startup
3.3.1 Configuration on k8s-master01
root@k8s-master01:/home/k8s# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
  router_id LVS_DEVEL
  script_user root
  enable_script_security
}
vrrp_script chk_apiserver {
  script "/etc/keepalived/check_apiserver.sh"  # you must create this script yourself (see below)
  interval 5
  weight -5
  fall 2
  rise 1
}
vrrp_instance VI_1 {
  state MASTER
  interface ens33  # replace with the NIC name you are using
  mcast_src_ip 192.168.75.120  # this node's IP (the master node)
  virtual_router_id 51
  priority 101
  advert_int 2
  authentication {
    auth_type PASS
    auth_pass abc123
  }
  virtual_ipaddress {
    192.168.75.100  # the virtual IP (VIP)
  }
  track_script {
    chk_apiserver  # track the apiserver check script above
  }
}
root@k8s-master01:/home/k8s# cat /etc/keepalived/check_apiserver.sh
#!/bin/bash
err=0
for k in $(seq 1 3)
do
  check_code=$(pgrep haproxy)
  if [[ "$check_code" == "" ]]; then
    err=$(expr $err + 1)
    sleep 1
    continue
  else
    err=0
    break
  fi
done
if [[ "$err" != "0" ]]; then
  echo "systemctl stop keepalived"
  /usr/bin/systemctl stop keepalived
  exit 1
else
  exit 0
fi
chmod a+x /etc/keepalived/check_apiserver.sh
Start keepalived and enable it at boot:
systemctl start keepalived
systemctl enable keepalived
systemctl status keepalived
Verify keepalived is working; the VIP should appear on ens33:
root@k8s-master01:/home/k8s# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host noprefixroute
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UNKNOWN group default qlen 1000
link/ether 00:0c:29:f7:7b:54 brd ff:ff:ff:ff:ff:ff
altname enp2s1
inet 192.168.75.120/24 brd 192.168.75.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.75.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fef7:7b54/64 scope link
valid_lft forever preferred_lft forever
3.3.2 Configuration on k8s-master02
root@k8s-master02:/home/k8s# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
  router_id LVS_DEVEL
  script_user root
  enable_script_security
}
vrrp_script chk_apiserver {
  script "/etc/keepalived/check_apiserver.sh"  # you must create this script yourself (see below)
  interval 5
  weight -5
  fall 2
  rise 1
}
vrrp_instance VI_1 {
  state MASTER
  interface ens33  # replace with the NIC name you are using
  mcast_src_ip 192.168.75.121  # this node's IP
  virtual_router_id 51
  priority 99  # lower priority than master01, so this node acts as the backup
  advert_int 2
  authentication {
    auth_type PASS
    auth_pass abc123
  }
  virtual_ipaddress {
    192.168.75.100  # the virtual IP (VIP)
  }
  track_script {
    chk_apiserver  # track the apiserver check script above
  }
}
root@k8s-master02:/home/k8s# cat /etc/keepalived/check_apiserver.sh
#!/bin/bash
err=0
for k in $(seq 1 3)
do
  check_code=$(pgrep haproxy)
  if [[ "$check_code" == "" ]]; then
    err=$(expr $err + 1)
    sleep 1
    continue
  else
    err=0
    break
  fi
done
if [[ "$err" != "0" ]]; then
  echo "systemctl stop keepalived"
  /usr/bin/systemctl stop keepalived
  exit 1
else
  exit 0
fi
chmod a+x /etc/keepalived/check_apiserver.sh
Start keepalived and enable it at boot:
systemctl start keepalived
systemctl enable keepalived
systemctl status keepalived
Verify failover: shut down k8s-master01 and check that the VIP moves to k8s-master02:
root@k8s-master02:/home/k8s# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host noprefixroute
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UNKNOWN group default qlen 1000
link/ether 00:0c:29:96:f6:13 brd ff:ff:ff:ff:ff:ff
altname enp2s1
inet 192.168.75.121/24 brd 192.168.75.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.75.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe96:f613/64 scope link
valid_lft forever preferred_lft forever
3.4 Verifying the HA Setup
root@k8s-master01:/home/k8s# ss -anput |grep ":16443"
tcp LISTEN 0 2000 127.0.0.1:16443 0.0.0.0:* users:(("haproxy",pid=796,fd=8))
tcp LISTEN 0 2000 0.0.0.0:16443 0.0.0.0:* users:(("haproxy",pid=796,fd=7))
root@k8s-master02:/home/k8s# ss -anput |grep ":16443"
tcp LISTEN 0 2000 127.0.0.1:16443 0.0.0.0:* users:(("haproxy",pid=796,fd=8))
tcp LISTEN 0 2000 0.0.0.0:16443 0.0.0.0:* users:(("haproxy",pid=796,fd=7))
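The kube-apiservers are not running yet, so connections through 16443 will fail at the backends; at this stage a simpler end-to-end check is to hit the haproxy monitor endpoint through the VIP:
curl 192.168.75.100:33305/monitor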
4. Preparing the K8S Cluster Deployment
Run on all hosts.
4.1 Preparing the Kubernetes apt Repository
apt-get install -y apt-transport-https gpg
mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
apt update
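Optionally confirm which versions the repository offers before installing; the v1.30 channel should list 1.30.x builds:
apt-cache policy kubeadm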
4.2 Installing the K8S Cluster Packages and Configuring kubelet
4.2.1 Installing the packages
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
4.2.2 Configure kubelet
systemctl enable kubelet
4.3 Initializing the K8S Cluster (on k8s-master01)
4.3.1 Check the version
kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"30", GitVersion:"v1.30.11", GitCommit:"6a074997c960757de911780f250ecd9931917366", GitTreeState:"clean", BuildDate:"2025-03-11T19:56:25Z", GoVersion:"go1.23.6", Compiler:"gc", Platform:"linux/amd64"}
4.3.2 Generate the Deployment Configuration File
kubeadm config print init-defaults > kubeadm-config.yaml
Edit the generated file as follows:
cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # modified
  advertiseAddress: 192.168.75.120
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  # modified
  name: master01
  taints: null
---
apiServer:
  # added
  certSANs:
  - 192.168.75.100
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# added
controlPlaneEndpoint: 192.168.75.100:16443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
# modified
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.30.0
networking:
  dnsDomain: cluster.local
  # added
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
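Before initializing, the edited file can be sanity-checked; recent kubeadm releases ship a validation subcommand:
kubeadm config validate --config kubeadm-config.yaml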
4.3.3 Inspect and Pull the Images
List the default images:
# kubeadm config images list
I0411 10:44:32.312133 478681 version.go:256] remote version is much newer: v1.32.3; falling back to: stable-1.30
registry.k8s.io/kube-apiserver:v1.30.11
registry.k8s.io/kube-controller-manager:v1.30.11
registry.k8s.io/kube-scheduler:v1.30.11
registry.k8s.io/kube-proxy:v1.30.11
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.15-0
Check that the Aliyun mirror provides matching images:
#kubeadm config images list --image-repository registry.aliyuncs.com/google_containers
I0411 10:45:58.792490 479279 version.go:256] remote version is much newer: v1.32.3; falling back to: stable-1.30
registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.11
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.11
registry.aliyuncs.com/google_containers/kube-scheduler:v1.30.11
registry.aliyuncs.com/google_containers/kube-proxy:v1.30.11
registry.aliyuncs.com/google_containers/coredns:v1.11.3
registry.aliyuncs.com/google_containers/pause:3.9
registry.aliyuncs.com/google_containers/etcd:3.5.15-0
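The images can be pre-pulled on each master node so that kubeadm init does not stall on downloads:
kubeadm config images pull --config kubeadm-config.yaml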
4.3.4 Initialize the K8S Cluster with the Configuration File
kubeadm init --config kubeadm-config.yaml
Output:
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.75.100:16443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:05bc7f136c2a952faad32a243191228be4af27ee6b8459847d8997e4f1ff6705
5. Deploying the Calico Network Plugin
Calico documentation: https://docs.tigera.io/calico/latest/about
For convenience, download the manifests first and then create the resources:
wget https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml
kubectl create -f tigera-operator.yaml
wget https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/custom-resources.yaml
custom-resources.yaml needs its deployment settings modified:
cat custom-resources.yaml
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # modified
  registry: d90275b2.rhub.skillixx.com
  imagePath: calico
  # Configures Calico networking.
  calicoNetwork:
    ipPools:
    - name: default-ipv4-ippool
      blockSize: 26
      # modified
      cidr: 10.244.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
Apply the modified manifest, then check the rollout status:
kubectl create -f custom-resources.yaml
root@k8s-master01:~# kubectl get pods -n calico-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-b96c7c6c5-hbs4p 1/1 Running 0 20h
calico-node-lhxc8 1/1 Running 0 20h
calico-node-nt84d 1/1 Running 0 20h
calico-typha-95487788c-qt8zz 1/1 Running 0 20h
csi-node-driver-2j2cd 2/2 Running 0 20h
csi-node-driver-tjcb2 2/2 Running 0 20h
Check that the node is Ready:
root@k8s-master01:/home/k8s/cj# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master01 Ready control-plane 14m v1.30.11
6. Adding the Remaining Master and Worker Nodes
6.1 Adding k8s-master02 and k8s-master03 as control-plane nodes
Get the certificate key needed for joining control-plane nodes:
root@k8s-master01:/home/k8s# kubeadm init phase upload-certs --upload-certs
I0417 11:35:41.045453 40574 version.go:256] remote version is much newer: v1.32.3; falling back to: stable-1.30
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
6e934275ddf60c52d866f907f6ff67f1b0072b4347d3e2a9bc90cb7c17a60470
Run on k8s-master02 and k8s-master03 to join the cluster:
kubeadm join 192.168.75.100:16443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:05bc7f136c2a952faad32a243191228be4af27ee6b8459847d8997e4f1ff6705 --control-plane --certificate-key 6e934275ddf60c52d866f907f6ff67f1b0072b4347d3e2a9bc90cb7c17a60470
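Note that the bootstrap token expires after 24 hours (the ttl in kubeadm-config.yaml). If it has expired, a fresh join command can be generated on k8s-master01:
kubeadm token create --print-join-command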
6.2 Adding k8s-worker01 and k8s-worker02 to the cluster
Run on each worker node:
kubeadm join 192.168.75.100:16443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:05bc7f136c2a952faad32a243191228be4af27ee6b8459847d8997e4f1ff6705
6.3 Check node status
root@k8s-master01:/home/k8s# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master02 Ready control-plane 2d15h v1.30.11
k8s-master03 Ready control-plane 2d15h v1.30.11
k8s-worker01 Ready <none> 2d15h v1.30.11
k8s-worker02 Ready <none> 2d15h v1.30.11
master01 Ready control-plane 2d17h v1.30.11
7. Deploying an Nginx Application to Verify the Cluster
Create a test nginx.yaml file:
cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginxweb
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginxweb1
  template:
    metadata:
      labels:
        app: nginxweb1
    spec:
      containers:
      - name: nginxwebc
        image: registry.cn-hangzhou.aliyuncs.com/hxpdocker/nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginxweb-service
spec:
  type: NodePort
  externalTrafficPolicy: Cluster
  selector:
    app: nginxweb1
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30080
Create the resources and verify:
kubectl apply -f nginx.yaml
# curl localhost:30080
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
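The same deployment can be checked from the cluster side, and since externalTrafficPolicy is Cluster, the NodePort should answer on any node IP:
kubectl get pods -l app=nginxweb1 -o wide
kubectl get svc nginxweb-service
curl 192.168.75.123:30080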