
How to Deploy a Native Kubernetes 1.30.0 Cluster on Debian 12

1. Kubernetes Cluster Host Preparation

1.1 Host operating system

No.   OS and Version    Notes
1     debian-12.10.0

1.2 Host hardware requirements

CPU   Memory   Disk   Role            Hostname
2C    6G       120G   master          k8s-master01
2C    6G       120G   worker (node)   k8s-worker01
2C    6G       120G   worker (node)   k8s-worker02

1.3 Host configuration

1.3.1 Configure basic packages and apt sources

First, configure the Tsinghua HTTP mirror in /etc/apt/sources.list

# The deb-src entries are commented out by default to speed up apt update; uncomment them if needed
deb http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm main contrib non-free non-free-firmware
# deb-src http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm main contrib non-free non-free-firmware

deb http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-updates main contrib non-free non-free-firmware
# deb-src http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-updates main contrib non-free non-free-firmware

deb http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-backports main contrib non-free non-free-firmware
# deb-src http://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-backports main contrib non-free non-free-firmware

# The security update source below includes both the official source and mirror-site configurations; switch by adjusting the comments if needed
deb https://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware
# deb-src https://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware

Update the package index and install HTTPS transport tools

apt update
apt install -y apt-transport-https ca-certificates 

Switch the apt sources to HTTPS

# The deb-src entries are commented out by default to speed up apt update; uncomment them if needed
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm main contrib non-free non-free-firmware
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm main contrib non-free non-free-firmware

deb https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-updates main contrib non-free non-free-firmware
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-updates main contrib non-free non-free-firmware

deb https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-backports main contrib non-free non-free-firmware
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ bookworm-backports main contrib non-free non-free-firmware

# The security update source below includes both the official source and mirror-site configurations; switch by adjusting the comments if needed
deb https://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware
# deb-src https://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware

Install basic system packages

apt update
apt install -y wget sudo curl  

# Add /sbin and /usr/sbin to PATH
vim /etc/profile
export PATH=$PATH:/sbin:/usr/sbin
source /etc/profile

1.3.2 Hostname configuration

# k8s-master01 node
hostnamectl set-hostname k8s-master01
# k8s-worker01 node
hostnamectl set-hostname k8s-worker01
# k8s-worker02 node
hostnamectl set-hostname k8s-worker02

1.3.3 Host IP address configuration

# k8s-master01 node: 192.168.71.120
root@k8s-master01:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

source /etc/network/interfaces.d/*

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
#allow-hotplug ens33
auto ens33
iface ens33 inet static
address 192.168.71.120/24
netmask 255.255.255.0
broadcast 192.168.71.255
gateway 192.168.71.2
dns-nameservers 8.8.8.8
# k8s-worker01 node: 192.168.71.121
root@k8s-worker01:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

source /etc/network/interfaces.d/*

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
#allow-hotplug ens33
auto ens33
#iface ens33 inet dhcp
iface ens33 inet static
address 192.168.71.121/24
netmask 255.255.255.0
broadcast 192.168.71.255
gateway 192.168.71.2
dns-nameservers 8.8.8.8
# k8s-worker02 node: 192.168.71.122
root@k8s-worker02:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

source /etc/network/interfaces.d/*

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
#allow-hotplug ens33
auto ens33
#iface ens33 inet dhcp
iface ens33 inet static
address 192.168.71.122/24
netmask 255.255.255.0
broadcast 192.168.71.255
gateway 192.168.71.2
dns-nameservers 8.8.8.8
# On all nodes: restart networking to apply the changes
systemctl restart networking
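
To confirm the static configuration took effect, a quick check (interface and gateway values as configured above)

# Show the IPv4 address assigned to ens33
ip -4 addr show ens33
# Verify the default gateway is reachable
ping -c 2 192.168.71.2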

1.3.4 Hostname and IP address resolution

# On all nodes
cat >> /etc/hosts << EOF
192.168.71.120 k8s-master01
192.168.71.121 k8s-worker01
192.168.71.122 k8s-worker02
EOF

1.3.5 Time synchronization

Install the chrony service

apt install -y chrony

Configure k8s-master01 as the NTP server

vim /etc/chrony/chrony.conf
#pool 2.debian.pool.ntp.org   iburst
server ntp.aliyun.com   iburst 
allow 192.168.71.0/24
local stratum 10


systemctl restart chronyd

Configure all worker nodes as clients (comment out the default pool line and point at the master)

vim /etc/chrony/chrony.conf
server 192.168.71.120  iburst 

systemctl restart chronyd

Check client connections on k8s-master01

chronyc clients
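
As an additional check on the worker nodes, chronyc can list the configured time sources; the master (192.168.71.120) should appear and eventually be selected (marked with ^*)

chronyc sources -v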

1.3.6 Configure kernel forwarding and bridge filtering

Run on all hosts

Create the kernel module load configuration

cat << EOF |tee  /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

Load the modules manually for the current session

modprobe overlay
modprobe br_netfilter

Check the loaded modules

# lsmod |egrep "overlay"

overlay               163840  21

# lsmod |egrep "br_netfilter"

br_netfilter           36864  0
bridge                311296  1 br_netfilter

Add the bridge filtering and kernel forwarding configuration file

cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

Load the kernel parameters

sysctl --system
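
Optionally verify that the parameters are active; all three values should print as 1

sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables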

1.3.7 Install ipset and ipvsadm

Run on all hosts

Install ipset and ipvsadm

apt install -y ipset ipvsadm

Configure the IPVS modules to load at boot by adding the required modules

 cat << EOF | tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF

Create a script file to load the modules

cat << EOF | tee ipvs.sh
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

Run the script to load the modules

sh ipvs.sh
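
Optionally confirm the IPVS modules are loaded

# Should list ip_vs, ip_vs_rr, ip_vs_wrr, ip_vs_sh and nf_conntrack
lsmod | grep -e ip_vs -e nf_conntrack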

1.3.8 Disable the swap partition

After modifying /etc/fstab the system must be rebooted; if you do not want to reboot, swap can be turned off temporarily with swapoff -a

Permanently disabling the swap partition requires a reboot of the operating system

# vim /etc/fstab
……
#UUID=ee95aaa4-9245-499f-831c-d5a98b540340 none            swap    sw              0       0
Comment out the line containing the swap entry, as shown above.
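
If you prefer not to edit /etc/fstab by hand, a minimal non-interactive sketch (it assumes the swap entry is not already commented out)

# Turn swap off for the current session
swapoff -a
# Comment out any active swap entry so it stays off after reboot (keeps a .bak backup)
sed -ri.bak 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab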

2. Preparing the Containerd Container Runtime

Run on all nodes

2.1 Obtain the containerd release

Download the specified containerd version

wget  https://github.com/containerd/containerd/releases/download/v2.0.4/containerd-2.0.4-linux-amd64.tar.gz

Extract and install

tar -xvf  containerd-2.0.4-linux-amd64.tar.gz
mv bin/* /usr/local/bin/

2.2 Generate and modify the containerd configuration

Create the configuration directory

mkdir /etc/containerd

Generate the default configuration file

containerd config default > /etc/containerd/config.toml

Modify the configuration file

vim /etc/containerd/config.toml
# Around line 67, modify the sandbox image
sandbox = 'registry.aliyuncs.com/google_containers/pause:3.9'
# Around line 106, add
SystemdCgroup = true
# Append the registry mirrors at the end of the file
[plugins."io.containerd.grpc.v1.cri".registry]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
      endpoint = ["https://d90275b2.k8sgcr.skillixx.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8sreg.com"]
      endpoint = ["https://d90275b2.k8sreg.skillixx.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
      endpoint = ["https://d90275b2.quay.skillixx.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."mcr.microsoft.com"]
      endpoint = ["https://d90275b2.mcr.skillixx.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.elastic.co"]
      endpoint = ["https://d90275b2.elastic.skillixx.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
      endpoint = ["https://d90275b2.gcr.skillixx.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
      endpoint = ["https://d90275b2.rhub.skillixx.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."ghcr.ioo"]
      endpoint = ["https://d90275b2.ghcr.skillixx.com"]

2.3 Start containerd and enable it at boot

Create the systemd unit file; the official one can be downloaded

wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
cp containerd.service /etc/systemd/system/

Reload systemd and start the containerd service

systemctl daemon-reexec
systemctl daemon-reload
systemctl enable containerd
systemctl start containerd
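
Optionally confirm the runtime is up

# The service should report active (running)
systemctl status containerd --no-pager
# Print client and server versions over the containerd socket
ctr version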

2.4 Install runc offline

Visit https://github.com/opencontainers/runc/releases to download the latest binary for your system.

Download the runc binary

wget https://github.com/opencontainers/runc/releases/download/v1.2.6/runc.amd64
cp runc.amd64 /usr/local/sbin/runc
chmod +x /usr/local/sbin/runc
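
Optionally verify the binary

runc --version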

3. Kubernetes Cluster Deployment

Run on all nodes

3.1 Prepare the Kubernetes apt repository

apt-get install -y apt-transport-https   gpg
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
apt update 

3.2 Install the Kubernetes packages and configure kubelet

3.2.1 Install the cluster packages

apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl

3.2.2 Configure kubelet

systemctl enable kubelet

3.3 Initialize the cluster (k8s-master01)

3.3.1 Check the kubeadm version

kubeadm  version
kubeadm version: &version.Info{Major:"1", Minor:"30", GitVersion:"v1.30.11", GitCommit:"6a074997c960757de911780f250ecd9931917366", GitTreeState:"clean", BuildDate:"2025-03-11T19:56:25Z", GoVersion:"go1.23.6", Compiler:"gc", Platform:"linux/amd64"}

3.3.2 Generate the deployment configuration file

kubeadm  config print init-defaults  > kubeadm-config.yaml

Modify the generated initialization file

cat kubeadm-config.yaml


apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # modified
  advertiseAddress: 192.168.71.120
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: node
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
# modified
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.30.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
# added
  podSubnet: 10.244.0.0/16

scheduler: {}
---

# added
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd

3.3.3 Check and pull the images

List the default images

# kubeadm  config images list
I0411 10:44:32.312133  478681 version.go:256] remote version is much newer: v1.32.3; falling back to: stable-1.30
registry.k8s.io/kube-apiserver:v1.30.11
registry.k8s.io/kube-controller-manager:v1.30.11
registry.k8s.io/kube-scheduler:v1.30.11
registry.k8s.io/kube-proxy:v1.30.11
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.15-0

Check that the Aliyun mirror provides matching images

#kubeadm  config images list   --image-repository registry.aliyuncs.com/google_containers
I0411 10:45:58.792490  479279 version.go:256] remote version is much newer: v1.32.3; falling back to: stable-1.30
registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.11
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.11
registry.aliyuncs.com/google_containers/kube-scheduler:v1.30.11
registry.aliyuncs.com/google_containers/kube-proxy:v1.30.11
registry.aliyuncs.com/google_containers/coredns:v1.11.3
registry.aliyuncs.com/google_containers/pause:3.9
registry.aliyuncs.com/google_containers/etcd:3.5.15-0
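
Optionally pre-pull the images on k8s-master01 with the same configuration file, so the later kubeadm init does not have to wait for downloads

kubeadm config images pull --config kubeadm-config.yaml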

3.3.4 Initialize the cluster with the configuration file

kubeadm  init --config kubeadm-config.yaml

Output

[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.71.120:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:a14c93ac81648807db8e6513139da0180a618f26c6a2f43521ba5b1ca9be1a5c
root@k8s-master01:~#

3.4 Prepare the kubectl configuration file

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

3.5 Join the worker nodes to the cluster (all workers)

kubeadm join 192.168.71.120:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:a14c93ac81648807db8e6513139da0180a618f26c6a2f43521ba5b1ca9be1a5c

3.6 Verify that the cluster nodes are available

root@k8s-master01:~# kubectl  get nodes
NAME           STATUS     ROLES           AGE     VERSION
k8s-worker01   NotReady   <none>          2m14s   v1.30.11
k8s-worker02   NotReady   <none>          2m7s    v1.30.11
node           NotReady   control-plane   3m25s   v1.30.11

4. Deploying the Calico Network Plugin

Calico documentation: https://docs.tigera.io/calico/latest/about

For convenience, download the manifests first and then create the resources

wget   https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml
kubectl create -f tigera-operator.yaml
wget  https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/custom-resources.yaml

custom-resources.yaml needs to be modified before it is applied

cat custom-resources.yaml


# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # modified
  registry: d90275b2.rhub.skillixx.com
  imagePath: calico
  # Configures Calico networking.
  calicoNetwork:
    ipPools:
    - name: default-ipv4-ippool
      blockSize: 26
      # modified
      cidr: 10.244.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()

---

# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
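
After modifying the file, create the resources; the Tigera operator then rolls out Calico

kubectl create -f custom-resources.yaml
# Optional: watch until all pods in the calico-system namespace are Running
watch kubectl get pods -n calico-system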

Check the startup status

root@k8s-master01:~# kubectl get pods -n calico-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-b96c7c6c5-hbs4p   1/1     Running   0          20h
calico-node-lhxc8                         1/1     Running   0          20h
calico-node-nt84d                         1/1     Running   0          20h
calico-typha-95487788c-qt8zz              1/1     Running   0          20h
csi-node-driver-2j2cd                     2/2     Running   0          20h
csi-node-driver-tjcb2                     2/2     Running   0          20h

Check that the nodes are now Ready

root@k8s-master01:/home/k8s/cj# kubectl  get nodes
NAME           STATUS   ROLES           AGE   VERSION
k8s-worker01   Ready    <none>          12m   v1.30.11
k8s-worker02   Ready    <none>          12m   v1.30.11
node           Ready    control-plane   14m   v1.30.11

5. Deploying an Nginx Application to Verify the Cluster

Create a test nginx.yaml file

cat nginx.yaml


apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginxweb
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginxweb1
  template:
    metadata:
      labels:
        app: nginxweb1
    spec:
      containers:
        - name: nginxwebc
          image: registry.cn-hangzhou.aliyuncs.com/hxpdocker/nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80

---

apiVersion: v1
kind: Service
metadata:
  name: nginxweb-service
spec:
  type: NodePort
  externalTrafficPolicy: Cluster
  selector:
    app: nginxweb1
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
      nodePort: 30080

Create the resources and verify

kubectl apply -f nginx.yaml
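
Before the curl test, you can check that the Deployment and Service were created

# Two nginxweb pods should be Running, and the Service should expose NodePort 30080
kubectl get deploy nginxweb
kubectl get pods -l app=nginxweb1 -o wide
kubectl get svc nginxweb-service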


# curl  localhost:30080
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>