
Deploying a Kubernetes 1.28 Cluster (containerd runtime) on ubuntu-server

1. Host Preparation

1.1 Operating System

OS       Version             Notes
ubuntu   23.10-live-server   minimal install

1.2 Host Hardware Configuration

IP               CPU   Memory   Disk   Role     Hostname
192.168.75.200   4C    8G       500G   master   master
192.168.75.210   2C    4G       500G   node     node1
192.168.75.220   2C    4G       500G   node     node2

1.3 Host Configuration

1.3.1 Hostname Configuration

This k8s cluster deployment uses three hosts; give each one a distinct hostname.

# on the master node
hostnamectl set-hostname master
# on node1
hostnamectl set-hostname node1
# on node2
hostnamectl set-hostname node2

1.3.2 Host IP Address Configuration

# The master node's address is 192.168.75.200/24
root@master:~# vim /etc/netplan/50-cloud-init.yaml
root@master:~# cat /etc/netplan/50-cloud-init.yaml
network:
    ethernets:
        ens33:
            addresses:
            - 192.168.75.200/24
            nameservers:
                addresses:
                - 114.114.114.114
                search: []
            routes:
            -   to: default
                via: 192.168.75.254
    version: 2
# node1's address is 192.168.75.210/24
root@node1:~# vim /etc/netplan/50-cloud-init.yaml
root@node1:~# cat /etc/netplan/50-cloud-init.yaml
network:
    ethernets:
        ens33:
            addresses:
            - 192.168.75.210/24
            nameservers:
                addresses:
                - 114.114.114.114
                search: []
            routes:
            -   to: default
                via: 192.168.75.254
    version: 2
# node2's address is 192.168.75.220/24
root@node2:~# vim /etc/netplan/50-cloud-init.yaml
root@node2:~# cat /etc/netplan/50-cloud-init.yaml
network:
    ethernets:
        ens33:
            addresses:
            - 192.168.75.220/24
            nameservers:
                addresses:
                - 114.114.114.114
                search: []
            routes:
            -   to: default
                via: 192.168.75.254
    version: 2
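
The netplan edits above only take effect once they are applied; after editing, run this on each host (netplan try does the same with an automatic rollback if the new settings break connectivity):

# Apply the new network configuration
netplan apply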

1.3.3 Hostname and IP Address Resolution

All cluster hosts need this configuration.

cat >> /etc/hosts << EOF
192.168.75.200 master
192.168.75.210 node1
192.168.75.220 node2
EOF
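
A quick check that the names resolve (run from any host):

# each hostname should answer from its configured address
for h in master node1 node2; do ping -c 1 $h; done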

1.3.4 Chrony Time Synchronization

All cluster hosts need this configuration.

# Check the current time
date
Thu Apr 23 05:35:21 AM UTC 2024
# Change the time zone
timedatectl set-timezone Asia/Shanghai
root@master:~# date
Tue Apr 23 15:55:28 CST 2024
# Install the time synchronization service
apt install -y chrony

Run on the master node:

cat > /etc/chrony/chrony.conf << EOF
confdir /etc/chrony/conf.d
server ntp.aliyun.com iburst
allow 192.168.75.0/24
sourcedir /run/chrony-dhcp
sourcedir /etc/chrony/sources.d
keyfile /etc/chrony/chrony.keys
driftfile /var/lib/chrony/chrony.drift
ntsdumpdir /var/lib/chrony
logdir /var/log/chrony
maxupdateskew 100.0
rtcsync
makestep 1 3
leapsectz right/UTC
EOF

# Restart the chronyd service
systemctl restart chronyd
# Enable it at boot
systemctl enable chronyd

Run on node1 and node2:

cat > /etc/chrony/chrony.conf << EOF
confdir /etc/chrony/conf.d
server 192.168.75.200 iburst
sourcedir /run/chrony-dhcp
sourcedir /etc/chrony/sources.d
keyfile /etc/chrony/chrony.keys
driftfile /var/lib/chrony/chrony.drift
ntsdumpdir /var/lib/chrony
logdir /var/log/chrony
maxupdateskew 100.0
rtcsync
makestep 1 3
leapsectz right/UTC
EOF

# Restart the chronyd service
systemctl restart chronyd
# Enable it at boot
systemctl enable chronyd

Check the chrony time synchronization; run on the master node:

root@master:~# chronyc clients
Hostname                      NTP   Drop Int IntL Last     Cmd   Drop Int  Last
===============================================================================
node2                           5      0   6   -    12       0      0   -     -
node1                           4      0   6   -    46       0      0   -     -
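
The nodes can be checked from the other side as well (run on node1 or node2):

# 192.168.75.200 should be listed, marked ^* once it is the selected source
chronyc sources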

1.3.5 Enable Kernel Forwarding and Bridge Filtering

Required on all hosts.

# Load the required kernel modules automatically at boot
cat << EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load them manually now
modprobe overlay
modprobe br_netfilter
# Verify the modules are loaded
root@master:~# lsmod |egrep "overlay"
overlay               196608  21
root@master:~# lsmod |egrep "br_netfilter"
br_netfilter           32768  0
bridge                409600  1 br_netfilter
# Add the bridge-filtering and kernel-forwarding sysctl settings
cat << EOF | tee  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Apply all sysctl configuration files
sysctl --system
# Verify the settings took effect
root@master:~# sysctl  -a |grep ip_forward
net.ipv4.ip_forward = 1
net.ipv4.ip_forward_update_priority = 1
net.ipv4.ip_forward_use_pmtu = 0

1.3.6 Install ipset and ipvsadm

Required on all hosts.

# Install ipset and ipvsadm
apt install -y ipset ipvsadm
# Load the ipvs modules automatically at boot
cat << EOF | tee  /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
# Script to load the modules right away
cat << EOF | tee ipvs.sh
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Run the script
bash ipvs.sh
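
To confirm the modules actually loaded:

# all five modules from ipvs.sh should appear
lsmod | egrep "ip_vs|nf_conntrack"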

1.3.7 Disable the Swap Partition

The /etc/fstab edit takes effect after a reboot; to avoid rebooting, swap can also be turned off for the current session with swapoff -a.

# Permanently disable swap (takes effect after a reboot)
root@master:~# vim /etc/fstab
root@master:~# cat /etc/fstab
……
#/swap.img      none    swap    sw      0       0
# Verify swap is off
root@master:~# free  -m
               total        used        free      shared  buff/cache   available
Mem:            7896        1389        2275           3        4540        6506
Swap:              0           0           0
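
The fstab edit can also be scripted; a small sketch that comments out any active swap entry and disables swap for the current session (assuming swap is only configured in /etc/fstab):

# comment out every active swap line, then turn swap off immediately
sed -ri 's/^([^#].*\sswap\s)/#\1/' /etc/fstab
swapoff -a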

2. Setting Up the containerd Service (configure on all hosts)

2.1 Download and Install

# Visit https://github.com/containerd/containerd/releases in a browser to find
# the latest release matching your server's architecture; v1.7.15 is used here
# Download the full cri-containerd bundle: unlike the plain containerd-1.7.15
# tarball (binaries only), it also ships runc, the CNI plugins, and the
# containerd.service systemd unit, which the steps below rely on
wget https://github.com/containerd/containerd/releases/download/v1.7.15/cri-containerd-cni-1.7.15-linux-amd64.tar.gz
# Install by extracting over the root filesystem
tar -xf cri-containerd-cni-1.7.15-linux-amd64.tar.gz -C /

2.2 Configure the containerd Service

# Create the configuration directory
mkdir /etc/containerd
# Generate containerd's default configuration file
containerd config default > /etc/containerd/config.toml
# Change line 67 of the config file as follows
vim +67 /etc/containerd/config.toml
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
# Change line 139 of the config file
vim +139 /etc/containerd/config.toml
SystemdCgroup = true
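# Note: containerd only honors the certs.d directory configured below if
# config_path is set in config.toml (it is an empty string by default);
# a sed sketch, assuming the default generated layout:
sed -i 's|config_path = ""|config_path = "/etc/containerd/certs.d"|' /etc/containerd/config.toml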
mkdir /etc/containerd/certs.d/docker.io -pv
# Configure a pull-through mirror for docker.io
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://b9pmyelo.mirror.aliyuncs.com"]
  capabilities = ["pull", "resolve"]
EOF

2.3 Enable containerd at Boot

systemctl enable --now containerd
# Verify the version
containerd --version

2.4 Verify the containerd Service

# Pull an image to check that containerd can create and start containers
ctr i pull docker.io/library/nginx:alpine                      # a successful pull means the mirror config works
ctr images ls                                                  # list images
ctr c create --net-host docker.io/library/nginx:alpine nginx   # create a container
ctr task start -d nginx                                        # start the container; if it runs, containerd is fine
ctr containers ls                                              # list containers
ctr tasks kill -s SIGKILL nginx                                # kill the container
ctr containers rm nginx                                        # remove the container
ctr images remove docker.io/library/nginx:alpine               # remove the image

3. Deploying the k8s Cluster

3.1 Configure the k8s apt Repository and Install the Tools (run on all nodes)

# Install prerequisites
apt-get update && apt-get install -y apt-transport-https
# Download the repository signing key (/etc/apt/keyrings exists by default on
# Ubuntu 23.10; create it first on older releases)
mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.28/deb/Release.key |
    gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.28/deb/ /" |
    tee /etc/apt/sources.list.d/kubernetes.list
# Update the package index
apt-get update
# List the kubeadm versions available from apt; 1.28.0 is installed here
# (omitting the version would install the latest)
apt-cache madison kubeadm
# Install kubeadm, kubelet, and kubectl on all three nodes
apt install -y kubelet=1.28.0-1.1 kubeadm=1.28.0-1.1 kubectl=1.28.0-1.1
# Enable kubelet at boot (no need to start it yet; it cannot run until
# kubeadm init brings it up while initializing the master)
systemctl enable kubelet
# Pin the versions
apt-mark hold kubelet kubeadm kubectl
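
A quick sanity check that the pinned 1.28.0 tools are in place:

# all three should report v1.28.0
kubeadm version -o short
kubelet --version
kubectl version --client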

3.2 Deploy the k8s Cluster

Run on the master node:

# Generate the default init configuration file
kubeadm config print init-defaults > kubead-config.yaml
# Edit the kubead-config.yaml init file as follows
root@master:/home/ztxdztg# vim kubead-config.yaml
root@master:/home/ztxdztg# cat kubead-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.75.200  # modified
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock # modified
  imagePullPolicy: IfNotPresent
  name: master # modified
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers  # modified
kind: ClusterConfiguration
kubernetesVersion: 1.28.0 # modified; match your installed k8s version
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 # added
scheduler: {}
# everything below is newly added
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd
# Optionally pre-pull the images
kubeadm config images pull --config kubead-config.yaml
# Run the initialization
kubeadm init --config kubead-config.yaml
# The output is as follows:
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.75.200:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:fb0b8abf6780b7dff4f155edd2ed74a80a085081e25bb5469b906410a7a80398
# Following the prompt, run these commands:
root@master:/home/ztxdztg# mkdir -p $HOME/.kube
root@master:/home/ztxdztg# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
root@master:/home/ztxdztg# sudo chown $(id -u):$(id -g) $HOME/.kube/config
root@master:/home/ztxdztg# export KUBECONFIG=/etc/kubernetes/admin.conf
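
The export above only lasts for the current shell; to keep kubectl working for root across logins, the variable can be appended to the shell profile (a convenience step, not part of the kubeadm output):

echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> /root/.bashrc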

Run on node1 and node2:

kubeadm join 192.168.75.200:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:fb0b8abf6780b7dff4f155edd2ed74a80a085081e25bb5469b906410a7a80398
# Output from joining the cluster:
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
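
The token in the join command expires after 24 hours (the ttl set in kubead-config.yaml); to add a node later, generate a fresh join command on the master:

# prints a complete kubeadm join command with a new token
kubeadm token create --print-join-command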

Verify on the master node:

root@master:~# kubectl  get nodes
NAME     STATUS     ROLES           AGE   VERSION
master   NotReady   control-plane   74s   v1.28.0
node1    NotReady   <none>          20s   v1.28.0
node2    NotReady   <none>          15s   v1.28.0
root@master:~# kubectl  get pods -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-66f779496c-4dbm6         0/1     Running   0          6h44m
coredns-66f779496c-qngbw         0/1     Running   0          6h44m
etcd-master                      1/1     Running   0          6h44m
kube-apiserver-master            1/1     Running   0          6h44m
kube-controller-manager-master   1/1     Running   0          6h44m
kube-proxy-fjndq                 1/1     Running   0          6h43m
kube-proxy-jkftc                 1/1     Running   0          6h44m
kube-proxy-l79vf                 1/1     Running   0          6h43m
kube-scheduler-master            1/1     Running   0          6h44m

4. Deploying the Calico Network Plugin

Run on the master node.

Common network plugins include flannel, Calico, and Cilium; this deployment uses Calico.

Start from the Calico quickstart documentation: https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart

# Install the Tigera operator that the plugin requires
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/tigera-operator.yaml
# Our pod subnet differs from the manifest's default, so download the YAML and change the cidr (line 13) to match podSubnet
wget https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/custom-resources.yaml
root@master:~# vim +13 custom-resources.yaml
cidr: 10.244.0.0/16
# Apply the configuration
root@master:~# kubectl apply -f custom-resources.yaml
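# The operator rolls Calico out in the calico-system namespace; watching the
# pods there shows progress until they are all Running (Ctrl-C to exit)
watch kubectl get pods -n calico-system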
# Wait a few minutes, then check that the nodes are Ready
root@master:~# kubectl  get nodes
NAME     STATUS   ROLES           AGE     VERSION
master   Ready    control-plane   7h11m   v1.28.0
node1    Ready    <none>          7h10m   v1.28.0
node2    Ready    <none>          7h10m   v1.28.0

5. Deploying an httpd Application to Test Cluster Availability

# Create an httpd deployment as a test
kubectl create deployment httpd --image=httpd
# Expose it on port 80 (other ports may be blocked by a firewall)
kubectl expose deployment httpd --port=80 --type=NodePort
# Check that the pod is Running and note the NodePort of service/httpd
kubectl get pod,svc
NAME                         READY   STATUS    RESTARTS   AGE
pod/httpd-757fb56c8d-w42l5   1/1     Running   0          39s
NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
service/httpd        NodePort    10.109.29.1   <none>        80:32569/TCP   42s      # external port 32569
service/kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP        3h22m
 
# Browse to the service; the master's IP or any node's IP works, on port 32569
http://192.168.75.200:32569/
It works!                           # success
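
The same test works from the command line on any machine that can reach the nodes:

# should return httpd's default "It works!" page
curl http://192.168.75.200:32569/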