
Installing Kubernetes v1.30.11 on openEuler 24.03 (LTS-SP1)


Configure the yum repository

cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/repodata/repomd.xml.key
EOF
setenforce 0 # put SELinux into permissive mode
yum install -y kubelet kubeadm kubectl # install the Kubernetes components

The final set of installed packages can be checked as shown below.
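For example, a quick check assuming the RPM-based install above:

rpm -qa | grep -E 'kubelet|kubeadm|kubectl|kubernetes-cni|cri-tools'
kubeadm version -o short
kubectl version --client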

System parameter tuning

All nodes need this configuration.

  1. Increase file handle limits
cat>/etc/security/limits.d/kubernetes.conf<<EOF
*       soft    nproc   131072
*       hard    nproc   131072
*       soft    nofile  131072
*       hard    nofile  131072
root    soft    nproc   131072
root    hard    nproc   131072
root    soft    nofile  131072
root    hard    nofile  131072
EOF
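The new limits only apply to sessions started after the file is in place; from a fresh login shell they can be checked with:

ulimit -Sn   # soft nofile, expect 131072
ulimit -Hn   # hard nofile, expect 131072
ulimit -Su   # soft nproc, expect 131072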
  2. Add hosts entries (an append example follows after the listing)
cat /etc/hosts
# Loopback entries; do not change.
# For historical reasons, localhost precedes localhost.localdomain:
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.174.11 mynode1 mynode1.example.com
192.168.174.12 mynode2 mynode2.example.com

# See hosts(5) for proper format and other examples:
# 192.168.1.10 foo.example.org foo
# 192.168.1.13 bar.example.org bar
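The listing above shows the desired final state. One way to append the two node entries (the names and addresses are from this lab environment; adjust them to your own nodes):

cat >> /etc/hosts << EOF
192.168.174.11 mynode1 mynode1.example.com
192.168.174.12 mynode2 mynode2.example.com
EOF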
  3. Install dependency tools
yum install socat conntrack tar -y ## required (tar is also needed later to unpack the containerd archive)
yum install ebtables ipset ipvsadm libseccomp sysstat -y  # recommended
  4. Configure the IPVS kernel modules
# Load the modules via systemd-modules-load
:> /etc/modules-load.d/ipvs.conf
module=(
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
br_netfilter
)

for kernel_module in ${module[@]};do
    /sbin/modinfo -F filename $kernel_module |& grep -qv ERROR && echo $kernel_module >> /etc/modules-load.d/ipvs.conf || :
done
    
# Check with systemctl cat systemd-modules-load whether the unit has an [Install] section; if not, run:
cat>>/usr/lib/systemd/system/systemd-modules-load.service<<EOF
[Install]
WantedBy=multi-user.target
EOF

# Enable the module-loading service
systemctl daemon-reload
systemctl enable --now systemd-modules-load.service

# Confirm the kernel modules are loaded
lsmod | grep ip_vs
# Note: if nothing shows up here, the modules are not loaded yet; either reboot (we reboot once after the remaining prep work) or load them by hand as sketched below
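A reboot is not strictly required; the modules listed in ipvs.conf can also be loaded immediately. A minimal sketch, assuming the file generated above:

# load each module now instead of waiting for the next boot
for kernel_module in $(cat /etc/modules-load.d/ipvs.conf); do
    modprobe "$kernel_module"
done
lsmod | grep -E 'ip_vs|nf_conntrack|br_netfilter'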
  5. Disable swap
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
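To confirm that swap is really off (swapon prints nothing when no swap device is active):

swapon --show
free -h | grep -i swap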
  6. Kernel parameter tuning
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding=1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_synack_retries = 2
# Let iptables see bridged traffic (required by kube-proxy and most CNI plugins)
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.file-max = 52706963
fs.nr_open = 52706963
vm.overcommit_memory=1
vm.panic_on_oom=0
vm.swappiness = 0
EOF

## Verify
modprobe br_netfilter
modprobe nf_conntrack
lsmod | grep conntrack
sysctl -p /etc/sysctl.d/k8s.conf

## Verify the modules are loaded
lsmod | grep ip_vs
lsmod | grep nf_conntrack

Install containerd

Install it on all nodes.

  1. We use containerd 1.6.28 here, the same version used by Alibaba Cloud ACK.
# Download
wget https://github.com/containerd/containerd/releases/download/v1.6.28/cri-containerd-cni-1.6.28-linux-amd64.tar.gz
# Extract to /
tar zxvf cri-containerd-cni-1.6.28-linux-amd64.tar.gz -C /
  2. Generate the default containerd configuration file
mkdir /etc/containerd
containerd config default > /etc/containerd/config.toml
  3. Edit the configuration file (a scripted version of these edits is sketched after this block)
#1. Change the sandbox (pause) image address (note: Kubernetes v1.30 itself defaults to pause:3.9, so kubeadm may warn about the 3.6 tag)
sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"
#2. Use the systemd cgroup driver with runc; see the Kubernetes docs: https://kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/#containerd
SystemdCgroup = true
# 3. Optional: configure registry mirrors
[plugins."io.containerd.grpc.v1.cri".registry]
      config_path = "/etc/containerd/certs.d" # modified line

# Create the corresponding directories and files
mkdir /etc/containerd/certs.d
# Docker Hub mirror
mkdir -p /etc/containerd/certs.d/docker.io
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://dockerproxy.com"]
  capabilities = ["pull", "resolve", "push"]

[host."https://docker.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF
# For most setups the Docker Hub mirror above is enough
# registry.k8s.io mirror
mkdir -p /etc/containerd/certs.d/registry.k8s.io
tee /etc/containerd/certs.d/registry.k8s.io/hosts.toml << 'EOF'
server = "https://registry.k8s.io"

[host."https://k8s.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF
# gcr.io mirror
mkdir -p /etc/containerd/certs.d/gcr.io
tee /etc/containerd/certs.d/gcr.io/hosts.toml << 'EOF'
server = "https://gcr.io"

[host."https://gcr.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# ghcr.io mirror
mkdir -p /etc/containerd/certs.d/ghcr.io
tee /etc/containerd/certs.d/ghcr.io/hosts.toml << 'EOF'
server = "https://ghcr.io"

[host."https://ghcr.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF
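As mentioned above, the three manual edits to config.toml can also be scripted for repeatable provisioning. A sketch using sed, assuming an unmodified config.toml freshly generated by containerd config default (check the result before restarting containerd):

# 1. sandbox (pause) image
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"#' /etc/containerd/config.toml
# 2. systemd cgroup driver for runc
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml
# 3. point the registry mirror config at /etc/containerd/certs.d
sed -i 's#config_path = ""#config_path = "/etc/containerd/certs.d"#' /etc/containerd/config.toml
# confirm the three changes
grep -E 'sandbox_image|SystemdCgroup|config_path' /etc/containerd/config.toml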
  4. Start containerd
systemctl daemon-reload && systemctl enable containerd && systemctl start containerd
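Optionally verify that containerd is running and picked up the new configuration (crictl ships with the cri-containerd-cni archive; the endpoint flag can be dropped once /etc/crictl.yaml is configured):

systemctl is-active containerd
containerd --version
crictl --runtime-endpoint unix:///run/containerd/containerd.sock info | grep -i systemdcgroup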

Set up the Kubernetes cluster

  1. Start kubelet
systemctl daemon-reload && systemctl enable kubelet && systemctl start kubelet
  2. Initialize the cluster
#1. Pull the images first; run on all nodes
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers
#2. Initialize the cluster; run on the control-plane node
kubeadm init --kubernetes-version=v1.30.11 \
  --pod-network-cidr=10.244.0.0/16 \
  --service-cidr=10.96.0.0/12 \
  --apiserver-advertise-address=192.168.174.11 \
  --image-repository registry.aliyuncs.com/google_containers
#3. Join the worker node; run on the worker node (kubeadm init prints this command on success)
kubeadm join 192.168.174.11:6443 --token 96nql1.fd5c76hcxu7556ca \
  --discovery-token-ca-cert-hash sha256:724b8780c5823fe360f8740c1bd4f43ac018460dcffdedb4c1d478aa6285a4e1
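The token and hash above are specific to this cluster, and bootstrap tokens expire after 24 hours by default. If the join command is lost or expired, print a fresh one on the control-plane node:

kubeadm token create --print-join-command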
  3. Configure the kubectl config file
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
  4. Verify
kubectl get node
NAME                  STATUS   ROLES           AGE   VERSION
mynode1.example.com   Ready    control-plane   36m   v1.30.11
mynode2.example.com   Ready    <none>          35m   v1.30.11

Install the network plugin

  1. Download the flannel manifest
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
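Before applying, it is worth confirming that the Network value in the manifest matches the --pod-network-cidr used at kubeadm init:

grep -A 3 'net-conf.json' kube-flannel.yml   # expect "Network": "10.244.0.0/16"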
  2. Apply the manifest
# If you use a custom podCIDR (not 10.244.0.0/16), edit kube-flannel.yml first
$ kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
serviceaccount/flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
  3. Check that all pods are running
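For example, using the kubectl config from the earlier step, wait until the kube-flannel and coredns pods report Running:

kubectl get pods -A
kubectl get pods -n kube-flannel -o wide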