Set the root user password
# On Ubuntu, the root account is disabled by default (it has no password set);
# normal users gain temporary administrator privileges via sudo instead.
# To enable root or change its password, follow these steps:
sudo passwd root
[sudo] password for <current user>: <enter the current user's login password>
New password: <enter the new root password>
Retype new password: <re-enter the new root password>
Check the OS version
root@/root# lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 24.04.2 LTS
Release: 24.04
Codename: noble
root@/root# cat /etc/os-release
PRETTY_NAME="Ubuntu 24.04.2 LTS"
NAME="Ubuntu"
VERSION_ID="24.04"
VERSION="24.04.2 LTS (Noble Numbat)"
VERSION_CODENAME=noble
ID=ubuntu
ID_LIKE=debian
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
UBUNTU_CODENAME=noble
LOGO=ubuntu-logo
Check memory size
root@/root# free -h
               total        used        free      shared  buff/cache   available
Mem:           1.9Gi       581Mi       639Mi       1.3Mi       863Mi       1.3Gi
Swap:          1.9Gi          0B       1.9Gi
Check the IP address
# Master 192.168.186.134
# Worker1 192.168.186.136
root@/root# ifconfig -a
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.186.136 netmask 255.255.255.0 broadcast 192.168.186.255
inet6 fe80::20c:29ff:fe26:3193 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:26:31:93 txqueuelen 1000 (Ethernet)
RX packets 95965 bytes 139386414 (139.3 MB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 52264 bytes 7348712 (7.3 MB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Check Internet connectivity
# Internet access is required during installation
ping www.baidu.com
PING www.a.shifen.com (36.152.44.93) 56(84) bytes of data.
64 bytes from 36.152.44.93: icmp_seq=1 ttl=128 time=45.9 ms
...
# If the host cannot reach the Internet, use dhclient to re-acquire an IP via DHCP; if the IP has changed, assign one temporarily
dhclient ens33 # re-acquire an IP via DHCP
ifconfig ens33 <IP> # assign a temporary IP
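# Note that the dhclient/ifconfig changes above do not survive a reboot. On Ubuntu 24.04
# a persistent static IP is configured through netplan. A minimal sketch, assuming
# interface ens33 and a free file name /etc/netplan/50-static.yaml; the gateway and
# DNS addresses below are assumptions to adjust for your network:
cat <<EOF | sudo tee /etc/netplan/50-static.yaml
network:
  version: 2
  ethernets:
    ens33:
      dhcp4: false
      addresses: [192.168.186.136/24]
      routes:
        - to: default
          via: 192.168.186.2
      nameservers:
        addresses: [223.5.5.5]
EOF
sudo netplan apply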
Check the time synchronization service
# Use systemd-timesyncd (recommended; the default since Ubuntu 16.04)
# systemd-timesyncd is a lightweight NTP client built into systemd, suitable for most scenarios
root@/root# systemctl status systemd-timesyncd
systemd-timesyncd.service - Network Time Synchronization
Loaded: loaded (/usr/lib/systemd/system/systemd-timesyncd.service; enabled; preset: enabled)
Active: active (running) since Fri 2025-07-11 13:28:06 UTC; 24min ago
Docs: man:systemd-timesyncd.service(8)
Main PID: 654 (systemd-timesyn)
Status: "Contacted time server 185.125.190.58:123 (ntp.ubuntu.com)."
Tasks: 2 (limit: 2216)
Memory: 1.6M (peak: 2.1M)
CPU: 101ms
CGroup: /system.slice/systemd-timesyncd.service
└─654 /usr/lib/systemd/systemd-timesyncd
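# All cluster nodes should agree on time. To point timesyncd at a specific NTP
# server, edit /etc/systemd/timesyncd.conf and restart the service. A sketch,
# assuming the stock config still contains the commented "#NTP=" line and using
# ntp.aliyun.com as an example server:
sudo sed -i 's/^#\?NTP=.*/NTP=ntp.aliyun.com/' /etc/systemd/timesyncd.conf
sudo systemctl restart systemd-timesyncd
timedatectl timesync-status # confirm which server is in use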
Set the system time zone to Beijing time
root@/root# timedatectl
Local time: Fri 2025-07-11 13:54:28 UTC
Universal time: Fri 2025-07-11 13:54:28 UTC
RTC time: Fri 2025-07-11 13:54:28
Time zone: Etc/UTC (UTC, +0000)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
root@/root# timedatectl set-timezone Asia/Shanghai
root@/root# timedatectl status
Local time: Fri 2025-07-11 21:54:49 CST
Universal time: Fri 2025-07-11 13:54:49 UTC
RTC time: Fri 2025-07-11 13:54:49
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
root@/root# date
Fri Jul 11 09:54:57 PM CST 2025
root@/root# ls -l /etc/localtime
lrwxrwxrwx 1 root root 33 Jul 11 21:54 /etc/localtime -> /usr/share/zoneinfo/Asia/Shanghai
Disable the swap partition
root@/root# free -h
               total        used        free      shared  buff/cache   available
Mem:           1.9Gi       561Mi       659Mi       1.3Mi       864Mi       1.3Gi
Swap:          1.9Gi          0B       1.9Gi
root@/root# swapoff -a # disable temporarily (until reboot)
root@/root# free -h
               total        used        free      shared  buff/cache   available
Mem:           1.9Gi       565Mi       655Mi       1.3Mi       864Mi       1.3Gi
Swap:             0B          0B          0B
root@/root# cat /etc/fstab
# / was on /dev/ubuntu-vg/ubuntu-lv during curtin installation
/dev/disk/by-id/dm-uuid-LVM-w1q13fe0A2vVmEhqplcuQuqmPe4txlGa2iGB0Vx9aIavAsYskfZNCYVBi8Y3S1xq / ext4 defaults 0 1
# /boot was on /dev/sda2 during curtin installation
/dev/disk/by-uuid/2a5752bd-e568-48a2-b4cb-10e18504ad70 /boot ext4 defaults 0 1
/swap.img none swap sw 0 0
root@/root# sed -i '/swap/d' /etc/fstab # disable permanently (deletes every fstab line containing "swap")
root@/root# cat /etc/fstab
# / was on /dev/ubuntu-vg/ubuntu-lv during curtin installation
/dev/disk/by-id/dm-uuid-LVM-w1q13fe0A2vVmEhqplcuQuqmPe4txlGa2iGB0Vx9aIavAsYskfZNCYVBi8Y3S1xq / ext4 defaults 0 1
# /boot was on /dev/sda2 during curtin installation
/dev/disk/by-uuid/2a5752bd-e568-48a2-b4cb-10e18504ad70 /boot ext4 defaults 0 1
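# kubelet refuses to start while swap is active (unless failSwapOn is overridden),
# so it is worth a final check; swapon prints nothing when no swap is in use:
swapon --show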
Configure system parameters
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
# Set the required sysctl parameters; they persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl parameters without rebooting
sysctl --system
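# Verify that the modules are loaded and the parameters took effect:
lsmod | grep -E 'overlay|br_netfilter'
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward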
Install containerd
# Install containerd
apt-get update
apt-get install -y containerd
# Check the containerd service status
root@/root# systemctl status containerd
containerd.service - containerd container runtime
Loaded: loaded (/usr/lib/systemd/system/containerd.service; enabled; preset: enabled)
Active: active (running) since Fri 2025-07-11 22:12:00 CST; 13s ago
Docs: https://containerd.io
Process: 5108 ExecStartPre=/sbin/modprobe overlay (code=exited, status=0/SUCCESS)
Main PID: 5109 (containerd)
Tasks: 7
Memory: 13.3M (peak: 13.7M)
CPU: 106ms
CGroup: /system.slice/containerd.service
└─5109 /usr/bin/containerd
Jul 11 22:12:00 k8s-node1 containerd[5109]: time="2025-07-11T22:12:00.474179671+08:00" level=info msg="Start subscribing containerd event"
Jul 11 22:12:00 k8s-node1 containerd[5109]: time="2025-07-11T22:12:00.474493687+08:00" level=info msg="Start recovering state"
Jul 11 22:12:00 k8s-node1 containerd[5109]: time="2025-07-11T22:12:00.474666356+08:00" level=info msg="Start event monitor"
Jul 11 22:12:00 k8s-node1 containerd[5109]: time="2025-07-11T22:12:00.474741143+08:00" level=info msg="Start snapshots syncer"
Jul 11 22:12:00 k8s-node1 containerd[5109]: time="2025-07-11T22:12:00.474756491+08:00" level=info msg="Start cni network conf syncer for default"
Jul 11 22:12:00 k8s-node1 containerd[5109]: time="2025-07-11T22:12:00.474764448+08:00" level=info msg="Start streaming server"
Jul 11 22:12:00 k8s-node1 containerd[5109]: time="2025-07-11T22:12:00.475411398+08:00" level=info msg=serving... address=/run/containerd/containerd.sock.ttrpc
Jul 11 22:12:00 k8s-node1 containerd[5109]: time="2025-07-11T22:12:00.475459487+08:00" level=info msg=serving... address=/run/containerd/containerd.sock
Jul 11 22:12:00 k8s-node1 containerd[5109]: time="2025-07-11T22:12:00.475534537+08:00" level=info msg="containerd successfully booted in 0.036786s"
Jul 11 22:12:00 k8s-node1 systemd[1]: Started containerd.service - containerd container runtime.
# Check the containerd version
root@/root# containerd -version
containerd github.com/containerd/containerd 1.7.27
# Configure containerd to use the systemd cgroup driver
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
# The default sandbox (pause) image points at registry.k8s.io, which may be unreachable; check it, then switch to the Aliyun mirror
grep k8s /etc/containerd/config.toml
sandbox_image = "registry.k8s.io/pause:3.8"
sed -i 's|registry.k8s.io/pause:3.8|registry.aliyuncs.com/google_containers/pause:3.9|g' /etc/containerd/config.toml
# Restart containerd
systemctl restart containerd
# Enable start on boot
systemctl enable containerd
# Vacuum containerd journal logs
journalctl -u containerd --vacuum-time=1m # s=seconds, m=minutes, h=hours, d=days, w=weeks
# Image-mirror (registry acceleration) configuration for containerd 1.7+
# Restart containerd after changing this configuration
cat /etc/containerd/certs.d/docker.io/hosts.toml
server = "https://docker.io"
#[host."https://docker.mirrors.ustc.edu.cn"]
# capabilities = ["pull", "resolve"]
# override_path = true # 关键参数:强制覆盖路径
# skip_verify = true # 是否跳过 TLS 证书验证(建议保持 false,确保安全)
#[host."https://hub-mirror.c.163.com"]
# capabilities = ["pull", "resolve"]
# override_path = true # 关键参数:强制覆盖路径
# skip_verify = true # 是否跳过 TLS 证书验证(建议保持 false,确保安全)
#[host."https://registry.docker-cn.com"]
# capabilities = ["pull", "resolve"]
# override_path = true # 关键参数:强制覆盖路径
# skip_verify = true # 是否跳过 TLS 证书验证(建议保持 false,确保安全)
#[host."https://mirror.baidubce.com"]
# capabilities = ["pull", "resolve"]
# override_path = true # 关键参数:强制覆盖路径
# skip_verify = true # 是否跳过 TLS 证书验证(建议保持 false,确保安全)
#[host."https://mirror.ccs.tencentyun.com"]
#[host."https://docker.m.daocloud.io"]
[host."https://registry-1.docker.io"]
capabilities = ["pull", "resolve"]
override_path = true # 关键参数:强制覆盖路径
skip_verify = true # 是否跳过 TLS 证书验证(建议保持 false,确保安全)
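# hosts.toml under /etc/containerd/certs.d is only consulted when the CRI registry
# config_path is set. A sketch, assuming the default config.toml generated earlier
# (which contains config_path = "" under [plugins."io.containerd.grpc.v1.cri".registry]):
sed -i 's|config_path = ""|config_path = "/etc/containerd/certs.d"|' /etc/containerd/config.toml
systemctl restart containerd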
Add the Kubernetes apt repository
apt-get update
apt-get install -y apt-transport-https ca-certificates curl
mkdir -p /etc/apt/keyrings # make sure the keyrings directory exists
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
Install kubeadm, kubelet and kubectl
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl # prevent automatic upgrades
Initialize the cluster on the Master
# Force-reset the cluster (ignoring errors)
# kubeadm reset -f # run this to clean up when kubeadm init fails and you need to start over
# Config-file-based init: kubeadm init --config=kubeadm-config.yaml (see the sketch after the join command below)
k8s-master# kubeadm init \
--apiserver-advertise-address=192.168.186.134 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.30.14 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12
...
To start using your cluster, you need to run the following as a regular user:
# To run subsequent kubectl commands as a non-root user, perform the following steps as that user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
# As root, export this environment variable to use kubectl
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
# Run this command on each worker to join it to the master
kubeadm join 192.168.186.134:6443 --token r6myel.abwbnw69m6bcnh9l \
--discovery-token-ca-cert-hash sha256:491e724f3a3a8923e321082f71f5ee490f5cd45d79260ebc6eb52b8c8d1c0150
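# A config-file equivalent of the flag-based init above, as mentioned earlier — a
# minimal sketch using the kubeadm v1beta3 API (current for Kubernetes 1.30); the
# values simply mirror the flags and should be adapted to your environment:
cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.186.134
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.30.14
imageRepository: registry.aliyuncs.com/google_containers
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
EOF
# kubeadm init --config=kubeadm-config.yaml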
Install a CNI plugin on the Master (flannel as an example)
# Download the original manifest
wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
# Switch the image source to the Aliyun mirror
sed -i 's#ghcr.io/flannel-io/flannel#registry.cn-hangzhou.aliyuncs.com/google_containers/flannel#' kube-flannel.yml
sed -i 's#ghcr.io/flannel-io/flannel-cni-plugin#registry.cn-hangzhou.aliyuncs.com/google_containers/flannel-cni-plugin#' kube-flannel.yml
# Deploy the modified manifest
kubectl apply -f kube-flannel.yml
k8s-master# kubectl get pods -n kube-flannel
NAME READY STATUS RESTARTS AGE
kube-flannel-ds-lspjf 1/1 Running 35 (7m45s ago) 8h
# Deleting the pod triggers automatic recreation
k8s-master# kubectl delete pod -n kube-flannel kube-flannel-ds-lspjf
pod "kube-flannel-ds-lspjf" deleted
k8s-master# kubectl get pods -n kube-flannel
NAME READY STATUS RESTARTS AGE
kube-flannel-ds-shghk 1/1 Running 0 5s
k8s-master# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel kube-flannel-ds-shghk 1/1 Running 0 11s
kube-system coredns-cb4864fb5-9jnlp 1/1 Running 1 (158m ago) 4d
kube-system coredns-cb4864fb5-g8fp8 1/1 Running 1 (158m ago) 4d
kube-system etcd-k8s-master 1/1 Running 13 (158m ago) 4d
kube-system kube-apiserver-k8s-master 1/1 Running 45 (158m ago) 4d
kube-system kube-controller-manager-k8s-master 1/1 Running 35 (158m ago) 4d
kube-system kube-proxy-8s8kt 1/1 Running 16 (158m ago) 4d
kube-system kube-scheduler-k8s-master 1/1 Running 47 (158m ago) 4d
Verify certificates
k8s-master# kubeadm certs check-expiration
[check-expiration] Reading configuration from the cluster...
[check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
CERTIFICATE EXPIRES RESIDUAL TIME CERTIFICATE AUTHORITY EXTERNALLY MANAGED
admin.conf Jul 07, 2026 14:09 UTC 360d ca no
apiserver Jul 07, 2026 14:09 UTC 360d ca no
apiserver-etcd-client Jul 07, 2026 14:09 UTC 360d etcd-ca no
apiserver-kubelet-client Jul 07, 2026 14:09 UTC 360d ca no
controller-manager.conf Jul 07, 2026 14:09 UTC 360d ca no
etcd-healthcheck-client Jul 07, 2026 14:09 UTC 360d etcd-ca no
etcd-peer Jul 07, 2026 14:09 UTC 360d etcd-ca no
etcd-server Jul 07, 2026 14:09 UTC 360d etcd-ca no
front-proxy-client Jul 07, 2026 14:09 UTC 360d front-proxy-ca no
scheduler.conf Jul 07, 2026 14:09 UTC 360d ca no
super-admin.conf Jul 07, 2026 14:09 UTC 360d ca no
CERTIFICATE AUTHORITY EXPIRES RESIDUAL TIME EXTERNALLY MANAGED
ca Jul 05, 2035 14:09 UTC 9y no
etcd-ca Jul 05, 2035 14:09 UTC 9y no
front-proxy-ca Jul 05, 2035 14:09 UTC 9y no
Get the Master node token and ca-cert-hash
k8s-master# kubeadm token create
twxu1v.icc5cbmb8cktyu5f
k8s-master# kubeadm token list
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
twxu1v.icc5cbmb8cktyu5f 23h 2025-07-12T14:46:19Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
k8s-master# kubeadm token create --print-join-command
kubeadm join 192.168.186.134:6443 --token wia1ey.o5qtq5wvpa3t4ulx --discovery-token-ca-cert-hash sha256:950c5f7fb1b53205393394897e4d5d6b68f50685438bdbd2e1b1c7a7d171e78d
k8s-master# kubeadm token list
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
twxu1v.icc5cbmb8cktyu5f 23h 2025-07-12T14:46:19Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
wia1ey.o5qtq5wvpa3t4ulx 23h 2025-07-12T14:46:52Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
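# The --discovery-token-ca-cert-hash can also be computed by hand; it is the
# SHA-256 of the cluster CA's public key:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'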
Join a Worker
# Join a worker (run on the worker node)
# The --dry-run flag simulates the worker join without actually executing it
kubeadm join 192.168.186.134:6443 \
--token wia1ey.o5qtq5wvpa3t4ulx \
--discovery-token-ca-cert-hash sha256:950c5f7fb1b53205393394897e4d5d6b68f50685438bdbd2e1b1c7a7d171e78d
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 1.008871303s
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
k8s-master# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane 4d v1.30.14
k8s-node1 NotReady <none> 3m27s v1.30.14
# Inspect node details
k8s-master# kubectl describe node k8s-node1
# Check pod status across nodes
k8s-master# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-cb4864fb5-9jnlp 1/1 Running 1 (172m ago) 4d 10.244.0.4 k8s-master <none> <none>
coredns-cb4864fb5-g8fp8 1/1 Running 1 (172m ago) 4d 10.244.0.5 k8s-master <none> <none>
etcd-k8s-master 1/1 Running 13 (172m ago) 4d 192.168.186.134 k8s-master <none> <none>
kube-apiserver-k8s-master 1/1 Running 45 (172m ago) 4d 192.168.186.134 k8s-master <none> <none>
kube-controller-manager-k8s-master 1/1 Running 35 (172m ago) 4d 192.168.186.134 k8s-master <none> <none>
kube-proxy-79vql 1/1 Running 0 4m57s 192.168.186.136 k8s-node1 <none> <none>
kube-proxy-8s8kt 1/1 Running 16 (172m ago) 4d 192.168.186.134 k8s-master <none> <none>
kube-scheduler-k8s-master 1/1 Running 47 (172m ago) 4d 192.168.186.134 k8s-master <none> <none>
k8s-master# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane 4d v1.30.14
k8s-node1 Ready <none> 7m32s v1.30.14
Cluster operations
# ctr: container CLI shipped with containerd, similar to docker
# crictl: CRI-level container CLI used by Kubernetes
# Check versions
kubectl version
Client Version: v1.30.14
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
Server Version: v1.30.14
# Check node status
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane 4d1h v1.30.14
k8s-node1 Ready <none> 36m v1.30.14
# Check control-plane component status
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-cb4864fb5-9jnlp 1/1 Running 1 (3h27m ago) 4d1h
# List all pods
kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel kube-flannel-ds-bh4m2 1/1 Running 0 37m
# List all pods (more detail)
kubectl get pods -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-flannel kube-flannel-ds-bh4m2 1/1 Running 0 37m 192.168.186.136 k8s-node1 <none> <none>
# Describe a resource
kubectl describe -n kube-system pod etcd-k8s-master
# Open a shell inside a container
kubectl exec -it <pod-name> -- bash
View logs
kubectl logs <pod-name> # view logs once
kubectl logs -f <pod-name> # follow logs in real time
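# Other commonly useful variants (pod and container names are placeholders):
kubectl logs <pod-name> -c <container-name> # pick a container in a multi-container pod
kubectl logs --previous <pod-name> # logs from the previous (crashed) instance
kubectl logs --tail=100 --since=1h <pod-name> # last 100 lines from the past hour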
Install the Portainer web UI
kubectl apply -f https://raw.githubusercontent.com/portainer/k8s/master/deploy/manifests/portainer/portainer.yaml
# This command blocks the current terminal; closing the window terminates the port forward
kubectl port-forward -n portainer svc/portainer 9000:9000
# Open http://localhost:9000 in a browser
Visit http://<NodeIP>:30777 or http://localhost:9000
Set the administrator password
Connect the Kubernetes cluster (Portainer auto-detects the current cluster)
Start managing Kubernetes resources
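# The port forward can be kept alive in the background; --address 0.0.0.0 makes it
# reachable from other hosts (a convenience, not a Portainer requirement):
nohup kubectl port-forward -n portainer svc/portainer 9000:9000 --address 0.0.0.0 >/dev/null 2>&1 &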
# Pull the Portainer image manually
ctr images pull docker.m.daocloud.io/portainer/portainer-ce:latest
# Export the image from containerd
ctr images export portainer.tar docker.m.daocloud.io/portainer/portainer-ce:latest
# Import the image into containerd (into the k8s.io namespace)
# Make sure the image is imported on every k8s node
ctr -n k8s.io images import portainer.tar
# List images in containerd's k8s.io namespace
ctr -n k8s.io images list -q
docker.m.daocloud.io/portainer/portainer-ce:latest
...
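# Kubernetes pulls images through the CRI, so crictl should see the imported image
# too (assuming /etc/crictl.yaml points runtime-endpoint at unix:///run/containerd/containerd.sock):
crictl images | grep portainer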
# List deployments in all namespaces
kubectl get deploy -A
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
kube-system coredns 2/2 2 2 5d13h
portainer portainer 0/1 1 0 23h
# Edit the image and imagePullPolicy of the portainer deployment
kubectl edit deploy -n portainer portainer
# Change the following
# imagePullPolicy: Always, IfNotPresent, or Never
image: docker.m.daocloud.io/portainer/portainer-ce:latest
imagePullPolicy: Never
# Delete the pod to trigger automatic recreation
kubectl delete -n portainer pod portainer-75b4c9bd6d-bxchw
# Visit http://192.168.186.136:30777/
# If the following error appears, delete the portainer pod to trigger a rebuild
# First-time password setup: admin/test12345678
New Portainer installation
Your Portainer instance timed out for security purposes. To re-enable your Portainer instance, you will need to restart Portainer.
For further information, view our documentation.
# Inspect the portainer service YAML (to find the NodePort)
kubectl get service -n portainer portainer -o yaml
# Access Portainer => http://<NodeIP>:30777
http://192.168.186.136:30777/