#Set the hostname
root@u-k8s-master-171:~# hostnamectl set-hostname u-k8s-master-171
#Set up time synchronization: install chrony, point it at Aliyun's NTP server, then restart the service
root@u-k8s-master-171:~# apt update && apt -y install chrony
root@u-k8s-master-171:~# vim /etc/chrony/chrony.conf
# ...(default chrony.conf comments about the NTP Pool Project omitted)...
###Add Aliyun's NTP server address
server ntp1.aliyun.com iburst
###Disable the default server addresses below
#pool ntp.ubuntu.com iburst maxsources 4
#pool 0.ubuntu.pool.ntp.org iburst maxsources 1
#pool 1.ubuntu.pool.ntp.org iburst maxsources 1
#pool 2.ubuntu.pool.ntp.org iburst maxsources 2
root@u-k8s-master-171:~# systemctl restart chrony
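#(optional check, not in the original run) verify chrony can reach and has selected the Aliyun server:
root@u-k8s-master-171:~# chronyc sources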
#Configure /etc/hosts
root@u-k8s-master-171:~# cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 ubuntu
172.29.7.171 u-k8s-master-171
172.29.7.171 kubeapi.jiutingqiu.com
172.29.7.172 u-k8s-node1-172
172.29.7.173 u-k8s-node2-173
172.29.7.174 u-k8s-node3-174
#Disable swap
root@u-k8s-master-171:~# swapoff -a
#Check whether any swap units are still active
root@u-k8s-master-171:~# systemctl --type swap
UNIT LOAD ACTIVE SUB DESCRIPTION
0 loaded units listed. Pass --all to see loaded but inactive units, too.
To show all installed unit files use 'systemctl list-unit-files'.
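#Note: swapoff -a only lasts until reboot. A common way to keep swap off permanently is to also comment out the swap entry in /etc/fstab, e.g.:
root@u-k8s-master-171:~# sed -ri 's/^([^#].*\sswap\s)/#\1/' /etc/fstab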
#Stop the firewall
root@u-k8s-master-171:~# systemctl stop ufw
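#Note: stop alone does not survive a reboot; to keep ufw off permanently (not in the original run):
root@u-k8s-master-171:~# systemctl disable --now ufw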
#Enable IP forwarding
root@u-k8s-master-171:~# echo 1 > /proc/sys/net/ipv4/ip_forward
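#Note: writing to /proc is not persistent. A common way to make IP forwarding, and the bridge
#netfilter settings that kubeadm's preflight checks expect (see the error record below), survive
#a reboot is a sysctl drop-in:
root@u-k8s-master-171:~# modprobe br_netfilter
root@u-k8s-master-171:~# cat <<EOF >/etc/sysctl.d/k8s.conf
> net.ipv4.ip_forward = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.bridge.bridge-nf-call-ip6tables = 1
> EOF
root@u-k8s-master-171:~# sysctl --system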
2. Deploy the container runtime: Containerd
#Use Aliyun's Docker CE repository for containerd
root@u-k8s-master-171:~# apt -y install apt-transport-https ca-certificates curl software-properties-common
root@u-k8s-master-171:~# curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -
Warning: apt-key is deprecated. Manage keyring files in trusted.gpg.d instead (see apt-key(8)).
OK
root@u-k8s-master-171:~# add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
Repository: 'deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu jammy stable'
Description:
Archive for codename: jammy components: stable
More info: http://mirrors.aliyun.com/docker-ce/linux/ubuntu
Adding repository.
Press [ENTER] to continue or Ctrl-c to cancel.
Adding deb entry to /etc/apt/sources.list.d/archive_uri-http_mirrors_aliyun_com_docker-ce_linux_ubuntu-jammy.list
Adding disabled deb-src entry to /etc/apt/sources.list.d/archive_uri-http_mirrors_aliyun_com_docker-ce_linux_ubuntu-jammy.list
Get:1 http://mirrors.aliyun.com/docker-ce/linux/ubuntu jammy InRelease [48.8 kB]
Get:2 http://mirrors.aliyun.com/docker-ce/linux/ubuntu jammy/stable amd64 Packages [26.8 kB]
Hit:3 https://mirrors.tuna.tsinghua.edu.cn/ubuntu jammy InRelease
Hit:4 https://mirrors.tuna.tsinghua.edu.cn/ubuntu jammy-updates InRelease
Hit:5 https://mirrors.tuna.tsinghua.edu.cn/ubuntu jammy-backports InRelease
Hit:6 https://mirrors.tuna.tsinghua.edu.cn/ubuntu jammy-security InRelease
Fetched 75.6 kB in 1s (59.4 kB/s)
Reading package lists... Done
W: http://mirrors.aliyun.com/docker-ce/linux/ubuntu/dists/jammy/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details.
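#Note: since apt-key is deprecated, an alternative (not used in the original run) is to store the
#key under /etc/apt/keyrings and reference it with signed-by:
root@u-k8s-master-171:~# mkdir -p /etc/apt/keyrings
root@u-k8s-master-171:~# curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
root@u-k8s-master-171:~# echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] http://mirrors.aliyun.com/docker-ce/linux/ubuntu jammy stable" > /etc/apt/sources.list.d/docker.list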
root@u-k8s-master-171:~# apt update && apt -y install containerd.io
#Generate the default configuration file
root@u-k8s-master-171:~# containerd config default > /etc/containerd/config.toml
#Edit the configuration file
root@u-k8s-master-171:~# vim /etc/containerd/config.toml
#Find the runc runtime section below and set SystemdCgroup under runc.options
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
        SystemdCgroup = true
#also change the pause image to the Aliyun mirror and add registry mirrors:
[plugins."io.containerd.grpc.v1.cri"]
  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
  [plugins."io.containerd.grpc.v1.cri".registry]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
        endpoint = ["https://docker.mirrors.ustc.edu.cn", "https://registry.docker-cn.com"]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"]
        endpoint = ["https://registry.aliyuncs.com/google_containers"]
#Restart for the changes to take effect
root@u-k8s-master-171:~# systemctl daemon-reload
root@u-k8s-master-171:~# systemctl restart containerd
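#(optional check, not in the original run) confirm the edited values are active:
root@u-k8s-master-171:~# containerd config dump | grep -E 'SystemdCgroup|sandbox_image'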
#Install kubeadm, kubelet, and kubectl from the Aliyun Kubernetes repository
root@u-k8s-master-171:~# apt update && apt -y install apt-transport-https curl
root@u-k8s-master-171:~# curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
Warning: apt-key is deprecated. Manage keyring files in trusted.gpg.d instead (see apt-key(8)).
OK
root@u-k8s-master-171:~# cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
>
> deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
>
> EOF
root@u-k8s-master-171:~# apt update
root@u-k8s-master-171:~# apt install -y kubelet kubeadm kubectl
root@u-k8s-master-171:~# systemctl enable --now kubelet
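#(optional, not in the original run) pin the versions so a routine apt upgrade cannot move the cluster components:
root@u-k8s-master-171:~# apt-mark hold kubelet kubeadm kubectl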
5. Initialize the Master node
#List the required images
root@u-k8s-master-171:~# kubeadm config images list
I0302 11:54:47.492964 1415 version.go:256] remote version is much newer: v1.29.2; falling back to: stable-1.28
registry.k8s.io/kube-apiserver:v1.28.7
registry.k8s.io/kube-controller-manager:v1.28.7
registry.k8s.io/kube-scheduler:v1.28.7
registry.k8s.io/kube-proxy:v1.28.7
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.9-0
registry.k8s.io/coredns/coredns:v1.10.1
#Pull the images from the Aliyun mirror instead
root@u-k8s-master-171:~# kubeadm config images pull --image-repository=registry.aliyuncs.com/google_containers
#Run the initialization
root@u-k8s-master-171:~# kubeadm init \
--control-plane-endpoint="kubeapi.jiutingqiu.com" \
--kubernetes-version=v1.28.7 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--token-ttl=0 \
--cri-socket unix:///run/containerd/containerd.sock \
--upload-certs \
--image-repository=registry.aliyuncs.com/google_containers
Your Kubernetes control-plane has initialized successfully!
#Step 1
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
#Step 2
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join kubeapi.jiutingqiu.com:6443 --token 3e1brh.54k8ua6bxjo8cyz7 \
--discovery-token-ca-cert-hash sha256:47c47b730dc995c5f4091c7a0e590a62dd0892d1f01ed880440dcac9c0002880 \
--control-plane --certificate-key 7ceb5726cc26fde246831371c4c7dd65938e7406df4d9b6fb9610f39b1c3cb3a
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
#Step 3
kubeadm join kubeapi.jiutingqiu.com:6443 --token 3e1brh.54k8ua6bxjo8cyz7 \
--discovery-token-ca-cert-hash sha256:47c47b730dc995c5f4091c7a0e590a62dd0892d1f01ed880440dcac9c0002880
--image-repository: the registry to pull control-plane images from; defaults to registry.k8s.io.
--kubernetes-version: the version of the Kubernetes components; it must match the version of the installed kubelet package.
--control-plane-endpoint: a fixed access endpoint for the control plane, either an IP address or a DNS name; it becomes the API Server address in the kubeconfig files of cluster administrators and cluster components. It can be omitted in single-control-plane deployments.
--pod-network-cidr: the address range for the Pod network, in CIDR notation; the Flannel plugin defaults to 10.244.0.0/16, Project Calico to 192.168.0.0/16.
--service-cidr: the address range for Services, in CIDR notation; defaults to 10.96.0.0/12. Usually only network plugins like Flannel need it specified manually.
--apiserver-advertise-address: the IP address the apiserver advertises to the other components; generally the Master node's IP on the cluster-internal network. 0.0.0.0 means all available addresses on the node.
--token-ttl: the lifetime of the shared token; defaults to 24 hours, and 0 means it never expires. To limit the damage from a leaked token (e.g. through insecure storage), setting an expiry is recommended. If the token has expired and you later want to add more nodes, you can recreate the token and regenerate the join command with the command shown below.
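#The token-recreation command referenced in the note above is the standard kubeadm one:
root@u-k8s-master-171:~# kubeadm token create --print-join-command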
#Same as steps 1, 2, and 3 above
root@u-k8s-master-171:~# mkdir .kube
root@u-k8s-master-171:~# cp -i /etc/kubernetes/admin.conf .kube/config
#Deploy the Flannel network plugin
root@u-k8s-master-171:~# kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
namespace/kube-flannel created
serviceaccount/flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
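#(optional check, not in the original run) watch the flannel DaemonSet pods until they are Running:
root@u-k8s-master-171:~# kubectl get pods -n kube-flannel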
Error record
[init] Using Kubernetes version: v1.28.7
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
#Fix: load the br_netfilter kernel module, then rerun kubeadm init
root@u-k8s-master-171:~# modprobe br_netfilter
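#Note: modprobe does not persist across reboots; a common way to load the module at boot:
root@u-k8s-master-171:~# echo br_netfilter > /etc/modules-load.d/k8s.conf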
6. Join the worker nodes to the cluster
root@u-k8s-node1-172:~# kubeadm join kubeapi.jiutingqiu.com:6443 --token 3e1brh.54k8ua6bxjo8cyz7 \
> --discovery-token-ca-cert-hash sha256:47c47b730dc995c5f4091c7a0e590a62dd0892d1f01ed880440dcac9c0002880
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
root@u-k8s-node2-173:~# kubeadm join kubeapi.jiutingqiu.com:6443 --token 3e1brh.54k8ua6bxjo8cyz7 \
> --discovery-token-ca-cert-hash sha256:47c47b730dc995c5f4091c7a0e590a62dd0892d1f01ed880440dcac9c0002880
#(output identical to node1 above)
root@u-k8s-node3-174:~# kubeadm join kubeapi.jiutingqiu.com:6443 --token 3e1brh.54k8ua6bxjo8cyz7 \
> --discovery-token-ca-cert-hash sha256:47c47b730dc995c5f4091c7a0e590a62dd0892d1f01ed880440dcac9c0002880
#(output identical to node1 above)
7. Wait for the Flannel network plugin to finish deploying
root@u-k8s-master-171:~# kubectl get nodes
NAME               STATUS     ROLES           AGE     VERSION
u-k8s-master-171   NotReady   control-plane   2m45s   v1.28.2
u-k8s-node1-172    NotReady   <none>          70s     v1.28.2
u-k8s-node2-173    NotReady   <none>          64s     v1.28.2
u-k8s-node3-174    NotReady   <none>          59s     v1.28.2
root@u-k8s-master-171:~# kubectl get nodes
NAME               STATUS   ROLES           AGE     VERSION
u-k8s-master-171   Ready    control-plane   9m      v1.28.2
u-k8s-node1-172    Ready    <none>          7m25s   v1.28.2
u-k8s-node2-173    Ready    <none>          7m19s   v1.28.2
u-k8s-node3-174    Ready    <none>          7m14s   v1.28.2
8. Deploy OpenELB
root@u-k8s-master-171:~# kubectl apply -f https://raw.githubusercontent.com/openelb/openelb/master/deploy/openelb.yaml
root@u-k8s-master-171:~# kubectl get pods -n openelb-system
NAME                              READY   STATUS              RESTARTS   AGE
openelb-admission-create-nxq2g    0/1     ImagePullBackOff    0          41s
openelb-admission-patch-wh2g5     0/1     ImagePullBackOff    0          41s
openelb-manager-99b49789c-t4pn2   0/1     ContainerCreating   0          41s
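#ImagePullBackOff usually means the node cannot reach the image registry; inspect the events to confirm, e.g.:
root@u-k8s-master-171:~# kubectl -n openelb-system get events --sort-by=.lastTimestamp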