13. Kubernetes - Cluster Installation
Published: 2022-12-04 22:32:15  Editor: 雪饮
Common Part (all nodes)
Step 1: Install Docker
This step was left unfinished in the previous article; the following operations are still needed.
# Create the /etc/docker directory
mkdir /etc/docker
# Configure the Docker daemon
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# Reload systemd, restart Docker, and enable it at boot
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
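To verify that Docker picked up the systemd cgroup driver configured above (the kubelet expects it to match), a quick check:
# Should print: Cgroup Driver: systemd
docker info | grep -i 'cgroup driver'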
Step 2: Install kubeadm (on master and worker nodes)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service
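To confirm the pinned 1.15.1 packages were actually installed, something like:
# All three should report v1.15.1
kubeadm version -o short
kubelet --version
kubectl version --client --short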
Step 3: Load the kubeadm component images
Extract kubeadm-basic.images.tar.gz under /root/:
tar -zxvf kubeadm-basic.images.tar.gz
Write the script load-images.sh:
#!/bin/bash
# List the saved image tarballs into a temp file
ls /root/kubeadm-basic.images > /tmp/image-list.txt
cd /root/kubeadm-basic.images
# Load each image archive into the local Docker image store
for i in $( cat /tmp/image-list.txt )
do
    docker load -i "$i"
done
rm -rf /tmp/image-list.txt
Make it executable:
chmod a+x load-images.sh
Load the Docker images:
./load-images.sh
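Assuming the tarball contains the usual k8s.gcr.io control-plane images for 1.15.1, you can confirm they were loaded with:
# The freshly loaded images should now appear in the local store
docker images | grep k8s.gcr.io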
Master
Step 4: Initialize the master node
kubeadm config print init-defaults > kubeadm-config.yaml
Add a few tweaks to the config file:
[root@k8s-master01 ~]# vi kubeadm-config.yaml
[root@k8s-master01 ~]# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.66.10
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
The first IP address above (advertiseAddress) is the master's host-only network IP.
kubernetesVersion should be set to the version actually installed (my understanding).
podSubnet is the network that the flannel CNI plugin will use; it is pre-configured here because flannel is deployed later, and it must match the Network field in flannel's net-conf.json, which is 10.244.0.0/16.
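Since kube-proxy is configured for ipvs mode above, the ip_vs kernel modules must be loaded (presumably already handled in the previous article's common setup); a quick check, as a sketch:
# Load the ipvs-related kernel modules and confirm they are present
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do
    modprobe -- "$m"
done
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
With that confirmed, run the init: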
kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
--experimental-upload-certs uploads the certificates for high-availability setups; it isn't needed here, but it does no harm, so let's use it.
View the kubeadm init log (I slipped up here and accidentally re-ran the command above, so my log contents are no longer quite right):
vim kubeadm-init.log
Following the hint at the end of the kubeadm init output, run the following:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
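At this point kubectl should be able to reach the API server; a quick sanity check (a sketch):
# Print the control-plane endpoints known to this kubeconfig
kubectl cluster-info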
Step 5: View the current node list
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 11m v1.15.1
Create the /root/install-k8s directory structure and move kubeadm-config.yaml and kubeadm-init.log from /root/ into /root/install-k8s/core, as sketched below.
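The commands behind this step are roughly as follows (a sketch; the tree below shows the resulting layout):
# Create the archive layout and move the init artifacts into it
mkdir -p /root/install-k8s/core /root/install-k8s/plugin/flannel
mv /root/kubeadm-config.yaml /root/kubeadm-init.log /root/install-k8s/core/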
[root@k8s-master01 flannel]# tree /root/install-k8s/
/root/install-k8s/
├── core
│ ├── kubeadm-config.yaml
│ └── kubeadm-init.log
└── plugin
└── flannel
Step 6: Deploy the network
Download the kube-flannel YAML file into /root/install-k8s/plugin/flannel:
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
This download may fail; you can fetch the file by other means and copy it onto the node instead.
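If wget fails, curl is one alternative, or download the file on a machine that can reach GitHub and copy it over (a sketch):
# Alternative download with curl
curl -fsSL -o kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Or copy it in from another machine, e.g.:
# scp kube-flannel.yml root@192.168.66.10:/root/install-k8s/plugin/flannel/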
Then create it:
[root@k8s-master01 flannel]# kubectl create -f kube-flannel.yml
The kube-flannel component should now appear as running. (Embarrassingly, mine did not; presumably the other two nodes also need the common-part configuration first.)
[root@k8s-master01 flannel]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-58psv 0/1 Pending 0 27m
coredns-5c98db65d4-srjbl 0/1 Pending 0 27m
etcd-k8s-master01 1/1 Running 0 26m
kube-apiserver-k8s-master01 1/1 Running 0 26m
kube-controller-manager-k8s-master01 1/1 Running 0 26m
kube-proxy-r2pqw 1/1 Running 0 27m
kube-scheduler-k8s-master01 1/1 Running 0 27m
Here -n specifies the namespace (it seems you have to query the kube-system namespace to see these pods).
ifconfig should show an extra flannel.1 interface. (Again embarrassing: mine did not, likely also because the other nodes had not yet run the common-part setup.)
[root@k8s-master01 flannel]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 172.17.255.255
ether 02:42:02:66:55:6a txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.66.10 netmask 255.255.255.0 broadcast 192.168.66.255
ether 00:0c:29:cc:99:77 txqueuelen 1000 (Ethernet)
RX packets 678575 bytes 748797342 (714.1 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 337269 bytes 25274694 (24.1 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
loop txqueuelen 1000 (Local Loopback)
RX packets 237353 bytes 39360608 (37.5 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 237353 bytes 39360608 (37.5 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Other Nodes
Step 7: Join the cluster
The join command comes from the hint at the end of the kubeadm init output on the master, e.g.:
kubeadm join 192.168.66.10:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:40f73643ae21b80f5173e8110b5c7c4137d270f8751455c1ea7b881e0406e7d8
Run this command on node 1 and node 2 to join them to the cluster.
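If the token has expired (its ttl is 24h in the config above), a fresh join command can be generated on the master:
# Prints a complete 'kubeadm join ...' command with a new token
kubeadm token create --print-join-command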
Step 8: View the pod list (detailed)
Then, on the master, you can see that k8s-node01 and k8s-node02 now host pods (their kube-proxy instances):
[root@k8s-master01 flannel]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-5c98db65d4-58psv 0/1 Pending 0 4h21m <none> <none> <none> <none>
coredns-5c98db65d4-srjbl 0/1 Pending 0 4h21m <none> <none> <none> <none>
etcd-k8s-master01 1/1 Running 0 4h20m 192.168.66.10 k8s-master01 <none> <none>
kube-apiserver-k8s-master01 1/1 Running 0 4h20m 192.168.66.10 k8s-master01 <none> <none>
kube-controller-manager-k8s-master01 1/1 Running 0 4h20m 192.168.66.10 k8s-master01 <none> <none>
kube-proxy-5w5s5 1/1 Running 0 30s 192.168.66.21 k8s-node02 <none> <none>
kube-proxy-fmjxs 1/1 Running 0 74s 192.168.66.20 k8s-node01 <none> <none>
kube-proxy-r2pqw 1/1 Running 0 4h21m 192.168.66.10 k8s-master01 <none> <none>
kube-scheduler-k8s-master01 1/1 Running 0 4h21m 192.168.66.10 k8s-master01 <none> <none>
Here -o wide shows more detail (pod IP and node).
Sometimes, when things are slow, a pod does not show Running right away; it shows an Init status first and becomes Running after a while.
Step 9: View the pod list (continuous output)
In that case, you can add the -w flag on the master to get a continuously updating view:
[root@k8s-master01 flannel]# kubectl get pod -n kube-system -w
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-58psv 0/1 Pending 0 4h24m
coredns-5c98db65d4-srjbl 0/1 Pending 0 4h24m
etcd-k8s-master01 1/1 Running 0 4h23m
kube-apiserver-k8s-master01 1/1 Running 0 4h23m
kube-controller-manager-k8s-master01 1/1 Running 0 4h23m
kube-proxy-5w5s5 1/1 Running 0 3m13s
kube-proxy-fmjxs 1/1 Running 0 3m57s
kube-proxy-r2pqw 1/1 Running 0 4h24m
kube-scheduler-k8s-master01 1/1 Running 0 4h24m
This way you don't have to keep re-running the command; changes stream in real time.
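As an alternative to -w, the standalone watch utility polls the same command at an interval (a sketch):
# Re-run the query every 2 seconds and redraw the screen
watch -n 2 kubectl get pod -n kube-system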
Step 10: View the node list (the key point is that Status is not NotReady)
The master now sees 3 nodes in the cluster, but their status is NotReady, which indicates a problem:
[root@k8s-master01 flannel]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 4h26m v1.15.1
k8s-node01 NotReady <none> 5m54s v1.15.1
k8s-node02 NotReady <none> 5m10s v1.15.1
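The cause is addressed in Step 12 below; to inspect it yourself first, the node conditions and kubelet logs are the usual places to look (a sketch):
# Node conditions explain the NotReady status
kubectl describe node k8s-node01
# On the affected node, the kubelet log typically complains that the
# cni config is uninitialized until a network plugin is running
journalctl -u kubelet --no-pager | tail -n 20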
Step 11: Archive
cd /root
mv /root/install-k8s/ /usr/local/
Step 12: Fixing kube-flannel missing from the kubectl get pod -n kube-system output, and all 3 nodes showing NotReady in kubectl get node
First, in the kubeadm-config.yaml used to initialize the master, the SupportIPVSProxyMode setting must be indented under featureGates (the listing above already includes that indentation). Whether that indentation alone caused the problem is not certain.
I found it because checking with this command produced an error:
kubeadm config images list --config /root/install-k8s/core/kubeadm-config.yaml
That is how I traced the problem to the missing indentation.
Note that kubeadm-config.yaml has been moved by this point,
so the path used is /usr/local/install-k8s/core/kubeadm-config.yaml.
The actual fix, I believe, is the following:
kubectl delete -f /usr/local/install-k8s/plugin/flannel/kube-flannel.yml
This first removes the kube-flannel deployed earlier.
Then re-create it from the kube-flannel2.yml config file below:
kubectl create -f kube-flannel2.yml
The contents of this config file:
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
    - min: 0
      max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - ppc64le
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - s390x
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
Checking the pod list again, the flannel pods are now there:
[root@k8s-master01 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-58psv 0/1 ContainerCreating 0 5h7m
coredns-5c98db65d4-srjbl 0/1 ContainerCreating 0 5h7m
etcd-k8s-master01 1/1 Running 0 5h6m
kube-apiserver-k8s-master01 1/1 Running 0 5h5m
kube-controller-manager-k8s-master01 1/1 Running 0 5h6m
kube-flannel-ds-amd64-8jr6g 1/1 Running 0 8m29s
kube-flannel-ds-amd64-9lgbq 1/1 Running 0 8m29s
kube-flannel-ds-amd64-r7gdg 1/1 Running 0 8m29s
kube-proxy-5w5s5 1/1 Running 0 45m
kube-proxy-fmjxs 1/1 Running 0 46m
kube-proxy-r2pqw 1/1 Running 0 5h7m
kube-scheduler-k8s-master01 1/1 Running 0 5h6m
Of course, these also start out in an Init status and gradually become Running.
In fact, once the flannel component problem was solved, the NotReady issue in kubectl get node was resolved as well:
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 5h8m v1.15.1
k8s-node01 Ready <none> 47m v1.15.1
k8s-node02 Ready <none> 46m v1.15.1
All nodes are now in Ready status.
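With flannel running, the flannel.1 interface that failed to appear back in Step 6 should now exist on each node:
# The VXLAN interface created by flannel
ifconfig flannel.1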