# After testing a taint on the worker1 node, the osd-prepare pod for worker1 disappeared from Ceph. Can it be added back? I have already reverted the taint.
pod/rook-ceph-osd-prepare-kt1-test-hyu-k8s-worker-2-b9r57 0/1 Completed 0 59m
pod/rook-ceph-osd-prepare-kt1-test-hyu-k8s-worker-3-g59hp 0/1 Completed 0 59m
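For context, a node taint test like the one described would typically be applied and then reverted with `kubectl taint`. The exact taint key and effect used in the test are not shown in the post, so the ones below are hypothetical placeholders:

```sh
# Apply a test taint to worker-1 (key "test" and effect NoSchedule are
# hypothetical; substitute whatever was actually used in the test).
kubectl taint nodes kt1-test-hyu-k8s-worker-1 test=true:NoSchedule

# Revert it: the trailing "-" removes the taint with that key and effect.
kubectl taint nodes kt1-test-hyu-k8s-worker-1 test=true:NoSchedule-

# Confirm the node no longer carries the taint.
kubectl describe node kt1-test-hyu-k8s-worker-1 | grep Taints
```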
# The Ceph OSD status itself looks fine:
[root@kt1-test-hyu-k8s-master ceph]# k get all -n rook-ceph
NAME READY STATUS RESTARTS AGE
pod/csi-cephfsplugin-bxfr4 3/3 Running 0 47h
pod/csi-cephfsplugin-provisioner-5c8b6d6f4-bkbzv 6/6 Running 0 47h
pod/csi-cephfsplugin-provisioner-5c8b6d6f4-t8gcc 6/6 Running 0 60m
pod/csi-cephfsplugin-vzzv5 3/3 Running 0 19m
pod/csi-cephfsplugin-xz99c 3/3 Running 0 47h
pod/csi-rbdplugin-85xqk 3/3 Running 0 19m
pod/csi-rbdplugin-bxv5x 3/3 Running 0 47h
pod/csi-rbdplugin-nspg6 3/3 Running 0 47h
pod/csi-rbdplugin-provisioner-8564cfd44-rbplq 6/6 Running 0 47h
pod/csi-rbdplugin-provisioner-8564cfd44-xkqpr 6/6 Running 0 60m
pod/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-1-6947dfczcpcs 1/1 Running 0 19m
pod/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-2-5d9b647nzkks 1/1 Running 0 47h
pod/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-3-55ff5cc968ck 1/1 Running 0 47h
pod/rook-ceph-mgr-a-7b95c49f7d-7jd2v 1/1 Running 0 47h
pod/rook-ceph-mon-a-79d5d97cfc-h4tjg 1/1 Running 0 47h
pod/rook-ceph-mon-b-56b898b9d7-zt4d8 1/1 Running 0 21m
pod/rook-ceph-mon-c-5d56fdbcd-2xg5f 1/1 Running 0 47h
pod/rook-ceph-operator-6845846dcd-rmdth 1/1 Running 0 47h
pod/rook-ceph-osd-0-658b95554-qsbdx 1/1 Running 0 26h
pod/rook-ceph-osd-1-567b6f896c-vw42n 1/1 Running 0 60m
pod/rook-ceph-osd-2-7dc866bbcf-sgp2n 1/1 Running 0 26h
pod/rook-ceph-osd-prepare-kt1-test-hyu-k8s-worker-2-b9r57 0/1 Completed 0 59m
pod/rook-ceph-osd-prepare-kt1-test-hyu-k8s-worker-3-g59hp 0/1 Completed 0 59m
pod/rook-ceph-tools-7f6598cb58-jt4zv 1/1 Running 0 29h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/csi-cephfsplugin-metrics ClusterIP 10.106.23.175 <none> 8080/TCP,8081/TCP 47h
service/csi-rbdplugin-metrics ClusterIP 10.107.73.77 <none> 8080/TCP,8081/TCP 47h
service/rook-ceph-mgr ClusterIP 10.104.247.10 <none> 9283/TCP 47h
service/rook-ceph-mgr-dashboard ClusterIP 10.111.82.234 <none> 8443/TCP 47h
service/rook-ceph-mon-a ClusterIP 10.98.214.72 <none> 6789/TCP,3300/TCP 47h
service/rook-ceph-mon-b ClusterIP 10.110.204.93 <none> 6789/TCP,3300/TCP 47h
service/rook-ceph-mon-c ClusterIP 10.100.231.229 <none> 6789/TCP,3300/TCP 47h
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/csi-cephfsplugin 3 3 3 3 3 <none> 47h
daemonset.apps/csi-rbdplugin 3 3 3 3 3 <none> 47h
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/csi-cephfsplugin-provisioner 2/2 2 2 47h
deployment.apps/csi-rbdplugin-provisioner 2/2 2 2 47h
deployment.apps/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-1 1/1 1 1 19m
deployment.apps/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-2 1/1 1 1 47h
deployment.apps/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-3 1/1 1 1 47h
deployment.apps/rook-ceph-mgr-a 1/1 1 1 47h
deployment.apps/rook-ceph-mon-a 1/1 1 1 47h
deployment.apps/rook-ceph-mon-b 1/1 1 1 47h
deployment.apps/rook-ceph-mon-c 1/1 1 1 47h
deployment.apps/rook-ceph-operator 1/1 1 1 47h
deployment.apps/rook-ceph-osd-0 1/1 1 1 47h
deployment.apps/rook-ceph-osd-1 1/1 1 1 47h
deployment.apps/rook-ceph-osd-2 1/1 1 1 47h
deployment.apps/rook-ceph-tools 1/1 1 1 29h
NAME DESIRED CURRENT READY AGE
replicaset.apps/csi-cephfsplugin-provisioner-5c8b6d6f4 2 2 2 47h
replicaset.apps/csi-rbdplugin-provisioner-8564cfd44 2 2 2 47h
replicaset.apps/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-1-6947dfcd89 1 1 1 19m
replicaset.apps/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-2-5d9b647556 1 1 1 47h
replicaset.apps/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-2-64888df58 0 0 0 47h
replicaset.apps/rook-ceph-crashcollector-kt1-test-hyu-k8s-worker-3-55ff5cc4ff 1 1 1 47h
replicaset.apps/rook-ceph-mgr-a-7b95c49f7d 1 1 1 47h
replicaset.apps/rook-ceph-mon-a-79d5d97cfc 1 1 1 47h
replicaset.apps/rook-ceph-mon-b-56b898b9d7 1 1 1 47h
replicaset.apps/rook-ceph-mon-c-5d56fdbcd 1 1 1 47h
replicaset.apps/rook-ceph-operator-6845846dcd 1 1 1 47h
replicaset.apps/rook-ceph-osd-0-658b95554 1 1 1 26h
replicaset.apps/rook-ceph-osd-0-7d48dffb84 0 0 0 47h
replicaset.apps/rook-ceph-osd-1-567b6f896c 1 1 1 26h
replicaset.apps/rook-ceph-osd-1-6b79dbb697 0 0 0 47h
replicaset.apps/rook-ceph-osd-2-7ccf576997 0 0 0 47h
replicaset.apps/rook-ceph-osd-2-7dc866bbcf 1 1 1 26h
replicaset.apps/rook-ceph-tools-7f6598cb58 1 1 1 29h
NAME COMPLETIONS DURATION AGE
job.batch/rook-ceph-osd-prepare-kt1-test-hyu-k8s-worker-1 1/1 6s 166m
job.batch/rook-ceph-osd-prepare-kt1-test-hyu-k8s-worker-2 1/1 8s 59m
job.batch/rook-ceph-osd-prepare-kt1-test-hyu-k8s-worker-3 1/1 8s 59m
[root@kt1-test-hyu-k8s-master ceph]# k -n rook-ceph exec -it rook-ceph-tools-7f6598cb58-jt4zv -- ceph osd status
ID  HOST                       USED   AVAIL  WR OPS  WR DATA  RD OPS  RD DATA  STATE
 0  kt1-test-hyu-k8s-worker-2  20.9M  99.9G      0        0       0        0   exists,up
 1  kt1-test-hyu-k8s-worker-1  16.3M  99.9G      0        0       0        0   exists,up
 2  kt1-test-hyu-k8s-worker-3  19.5M  99.9G      0        0       0        0   exists,up
[root@kt1-test-hyu-k8s-master ceph]# k -n rook-ceph exec -it rook-ceph-tools-7f6598cb58-jt4zv -- ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME                            STATUS  REWEIGHT  PRI-AFF
-1         0.29306  root default
-5         0.09769      host kt1-test-hyu-k8s-worker-1
 1    hdd  0.09769          osd.1                            up   1.00000  1.00000
-7         0.09769      host kt1-test-hyu-k8s-worker-2
 0    hdd  0.09769          osd.0                            up   1.00000  1.00000
-3         0.09769      host kt1-test-hyu-k8s-worker-3
 2    hdd  0.09769          osd.2                            up   1.00000  1.00000
Answer 1
Hello, this is the instructor, 최일선.
Are you by any chance having problems using the storage class?
If not, you can keep using the cluster as is.
The prepare pods are created by a Job whose only purpose is to do the preparation work needed to run Ceph on each node. Once such a pod has run to completion, it does not need to run again.
Thank you!
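For reference, if you do want an osd-prepare job for worker-1 to be created again, the usual way is to restart the Rook operator so that it re-reconciles the CephCluster. A minimal sketch, assuming the standard `app=rook-ceph-operator` label that Rook puts on the operator pod:

```sh
# Deleting the operator pod is safe: its Deployment restarts it, and on
# startup the operator reconciles the cluster and re-runs the osd-prepare
# jobs on every eligible node.
kubectl -n rook-ceph delete pod -l app=rook-ceph-operator

# Once the operator is back up, a fresh prepare job per node should appear.
kubectl -n rook-ceph get jobs | grep osd-prepare
```

Since osd.1 is already up on worker-1, re-running the prepare job should be purely cosmetic: it detects the existing OSD on the device and exits.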
Ah, I see. There are no problems using the storage class. Thanks for the answer ^^