I'm testing this in VMware on a Mac.
When I try to run nginx as shown below, the pod fails with an error pulling the Docker image.
What should I check?
kubectl run test --image=nginx
root@cp-k8s:~# k describe pod test
Name:             test
Namespace:        default
Priority:         0
Service Account:  default
Node:             w3-k8s/172.16.68.135
Start Time:       Wed, 16 Apr 2025 16:02:00 +0900
Labels:           run=test
Annotations:      cni.projectcalico.org/containerID: 36f3430f70ae05d79164fb2c98afdc1d4be31322bd7410e8125aa29d2bd5b8a0
                  cni.projectcalico.org/podIP: 172.16.132.31/32
                  cni.projectcalico.org/podIPs: 172.16.132.31/32
Status:           Pending
IP:
IPs:              <none>
Containers:
  test:
    Container ID:
    Image:          nginx
    Image ID:
    Port:           <none>
    Host Port:      <none>
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-77nrb (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   False
  Initialized                 True
  Ready                       False
  ContainersReady             False
  PodScheduled                True
Volumes:
  kube-api-access-77nrb:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  19s   default-scheduler  Successfully assigned default/test to w3-k8s
  Normal  Pulling    19s   kubelet            Pulling image "nginx"
root@cp-k8s:~# k describe pod test
Name:             test
Namespace:        default
Priority:         0
Service Account:  default
Node:             w3-k8s/172.16.68.135
Start Time:       Wed, 16 Apr 2025 16:02:00 +0900
Labels:           run=test
Annotations:      cni.projectcalico.org/containerID: 36f3430f70ae05d79164fb2c98afdc1d4be31322bd7410e8125aa29d2bd5b8a0
                  cni.projectcalico.org/podIP: 172.16.132.31/32
                  cni.projectcalico.org/podIPs: 172.16.132.31/32
Status:           Pending
IP:               172.16.132.31
IPs:
  IP:  172.16.132.31
Containers:
  test:
    Container ID:
    Image:          nginx
    Image ID:
    Port:           <none>
    Host Port:      <none>
    State:          Waiting
      Reason:       ErrImagePull
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-77nrb (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 True
  Ready                       False
  ContainersReady             False
  PodScheduled                True
Volumes:
  kube-api-access-77nrb:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                From               Message
  ----     ------     ----               ----               -------
  Normal   Scheduled  67s                default-scheduler  Successfully assigned default/test to w3-k8s
  Warning  Failed     47s                kubelet            Failed to pull image "nginx": failed to pull and unpack image "docker.io/library/nginx:latest": failed to resolve reference "docker.io/library/nginx:latest": failed to do request: Head "https://registry-1.docker.io/v2/library/nginx/manifests/latest": dial tcp: lookup registry-1.docker.io on 127.0.0.53:53: read udp 127.0.0.1:44714->127.0.0.53:53: i/o timeout
  Normal   Pulling    36s (x2 over 67s)  kubelet            Pulling image "nginx"
  Warning  Failed     16s (x2 over 47s)  kubelet            Error: ErrImagePull
  Warning  Failed     16s                kubelet            Failed to pull image "nginx": failed to pull and unpack image "docker.io/library/nginx:latest": failed to resolve reference "docker.io/library/nginx:latest": failed to do request: Head "https://registry-1.docker.io/v2/library/nginx/manifests/latest": dial tcp: lookup registry-1.docker.io on 127.0.0.53:53: read udp 127.0.0.1:46918->127.0.0.53:53: i/o timeout
  Normal   BackOff    1s (x2 over 47s)   kubelet            Back-off pulling image "nginx"
  Warning  Failed     1s (x2 over 47s)   kubelet            Error: ImagePullBackOff
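
The events above narrow this down: the pull is failing at DNS, not at Docker Hub itself, since the lookup of registry-1.docker.io against the local systemd-resolved stub (127.0.0.53:53) times out on w3-k8s. A minimal sketch of what to check on that worker node, assuming an Ubuntu guest running systemd-resolved (the 8.8.8.8 server is only an example upstream):

# Run these on the node that failed the pull (w3-k8s):
resolvectl status                        # see which upstream DNS servers systemd-resolved forwards to
nslookup registry-1.docker.io            # reproduce the failing lookup through 127.0.0.53
nslookup registry-1.docker.io 8.8.8.8    # bypass the local stub; success here points at the VM's DNS settings
sudo crictl pull docker.io/library/nginx:latest    # retry the pull directly through the container runtime

If the lookup against 8.8.8.8 succeeds while the plain one times out, the usual suspects in a VMware guest are the DNS servers handed out by the VMware NAT/DHCP service; giving the VM a working upstream DNS (for example in its netplan config) and restarting systemd-resolved typically clears the pull error.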
2 answers
k describe pod calico-node-dcp9k -n kube-system
Name:                 calico-node-dcp9k
Namespace:            kube-system
Priority:             2000001000
Priority Class Name:  system-node-critical
Service Account:      calico-node
Node:                 w1-k8s/172.16.68.133
Start Time:           Wed, 16 Apr 2025 15:50:27 +0900
Labels:               controller-revision-hash=86497fc7f9
                      k8s-app=calico-node
                      pod-template-generation=2
Annotations:          kubectl.kubernetes.io/restartedAt: 2025-04-16T15:50:17+09:00
Status:               Running
SeccompProfile:       RuntimeDefault
IP:                   172.16.68.133
IPs:
  IP:  172.16.68.133
Controlled By:  DaemonSet/calico-node
Init Containers:
  upgrade-ipam:
    Container ID:  containerd://076794f20268c207c9a17b43946be284e305a09772e2664884d0d7590b6220bd
    Image:         quay.io/calico/cni:v3.29.2
    Image ID:      quay.io/calico/cni@sha256:5c7827667dab3e7ef72682eabc3d0bf8e39c228c67fbf193dfa0a8a8674d300a
    Port:          <none>
    Host Port:     <none>
    Command:
      /opt/cni/bin/calico-ipam
      -upgrade
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Wed, 16 Apr 2025 15:50:28 +0900
      Finished:     Wed, 16 Apr 2025 15:50:28 +0900
    Ready:          True
    Restart Count:  0
    Environment Variables from:
      kubernetes-services-endpoint  ConfigMap  Optional: true
    Environment:
      KUBERNETES_NODE_NAME:       (v1:spec.nodeName)
      CALICO_NETWORKING_BACKEND:  <set to the key 'calico_backend' of config map 'calico-config'>  Optional: false
    Mounts:
      /host/opt/cni/bin from cni-bin-dir (rw)
      /var/lib/cni/networks from host-local-net-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mk4mt (ro)
  install-cni:
    Container ID:  containerd://afede1e59dfc437027eb7c486e4253258a11e7508016b38f05bc379927b2349c
    Image:         quay.io/calico/cni:v3.29.2
    Image ID:      quay.io/calico/cni@sha256:5c7827667dab3e7ef72682eabc3d0bf8e39c228c67fbf193dfa0a8a8674d300a
    Port:          <none>
    Host Port:     <none>
    Command:
      /opt/cni/bin/install
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Wed, 16 Apr 2025 15:50:28 +0900
      Finished:     Wed, 16 Apr 2025 15:50:29 +0900
    Ready:          True
    Restart Count:  0
    Environment Variables from:
      kubernetes-services-endpoint  ConfigMap  Optional: true
    Environment:
      CNI_CONF_NAME:         10-calico.conflist
      CNI_NETWORK_CONFIG:    <set to the key 'cni_network_config' of config map 'calico-config'>  Optional: false
      KUBERNETES_NODE_NAME:  (v1:spec.nodeName)
      CNI_MTU:               <set to the key 'veth_mtu' of config map 'calico-config'>  Optional: false
      SLEEP:                 false
    Mounts:
      /host/etc/cni/net.d from cni-net-dir (rw)
      /host/opt/cni/bin from cni-bin-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mk4mt (ro)
  mount-bpffs:
    Container ID:  containerd://162a7b51b5d56da07efa197248b711df1d6abdb0b6cd14c481a4a67a4083b2f0
    Image:         quay.io/calico/node:v3.29.2
    Image ID:      quay.io/calico/node@sha256:97dfd69511ab72c64b4db0636bdf1298b5a7f869ff0296329e0c39dbda4a80b5
    Port:          <none>
    Host Port:     <none>
    Command:
      calico-node
      -init
      -best-effort
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Wed, 16 Apr 2025 15:50:29 +0900
      Finished:     Wed, 16 Apr 2025 15:50:29 +0900
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /nodeproc from nodeproc (ro)
      /sys/fs from sys-fs (rw)
      /var/run/calico from var-run-calico (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mk4mt (ro)
Containers:
  calico-node:
    Container ID:  containerd://9dbf692d177e8c374009bb82229b45d0afff466356cdabc2a2c22eb508b557bd
    Image:         quay.io/calico/node:v3.29.2
    Image ID:      quay.io/calico/node@sha256:97dfd69511ab72c64b4db0636bdf1298b5a7f869ff0296329e0c39dbda4a80b5
    Port:          <none>
    Host Port:     <none>
    State:          Running
      Started:      Wed, 16 Apr 2025 15:50:30 +0900
    Ready:          True
    Restart Count:  0
    Requests:
      cpu:      250m
    Liveness:   exec [/bin/calico-node -felix-live -bird-live] delay=10s timeout=10s period=10s #success=1 #failure=6
    Readiness:  exec [/bin/calico-node -felix-ready -bird-ready] delay=0s timeout=10s period=10s #success=1 #failure=3
    Environment Variables from:
      kubernetes-services-endpoint  ConfigMap  Optional: true
    Environment:
      DATASTORE_TYPE:                     kubernetes
      WAIT_FOR_DATASTORE:                 true
      NODENAME:                           (v1:spec.nodeName)
      CALICO_NETWORKING_BACKEND:          <set to the key 'calico_backend' of config map 'calico-config'>  Optional: false
      CLUSTER_TYPE:                       k8s,bgp
      IP:                                 autodetect
      CALICO_IPV4POOL_IPIP:               Always
      CALICO_IPV4POOL_VXLAN:              Never
      CALICO_IPV6POOL_VXLAN:              Never
      FELIX_IPINIPMTU:                    <set to the key 'veth_mtu' of config map 'calico-config'>  Optional: false
      FELIX_VXLANMTU:                     <set to the key 'veth_mtu' of config map 'calico-config'>  Optional: false
      FELIX_WIREGUARDMTU:                 <set to the key 'veth_mtu' of config map 'calico-config'>  Optional: false
      CALICO_DISABLE_FILE_LOGGING:        true
      FELIX_DEFAULTENDPOINTTOHOSTACTION:  ACCEPT
      FELIX_IPV6SUPPORT:                  false
      FELIX_HEALTHENABLED:                true
    Mounts:
      /host/etc/cni/net.d from cni-net-dir (rw)
      /lib/modules from lib-modules (ro)
      /run/xtables.lock from xtables-lock (rw)
      /sys/fs/bpf from bpffs (rw)
      /var/lib/calico from var-lib-calico (rw)
      /var/log/calico/cni from cni-log-dir (ro)
      /var/run/calico from var-run-calico (rw)
      /var/run/nodeagent from policysync (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mk4mt (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 True
  Ready                       True
  ContainersReady             True
  PodScheduled                True
Volumes:
  lib-modules:
    Type:          HostPath (bare host directory volume)
    Path:          /lib/modules
    HostPathType:
  var-run-calico:
    Type:          HostPath (bare host directory volume)
    Path:          /var/run/calico
    HostPathType:  DirectoryOrCreate
  var-lib-calico:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/calico
    HostPathType:  DirectoryOrCreate
  xtables-lock:
    Type:          HostPath (bare host directory volume)
    Path:          /run/xtables.lock
    HostPathType:  FileOrCreate
  sys-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /sys/fs/
    HostPathType:  DirectoryOrCreate
  bpffs:
    Type:          HostPath (bare host directory volume)
    Path:          /sys/fs/bpf
    HostPathType:  Directory
  nodeproc:
    Type:          HostPath (bare host directory volume)
    Path:          /proc
    HostPathType:
  cni-bin-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /opt/cni/bin
    HostPathType:  DirectoryOrCreate
  cni-net-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /etc/cni/net.d
    HostPathType:
  cni-log-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/log/calico/cni
    HostPathType:
  host-local-net-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/cni/networks
    HostPathType:
  policysync:
    Type:          HostPath (bare host directory volume)
    Path:          /var/run/nodeagent
    HostPathType:  DirectoryOrCreate
  kube-api-access-mk4mt:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              kubernetes.io/os=linux
Tolerations:                 :NoSchedule op=Exists
                             :NoExecute op=Exists
                             CriticalAddonsOnly op=Exists
                             node.kubernetes.io/disk-pressure:NoSchedule op=Exists
                             node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                             node.kubernetes.io/network-unavailable:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists
                             node.kubernetes.io/pid-pressure:NoSchedule op=Exists
                             node.kubernetes.io/unreachable:NoExecute op=Exists
                             node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
  Type     Reason     Age                From               Message
  ----     ------     ----               ----               -------
  Normal   Scheduled  17m                default-scheduler  Successfully assigned kube-system/calico-node-dcp9k to w1-k8s
  Normal   Pulled     17m                kubelet            Container image "quay.io/calico/cni:v3.29.2" already present on machine
  Normal   Created    17m                kubelet            Created container upgrade-ipam
  Normal   Started    17m                kubelet            Started container upgrade-ipam
  Normal   Pulled     17m                kubelet            Container image "quay.io/calico/cni:v3.29.2" already present on machine
  Normal   Created    17m                kubelet            Created container install-cni
  Normal   Started    17m                kubelet            Started container install-cni
  Normal   Pulled     17m                kubelet            Container image "quay.io/calico/node:v3.29.2" already present on machine
  Normal   Created    17m                kubelet            Created container mount-bpffs
  Normal   Started    17m                kubelet            Started container mount-bpffs
  Normal   Pulled     17m                kubelet            Container image "quay.io/calico/node:v3.29.2" already present on machine
  Normal   Created    17m                kubelet            Created container calico-node
  Normal   Started    17m                kubelet            Started container calico-node
  Warning  Unhealthy  17m (x2 over 17m)  kubelet            Readiness probe failed: calico/node is not ready: BIRD is not ready: Error querying BIRD: unable to connect to BIRDv4 socket: dial unix /var/run/calico/bird.ctl: connect: connection refused
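
For context on the output above: the single Unhealthy event is the common transient readiness failure while BIRD starts up inside calico-node, and it is only a concern if it keeps repeating. A quick way to confirm the CNI DaemonSet has settled, using the standard k8s-app=calico-node label rather than anything specific to this cluster:

kubectl get pods -n kube-system -l k8s-app=calico-node -o wide   # expect Running and READY 1/1 on every node
kubectl get nodes -o wide                                        # every node should report Ready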
root@cp-k8s:~# kubectl get pod calico-node-dcp9k -o yaml -n kube-system | grep -i image:
image: quay.io/calico/node:v3.29.2
image: quay.io/calico/cni:v3.29.2
image: quay.io/calico/cni:v3.29.2
image: quay.io/calico/node:v3.29.2
image: quay.io/calico/node:v3.29.2
image: quay.io/calico/cni:v3.29.2
image: quay.io/calico/cni:v3.29.2
image: quay.io/calico/node:v3.29.2
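
Every image listed here lives on quay.io, and the events above show they were already present on the machine, so this output never exercises the failing path to docker.io. A more telling test is a direct pull against Docker Hub from the affected worker; a sketch assuming containerd is the runtime:

# On w3-k8s:
sudo ctr -n k8s.io images pull docker.io/library/nginx:latest

If this fails with the same lookup timeout while the calico pods keep running from their cached images, the cluster itself is healthy and the problem is purely node-level DNS resolution.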