kubespray
Kubesprayλ Ansible κΈ°λ°μ Kubernetes λ°°ν¬ νλ μμν¬λ‘ λ¨μν μ€μΉ μ€ν¬λ¦½νΈκ° μλλΌ K8sλ₯Ό μ΄μ νκ²½μμ νμ€ μν€ν μ²λ₯Ό μ½λλ‘ κ΅¬νν νλ‘μ νΈλ€.
git clone -b v2.29.1 https://github.com/kubernetes-sigs/kubespray.git /root/kubespray
cd /root/kubespray
kubespray μμ€μ½λλ₯Ό ν΄λ‘ λ°μ ν vscodeμμ 컨νΈλ‘€ νλ μΈμ μ κ·Όνμ¬ μλ μΌλ―νμΌμ μμ νμ¬ μ€μ μ μ§ννλ€.
root@k8s-ctr:~/kubespray# kubectl get node -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-ctr Ready control-plane 2m38s v1.33.3 192.168.10.10 <none> Rocky Linux 10.0 (Red Quartz) 6.12.0-55.39.1.el10_0.aarch64 containerd://2.1.5
root@k8s-ctr:~/kubespray# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5d784884df-rxdf5 1/1 Running 0 2m7s
kube-system dns-autoscaler-676999957f-kw2vz 1/1 Running 0 2m7s
kube-system kube-apiserver-k8s-ctr 1/1 Running 1 2m40s
kube-system kube-controller-manager-k8s-ctr 1/1 Running 2 2m40s
kube-system kube-flannel-ds-arm64-h2rgj 1/1 Running 0 2m13s
kube-system kube-proxy-9kx57 1/1 Running 0 2m13s
kube-system kube-scheduler-k8s-ctr 1/1 Running 1 2m40s
kube-system metrics-server-7cd7f9897-d5qnw 1/1 Running 0 118s
node-feature-discovery node-feature-discovery-gc-6c9b8f4657-9zgqh 1/1 Running 0 112s
node-feature-discovery node-feature-discovery-master-6989794b78-ccd5t 1/1 Running 0 112s
node-feature-discovery node-feature-discovery-worker-xdv5s 1/1 Running 0 112s
requirements.txt
root@k8s-ctr:~/kubespray# pip3 install -r /root/kubespray/requirements.txt
Requirement already satisfied: ansible==10.7.0 in /usr/local/lib/python3.12/site-packages (from -r /root/kubespray/requirements.txt (line 1)) (10.7.0)
Requirement already satisfied: cryptography==46.0.2 in /usr/local/lib64/python3.12/site-packages (from -r /root/kubespray/requirements.txt (line 3)) (46.0.2)
Requirement already satisfied: jmespath==1.0.1 in /usr/local/lib/python3.12/site-packages (from -r /root/kubespray/requirements.txt (line 5)) (1.0.1)
Requirement already satisfied: netaddr==1.3.0 in /usr/local/lib/python3.12/site-packages (from -r /root/kubespray/requirements.txt (line 7)) (1.3.0)
Requirement already satisfied: ansible-core~=2.17.7 in /usr/local/lib/python3.12/site-packages (from ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (2.17.14)
Requirement already satisfied: cffi>=2.0.0 in /usr/local/lib64/python3.12/site-packages (from cryptography==46.0.2->-r /root/kubespray/requirements.txt (line 3)) (2.0.0)
Requirement already satisfied: jinja2>=3.0.0 in /usr/local/lib/python3.12/site-packages (from ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (3.1.6)
Requirement already satisfied: PyYAML>=5.1 in /usr/lib64/python3.12/site-packages (from ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (6.0.1)
Requirement already satisfied: packaging in /usr/lib/python3.12/site-packages (from ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (24.2)
Requirement already satisfied: resolvelib<1.1.0,>=0.5.3 in /usr/local/lib/python3.12/site-packages (from ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (1.0.1)
Requirement already satisfied: pycparser in /usr/local/lib/python3.12/site-packages (from cffi>=2.0.0->cryptography==46.0.2->-r /root/kubespray/requirements.txt (line 3)) (3.0)
Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib64/python3.12/site-packages (from jinja2>=3.0.0->ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (3.0.3)
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
root@k8s-ctr:~/kubespray# ansible --version
ansible [core 2.17.14]
config file = /root/kubespray/ansible.cfg
configured module search path = ['/root/kubespray/library']
ansible python module location = /usr/local/lib/python3.12/site-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.12.9 (main, Aug 14 2025, 00:00:00) [GCC 14.2.1 20250110 (Red Hat 14.2.1-7)] (/usr/bin/python3)
jinja version = 3.1.6
libyaml = True
root@k8s-ctr:~/kubespray# pip list
Package Version
------------------------- -----------
ansible 10.7.0
ansible-core 2.17.14
attrs 23.2.0
cffi 2.0.0
charset-normalizer 3.3.2
cockpit 334.1
cryptography 46.0.2
dasbus 1.7
dbus-python 1.3.2
distro 1.9.0
dnf 4.20.0
file-magic 0.4.0
idna 3.7
Jinja2 3.1.6
jmespath 1.0.1
jsonschema 4.19.1
jsonschema-specifications 2023.11.2
libcomps 0.1.21
libdnf 0.73.1
MarkupSafe 3.0.3
netaddr 1.3.0
nftables 0.1
packaging 24.2
perf 0.1
pexpect 4.9.0
pip 23.3.2
ptyprocess 0.7.0
pycparser 3.0
PyGObject 3.46.0
pyinotify 0.9.6
python-dateutil 2.9.0.post0
python-linux-procfs 0.7.3
pyudev 0.24.1
PyYAML 6.0.1
pyynl 0.0.1
referencing 0.31.1
requests 2.32.4
resolvelib 1.0.1
rpds-py 0.17.1
rpm 4.19.1.1
selinux 3.8
sepolicy 3.8
setools 4.5.1
setroubleshoot 3.3.33
setuptools 69.0.3
six 1.16.0
sos 4.10.0
systemd-python 235
urllib3 1.26.19
μΈλ²€ν 리 νμΌ
root@k8s-ctr:~/kubespray# cp -rfp /root/kubespray/inventory/sample /root/kubespray/inventory/mycluster
root@k8s-ctr:~/kubespray# cat << EOF > /root/kubespray/inventory/mycluster/inventory.ini
k8s-ctr ansible_host=192.168.10.10 ip=192.168.10.10
[kube_control_plane]
k8s-ctr
[etcd:children]
kube_control_plane
[kube_node]
k8s-ctr
EOF
cat /root/kubespray/inventory/mycluster/inventory.ini
k8s-ctr ansible_host=192.168.10.10 ip=192.168.10.10
[kube_control_plane]
k8s-ctr
[etcd:children]
kube_control_plane
[kube_node]
k8s-ctr
μν μΈλ²€ν 리 νμΌμ 볡μ¬νμ¬ μΈλ²€ν 리λ₯Ό μμ±νλ€.
μ μ μ€μ
root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/all/all.yml
---
bin_dir: /usr/local/bin
loadbalancer_apiserver_port: 6443
loadbalancer_apiserver_healthcheck_port: 8081
no_proxy_exclude_workers: false
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
ntp_enabled: false
ntp_manage_config: false
ntp_servers:
- "0.pool.ntp.org iburst"
- "1.pool.ntp.org iburst"
- "2.pool.ntp.org iburst"
- "3.pool.ntp.org iburst"
unsafe_show_logs: false
allow_unsupported_distribution_setup: false
ν΄λ¬μ€ν° μ€μ
root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
---
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
kube_cert_dir: "{{ kube_config_dir }}/ssl"
kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true
local_release_dir: "/tmp/releases"
retry_stagger: 5
kube_owner: kube
kube_cert_group: kube-cert
kube_log_level: 2
credentials_dir: "{{ inventory_dir }}/credentials"
kube_network_plugin: flannel
kube_network_plugin_multus: false
kube_service_addresses: 10.233.0.0/18
kube_pods_subnet: 10.233.64.0/18
kube_network_node_prefix: 24
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
kube_network_node_prefix_ipv6: 120
kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)
kube_proxy_mode: iptables
kube_proxy_strict_arp: false
kube_proxy_nodeport_addresses: >-
{%- if kube_proxy_nodeport_addresses_cidr is defined -%}
[{{ kube_proxy_nodeport_addresses_cidr }}]
{%- else -%}
[]
{%- endif -%}
kube_encrypt_secret_data: false
cluster_name: cluster.local
ndots: 2
dns_mode: coredns
enable_nodelocaldns: false
enable_nodelocaldns_secondary: false
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
nodelocaldns_second_health_port: 9256
nodelocaldns_bind_metrics_host_ip: false
nodelocaldns_secondary_skew_seconds: 5
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local
enable_coredns_k8s_endpoint_pod_names: false
resolvconf_mode: host_resolvconf
deploy_netchecker: false
skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
container_manager: containerd
kata_containers_enabled: false
kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
k8s_image_pull_policy: IfNotPresent
kubernetes_audit: false
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
volume_cross_zone_attachment: false
persistent_volumes_enabled: false
event_ttl_duration: "1h0m0s"
auto_renew_certificates: true
auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00"
kubeadm_patches_dir: "{{ kube_config_dir }}/patches"
kubeadm_patches: []
remove_anonymous_access: false
Flannel μΈν°νμ΄μ€ μ€μ
root@k8s-ctr:~/kubespray# sed -i 's|kube_network_plugin: calico|kube_network_plugin: flannel|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|kube_proxy_mode: ipvs|kube_proxy_mode: iptables|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|enable_nodelocaldns: true|enable_nodelocaldns: false|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|auto_renew_certificates: false|auto_renew_certificates: true|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|# auto_renew_certificates_systemd_calendar|auto_renew_certificates_systemd_calendar|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
grep -iE 'kube_network_plugin:|kube_proxy_mode|enable_nodelocaldns:|^auto_renew_certificates' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
kube_network_plugin: flannel
kube_proxy_mode: iptables
enable_nodelocaldns: false
auto_renew_certificates: true
auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00"
root@k8s-ctr:~/kubespray# cat inventory/mycluster/group_vars/k8s_cluster/kube_control_plane.yml
# Reservation for control plane kubernetes components
# kube_memory_reserved: 512Mi
# kube_cpu_reserved: 200m
# kube_ephemeral_storage_reserved: 2Gi
# kube_pid_reserved: "1000"
# Reservation for control plane host system
# system_memory_reserved: 256Mi
# system_cpu_reserved: 250m
# system_ephemeral_storage_reserved: 2Gi
# system_pid_reserved: "1000"
Addon μ€μ 
root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/k8s_cluster/addons.yml
---
helm_enabled: true
registry_enabled: false
metrics_server_enabled: true
local_path_provisioner_enabled: false
local_volume_provisioner_enabled: false
gateway_api_enabled: false
ingress_nginx_enabled: false
ingress_publish_status_address: ""
ingress_alb_enabled: false
cert_manager_enabled: false
metallb_enabled: false
metallb_speaker_enabled: "{{ metallb_enabled }}"
metallb_namespace: "metallb-system"
argocd_enabled: false
kube_vip_enabled: false
node_feature_discovery_enabled: true
# ν
μ€νΈν κΈ°λ₯μ μμ
sed -i 's|helm_enabled: false|helm_enabled: true|g' inventory/mycluster/group_vars/k8s_cluster/addons.yml
sed -i 's|metrics_server_enabled: false|metrics_server_enabled: true|g' inventory/mycluster/group_vars/k8s_cluster/addons.yml
sed -i 's|node_feature_discovery_enabled: false|node_feature_discovery_enabled: true|g' inventory/mycluster/group_vars/k8s_cluster/addons.yml
grep -iE 'helm_enabled:|metrics_server_enabled:|node_feature_discovery_enabled:' inventory/mycluster/group_vars/k8s_cluster/addons.yml
etcd systemd unit
root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/all/etcd.yml
---
etcd_data_dir: /var/lib/etcd
etcd_deployment_type: host
etcdλ₯Ό static podμ ꡬλνλ κ²μ΄ μλ systemd μλΉμ€λ‘ μ€ννλ€.
containerd
root@k8s-ctr:~/kubespray# cat inventory/mycluster/group_vars/all/containerd.yml
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
# containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0
# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"
# containerd_runc_runtime:
# name: runc
# type: "io.containerd.runc.v2"
# engine: ""
# root: ""
# containerd_additional_runtimes:
# Example for Kata Containers as additional runtime:
# - name: kata
# type: "io.containerd.kata.v2"
# engine: ""
# root: ""
# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216
# Containerd debug socket location: unix or tcp format
# containerd_debug_address: ""
# Containerd log level
# containerd_debug_level: "info"
# Containerd logs format, supported values: text, json
# containerd_debug_format: ""
# Containerd debug socket UID
# containerd_debug_uid: 0
# Containerd debug socket GID
# containerd_debug_gid: 0
# containerd_metrics_address: ""
# containerd_metrics_grpc_histogram: false
# Registries defined within containerd.
# containerd_registries_mirrors:
# - prefix: docker.io
# mirrors:
# - host: https://registry-1.docker.io
# capabilities: ["pull", "resolve"]
# skip_verify: false
# header:
# Authorization: "Basic XXX"
# containerd_max_container_log_line_size: 16384
# containerd_registry_auth:
# - registry: 10.0.0.2:5000
# username: user
# password: pass
μ€μλΈ λ°°ν¬
ansible-playbook -i inventory/mycluster/inventory.ini -v cluster.yml -e kube_version="1.33.3" --list-tasks
ANSIBLE_FORCE_COLOR=true ansible-playbook -i inventory/mycluster/inventory.ini -v cluster.yml -e kube_version="1.33.3" | tee kubespray_install.log
...
PLAY RECAP *********************************************************************
k8s-ctr : ok=571 changed=24 unreachable=0 failed=0 skipped=907 rescued=0 ignored=0
Saturday 31 January 2026 23:41:07 +0900 (0:00:00.012) 0:01:54.385 ******
===============================================================================
network_plugin/flannel : Flannel | Wait for flannel subnet.env file presence --- 5.15s
system_packages : Manage packages --------------------------------------- 4.73s
kubernetes-apps/node_feature_discovery : Node Feature Discovery | Create manifests --- 2.93s
kubernetes-apps/node_feature_discovery : Node Feature Discovery | Apply manifests --- 2.18s
kubernetes-apps/ansible : Kubernetes Apps | CoreDNS --------------------- 2.14s
etcdctl_etcdutl : Extract_file | Unpacking archive ---------------------- 1.82s
kubernetes-apps/metrics_server : Metrics Server | Create manifests ------ 1.77s
download : Download_file | Download item -------------------------------- 1.62s
kubernetes-apps/helm : Download_file | Download item -------------------- 1.61s
kubernetes-apps/helm : Extract_file | Unpacking archive ----------------- 1.56s
network_plugin/cni : CNI | Copy cni plugins ----------------------------- 1.54s
container-engine/crictl : Extract_file | Unpacking archive -------------- 1.47s
container-engine/runc : Download_file | Download item ------------------- 1.45s
container-engine/containerd : Download_file | Download item ------------- 1.43s
container-engine/crictl : Download_file | Download item ----------------- 1.42s
container-engine/containerd : Containerd | Unpack containerd archive ---- 1.42s
etcdctl_etcdutl : Download_file | Download item ------------------------- 1.42s
container-engine/nerdctl : Download_file | Download item ---------------- 1.41s
network_plugin/cni : CNI | Copy cni plugins ----------------------------- 1.40s
etcdctl_etcdutl : Copy etcd binary -------------------------------------- 1.35s
νλ μ΄λΆ λ°°ν¬κΉμ§ μ½ 5λΆ μ λκ° μμλλ€.
root@k8s-ctr:~/kubespray# kubectl get node -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-ctr Ready control-plane 2d22h v1.33.3 192.168.10.10 <none> Rocky Linux 10.0 (Red Quartz) 6.12.0-55.39.1.el10_0.aarch64 containerd://2.1.5
roles/kubespray_defaults
λ°°ν¬ νκ²½ λΆμνκΈ°
μ€μΉ λ°μ΄λ리
root@k8s-ctr:~/kubespray# tree /usr/local/bin/
/usr/local/bin/
βββ ansible
βββ ansible-community
βββ ansible-config
βββ ansible-connection
βββ ansible-console
βββ ansible-doc
βββ ansible-galaxy
βββ ansible-inventory
βββ ansible-playbook
βββ ansible-pull
βββ ansible-test
βββ ansible-vault
βββ containerd
βββ containerd-shim-runc-v2
βββ containerd-stress
βββ crictl
βββ ctr
βββ etcd
βββ etcdctl
βββ etcdctl.sh
βββ etcd-scripts
β βββ make-ssl-etcd.sh
βββ etcdutl
βββ helm
βββ jp.py
βββ k8s-certs-renew.sh
βββ k9s
βββ kubeadm
βββ kubectl
βββ kubelet
βββ kubernetes-scripts
βββ nerdctl
βββ netaddr
βββ __pycache__
β βββ jp.cpython-312.pyc
βββ runc
μ€μΉλ λ²μ νμΈ
root@k8s-ctr:~/kubespray# helm version
version.BuildInfo{Version:"v3.18.4", GitCommit:"d80839cf37d860c8aa9a0503fe463278f26cd5e2", GitTreeState:"clean", GoVersion:"go1.24.4"}
root@k8s-ctr:~/kubespray# etcdctl version
etcdctl version: 3.5.25
API version: 3.5
root@k8s-ctr:~/kubespray# containerd --version
containerd github.com/containerd/containerd/v2 v2.1.5 fcd43222d6b07379a4be9786bda52438f0dd16a1
root@k8s-ctr:~/kubespray# kubeadm version -o yaml
clientVersion:
buildDate: "2025-07-15T18:05:14Z"
compiler: gc
gitCommit: 80779bd6ff08b451e1c165a338a7b69351e9b0b8
gitTreeState: clean
gitVersion: v1.33.3
goVersion: go1.24.4
major: "1"
minor: "33"
platform: linux/arm64
kube κ³μ
root@k8s-ctr:~/kubespray# cat /etc/passwd | grep kube
kube:x:990:988:Kubernetes user:/home/kube:/sbin/nologin
root@k8s-ctr:~/kubespray# find / -user kube 2>/dev/null
/etc/cni
/etc/cni/net.d
/etc/kubernetes
/etc/kubernetes/manifests
/usr/libexec/kubernetes
/usr/libexec/kubernetes/kubelet-plugins
/usr/libexec/kubernetes/kubelet-plugins/volume
/usr/libexec/kubernetes/kubelet-plugins/volume/exec
/usr/local/bin/kubernetes-scripts
/opt/cni
/opt/cni/bin
/opt/cni/bin/README.md
/opt/cni/bin/static
/opt/cni/bin/host-device
/opt/cni/bin/ipvlan
/opt/cni/bin/dhcp
/opt/cni/bin/LICENSE
/opt/cni/bin/portmap
/opt/cni/bin/tap
/opt/cni/bin/host-local
/opt/cni/bin/vlan
/opt/cni/bin/loopback
/opt/cni/bin/sbr
/opt/cni/bin/firewall
/opt/cni/bin/bandwidth
/opt/cni/bin/bridge
/opt/cni/bin/vrf
/opt/cni/bin/macvlan
/opt/cni/bin/tuning
/opt/cni/bin/dummy
/opt/cni/bin/ptp
/opt/cni/bin/flannel
Kubesprayλ Kubernetes κ΅¬μ± μμμ νμΌ μμ κΆκ³Ό μ€ν κΆνμ λΆλ¦¬νκΈ° μν΄ μ μ© μμ€ν μ μ μΈ kubeλ₯Ό μμ±νλ€.
μ΄ κ³μ μ λ‘κ·ΈμΈ λΆκ°(/sbin/nologin) μ΅μ μ΄ μ μ©λμ΄ μμΌλ©°, μΌλ° μ¬μ©μ λͺ©μ μ΄ μλ μλΉμ€ μ μ© κ³μ μ΄λ€.
λν kubelet, kubeadm, μΈμ¦μ, λ§€λνμ€νΈ νμΌμ μμ μ£Όμ²΄λ‘ λμνλ€.
λ§μ½ cilium cniλ‘ kubesprayλ₯Ό μ§νν κ²½μ° μ΄ λ kube_ownerλ₯Ό rootλ‘ λ³κ²½ν΄μΌνλ€.
μΈμ¦μ μλ κ°±μ λμ νμΈ
root@k8s-ctr:~/kubespray# systemctl status k8s-certs-renew.timer --no-pager
β k8s-certs-renew.timer - Timer to renew K8S control plane certificates
Loaded: loaded (/etc/systemd/system/k8s-certs-renew.timer; enabled; preset: disabled)
Active: active (waiting) since Thu 2026-01-29 02:44:13 KST; 3 days ago
Invocation: 831995db84f74a4b9de89d98e6b50e66
Trigger: Mon 2026-02-02 03:09:32 KST; 22h left
Triggers: β k8s-certs-renew.service
Jan 29 02:44:13 k8s-ctr systemd[1]: Started k8s-certs-renew.timer - Timer to renew K8S control plane certificates.
Hint: Some lines were ellipsized, use -l to show in full.
root@k8s-ctr:~/kubespray# cat /etc/systemd/system/k8s-certs-renew.timer
[Unit]
Description=Timer to renew K8S control plane certificates
[Timer]
OnCalendar=Mon *-*-1,2,3,4,5,6,7 03:00:00
RandomizedDelaySec=10min
FixedRandomDelay=yes
Persistent=yes
[Install]
WantedBy=multi-user.target
sed -i 's|auto_renew_certificates: false|auto_renew_certificates: true|g' k8s-cluster.yml
μ΅μ μ ν΅ν΄ μΈμ¦μ μλ κ°±μ μ νμ±ννμλ€.
λ°λΌμ ν΄λΉ μ€ν¬λ¦½νΈμ μμ€ν λ°λͺ¬ νμΌλ‘ κ±°μ¬λ¬ μ¬λΌκ°λ³΄λ©΄...
root@k8s-ctr:~/kubespray# cat /etc/systemd/system/k8s-certs-renew.service
[Unit]
Description=Renew K8S control plane certificates
[Service]
Type=oneshot
ExecStart=/usr/local/bin/k8s-certs-renew.sh
root@k8s-ctr:~/kubespray# cat /usr/local/bin/k8s-certs-renew.sh
#!/bin/bash
# Renew kubeadm-managed control plane certificates when any of them would
# expire before the next scheduled run of k8s-certs-renew.timer (plus a
# safety buffer), then restart the static control plane pods so they pick
# up the renewed certificates. Intended to be invoked by
# k8s-certs-renew.service (Type=oneshot).

echo "## Check Expiration before renewal ##"
/usr/local/bin/kubeadm certs check-expiration

days_buffer=7 # safety margin: do not renew at the very last moment
# Must match OnCalendar= in k8s-certs-renew.timer.
# NOTE: the assignment must be quoted — unquoted, bash would try to
# execute '*-*-1,2,3,4,5,6,7' as a command.
calendar="Mon *-*-1,2,3,4,5,6,7 03:00:00"

next_time=$(systemctl show k8s-certs-renew.timer -p NextElapseUSecRealtime --value)
if [ "${next_time}" == "" ]; then
  echo "## Skip expiry comparison due to fail to parse next elapse from systemd calendar,do renewal directly ##"
else
  current_time=$(date +%s)
  # Renew now if a certificate would expire before (next timer run + buffer).
  target_time=$(date -d "${next_time} + ${days_buffer} days" +%s)
  expiry_threshold=$(( target_time - current_time ))
  # jsonpath filter: certificates whose residualTime (seconds) is below the
  # threshold; the trailing '.0' forces a float comparison in jsonpath.
  expired_certs=$(/usr/local/bin/kubeadm certs check-expiration -o jsonpath="{.certificates[?(@.residualTime<${expiry_threshold}.0)]}")
  if [ "${expired_certs}" == "" ]; then
    echo "## Skip cert renew and K8S container restart, since all residualTimes are beyond threshold ##"
    exit 0
  fi
fi

echo "## Renewing certificates managed by kubeadm ##"
/usr/local/bin/kubeadm certs renew all

echo "## Restarting control plane pods managed by kubeadm ##"
# Removing the pod sandboxes forces kubelet to recreate the static pods
# defined in /etc/kubernetes/manifests with the renewed certificates.
/usr/local/bin/crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs /usr/local/bin/crictl rmp -f

echo "## Updating /root/.kube/config ##"
cp /etc/kubernetes/admin.conf /root/.kube/config

echo "## Waiting for apiserver to be up again ##"
until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done

echo "## Expiration after renewal ##"
/usr/local/bin/kubeadm certs check-expiration
Kubernetes control planeμ΄ static Podλ‘ κ΅¬μ±λ κ²μ κ΄μ°°
root@k8s-ctr:~/kubespray# crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | xargs crictl rmp -f
Stopped sandbox 43f5e0ab6fa0ce006185fd7860322f4bfd7efc35f9c63e04af8413251a0188c1
Removed sandbox 43f5e0ab6fa0ce006185fd7860322f4bfd7efc35f9c63e04af8413251a0188c1
Stopped sandbox f2b4e38064731a0312538e72062e67966bfc06fe47cd64604512a8ae7eb17c4b
Removed sandbox f2b4e38064731a0312538e72062e67966bfc06fe47cd64604512a8ae7eb17c4b
Stopped sandbox 23a141bd60f188de2d3330f34832093ffcca67db3c24bef4c6f3a0e87e2b9388
Removed sandbox 23a141bd60f188de2d3330f34832093ffcca67db3c24bef4c6f3a0e87e2b9388

kube-system λ€μμ€νμ΄μ€μ μ‘΄μ¬νλ kube-apiserver, kube-controller-manager, kube-scheduler, etcd Pod sandboxλ₯Ό κ°μ λ‘ μμ ν΄λ³Έλ€.
control plane μ»΄ν¬λνΈλ Kubernetes APIλ‘ κ΄λ¦¬λμ§ μμΌλ©° /etc/kubernetes/manifestsμ μ μλ static Podλ₯Ό kubeletμ΄ μ§μ κ°μλλλ° μ»¨ν
μ΄λ λ°νμμμ κ°μ λ‘ μ κ±°νλλΌλ μ¦μ μ¬μμ±λλ€.
μ΄ κ΅¬μ‘° λλΆμ Kubernetesλ API Server μμ²΄κ° μ₯μ κ° λλλΌλ λ
Έλ λ 벨μμ control planeμ 볡ꡬν μ μλ€.
bootstrap_os
TASK [bootstrap_os : Fetch /etc/os-release] ************************************
TASK [bootstrap_os : Include tasks] ******************************************** ^[[0;36mincluded: /root/kubespray/roles/bootstrap_os/tasks/rocky.yml for k8s-ctr => (item=/root/kubespray/roles/bootstrap_os/tasks/rocky.yml)^[[0mSaturday 31 January 2026 23:39:14 +0900 (0:00:00.023) 0:00:01.775 ******
TASK [bootstrap_os : Gather host facts to get ansible_distribution_version ansible_distribution_major_version] ***^[[0;32mok: [k8s-ctr]^[[0mSaturday 31 January 2026 23:39:14 +0900 (0:00:00.428) 0:00:02.204 ****** TASK [bootstrap_os : Add proxy to yum.conf or dnf.conf if http_proxy is defined] ***^[[0;32mok: [k8s-ctr] => {"changed": false, "gid": 0, "group": "root", "mode": "0644", "msg": "OK@@@
TASK [bootstrap_os : Check presence of fastestmirror.conf] *********************
^[[0;32mok: [k8s-ctr] => {"changed": false, "stat": {"exists": false}}^[[0m
Saturday 31 January 2026 23:39:15 +0900 (0:00:00.113) 0:00:02.489 ******
Saturday 31 January 2026 23:39:15 +0900 (0:00:00.005) 0:00:02.494 ******
TASK [bootstrap_os : Create remote_tmp for it is used by another module] *******^[[0;32mok: [k8s-ctr] => {"changed": false, "gid": 0, "group": "root", "mode": "0700", "owner": "root", "path": "/root/.ansible/tmp", "secontext": "unconfined_u:object_r:admin_home_t:s0", "size": 42, "state": "directory", "uid": 0}^[[0m
Saturday 31 January 2026 23:39:20 +0900 (0:00:00.107) 0:00:07.679 ******
TASK [bootstrap_os : Gather facts] *********************************************
^[[0;32mok: [k8s-ctr]^[[0m
Saturday 31 January 2026 23:39:20 +0900 (0:00:00.145) 0:00:07.824 ******
TASK [bootstrap_os : Assign inventory name to unconfigured hostnames (non-CoreOS, non-Flatcar, Suse and ClearLinux, non-Fedora)] ***
^[[0;32mok: [k8s-ctr] => {"ansible_facts": {"ansible_domain": "", "ansible_fqdn": "k8s-ctr", "ans@@@
TASK [bootstrap_os : Ensure bash_completion.d folder exists] *******************
^[[0;32mok: [k8s-ctr] => {"changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/etc/bash_completion.d/", "secontext": "system_u:object_r:etc_t:s0", "size": 86, "state": "directory", "uid": 0}^[[0m
λ Έλλ₯Ό 컨ν μ΄λ μ€νμ΄ κ°λ₯νλλ‘ OS μνλ₯Ό μ ν νλ€.
etcd
PLAY [Prepare for etcd install] ************************************************
Saturday 31 January 2026 23:39:22 +0900 (0:00:00.820) 0:00:09.879 ******
TASK [adduser : User | Create User Group] **************************************
^[[0;32mok: [k8s-ctr] => {"changed": false, "gid": 988, "name": "kube-cert", "state": "present", "system": true}^[[0m
Saturday 31 January 2026 23:39:22 +0900 (0:00:00.138) 0:00:10.018 ******
TASK [adduser : User | Create User] ********************************************
^[[0;32mok: [k8s-ctr] => {"append": false, "changed": false, "comment": "Kubernetes user", "group": 988, "home": "/home/kube", "move_home": false, "name": "kube", "shell": "/sbin/nologin", "state": "present", "uid": 990}^[[0m
Saturday 31 January 2026 23:39:22 +0900 (0:00:00.189) 0:00:10.208 ******
root@k8s-ctr:~/kubespray# cat /etc/passwd | tail -n 3
vboxadd:x:991:1::/var/run/vboxadd:/bin/false
kube:x:990:988:Kubernetes user:/home/kube:/sbin/nologin
etcd:x:989:987:Etcd user:/home/etcd:/sbin/nologin
root@k8s-ctr:~/kubespray# cat /etc/group | tail -n 3
vboxdrmipc:x:989:
kube-cert:x:988:
etcd:x:987:
root@k8s-ctr:~/kubespray# find / -user etcd 2>/dev/null
/etc/ssl/etcd
/etc/ssl/etcd/ssl
/etc/ssl/etcd/ssl/admin-k8s-ctr-key.pem
/etc/ssl/etcd/ssl/admin-k8s-ctr.pem
/etc/ssl/etcd/ssl/ca-key.pem
/etc/ssl/etcd/ssl/ca.pem
/etc/ssl/etcd/ssl/member-k8s-ctr-key.pem
/etc/ssl/etcd/ssl/member-k8s-ctr.pem
/etc/ssl/etcd/ssl/node-k8s-ctr-key.pem
/etc/ssl/etcd/ssl/node-k8s-ctr.pem
etcd CA λ° private keyλ etcd κ³μ μμ λ‘ κ΄λ¦¬λλ€.
sysctl
root@k8s-ctr:~/kubespray# grep "^[^#]" /etc/sysctl.conf
net.ipv4.ip_forward=1
kernel.keys.root_maxbytes=25000000
kernel.keys.root_maxkeys=1000000
kernel.panic=10
kernel.panic_on_oops=1
vm.overcommit_memory=1
vm.panic_on_oom=0
net.ipv4.ip_local_reserved_ports=30000-32767
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-arptables=1
net.bridge.bridge-nf-call-ip6tables=1
컀λ μΈν μ μ 리νλ©΄ μλμ κ°λ€.
| λ€νΈμν¬ | net.ipv4.ip_forward | 1 | IPv4 ν¨ν· ν¬μλ© νμ±ν | Pod ↔ Pod, Pod ↔ Service λΌμ°ν μ μ μ 쑰건 |
| λ€νΈμν¬ | net.bridge.bridge-nf-call-iptables | 1 | λΈλ¦¬μ§ νΈλν½μ iptablesλ‘ μ λ¬ | Service NAT, NetworkPolicy μ μ λμ |
| λ€νΈμν¬ | net.bridge.bridge-nf-call-arptables | 1 | ARP ν¨ν·μ arptablesλ‘ μ²λ¦¬ | λΈλ¦¬μ§ κΈ°λ° CNI ARP μ²λ¦¬ μμ ν |
| λ€νΈμν¬ | net.bridge.bridge-nf-call-ip6tables | 1 | IPv6 λΈλ¦¬μ§ νΈλν½μ ip6tablesλ‘ μ λ¬ | IPv6 Pod λ€νΈμν¬ λλΉ |
| λ€νΈμν¬ | net.ipv4.ip_local_reserved_ports | 30000-32767 | λ‘컬 ephemeral ν¬νΈ μμ½ | NodePort ν¬νΈ μΆ©λ λ°©μ§ |
| λ©λͺ¨λ¦¬ | vm.overcommit_memory | 1 | λ©λͺ¨λ¦¬ overcommit νμ© | etcd μμ μ± ν보 (OOM μλ°©) |
| λ©λͺ¨λ¦¬ | vm.panic_on_oom | 0 | OOM μ 컀λ panic λ°©μ§ | Pod λ¨μ μ₯μ λ₯Ό λ Έλ μ₯μ λ‘ νμ° λ°©μ§ |
| 컀λ | kernel.keys.root_maxkeys | 1000000 | 컀λ key κ°μ μ ν νμ₯ | ServiceAccount, TLS ν€ κ΄λ¦¬ μμ ν |
| 컀λ | kernel.keys.root_maxbytes | 25000000 | 컀λ key μ΄ μ©λ νμ₯ | μΈμ¦ μ 보 μ²λ¦¬ λλΉ |
| μ₯μ λμ | kernel.panic_on_oops | 1 | 컀λ oops λ°μ μ panic | λΆμμ μν μ§μ λ°©μ§ |
| μ₯μ λμ | kernel.panic | 10 | panic ν μλ μ¬λΆν μκ°(μ΄) | λ Έλ μλ 볡ꡬ μ λ΅ |
preinstall
root@k8s-ctr:~/kubespray# tree roles/kubernetes/preinstall/tasks/
roles/kubernetes/preinstall/tasks/
βββ 0010-swapoff.yml
βββ 0020-set_facts.yml
βββ 0040-verify-settings.yml
βββ 0050-create_directories.yml
βββ 0060-resolvconf.yml
βββ 0061-systemd-resolved.yml
βββ 0062-networkmanager-unmanaged-devices.yml
βββ 0063-networkmanager-dns.yml
βββ 0080-system-configurations.yml
βββ 0081-ntp-configurations.yml
βββ 0100-dhclient-hooks.yml
βββ 0110-dhclient-hooks-undo.yml
βββ main.yml
1 directory, 13 files
preinstall λ¨κ³λ Kubernetes κ΅¬μ± μμλ₯Ό μ€μΉνκΈ° μ μ μ€μΉν λ Έλλ₯Ό μΏ λ²λ€ν°μ€κ° λμ κ°λ₯ν μνλ‘ μ¬μ μ λΉνλ λ¨κ³μ΄λ€.
main.yaml
# ------------------------------------------------------------
# Swap λΉνμ±ν
# - kubeletμ swapμ΄ νμ±νλ λ
Έλμμ μ€νμ΄ λΆκ°νλ€.
# - kubelet_fail_swap_on μ΄ trueμΈ κ²½μ°μλ§ κ°μ μ μ©
# ------------------------------------------------------------
- name: Disable swap
import_tasks: 0010-swapoff.yml
when:
- not dns_late # DNS μ€μ μ μ§μ°νλ νΉμ μλ리μ€κ° μλ λ
- kubelet_fail_swap_on # kubeletμ΄ swapμ νμ©νμ§ μλ μ€μ μΌ λ
# ------------------------------------------------------------
# λ
Έλ νκ²½ μ 보 μμ§ (Ansible facts νμ₯)
# - OS μ’
λ₯, λ€νΈμν¬ νκ²½, systemd μ¬μ© μ¬λΆ λ±μ μμ§
# - μ΄ν λͺ¨λ when 쑰건μ κΈ°μ€ λ°μ΄ν°λ‘ μ¬μ©λλ€
# ------------------------------------------------------------
- name: Set facts
import_tasks: 0020-set_facts.yml
tags:
- resolvconf
- facts
# ------------------------------------------------------------
# μ¬μ 쑰건 κ²μ¦
# - 컀λ λͺ¨λ, sysctl, λ€νΈμν¬ μꡬ μ¬νμ κ²μ¦
# - λ¬Έμ κ° μμΌλ©΄ μ΄κΈ°μ fail νμ¬ λ€ λ¨κ³ μ§νμ μ°¨λ¨
# ------------------------------------------------------------
- name: Check settings
import_tasks: 0040-verify-settings.yml
when:
- not dns_late
tags:
- asserts
# ------------------------------------------------------------
# Kubernetes κ΄λ ¨ λλ ν°λ¦¬ μμ±
# - /etc/kubernetes, /etc/cni, /var/lib/kubelet λ±
# - μ΄ν μΈμ¦μ, manifest, μ€μ νμΌ μμ± μ€ν¨λ₯Ό λ°©μ§
# ------------------------------------------------------------
- name: Create directories
import_tasks: 0050-create_directories.yml
when:
- not dns_late
# ------------------------------------------------------------
# DNS configuration branch (resolvconf-based hosts)
# - used when neither systemd-resolved nor NetworkManager is in play
# - classic direct management of /etc/resolv.conf
# ------------------------------------------------------------
- name: Apply resolvconf settings
  import_tasks: 0060-resolvconf.yml
  when:
    - dns_mode != 'none'                   # cluster DNS is enabled
    - resolvconf_mode == 'host_resolvconf' # manage the host resolv.conf
    - systemd_resolved_enabled.rc != 0     # systemd-resolved not active
    - networkmanager_enabled.rc != 0       # NetworkManager not active
  tags:
    - bootstrap_os
    - resolvconf
# ------------------------------------------------------------
# DNS configuration branch (systemd-resolved hosts)
# - avoids the DNS conflicts commonly seen on Ubuntu-family distros
# ------------------------------------------------------------
- name: Apply systemd-resolved settings
  import_tasks: 0061-systemd-resolved.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - systemd_resolved_enabled.rc == 0     # systemd-resolved is in use
  tags:
    - bootstrap_os
    - resolvconf
# ------------------------------------------------------------
# Keep NetworkManager from touching CNI-managed interfaces
# - protects network interfaces created by flannel, calico, etc.
# ------------------------------------------------------------
- name: Apply networkmanager unmanaged devices settings
  import_tasks: 0062-networkmanager-unmanaged-devices.yml
  when:
    - networkmanager_enabled.rc == 0
  tags:
    - bootstrap_os
# ------------------------------------------------------------
# Control NetworkManager's DNS handling
# - stabilizes the kubelet / CoreDNS / Pod DNS path
# ------------------------------------------------------------
- name: Apply networkmanager DNS settings
  import_tasks: 0063-networkmanager-dns.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - networkmanager_enabled.rc == 0
  tags:
    - bootstrap_os
    - resolvconf
# ------------------------------------------------------------
# Apply kernel and system settings
# - sysctl (ip_forward, bridge-nf-call-iptables, ...)
# - prerequisites for Kubernetes networking to function
# ------------------------------------------------------------
- name: Apply system configurations
  import_tasks: 0080-system-configurations.yml
  when:
    - not dns_late
  tags:
    - bootstrap_os
# ------------------------------------------------------------
# NTP configuration
# - certificate validity
# - etcd consensus stability
# - consistent log timelines
# ------------------------------------------------------------
- name: Configure NTP
  import_tasks: 0081-ntp-configurations.yml
  when:
    - not dns_late
    - ntp_enabled
  tags:
    - bootstrap_os
# ------------------------------------------------------------
# Guard against outages caused by DNS / IP changes on DHCP renewal
# - installs a dhclient hook
# ------------------------------------------------------------
- name: Configure dhclient
  import_tasks: 0100-dhclient-hooks.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - dhclientconffile is defined
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  tags:
    - bootstrap_os
    - resolvconf
# ------------------------------------------------------------
# Roll back the DHCP hook where it does not apply
# - safety net so the same setup is not forced on every environment
# ------------------------------------------------------------
- name: Configure dhclient dhclient hooks
  import_tasks: 0110-dhclient-hooks-undo.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode != 'host_resolvconf'
    - dhclientconffile is defined
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  tags:
    - bootstrap_os
    - resolvconf
# ------------------------------------------------------------
# Apply pending network/DNS changes immediately
# - pins the environment before the container runtime and kubelet
#   are installed
# ------------------------------------------------------------
- name: Flush handlers
  meta: flush_handlers
# ------------------------------------------------------------
# Detect whether we run inside an Azure VM
# - fact gathering for cloud-specific handling
# ------------------------------------------------------------
- name: Check if we are running inside a Azure VM
  stat:
    path: /var/lib/waagent/
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: azure_check
  when:
    - not dns_late
  tags:
    - bootstrap_os
# ------------------------------------------------------------
# Calico pre-flight checks before applying the plugin
# - validates kernel modules, iptables, and network requirements
# ------------------------------------------------------------
- name: Run calico checks
  include_role:
    name: network_plugin/calico
    tasks_from: check
  when:
    - kube_network_plugin == 'calico'
    - not ignore_assert_errors
create-directories.yaml
Defines the permission boundaries of Kubernetes components, certificates, networking, and storage at the filesystem level.
---
# ============================================================
# Pre-create the directory layout that Kubernetes will use
# - prevents mid-install directory creation failures
# - pins owners/permissions up front to make the security
#   boundary explicit
# ============================================================
# ------------------------------------------------------------
# Directories used directly by Kubernetes components
# - owned by the kube account
# - paths that kubelet and control-plane components must access
# ------------------------------------------------------------
- name: Create kubernetes directories
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ kube_owner }}" # usually the 'kube' user
    mode: "0755"
  when: ('k8s_cluster' in group_names)
  become: true
  tags:
    - kubelet
    - kube-controller-manager
    - kube-apiserver
    - bootstrap_os
    - apps
    - network
    - control-plane
    - node
  with_items:
    - "{{ kube_config_dir }}" # /etc/kubernetes : certificates, kubeconfig, settings
    - "{{ kube_manifest_dir }}" # /etc/kubernetes/manifests : static Pod definitions
    - "{{ kube_script_dir }}" # /usr/local/bin/kubernetes-scripts : Kubespray helper scripts
    - "{{ kubelet_flexvolumes_plugins_dir }}" # kubelet legacy volume plugin path
# ------------------------------------------------------------
# Directories that must stay root-owned
# - having a Kubernetes process own these would be a security risk
# ------------------------------------------------------------
- name: Create other directories of root owner
  file:
    path: "{{ item }}"
    state: directory
    owner: root # root only
    mode: "0755"
  when: ('k8s_cluster' in group_names)
  become: true
  tags:
    - kubelet
    - kube-controller-manager
    - kube-apiserver
    - bootstrap_os
    - apps
    - network
    - control-plane
    - node
  with_items:
    - "{{ kube_cert_dir }}" # /etc/kubernetes/ssl : sensitive certificate path
    - "{{ bin_dir }}" # /usr/local/bin : system binary path
# ------------------------------------------------------------
# Check the kubeadm-compat certificate path
# - works around kubeadm hardcoding a specific cert path
# ------------------------------------------------------------
- name: Check if kubernetes kubeadm compat cert dir exists
  stat:
    path: "{{ kube_cert_compat_dir }}"
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: kube_cert_compat_dir_check
  when:
    - ('k8s_cluster' in group_names)
    - kube_cert_dir != kube_cert_compat_dir
# ------------------------------------------------------------
# Create the kubeadm-compat cert dir (symbolic link)
# - keeps a single real certificate directory
# - points the path kubeadm expects at it
# ------------------------------------------------------------
- name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498)
  file:
    src: "{{ kube_cert_dir }}"
    dest: "{{ kube_cert_compat_dir }}"
    state: link
    mode: "0755"
  when:
    - ('k8s_cluster' in group_names)
    - kube_cert_dir != kube_cert_compat_dir
    - not kube_cert_compat_dir_check.stat.exists
# ------------------------------------------------------------
# Standard directories for CNI plugins
# - /etc/cni/net.d : CNI configuration files
# - /opt/cni/bin : CNI binaries
# ------------------------------------------------------------
- name: Create cni directories
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ kube_owner }}"
    mode: "0755"
  with_items:
    - "/etc/cni/net.d"
    - "/opt/cni/bin"
  when:
    - kube_network_plugin in ["calico", "flannel", "cilium", "kube-ovn", "kube-router", "macvlan"]
    - ('k8s_cluster' in group_names)
  tags:
    - network
    - cilium
    - calico
    - kube-ovn
    - kube-router
    - bootstrap_os
# ------------------------------------------------------------
# Calico-specific data directory
# - stores IPAM and state information
# ------------------------------------------------------------
- name: Create calico cni directories
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ kube_owner }}"
    mode: "0755"
  with_items:
    - "/var/lib/calico"
  when:
    - kube_network_plugin == "calico"
    - ('k8s_cluster' in group_names)
  tags:
    - network
    - calico
    - bootstrap_os
# ------------------------------------------------------------
# Host directories for the Local Volume Provisioner
# - paths that expose external storage to Kubernetes as PVs
# ------------------------------------------------------------
- name: Create local volume provisioner directories
  file:
    path: "{{ local_volume_provisioner_storage_classes[item].host_dir }}"
    state: directory
    owner: root
    group: root
    mode: "{{ local_volume_provisioner_directory_mode }}"
  with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}"
  when:
    - ('k8s_cluster' in group_names)
    - local_volume_provisioner_enabled
  tags:
    - persistent_volumes
handlers/main.yaml
Kubespray's preinstall handlers ensure that OS-level DNS changes are propagated consistently to kubelet, static Pods, and the control plane.
---
# ============================================================
# Handlers for the Kubernetes preinstall phase
# - propagate DNS / network / time configuration changes from
#   the host OS to the Kubernetes components
# - only run when notified by a task
# ============================================================
# ------------------------------------------------------------
# Apply resolvconf on Flatcar (Container Linux)
# - Flatcar does not manage /etc/resolv.conf the usual way
# - DNS settings must be applied through cloud-init
# ------------------------------------------------------------
- name: Preinstall | apply resolvconf cloud-init
  command: /usr/bin/coreos-cloudinit --from-file {{ resolveconf_cloud_init_conf }}
  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  listen: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
# ------------------------------------------------------------
# Restart NetworkManager
# - forces NM to re-read its cache/settings after a DNS change
# ------------------------------------------------------------
- name: Preinstall | reload NetworkManager
  service:
    name: NetworkManager.service
    state: restarted
  listen: Preinstall | update resolvconf for networkmanager
# ------------------------------------------------------------
# Restart kubelet
# - kubelet reads /etc/resolv.conf at startup
# - a restart is required to propagate DNS changes to
#   Pods / static Pods
# ------------------------------------------------------------
- name: Preinstall | reload kubelet
  service:
    name: kubelet
    state: restarted
  notify:
    # after kubelet restarts, check and bounce control-plane static pods
    - Preinstall | kube-controller configured
    - Preinstall | kube-apiserver configured
    - Preinstall | restart kube-controller-manager docker
    - Preinstall | restart kube-controller-manager crio/containerd
    - Preinstall | restart kube-apiserver docker
    - Preinstall | restart kube-apiserver crio/containerd
  when: not dns_early | bool
  listen:
    - Preinstall | propagate resolvconf to k8s components
    - Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
    - Preinstall | update resolvconf for networkmanager
# ------------------------------------------------------------
# Check whether the kube-apiserver static pod exists
# - pre-check to decide whether a restart is needed after the
#   DNS change
# ------------------------------------------------------------
# FIXME: needs separate handling in kubeadm mode
- name: Preinstall | kube-apiserver configured
  stat:
    path: "{{ kube_manifest_dir }}/kube-apiserver.yaml"
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: kube_apiserver_set
  when:
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
  listen: Preinstall | propagate resolvconf to k8s components
# ------------------------------------------------------------
# Check whether the kube-controller-manager static pod exists
# ------------------------------------------------------------
# FIXME: needs separate handling in kubeadm mode
- name: Preinstall | kube-controller configured
  stat:
    path: "{{ kube_manifest_dir }}/kube-controller-manager.yaml"
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: kube_controller_set
  when:
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
  listen: Preinstall | propagate resolvconf to k8s components
# ------------------------------------------------------------
# Restart kube-controller-manager on the docker runtime
# - removes the static pod container directly so that kubelet
#   recreates it
# ------------------------------------------------------------
- name: Preinstall | restart kube-controller-manager docker
  shell: >
    set -o pipefail &&
    {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q |
    xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f
  args:
    executable: /bin/bash
  when:
    - container_manager == "docker"
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_controller_set.stat.exists
  listen: Preinstall | propagate resolvconf to k8s components
# ------------------------------------------------------------
# Restart kube-controller-manager on crio/containerd
# - removes the static pod sandbox via crictl
# ------------------------------------------------------------
- name: Preinstall | restart kube-controller-manager crio/containerd
  shell: >
    set -o pipefail &&
    {{ bin_dir }}/crictl pods --name kube-controller-manager* -q |
    xargs -I% --no-run-if-empty bash -c
    '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'
  args:
    executable: /bin/bash
  register: preinstall_restart_controller_manager
  retries: 10
  delay: 1
  until: preinstall_restart_controller_manager.rc == 0
  when:
    - container_manager in ['crio', 'containerd']
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_controller_set.stat.exists
  listen: Preinstall | propagate resolvconf to k8s components
# ------------------------------------------------------------
# Restart kube-apiserver on the docker runtime
# ------------------------------------------------------------
- name: Preinstall | restart kube-apiserver docker
  shell: >
    set -o pipefail &&
    {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q |
    xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f
  args:
    executable: /bin/bash
  when:
    - container_manager == "docker"
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_apiserver_set.stat.exists
  listen: Preinstall | propagate resolvconf to k8s components
# ------------------------------------------------------------
# Restart kube-apiserver on crio/containerd
# ------------------------------------------------------------
- name: Preinstall | restart kube-apiserver crio/containerd
  shell: >
    set -o pipefail &&
    {{ bin_dir }}/crictl pods --name kube-apiserver* -q |
    xargs -I% --no-run-if-empty bash -c
    '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'
  args:
    executable: /bin/bash
  register: preinstall_restart_apiserver
  retries: 10
  until: preinstall_restart_apiserver.rc == 0
  delay: 1
  when:
    - container_manager in ['crio', 'containerd']
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_apiserver_set.stat.exists
  listen: Preinstall | propagate resolvconf to k8s components
# ------------------------------------------------------------
# Wait for the kube-apiserver health check in the dns_late case
# - for environments that apply DNS settings last
# ------------------------------------------------------------
- name: Preinstall | wait for the apiserver to be running
  uri:
    url: "{{ kube_apiserver_endpoint }}/healthz"
    validate_certs: false
  register: result
  until: result.status == 200
  retries: 60
  delay: 1
  when:
    - dns_late
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
    - not is_fedora_coreos
  listen: Preinstall | propagate resolvconf to k8s components
# ------------------------------------------------------------
# Restart systemd-resolved
# - refreshes the DNS cache and stub resolver
# ------------------------------------------------------------
- name: Preinstall | Restart systemd-resolved
  service:
    name: systemd-resolved
    state: restarted
# ------------------------------------------------------------
# Restart the NTP service
# - applies time synchronization setting changes
# ------------------------------------------------------------
- name: Preinstall | restart ntp
  service:
    name: "{{ ntp_service_name }}"
    state: restarted
  when: ntp_enabled
Container-engine
Next, let's look at how the container runtime is configured.
root@k8s-ctr:~/kubespray# tree roles/container-engine/
roles/container-engine/
βββ containerd
β βββ defaults
β β βββ main.yml
β βββ handlers
β β βββ main.yml
β β βββ reset.yml
β βββ meta
β β βββ main.yml
β βββ molecule
β β βββ default
β β βββ converge.yml
β β βββ molecule.yml
β β βββ verify.yml
β βββ tasks
β β βββ main.yml
β β βββ reset.yml
β βββ templates
β βββ config.toml.j2
β βββ config-v1.toml.j2
β βββ containerd.service.j2
β βββ hosts.toml.j2
β βββ http-proxy.conf.j2
βββ containerd-common
β βββ defaults
β β βββ main.yml
β βββ meta
β β βββ main.yml
β βββ tasks
β β βββ main.yml
β βββ vars
β βββ amazon.yml
β βββ suse.yml
βββ crictl
β βββ handlers
β β βββ main.yml
β βββ tasks
β β βββ crictl.yml
β β βββ main.yml
β βββ templates
β βββ crictl.yaml.j2
βββ cri-dockerd
β βββ defaults
β β βββ main.yml
β βββ handlers
β β βββ main.yml
β βββ meta
β β βββ main.yml
β βββ molecule
β β βββ default
β β βββ converge.yml
β β βββ files
β β β βββ 10-mynet.conf
β β β βββ container.json
β β β βββ sandbox.json
β β βββ molecule.yml
β β βββ verify.yml
β βββ tasks
β β βββ main.yml
β βββ templates
β βββ cri-dockerd.service.j2
β βββ cri-dockerd.socket.j2
βββ cri-o
β βββ defaults
β β βββ main.yml
β βββ handlers
β β βββ main.yml
β βββ meta
β β βββ main.yml
β βββ molecule
β β βββ default
β β βββ converge.yml
β β βββ molecule.yml
β β βββ verify.yml
β βββ tasks
β β βββ load_vars.yml
β β βββ main.yaml
β β βββ reset.yml
β β βββ setup-amazon.yaml
β βββ templates
β β βββ config.json.j2
β β βββ crio.conf.j2
β β βββ http-proxy.conf.j2
β β βββ mounts.conf.j2
β β βββ registry.conf.j2
β β βββ unqualified.conf.j2
β βββ vars
β βββ v1.29.yml
β βββ v1.31.yml
βββ crun
β βββ tasks
β βββ main.yml
βββ docker
β βββ defaults
β β βββ main.yml
β βββ files
β β βββ cleanup-docker-orphans.sh
β βββ handlers
β β βββ main.yml
β βββ meta
β β βββ main.yml
β βββ tasks
β β βββ docker_plugin.yml
β β βββ main.yml
β β βββ pre-upgrade.yml
β β βββ reset.yml
β β βββ set_facts_dns.yml
β β βββ systemd.yml
β βββ templates
β β βββ docker-dns.conf.j2
β β βββ docker-options.conf.j2
β β βββ docker-orphan-cleanup.conf.j2
β β βββ docker.service.j2
β β βββ fedora_docker.repo.j2
β β βββ http-proxy.conf.j2
β β βββ rh_docker.repo.j2
β βββ vars
β βββ amazon.yml
β βββ clearlinux.yml
β βββ debian.yml
β βββ fedora.yml
β βββ kylin.yml
β βββ openeuler.yml -> kylin.yml
β βββ redhat.yml
β βββ suse.yml
β βββ ubuntu.yml
β βββ uniontech.yml
βββ gvisor
β βββ molecule
β β βββ default
β β βββ converge.yml
β β βββ files
β β β βββ 10-mynet.conf
β β β βββ container.json
β β β βββ sandbox.json
β β βββ molecule.yml
β β βββ verify.yml
β βββ tasks
β βββ main.yml
βββ kata-containers
β βββ defaults
β β βββ main.yml
β βββ molecule
β β βββ default
β β βββ converge.yml
β β βββ files
β β β βββ 10-mynet.conf
β β β βββ container.json
β β β βββ sandbox.json
β β βββ molecule.yml
β β βββ verify.yml
β βββ tasks
β β βββ main.yml
β βββ templates
β βββ configuration-qemu.toml.j2
β βββ containerd-shim-kata-v2.j2
βββ meta
β βββ main.yml
βββ molecule
β βββ files
β β βββ 10-mynet.conf
β βββ prepare.yml
β βββ templates
β β βββ container.json.j2
β β βββ sandbox.json.j2
β βββ test_cri.yml
β βββ test_runtime.yml
βββ nerdctl
β βββ handlers
β β βββ main.yml
β βββ tasks
β β βββ main.yml
β βββ templates
β βββ nerdctl.toml.j2
βββ runc
β βββ defaults
β β βββ main.yml
β βββ tasks
β βββ main.yml
βββ skopeo
β βββ tasks
β βββ main.yml
βββ validate-container-engine
β βββ tasks
β βββ main.yml
βββ youki
βββ defaults
β βββ main.yml
βββ molecule
β βββ default
β βββ converge.yml
β βββ files
β β βββ 10-mynet.conf
β β βββ container.json
β β βββ sandbox.json
β βββ molecule.yml
β βββ verify.yml
βββ tasks
βββ main.yml
79 directories, 120 files
Several pre-built runtime options are available. Let's examine containerd and runc, which we used in this lab.
main.yaml
---
# ============================================================
# Container runtime validation and cleanup phase
# - determines which container runtime is installed/running
#   on the node
# - removes any runtime other than the container_manager
#   selected in the inventory
# - pre-emptively blocks outages caused by kubelet/runtime
#   mismatches
# ============================================================
# ------------------------------------------------------------
# Check for Fedora CoreOS / OSTree-based OS
# - OSTree-based systems manage packages differently, so the
#   runtime remove/install logic must branch accordingly
# ------------------------------------------------------------
- name: Validate-container-engine | check if fedora coreos
  stat:
    path: /run/ostree-booted
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: ostree
  tags:
    - facts
# ------------------------------------------------------------
# Store the OSTree result as a fact
# ------------------------------------------------------------
- name: Validate-container-engine | set is_ostree
  set_fact:
    is_ostree: "{{ ostree.stat.exists }}"
  tags:
    - facts
# ------------------------------------------------------------
# Check whether a kubelet systemd unit exists
# - decides whether kubelet must be stopped before removing
#   a runtime
# ------------------------------------------------------------
- name: Ensure kubelet systemd unit exists
  stat:
    path: "/etc/systemd/system/kubelet.service"
  register: kubelet_systemd_unit_exists
  tags:
    - facts
# ------------------------------------------------------------
# Gather systemd service states
# - used to decide whether containerd / docker / crio is running
# ------------------------------------------------------------
- name: Populate service facts
  service_facts:
  tags:
    - facts
# ------------------------------------------------------------
# Check whether containerd is installed
# - based on the presence of its systemd unit file
# ------------------------------------------------------------
- name: Check if containerd is installed
  find:
    file_type: file
    recurse: true
    use_regex: true
    patterns:
      - containerd.service$
    paths:
      - /lib/systemd
      - /etc/systemd
      - /run/systemd
  register: containerd_installed
  tags:
    - facts
# ------------------------------------------------------------
# Check whether docker is installed
# ------------------------------------------------------------
- name: Check if docker is installed
  find:
    file_type: file
    recurse: true
    use_regex: true
    patterns:
      - docker.service$
    paths:
      - /lib/systemd
      - /etc/systemd
      - /run/systemd
  register: docker_installed
  tags:
    - facts
# ------------------------------------------------------------
# Check whether CRI-O is installed
# ------------------------------------------------------------
- name: Check if crio is installed
  find:
    file_type: file
    recurse: true
    use_regex: true
    patterns:
      - crio.service$
    paths:
      - /lib/systemd
      - /etc/systemd
      - /run/systemd
  register: crio_installed
  tags:
    - facts
# ------------------------------------------------------------
# containerd removal logic
# - only when containerd was NOT selected in the inventory,
# - docker is not in use,
# - and containerd is actually running
# ------------------------------------------------------------
- name: Uninstall containerd
  vars:
    service_name: containerd.service
  when:
    # OSTree/Flatcar families remove packages differently; skip them
    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
    # the selected runtime is not containerd
    - container_manager != "containerd"
    # docker is not installed
    - docker_installed.matched == 0
    # containerd is installed, and
    - containerd_installed.matched > 0
    # only remove when it is actually running
    - ansible_facts.services[service_name]['state'] == 'running'
  block:
    # --------------------------------------------------------
    # Drain the node
    # - moves Pods safely elsewhere before removing the runtime
    # --------------------------------------------------------
    - name: Drain node
      include_role:
        name: remove_node/pre_remove
        apply:
          tags:
            - pre-remove
      when: kubelet_systemd_unit_exists.stat.exists
    # --------------------------------------------------------
    # Stop kubelet
    # - prevents kubelet restarts/malfunction during removal
    # --------------------------------------------------------
    - name: Stop kubelet
      service:
        name: kubelet
        state: stopped
      when: kubelet_systemd_unit_exists.stat.exists
    # --------------------------------------------------------
    # Remove containerd
    # --------------------------------------------------------
    - name: Remove Containerd
      import_role:
        name: container-engine/containerd
        tasks_from: reset
        handlers_from: reset
# ------------------------------------------------------------
# docker removal logic
# ------------------------------------------------------------
- name: Uninstall docker
  vars:
    service_name: docker.service
  when:
    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
    - container_manager != "docker"
    - docker_installed.matched > 0
    - ansible_facts.services[service_name]['state'] == 'running'
  block:
    - name: Drain node
      include_role:
        name: remove_node/pre_remove
        apply:
          tags:
            - pre-remove
      when: kubelet_systemd_unit_exists.stat.exists
    - name: Stop kubelet
      service:
        name: kubelet
        state: stopped
      when: kubelet_systemd_unit_exists.stat.exists
    - name: Remove Docker
      import_role:
        name: container-engine/docker
        tasks_from: reset
# ------------------------------------------------------------
# CRI-O removal logic
# ------------------------------------------------------------
- name: Uninstall crio
  vars:
    service_name: crio.service
  when:
    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
    - container_manager != "crio"
    - crio_installed.matched > 0
    - ansible_facts.services[service_name]['state'] == 'running'
  block:
    - name: Drain node
      include_role:
        name: remove_node/pre_remove
        apply:
          tags:
            - pre-remove
      when: kubelet_systemd_unit_exists.stat.exists
    - name: Stop kubelet
      service:
        name: kubelet
        state: stopped
      when: kubelet_systemd_unit_exists.stat.exists
    - name: Remove CRI-O
      import_role:
        name: container-engine/cri-o
        tasks_from: reset
# ------------------------------------------------------------
# Check for Fedora CoreOS / OSTree-based OS
# - OSTree-based systems manage packages differently, so the
#   runc remove/install logic must branch accordingly
# ------------------------------------------------------------
- name: Runc | check if fedora coreos
  stat:
    path: /run/ostree-booted
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: ostree
# ------------------------------------------------------------
# Store the OSTree result as a fact
# ------------------------------------------------------------
- name: Runc | set is_ostree
  set_fact:
    is_ostree: "{{ ostree.stat.exists }}"
# ------------------------------------------------------------
# Remove the runc installed by the OS package manager
# - the distro-provided runc:
#   * is often outdated
#   * may not match the version containerd/CRI-O require
# - Kubespray does not trust that runc
# ------------------------------------------------------------
- name: Runc | Uninstall runc package managed by package manager
  package:
    name: "{{ runc_package_name }}"
    state: absent
  when:
    # OSTree / Flatcar families remove packages differently; skip them
    - not (is_ostree or
        (ansible_distribution == "Flatcar Container Linux by Kinvolk") or
        (ansible_distribution == "Flatcar"))
# ------------------------------------------------------------
# Download the runc binary pinned by Kubespray
# - the exact version defined in checksums.yml
# - a runc whose compatibility with containerd / Kubernetes
#   has been verified
# ------------------------------------------------------------
- name: Runc | Download runc binary
  include_tasks: "../../../download/tasks/download_file.yml"
  vars:
    download: "{{ download_defaults | combine(downloads.runc) }}"
# ------------------------------------------------------------
# Place the downloaded runc binary at its target path
# - usually /usr/local/bin/runc
# - grants executable permission (0755)
# ------------------------------------------------------------
- name: Copy runc binary from download dir
  copy:
    src: "{{ downloads.runc.dest }}"
    dest: "{{ runc_bin_dir }}/runc"
    mode: "0755"
    remote_src: true
# ------------------------------------------------------------
# Remove a stale runc binary possibly left in the OS default
# path (/usr/bin)
# - prevents the container runtime from invoking the wrong runc
# ------------------------------------------------------------
- name: Runc | Remove orphaned binary
  file:
    path: /usr/bin/runc
    state: absent
  when: runc_bin_dir != "/usr/bin"
  ignore_errors: true # another package may have removed it already
containerd
root@k8s-ctr:~/kubespray# tree roles/container-engine/containerd/
roles/container-engine/containerd/
βββ defaults
β βββ main.yml
βββ handlers
β βββ main.yml
β βββ reset.yml
βββ meta
β βββ main.yml
βββ molecule
β βββ default
β βββ converge.yml
β βββ molecule.yml
β βββ verify.yml
βββ tasks
β βββ main.yml
β βββ reset.yml
βββ templates
βββ config.toml.j2
βββ config-v1.toml.j2
βββ containerd.service.j2
βββ hosts.toml.j2
βββ http-proxy.conf.j2
8 directories, 14 files
# ------------------------------------------------------------
# containerd λ°μ΄λ리 λ€μ΄λ‘λ
# - checksums.yml μ μ μλ "κ²μ¦λ λ²μ "λ§ μ¬μ©
# - distro repo λ²μ ? μ λ―Ώμ
# ------------------------------------------------------------
- name: Containerd | Download containerd
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.containerd) }}"
# ------------------------------------------------------------
# containerd μμΉ΄μ΄λΈ μμΆ ν΄μ
# - containerd_bin_dir (λ³΄ν΅ /usr/local/bin)
# - strip-components=1 → bin λ°λ‘ μλλ‘ νμ΄λ²λ¦Ό
# ------------------------------------------------------------
- name: Containerd | Unpack containerd archive
unarchive:
src: "{{ downloads.containerd.dest }}"
dest: "{{ containerd_bin_dir }}"
mode: "0755"
remote_src: true
extra_opts:
- --strip-components=1
notify: Restart containerd # λ°μ΄λ리 λ°λμμΌλ μ¬μμ μμ½
# ------------------------------------------------------------
# containerd systemd unit μμ±
# - OS κΈ°λ³Έ unit μ μ
# - Kubespray ν
νλ¦ΏμΌλ‘ μ§μ μμ±
# ------------------------------------------------------------
- name: Containerd | Generate systemd service for containerd
template:
src: containerd.service.j2
dest: /etc/systemd/system/containerd.service
mode: "0644"
validate: >
sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 &&
systemd-analyze verify %s:containerd.service'
notify: Restart containerd
# μ¬κΈ°μ λκ»΄μ§λ Kubesprayμ μ§μ°© ν¬μΈνΈ:
# - systemd λ²μ κΉμ§ κ³ λ €
# - unit κΉ¨μ§ κ°λ₯μ± μ¬μ μ μ°¨λ¨
# ------------------------------------------------------------
# containerd μ€μ λ° systemd drop-in λλ ν°λ¦¬ μμ±
# ------------------------------------------------------------
- name: Containerd | Ensure containerd directories exist
file:
dest: "{{ item }}"
state: directory
mode: "0755"
owner: root
group: root
with_items:
- "{{ containerd_systemd_dir }}" # /etc/systemd/system/containerd.service.d
- "{{ containerd_cfg_dir }}" # /etc/containerd
# ------------------------------------------------------------
# HTTP/HTTPS proxy μ€μ
# - systemd drop-in λ°©μ
# - config.toml μμ μ€μ μ λλ €λ£μ
# ------------------------------------------------------------
- name: Containerd | Write containerd proxy drop-in
template:
src: http-proxy.conf.j2
dest: "{{ containerd_systemd_dir }}/http-proxy.conf"
mode: "0644"
notify: Restart containerd
when: http_proxy is defined or https_proxy is defined
# ------------------------------------------------------------
# containerd κΈ°λ³Έ OCI runtime spec μμ±
# - ctr oci spec λͺ
λ ΉμΌλ‘ νμ¬ νκ²½ κΈ°μ€μμ μ€ν μμ±
# - μ΄κ±Έ μ μ°λ©΄ runtime μ΅μ
κΌ¬μ
# ------------------------------------------------------------
- name: Containerd | Generate default base_runtime_spec
register: ctr_oci_spec
command: "{{ containerd_bin_dir }}/ctr oci spec"
check_mode: false
changed_when: false
# ------------------------------------------------------------
# μμ±λ OCI runtime specμ factλ‘ μ μ₯
# - μ΄ν custom runtime specμ κΈ°μ€κ°μΌλ‘ μ¬μ©
# ------------------------------------------------------------
- name: Containerd | Store generated default base_runtime_spec
set_fact:
containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}"
# ------------------------------------------------------------
# runtime spec νμΌ μμ±
# - runc, kata, custom runtime λ± νμ₯ λλΉ
# ------------------------------------------------------------
- name: Containerd | Write base_runtime_specs
copy:
content: "{{ item.value }}"
dest: "{{ containerd_cfg_dir }}/{{ item.key }}"
owner: "root"
mode: "0644"
with_dict: "{{ containerd_base_runtime_specs | default({}) }}"
notify: Restart containerd
# ------------------------------------------------------------
# containerd λ©μΈ μ€μ νμΌ(config.toml) μμ±
# - containerd 2.x / 1.x λΆκΈ° μ²λ¦¬
# - μ¬κΈ°μ CRI, runc κ²½λ‘, cgroup, registry λ€ κ²°μ
# ------------------------------------------------------------
- name: Containerd | Copy containerd config file
template:
src: "{{ 'config.toml.j2'
if containerd_version is version('2.0.0', '>=')
else 'config-v1.toml.j2' }}"
dest: "{{ containerd_cfg_dir }}/config.toml"
owner: "root"
mode: "0640"
notify: Restart containerd
# ------------------------------------------------------------
# Configure containerd registry mirrors:
#   certs.d/<prefix>/hosts.toml per entry of containerd_registries_mirrors.
# - Covers private registries and proxy/mirror environments
# - no_log hides task output (which may contain registry credentials)
#   unless unsafe_show_logs is explicitly enabled
# ------------------------------------------------------------
- name: Containerd | Configure containerd registries
no_log: "{{ not (unsafe_show_logs | bool) }}"
block:
- name: Containerd | Create registry directories
file:
path: "{{ containerd_cfg_dir }}/certs.d/{{ item.prefix }}"
state: directory
mode: "0755"
loop: "{{ containerd_registries_mirrors }}"
- name: Containerd | Write hosts.toml file
template:
src: hosts.toml.j2
dest: "{{ containerd_cfg_dir }}/certs.d/{{ item.prefix }}/hosts.toml"
mode: "0640"
loop: "{{ containerd_registries_mirrors }}"
# ------------------------------------------------------------
# Flush pending handlers here so the "Restart containerd" handler
# fires immediately, instead of only at the end of the play.
# - Prevents a window where the config on disk and the running
#   daemon disagree
# ------------------------------------------------------------
- name: Containerd | Flush handlers
meta: flush_handlers
# ------------------------------------------------------------
# Enable and start the containerd service.
# - daemon_reload is required because the unit file was generated
#   by this role rather than installed by a package
# ------------------------------------------------------------
- name: Containerd | Ensure containerd is started and enabled
systemd_service:
name: containerd
daemon_reload: true
enabled: true
state: started
Now let's take a look at the containerd systemd unit file.
root@k8s-ctr:~/kubespray# cat /etc/systemd/system/containerd.service
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target dbus.service
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
LimitMEMLOCK=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999
# Set the cgroup slice of the service so that kube reserved takes effect
[Install]
WantedBy=multi-user.target
That is how the unit is defined.
Applying ulimit tuning inside a Pod
root@k8s-ctr:~/kubespray# cat /etc/containerd/cri-base.json | jq | grep rlimits -A 10
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 65535,
"soft": 65535
}
],
"noNewPrivileges": true
},
"root": {
"path": "rootfs"
Let's verify how much of the current kernel-level limit is actually applied inside the Pod.
root@k8s-ctr:~/kubespray# cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: ubuntu
spec:
containers:
- name: ubuntu
image: ubuntu
command: ["sh", "-c", "sleep infinity"]
securityContext:
privileged: true
EOF
pod/ubuntu created
root@k8s-ctr:~/kubespray# kubectl exec -it ubuntu -- sh -c 'ulimit -a'
time(seconds) unlimited
file(blocks) unlimited
data(kbytes) unlimited
stack(kbytes) 8192
coredump(blocks) unlimited
memory(kbytes) unlimited
locked memory(kbytes) unlimited
process unlimited
nofiles 65535
vmemory(kbytes) unlimited
locks unlimited
rtprio 0
Kernel-level limits
root@k8s-ctr:~/kubespray# sysctl fs.file-max
fs.file-max = 9223372036854775807
root@k8s-ctr:~/kubespray# cat /proc/sys/fs/file-max
9223372036854775807
root@k8s-ctr:~/kubespray# cat /proc/sys/fs/file-nr
2656 0 9223372036854775807
User-level limits (PAM)
root@k8s-ctr:~/kubespray# grep "^[^#]" /etc/security/limits.conf
cat /etc/security/limits.conf
# /etc/security/limits.conf
#
#This file sets the resource limits for the users logged in via PAM.
#It does not affect resource limits of the system services.
#
#Also note that configuration files in /etc/security/limits.d directory,
#which are read in alphabetical order, override the settings in this
#file in case the domain is the same or more specific.
#That means, for example, that setting a limit for wildcard domain here
#can be overridden with a wildcard setting in a config file in the
#subdirectory, but a user specific setting here can be overridden only
#with a user specific setting in the subdirectory.
#
#Each line describes a limit for a user in the form:
#
#<domain> <type> <item> <value>
#
#Where:
#<domain> can be:
# - a user name
# - a group name, with @group syntax
# - the wildcard *, for default entry
# - the wildcard %, can be also used with %group syntax,
# for maxlogin limit
#
#<type> can have the two values:
# - "soft" for enforcing the soft limits
# - "hard" for enforcing hard limits
#
#<item> can be one of the following:
# - core - limits the core file size (KB)
# - data - max data size (KB)
# - fsize - maximum filesize (KB)
# - memlock - max locked-in-memory address space (KB)
# - nofile - max number of open file descriptors
# - rss - max resident set size (KB)
# - stack - max stack size (KB)
# - cpu - max CPU time (MIN)
# - nproc - max number of processes
# - as - address space limit (KB)
# - maxlogins - max number of logins for this user
# - maxsyslogins - max number of logins on the system
# - priority - the priority to run user process with
# - locks - max number of file locks the user can hold
# - sigpending - max number of pending signals
# - msgqueue - max memory used by POSIX message queues (bytes)
# - nice - max nice priority allowed to raise to values: [-20, 19]
# - rtprio - max realtime priority
#
#<domain> <type> <item> <value>
#
#* soft core 0
#* hard rss 10000
#@student hard nproc 20
#@faculty soft nproc 20
#@faculty hard nproc 50
#ftp hard nproc 0
#@student - maxlogins 4
# End of file
root@k8s-ctr:~/kubespray# ulimit -n
524288
Systemd service-level limits
root@k8s-ctr:~/kubespray# cat /proc/<PID>/limits | grep "Max open files"
bash: PID: No such file or directory
root@k8s-ctr:~/kubespray# systemctl show kubelet | grep LimitNOFILE
LimitNOFILE=524288
LimitNOFILESoft=1024
root@k8s-ctr:~/kubespray# cat /proc/$(pidof kubelet)/limits | grep open
Max open files 1000000 1000000 files
root@k8s-ctr:~/kubespray# systemctl show containerd | grep LimitNOFILE
LimitNOFILE=1048576
LimitNOFILESoft=1048576
root@k8s-ctr:~/kubespray# cat /proc/$(pidof containerd)/limits | grep open
Max open files 1048576 1048576 files
Applying the setting change (re-running the play limited to the control plane)
Patch the base runtime spec so runc inherits the host's default ulimits as-is, then re-run the play limited to the control-plane node.
root@k8s-ctr:~/kubespray# cat << EOF >> inventory/mycluster/group_vars/all/containerd.yml
containerd_default_base_runtime_spec_patch:
process:
rlimits: []
EOF
root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/all/containerd.yml
---
containerd_default_base_runtime_spec_patch:
process:
rlimits: []
root@k8s-ctr:~/kubespray# ansible-playbook -i inventory/mycluster/inventory.ini -v cluster.yml --tags "container-engine" --limit k8s-ctr -e kube_version="1.33.3"
...
PLAY RECAP **************************************************************************************
k8s-ctr : ok=84 changed=5 unreachable=0 failed=0 skipped=196 rescued=0 ignored=0
Sunday 01 February 2026 05:07:56 +0900 (0:
# Apply the manual method below instead!
cat << EOF > /etc/containerd/cri-base.json
{"ociVersion": "1.2.1", "process": {"user": {"uid": 0, "gid": 0}, "cwd": "/", "capabilities": {"bounding": ["CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE"], "effective": ["CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE"], "permitted": ["CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE"]}, "noNewPrivileges": true}, "root": {"path": "rootfs"}, "mounts": [{"destination": "/proc", "type": "proc", "source": "proc", "options": ["nosuid", "noexec", "nodev"]}, {"destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": ["nosuid", "strictatime", "mode=755", "size=65536k"]}, {"destination": "/dev/pts", "type": "devpts", "source": "devpts", "options": ["nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"]}, {"destination": "/dev/shm", "type": "tmpfs", "source": "shm", "options": ["nosuid", "noexec", "nodev", "mode=1777", "size=65536k"]}, {"destination": "/dev/mqueue", "type": "mqueue", "source": "mqueue", "options": ["nosuid", "noexec", "nodev"]}, {"destination": "/sys", "type": "sysfs", "source": "sysfs", "options": ["nosuid", "noexec", "nodev", "ro"]}, {"destination": "/run", "type": "tmpfs", "source": "tmpfs", "options": ["nosuid", "strictatime", "mode=755", "size=65536k"]}], "linux": {"resources": {"devices": [{"allow": false, "access": "rwm"}]}, "cgroupsPath": "/default", "namespaces": [{"type": "pid"}, {"type": "ipc"}, {"type": "uts"}, {"type": "mount"}, {"type": "network"}], "maskedPaths": ["/proc/acpi", "/proc/asound", "/proc/kcore", 
"/proc/keys", "/proc/latency_stats", "/proc/timer_list", "/proc/timer_stats", "/proc/sched_debug", "/sys/firmware", "/sys/devices/virtual/powercap", "/proc/scsi"], "readonlyPaths": ["/proc/bus", "/proc/fs", "/proc/irq", "/proc/sys", "/proc/sysrq-trigger"]}}
EOF
cat /etc/containerd/cri-base.json | jq | grep rlimits
cat /etc/containerd/cri-base.json | jq
systemctl restart containerd.service
systemctl status containerd.service --no-pager
root@k8s-ctr:~/kubespray# kubectl delete pod ubuntu
pod "ubuntu" deleted
root@k8s-ctr:~/kubespray#
root@k8s-ctr:~/kubespray#
root@k8s-ctr:~/kubespray#
root@k8s-ctr:~/kubespray# cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: ubuntu
spec:
containers:
- name: ubuntu
image: ubuntu
command: ["sh", "-c", "sleep infinity"]
securityContext:
privileged: true
EOF
pod/ubuntu created
root@k8s-ctr:~/kubespray# kubectl exec -it ubuntu -- sh -c 'ulimit -a'
time(seconds) unlimited
file(blocks) unlimited
data(kbytes) unlimited
stack(kbytes) 8192
coredump(blocks) unlimited
memory(kbytes) unlimited
locked memory(kbytes) unlimited
process unlimited
nofiles 1048576
vmemory(kbytes) unlimited
locks unlimited
rtprio 0
A Pod's process limits are not decided by Kubernetes security settings or the privileged flag; they are determined by the OCI Runtime Spec that containerd hands over at container-creation time, and runc ultimately applies them to the kernel.
So which components does the container-engine tag actually touch?
It includes containerd, runc, docker, cri-o, crictl, nerdctl, skopeo, kata and gvisor —
meaning the container-engine tasks in Kubespray are not merely a containerd install,
but a stage that reconciles the node's entire container runtime environment in one pass.
root@k8s-ctr:~/kubespray# ansible-playbook \
-i inventory/mycluster/inventory.ini \
-v cluster.yml \
--tags "container-engine" \
--list-tasks
Using /root/kubespray/ansible.cfg as config file
[WARNING]: Could not match supplied host pattern, ignoring: bastion
[WARNING]: Could not match supplied host pattern, ignoring: k8s_cluster
[WARNING]: Could not match supplied host pattern, ignoring: calico_rr
[WARNING]: Could not match supplied host pattern, ignoring: _kubespray_needs_etcd
playbook: cluster.yml
play #1 (all): Check Ansible version TAGS: [always]
tasks:
Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }} TAGS: [always, check]
Check that python netaddr is installed TAGS: [always, check]
Check that jinja is not too old (install via pip) TAGS: [always, check]
play #2 (all): Inventory setup and validation TAGS: [always]
tasks:
dynamic_groups : Match needed groups by their old names or definition TAGS: [always]
validate_inventory : Stop if removed tags are used TAGS: [always]
validate_inventory : Stop if kube_control_plane group is empty TAGS: [always]
validate_inventory : Stop if etcd group is empty in external etcd mode TAGS: [always]
validate_inventory : Warn if `kube_network_plugin` is `none TAGS: [always]
validate_inventory : Stop if unsupported version of Kubernetes TAGS: [always]
validate_inventory : Stop if known booleans are set as strings (Use JSON format on CLI: -e "{'key': true }") TAGS: [always]
validate_inventory : Stop if even number of etcd hosts TAGS: [always]
validate_inventory : Guarantee that enough network address space is available for all podsTAGS: [always]
validate_inventory : Stop if RBAC is not enabled when dashboard is enabled TAGS: [always]
validate_inventory : Check cloud_provider value TAGS: [always]
validate_inventory : Check external_cloud_provider value TAGS: [always]
validate_inventory : Check that kube_service_addresses is a network range TAGS: [always]
validate_inventory : Check that kube_pods_subnet is a network range TAGS: [always]
validate_inventory : Check that kube_pods_subnet does not collide with kube_service_addresses TAGS: [always]
validate_inventory : Check that ipv4 IP range is enough for the nodes TAGS: [always]
validate_inventory : Check that kube_service_addresses_ipv6 is a network range TAGS: [always]
validate_inventory : Check that kube_pods_subnet_ipv6 is a network range TAGS: [always]
validate_inventory : Check that kube_pods_subnet_ipv6 does not collide with kube_service_addresses_ipv6 TAGS: [always]
validate_inventory : Check that ipv6 IP range is enough for the nodes TAGS: [always]
validate_inventory : Stop if unsupported options selected TAGS: [always]
validate_inventory : Warn if `enable_dual_stack_networks` is set TAGS: [always]
validate_inventory : Stop if download_localhost is enabled but download_run_once is not TAGS: [always]
validate_inventory : Stop if kata_containers_enabled is enabled when container_manager is docker TAGS: [always]
validate_inventory : Stop if gvisor_enabled is enabled when container_manager is not containerd TAGS: [always]
validate_inventory : Ensure minimum containerd version TAGS: [always]
validate_inventory : Stop if auto_renew_certificates is enabled when certificates are managed externally (kube_external_ca_mode is true) TAGS: [always]
play #3 (bastion[0]): Install bastion ssh config TAGS: []
tasks:
play #4 (k8s_cluster:etcd:calico_rr): Bootstrap hosts for Ansible TAGS: []
tasks:
play #5 (k8s_cluster:etcd:calico_rr): Gather facts TAGS: [always]
tasks:
network_facts : Gather ansible_default_ipv4 TAGS: [always]
network_facts : Set fallback_ip TAGS: [always]
network_facts : Gather ansible_default_ipv6 TAGS: [always]
network_facts : Set fallback_ip6 TAGS: [always]
network_facts : Set main access ip(access_ip based on ipv4_stack/ipv6_stack options). TAGS: [always]
network_facts : Set main ip(ip based on ipv4_stack/ipv6_stack options). TAGS: [always]
network_facts : Set main access ips(mixed ips for dualstack). TAGS: [always]
network_facts : Set main ips(mixed ips for dualstack). TAGS: [always]
network_facts : Set no_proxy to all assigned cluster IPs and hostnames TAGS: [always]
network_facts : Populates no_proxy to all hosts TAGS: [always]
Gather minimal facts TAGS: [always]
Gather necessary facts (network) TAGS: [always]
Gather necessary facts (hardware) TAGS: [always]
play #6 (k8s_cluster:etcd): Prepare for etcd install TAGS: []
tasks:
container-engine/validate-container-engine : Validate-container-engine | check if fedora coreos TAGS: [container-engine, facts, validate-container-engine]
container-engine/validate-container-engine : Validate-container-engine | set is_ostree TAGS: [container-engine, facts, validate-container-engine]
container-engine/validate-container-engine : Ensure kubelet systemd unit exists TAGS: [container-engine, facts, validate-container-engine]
container-engine/validate-container-engine : Populate service facts TAGS: [container-engine, facts, validate-container-engine]
container-engine/validate-container-engine : Check if containerd is installed TAGS: [container-engine, facts, validate-container-engine]
container-engine/validate-container-engine : Check if docker is installed TAGS: [container-engine, facts, validate-container-engine]
container-engine/validate-container-engine : Check if crio is installed TAGS: [container-engine, facts, validate-container-engine]
Drain node TAGS: [container-engine, validate-container-engine]
container-engine/validate-container-engine : Stop kubelet TAGS: [container-engine, validate-container-engine]
container-engine/containerd-common : Containerd-common | check if fedora coreos TAGS: [container-engine, validate-container-engine]
container-engine/containerd-common : Containerd-common | set is_ostree TAGS: [container-engine, validate-container-engine]
container-engine/containerd-common : Containerd-common | gather os specific variables TAGS: [container-engine, facts, validate-container-engine]
container-engine/runc : Runc | check if fedora coreos TAGS: [container-engine, validate-container-engine]
container-engine/runc : Runc | set is_ostree TAGS: [container-engine, validate-container-engine]
container-engine/runc : Runc | Uninstall runc package managed by package manager TAGS: [container-engine, validate-container-engine]
container-engine/runc : Runc | Download runc binary TAGS: [container-engine, validate-container-engine]
container-engine/runc : Copy runc binary from download dir TAGS: [container-engine, validate-container-engine]
container-engine/runc : Runc | Remove orphaned binary TAGS: [container-engine, validate-container-engine]
container-engine/crictl : Install crictl TAGS: [container-engine, validate-container-engine]
container-engine/nerdctl : Nerdctl | Download nerdctl TAGS: [container-engine, validate-container-engine]
container-engine/nerdctl : Nerdctl | Copy nerdctl binary from download dir TAGS: [container-engine, validate-container-engine]
container-engine/nerdctl : Nerdctl | Create configuration dir TAGS: [container-engine, validate-container-engine]
container-engine/nerdctl : Nerdctl | Install nerdctl configuration TAGS: [container-engine, validate-container-engine]
container-engine/containerd : Containerd | Stop containerd service TAGS: [container-engine, reset_containerd, validate-container-engine]
container-engine/containerd : Containerd | Remove configuration files TAGS: [container-engine, reset_containerd, validate-container-engine]
Drain node TAGS: [container-engine, validate-container-engine]
container-engine/validate-container-engine : Stop kubelet TAGS: [container-engine, validate-container-engine]
container-engine/containerd-common : Containerd-common | check if fedora coreos TAGS: [container-engine, validate-container-engine]
container-engine/containerd-common : Containerd-common | set is_ostree TAGS: [container-engine, validate-container-engine]
container-engine/containerd-common : Containerd-common | gather os specific variables TAGS: [container-engine, facts, validate-container-engine]
container-engine/docker : Docker | Get package facts TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | Find docker packages TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | Stop all running container TAGS: [container-engine, validate-container-engine]
container-engine/docker : Reset | remove all containers TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | Stop docker service TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | Remove dpkg hold TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | Remove docker package TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | ensure docker-ce repository is removed TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | Remove docker repository on Fedora TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | Remove docker repository on RedHat/CentOS/Oracle/AlmaLinux Linux TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | Remove docker configuration files TAGS: [container-engine, validate-container-engine]
container-engine/docker : Docker | systemctl daemon-reload TAGS: [container-engine, validate-container-engine]
Drain node TAGS: [container-engine, validate-container-engine]
container-engine/validate-container-engine : Stop kubelet TAGS: [container-engine, validate-container-engine]
container-engine/crictl : Install crictl TAGS: [container-engine, validate-container-engine]
container-engine/skopeo : Skopeo | check if fedora coreos TAGS: [container-engine, validate-container-engine]
container-engine/skopeo : Skopeo | set is_ostree TAGS: [container-engine, validate-container-engine]
container-engine/skopeo : Skopeo | Uninstall skopeo package managed by package manager TAGS: [container-engine, validate-container-engine]
container-engine/skopeo : Skopeo | Download skopeo binary TAGS: [container-engine, validate-container-engine]
container-engine/skopeo : Copy skopeo binary from download dir TAGS: [container-engine, validate-container-engine]
container-engine/cri-o : Cri-o | include vars/v1.29.yml TAGS: [container-engine, validate-container-engine]
container-engine/cri-o : Cri-o | include vars/v1.31.yml TAGS: [container-engine, validate-container-engine]
container-engine/cri-o : CRI-O | Kubic repo name for debian os family TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Remove kubic apt repo TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Remove cri-o apt repo TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Remove CRI-O kubic yum repo TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Remove CRI-O kubic yum repo TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Run yum-clean-metadata TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Remove crictl TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Stop crio service TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Remove CRI-O configuration files TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Remove CRI-O binaries TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/cri-o : CRI-O | Remove CRI-O libexec TAGS: [container-engine, reset_crio, validate-container-engine]
container-engine/kata-containers : Kata-containers | Download kata binary TAGS: [container-engine, kata-containers]
container-engine/kata-containers : Kata-containers | Copy kata-containers binary TAGS: [container-engine, kata-containers]
container-engine/kata-containers : Kata-containers | Create config directory TAGS: [container-engine, kata-containers]
container-engine/kata-containers : Kata-containers | Set configuration TAGS: [container-engine, kata-containers]
container-engine/kata-containers : Kata-containers | Set containerd bin TAGS: [container-engine, kata-containers]
container-engine/kata-containers : Kata-containers | Load vhost kernel modules TAGS: [container-engine, kata-containers]
container-engine/kata-containers : Kata-containers | Persist vhost kernel modules TAGS: [container-engine, kata-containers]
container-engine/gvisor : GVisor | Download runsc binary TAGS: [container-engine, gvisor]
container-engine/gvisor : GVisor | Download containerd-shim-runsc-v1 binary TAGS: [container-engine, gvisor]
container-engine/gvisor : GVisor | Copy binaries TAGS: [container-engine, gvisor]
container-engine/crun : Crun | Download crun binary TAGS: [container-engine, crun]
container-engine/crun : Copy crun binary from download dir TAGS: [container-engine, crun]
container-engine/youki : Youki | Download youki TAGS: [container-engine, youki]
container-engine/youki : Youki | Copy youki binary from download dir TAGS: [container-engine, youki]
container-engine/crictl : Install crictl TAGS: [container-engine, crio]
container-engine/skopeo : Skopeo | check if fedora coreos TAGS: [container-engine, crio]
container-engine/skopeo : Skopeo | set is_ostree TAGS: [container-engine, crio]
container-engine/skopeo : Skopeo | Uninstall skopeo package managed by package manager TAGS: [container-engine, crio]
container-engine/skopeo : Skopeo | Download skopeo binary TAGS: [container-engine, crio]
container-engine/skopeo : Copy skopeo binary from download dir TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | include vars/v1.29.yml TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | include vars/v1.31.yml TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | check if fedora coreos TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | set is_ostree TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | get ostree version TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | Download cri-o TAGS: [container-engine, crio]
container-engine/cri-o : Check that amzn2-extras.repo exists TAGS: [container-engine, crio]
container-engine/cri-o : Find docker repo in amzn2-extras.repo file TAGS: [container-engine, crio]
container-engine/cri-o : Remove docker repository TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | build a list of crio runtimes with Katacontainers runtimesTAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | build a list of crio runtimes with runc runtime TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | build a list of crio runtimes with youki runtime TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | Stop kubelet service if running TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | Get all pods TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | Stop and remove pods not on host network TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | Stop and remove all remaining pods TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | stop crio service if running TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | make sure needed folders exist in the system TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | install cri-o config TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | install config.json TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | copy binaries TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | create directory for libexec TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | copy libexec TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | copy service file TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | configure crio to use kube reserved cgroups TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | update the bin dir for crio.service file TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | copy default policy TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | copy mounts.conf TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | create directory for oci hooks TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | set overlay driver TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | set metacopy mount options correctly TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | create directory registries configs TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | write registries configs TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | configure unqualified registry settings TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | write cri-o proxy drop-in TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | configure the uid/gid space for user namespaces TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | ensure crio service is started and enabled TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | trigger service restart only when needed TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | verify that crio is running TAGS: [container-engine, crio]
container-engine/cri-o : Cri-o | ensure kubelet service is started if present and stopped TAGS: [container-engine, crio]
container-engine/containerd-common : Containerd-common | check if fedora coreos TAGS: [container-engine, containerd]
container-engine/containerd-common : Containerd-common | set is_ostree TAGS: [container-engine, containerd]
container-engine/containerd-common : Containerd-common | gather os specific variables TAGS: [container-engine, containerd, facts]
container-engine/runc : Runc | check if fedora coreos TAGS: [container-engine, containerd]
container-engine/runc : Runc | set is_ostree TAGS: [container-engine, containerd]
container-engine/runc : Runc | Uninstall runc package managed by package manager TAGS: [container-engine, containerd]
container-engine/runc : Runc | Download runc binary TAGS: [container-engine, containerd]
container-engine/runc : Copy runc binary from download dir TAGS: [container-engine, containerd]
container-engine/runc : Runc | Remove orphaned binary TAGS: [container-engine, containerd]
container-engine/crictl : Install crictl TAGS: [container-engine, containerd]
container-engine/nerdctl : Nerdctl | Download nerdctl TAGS: [container-engine, containerd]
container-engine/nerdctl : Nerdctl | Copy nerdctl binary from download dir TAGS: [container-engine, containerd]
container-engine/nerdctl : Nerdctl | Create configuration dir TAGS: [container-engine, containerd]
container-engine/nerdctl : Nerdctl | Install nerdctl configuration TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Download containerd TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Unpack containerd archive TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Generate systemd service for containerd TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Ensure containerd directories exist TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Write containerd proxy drop-in TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Generate default base_runtime_spec TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Store generated default base_runtime_spec TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Write base_runtime_specs TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Copy containerd config file TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Create registry directories TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Write hosts.toml file TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Flush handlers TAGS: [container-engine, containerd]
container-engine/containerd : Containerd | Ensure containerd is started and enabled TAGS: [container-engine, containerd]
container-engine/containerd-common : Containerd-common | check if fedora coreos TAGS: [container-engine, docker]
container-engine/containerd-common : Containerd-common | set is_ostree TAGS: [container-engine, docker]
container-engine/containerd-common : Containerd-common | gather os specific variables TAGS: [container-engine, docker, facts]
container-engine/docker : Check if fedora coreos TAGS: [container-engine, docker]
container-engine/docker : Set is_ostree TAGS: [container-engine, docker]
container-engine/docker : Gather os specific variables TAGS: [container-engine, docker, facts]
container-engine/docker : Warn about Docker version on SUSE TAGS: [container-engine, docker]
container-engine/docker : Gather DNS facts TAGS: [container-engine, docker, facts]
container-engine/docker : Remove legacy docker repo file TAGS: [container-engine, docker]
container-engine/docker : Ensure old versions of Docker are not installed. | Debian TAGS: [container-engine, docker]
container-engine/docker : Ensure podman not installed. | RedHat TAGS: [container-engine, docker]
container-engine/docker : Ensure old versions of Docker are not installed. | RedHat TAGS: [container-engine, docker]
container-engine/docker : Ensure docker-ce repository public key is installed TAGS: [container-engine, docker]
container-engine/docker : Convert -backports sources to archive.debian.org for bullseye and older TAGS: [container-engine, docker]
container-engine/docker : Ensure docker-ce repository is enabled TAGS: [container-engine, docker]
container-engine/docker : Configure docker repository on Fedora TAGS: [container-engine, docker]
container-engine/docker : Configure docker repository on RedHat/CentOS/OracleLinux/AlmaLinux/KylinLinux TAGS: [container-engine, docker]
container-engine/docker : Remove dpkg hold TAGS: [container-engine, docker]
container-engine/docker : Ensure docker packages are installed TAGS: [container-engine, docker]
container-engine/docker : Tell Debian hosts not to change the docker version with apt upgrade TAGS: [container-engine, docker]
container-engine/docker : Ensure service is started if docker packages are already presentTAGS: [container-engine, docker]
container-engine/docker : Flush handlers so we can wait for docker to come up TAGS: [container-engine, docker]
container-engine/docker : Install docker plugin TAGS: [container-engine, docker]
container-engine/docker : Create docker service systemd directory if it doesn't exist TAGS: [container-engine, docker]
container-engine/docker : Write docker proxy drop-in TAGS: [container-engine, docker]
container-engine/docker : Write docker.service systemd file TAGS: [container-engine, docker]
container-engine/docker : Write docker options systemd drop-in TAGS: [container-engine, docker]
container-engine/docker : Write docker dns systemd drop-in TAGS: [container-engine, docker]
container-engine/docker : Copy docker orphan clean up script to the node TAGS: [container-engine, docker]
container-engine/docker : Write docker orphan clean up systemd drop-in TAGS: [container-engine, docker]
container-engine/docker : Flush handlers TAGS: [container-engine, docker]
container-engine/docker : Ensure docker service is started and enabled TAGS: [container-engine, docker]
container-engine/crictl : Install crictl TAGS: [container-engine, docker]
container-engine/cri-dockerd : Runc | Download cri-dockerd binary TAGS: [container-engine, docker]
container-engine/cri-dockerd : Copy cri-dockerd binary from download dir TAGS: [container-engine, docker]
container-engine/cri-dockerd : Generate cri-dockerd systemd unit files TAGS: [container-engine, docker]
container-engine/cri-dockerd : Flush handlers TAGS: [container-engine, docker]
play #7 (kube_node): Add worker nodes to the etcd play if needed TAGS: []
tasks:
play #8 (etcd:kube_control_plane:_kubespray_needs_etcd): Install etcd TAGS: []
tasks:
play #9 (k8s_cluster): Install Kubernetes nodes TAGS: []
tasks:
play #10 (kube_control_plane): Install the control plane TAGS: []
tasks:
play #11 (k8s_cluster): Invoke kubeadm and install a CNI TAGS: []
tasks:
helm-apps : Validating arguments against arg spec 'main' - Install a list of Helm charts. TAGS: [always, custom_cni, network]
play #12 (calico_rr): Install Calico Route Reflector TAGS: []
tasks:
play #13 (kube_control_plane[0]): Patch Kubernetes for Windows TAGS: []
tasks:
play #14 (kube_control_plane): Install Kubernetes apps TAGS: []
tasks:
helm-apps : Validating arguments against arg spec 'main' - Install a list of Helm charts. TAGS: [always, apps, kubelet-csr-approver]
play #15 (k8s_cluster): Apply resolv.conf changes now that cluster DNS is up TAGS: []
tasks:
특히 흐름을 자세히 보면, 그냥 설치만 하는 게 아니라 이미 깔려 있을 수도 있는 런타임들을 전부 점검하고, 선택되지 않은 런타임은 노드 drain → kubelet 중지 → 런타임 제거를 수행한다.
kubeadm 바이너리 / 컨테이너 이미지 다운로드
kubeadm이 필요로 하는 컨트롤 플레인 이미지 목록을 미리 계산하는 Kubespray의 다운로드 파이프라인을 살펴보도록 한다.
root@k8s-ctr:~/kubespray# cat roles/download/tasks/prep_kubeadm_images.yml
---
# Fetch the kubeadm binary through the common download pipeline
# (download_file.yml), honoring the global skip switch and the
# per-artifact enabled flag.
- name: Prep_kubeadm_images | Download kubeadm binary
include_tasks: "download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.kubeadm) }}"
when:
- not skip_downloads | default(false)
- downloads.kubeadm.enabled
# Install the downloaded binary into bin_dir so the tasks below can
# invoke "kubeadm config ...".
- name: Prep_kubeadm_images | Copy kubeadm binary from download dir to system path
copy:
src: "{{ downloads.kubeadm.dest }}"
dest: "{{ bin_dir }}/kubeadm"
mode: "0755"
remote_src: true
# Render the kubeadm config used only to compute the image list;
# optionally validated with "kubeadm config validate".
- name: Prep_kubeadm_images | Create kubeadm config
template:
src: "kubeadm-images.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
mode: "0644"
validate: "{{ kubeadm_config_validate_enabled | ternary(bin_dir + '/kubeadm config validate --config %s', omit) }}"
when:
- not skip_kubeadm_images | default(false)
# Ask kubeadm for the control-plane image list; coredns and pause are
# filtered out because Kubespray manages those images separately.
- name: Prep_kubeadm_images | Generate list of required images
shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'"
args:
executable: /bin/bash
register: kubeadm_images_raw
run_once: true
changed_when: false
when:
- not skip_kubeadm_images | default(false)
# Split each "repo:tag" output line into the key/value shape expected
# by the generic downloads structure (repo, tag, container flag, groups).
- name: Prep_kubeadm_images | Parse list of images
vars:
kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
set_fact:
kubeadm_image:
key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*', '')).split(':')[0] }}"
value:
enabled: true
container: true
repo: "{{ item | regex_replace('^(.*):.*$', '\\1') }}"
tag: "{{ item | regex_replace('^.*:(.*)$', '\\1') }}"
groups:
- k8s_cluster
loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
register: kubeadm_images_cooked
run_once: true
when:
- not skip_kubeadm_images | default(false)
# Collapse the per-item loop results into one dict keyed by image name
# for later consumption by the download role.
- name: Prep_kubeadm_images | Convert list of images to dict for later use
set_fact:
kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
run_once: true
when:
- not skip_kubeadm_images | default(false)
kubeadm이 출력한 이미지 목록은 단순한 문자열 리스트 형태이기 때문에, 그대로는 Kubespray의 다운로드 로직에서 사용할 수 없다. 그래서 Kubespray는 이 리스트를 하나씩 꺼내서 공통으로 사용하는 downloads 구조에 맞게 다시 가공한다.
이 과정에서 각 이미지는 이미지 이름을 기준으로 한 key 값, 실제 저장소 주소(repo), 태그(tag), 컨테이너 이미지 여부, 그리고 어떤 노드 그룹에서 사용할 것인지(k8s_cluster) 같은 메타데이터로 분해된다.
만약 kubeadm이 필요로 하는 이미지라 하더라도 Kubespray 입장에서는 결국 다운로드해야 할 아티팩트 중 하나일 뿐이기 때문에, kubeadm 이미지 역시 etcd 이미지나 CNI 이미지, containerd 바이너리와 동일한 다운로드 파이프라인을 타게 되고, 결과적으로 설치 과정 전반에서 이미지 관리 방식이 일관되게 유지된다.
install_etcd.yml

root@k8s-ctr:~/kubespray# cat playbooks/install_etcd.yml
---
# Decide which worker nodes need etcd client certificates: network
# plugins that talk to etcd directly (flannel/canal/cilium, or calico
# with the etcd datastore) require them. Matching hosts are added to
# the dynamic group _kubespray_needs_etcd.
- name: Add worker nodes to the etcd play if needed
hosts: kube_node
roles:
- { role: kubespray_defaults }
tasks:
- name: Check if nodes needs etcd client certs (depends on network_plugin)
group_by:
key: "_kubespray_needs_etcd"
when:
- kube_network_plugin in ["flannel", "canal", "cilium"] or
(cilium_deploy_additionally | default(false)) or
(kube_network_plugin == "calico" and calico_datastore == "etcd")
- etcd_deployment_type != "kubeadm"
tags: etcd
# Install etcd on etcd members, control-plane nodes, and any host that
# joined _kubespray_needs_etcd above. Skipped entirely when etcd is
# managed by kubeadm.
- name: Install etcd
hosts: etcd:kube_control_plane:_kubespray_needs_etcd
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray_defaults }
- role: etcd
tags: etcd
when: etcd_deployment_type != "kubeadm"
install_etcd.yml은 클러스터 안에서 누가 etcd에 접근해야 하는지 정의하고, 그에 맞는 설치 범위를 결정한다.
root@k8s-ctr:~/kubespray# tree ~/kubespray/roles/etcd
/root/kubespray/roles/etcd
βββ handlers
β βββ backup_cleanup.yml
β βββ backup.yml
β βββ main.yml
βββ meta
β βββ main.yml
βββ tasks
β βββ check_certs.yml
β βββ configure.yml
β βββ gen_certs_script.yml
β βββ gen_nodes_certs_script.yml
β βββ install_docker.yml
β βββ install_host.yml
β βββ join_etcd-events_member.yml
β βββ join_etcd_member.yml
β βββ main.yml
β βββ refresh_config.yml
β βββ upd_ca_trust.yml
βββ templates
βββ etcd-docker.service.j2
βββ etcd.env.j2
βββ etcd-events-docker.service.j2
βββ etcd-events.env.j2
βββ etcd-events-host.service.j2
βββ etcd-events.j2
βββ etcd-host.service.j2
βββ etcd.j2
βββ make-ssl-etcd.sh.j2
βββ openssl.conf.j2
5 directories, 25 files
root@k8s-ctr:~/kubespray# systemctl status etcd.service --no-pager
β etcd.service - etcd
Loaded: loaded (/etc/systemd/system/etcd.service; enabled; preset: disabled)
Active: active (running) since Thu 2026-01-29 02:43:57 KST; 3 days ago
Invocation: 3c7c280979f44467b4109035c356415a
Main PID: 17852 (etcd)
Tasks: 12 (limit: 24792)
Memory: 318.6M (peak: 335.5M)
CPU: 39min 7.945s
CGroup: /system.slice/etcd.service
ββ17852 /usr/local/bin/etcd
Feb 01 05:07:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:07:53.601930+0900…32085}
Feb 01 05:12:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:12:53.618206+0900…32929}
Feb 01 05:12:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:12:53.628062+0900","cal…
Feb 01 05:12:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:12:53.628129+0900…32502}
Feb 01 05:17:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:17:53.625719+0900…33362}
Feb 01 05:17:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:17:53.634790+0900","cal…
Feb 01 05:17:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:17:53.634866+0900…32929}
Feb 01 05:22:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:22:53.632116+0900…33780}
Feb 01 05:22:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:22:53.639231+0900","cal…
Feb 01 05:22:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:22:53.639292+0900…33362}
Hint: Some lines were ellipsized, use -l to show in full.
root@k8s-ctr:~/kubespray# cat /etc/systemd/system/etcd.service
[Unit]
Description=etcd
After=network.target
[Service]
# Type=notify: etcd signals readiness via sd_notify; NotifyAccess=all
# lets any process in the unit's cgroup deliver that notification.
Type=notify
User=root
# All runtime flags come from the Ansible-rendered environment file.
EnvironmentFile=/etc/etcd.env
ExecStart=/usr/local/bin/etcd
NotifyAccess=all
# Always restart on exit, with a 10s back-off.
Restart=always
RestartSec=10s
# Raised open-file limit for many concurrent client connections.
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target
root@k8s-ctr:~/kubespray#
cat /etc/etcd.env
# Environment file for etcd 3.5.25
ETCD_DATA_DIR=/var/lib/etcd
# Addresses advertised to clients and peers.
ETCD_ADVERTISE_CLIENT_URLS=https://192.168.10.10:2379
ETCD_INITIAL_ADVERTISE_PEER_URLS=https://192.168.10.10:2380
ETCD_INITIAL_CLUSTER_STATE=existing
ETCD_METRICS=basic
# Listen on the node IP plus loopback for local etcdctl access.
ETCD_LISTEN_CLIENT_URLS=https://192.168.10.10:2379,https://127.0.0.1:2379
# Raft timing (milliseconds).
ETCD_ELECTION_TIMEOUT=5000
ETCD_HEARTBEAT_INTERVAL=250
ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=https://192.168.10.10:2380
ETCD_NAME=etcd1
ETCD_PROXY=off
# Single-member cluster running on the control-plane node.
ETCD_INITIAL_CLUSTER=etcd1=https://192.168.10.10:2380
# Compaction and storage limits.
ETCD_AUTO_COMPACTION_RETENTION=8
ETCD_SNAPSHOT_COUNT=100000
ETCD_QUOTA_BACKEND_BYTES=2147483648
ETCD_MAX_REQUEST_BYTES=1572864
ETCD_LOG_LEVEL=info
ETCD_MAX_SNAPSHOTS=5
ETCD_MAX_WALS=5
# Flannel needs the etcd v2 API
ETCD_ENABLE_V2=true
# TLS settings (member certs for serving, same pair reused for peers)
ETCD_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.pem
ETCD_CERT_FILE=/etc/ssl/etcd/ssl/member-k8s-ctr.pem
ETCD_KEY_FILE=/etc/ssl/etcd/ssl/member-k8s-ctr-key.pem
ETCD_CLIENT_CERT_AUTH=true
ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.pem
ETCD_PEER_CERT_FILE=/etc/ssl/etcd/ssl/member-k8s-ctr.pem
ETCD_PEER_KEY_FILE=/etc/ssl/etcd/ssl/member-k8s-ctr-key.pem
ETCD_PEER_CLIENT_CERT_AUTH=True
# CLI settings (picked up by etcdctl when run with this environment)
ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
ETCDCTL_CACERT=/etc/ssl/etcd/ssl/ca.pem
ETCDCTL_KEY=/etc/ssl/etcd/ssl/admin-k8s-ctr-key.pem
ETCDCTL_CERT=/etc/ssl/etcd/ssl/admin-k8s-ctr.pem
# ETCD 3.5.x issue
# https://groups.google.com/a/kubernetes.io/g/dev/c/B7gJs88XtQc/m/rSgNOzV2BwAJ?utm_medium=email&utm_source=footer
ETCD_EXPERIMENTAL_INITIAL_CORRUPT_CHECK=True
ETCD_EXPERIMENTAL_WATCH_PROGRESS_NOTIFY_INTERVAL=5s
데몬 형태로 기동 중인 etcd 서비스이다.
etcdctl 확인
root@k8s-ctr:~/kubespray# ss -tnlp | grep etcd
LISTEN 0 4096 192.168.10.10:2380 0.0.0.0:* users:(("etcd",pid=17852,fd=6))
LISTEN 0 4096 192.168.10.10:2379 0.0.0.0:* users:(("etcd",pid=17852,fd=8))
LISTEN 0 4096 127.0.0.1:2379 0.0.0.0:* users:(("etcd",pid=17852,fd=7))
root@k8s-ctr:~/kubespray# etcdctl.sh member list -w table
+------------------+---------+-------+----------------------------+----------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------+----------------------------+----------------------------+------------+
| a997582217e26c7f | started | etcd1 | https://192.168.10.10:2380 | https://192.168.10.10:2379 | false |
+------------------+---------+-------+----------------------------+----------------------------+------------+
root@k8s-ctr:~/kubespray# cat /usr/local/bin/etcdctl.sh
#!/bin/bash
# Ansible managed
# example invocation: etcdctl.sh get --keys-only --from-key ""
# Wrapper that runs etcdctl with the cluster admin client certificate.
# NOTE(review): no --endpoints is set here — presumably etcdctl's
# default (127.0.0.1:2379) or an exported ETCDCTL_ENDPOINTS applies;
# confirm against the invoking environment.
etcdctl \
--cacert /etc/ssl/etcd/ssl/ca.pem \
--cert /etc/ssl/etcd/ssl/admin-k8s-ctr.pem \
--key /etc/ssl/etcd/ssl/admin-k8s-ctr-key.pem "$@"
root@k8s-ctr:~/kubespray# tree /etc/ssl/etcd
/etc/ssl/etcd
βββ openssl.conf
βββ ssl
βββ admin-k8s-ctr-key.pem
βββ admin-k8s-ctr.pem
βββ ca-key.pem
βββ ca.pem
βββ member-k8s-ctr-key.pem
βββ member-k8s-ctr.pem
βββ node-k8s-ctr-key.pem
βββ node-k8s-ctr.pem
2 directories, 9 files
root@k8s-ctr:~/kubespray# cat /etc/ssl/etcd/openssl.conf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
# Extensions for CSRs: non-CA certificates bound to the SANs below.
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
# Client certs also carry serverAuth so one cert works in both roles.
[ ssl_client ]
extendedKeyUsage = clientAuth, serverAuth
basicConstraints = CA:FALSE
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer
subjectAltName = @alt_names
# Extensions used when issuing the etcd CA certificate itself.
[ v3_ca ]
basicConstraints = CA:TRUE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
authorityKeyIdentifier=keyid:always,issuer
# Subject Alternative Names shared by the etcd certificates.
[alt_names]
DNS.1 = localhost
DNS.2 = k8s-ctr
DNS.3 = lb-apiserver.kubernetes.local
DNS.4 = etcd.kube-system.svc.cluster.local
DNS.5 = etcd.kube-system.svc
DNS.6 = etcd.kube-system
DNS.7 = etcd
IP.1 = 192.168.10.10
IP.2 = 127.0.0.1
IP.3 = ::1
nodes
kubelet과 kube-proxy가 문제 없이 동작할 수 있는 OS 상태를 먼저 만든다.
---
# Gather node facts (network, OS, runtime info) that later conditionals
# and computed settings are based on.
- name: Fetch facts
import_tasks: facts.yml
tags:
- facts
- kubelet
# Guarantee the state directory used by CNI plugins exists.
- name: Ensure /var/lib/cni exists
file:
path: /var/lib/cni
state: directory
mode: "0755"
# Place the actual kubelet binary on the node.
- name: Install kubelet binary
import_tasks: install.yml
tags:
- kubelet
# On control-plane nodes using kube-vip, install it to provide the
# API server VIP.
- name: Install kube-vip
import_tasks: loadbalancer/kube-vip.yml
when:
- ('kube_control_plane' in group_names)
- kube_vip_enabled
tags:
- kube-vip
# On worker nodes (or certain IPv6 bind setups), install an nginx
# proxy for localhost-based API server access.
- name: Install nginx-proxy
import_tasks: loadbalancer/nginx-proxy.yml
when:
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'nginx'
tags:
- nginx
# Same purpose (API server access proxy) when haproxy is chosen
# instead of nginx.
- name: Install haproxy
import_tasks: loadbalancer/haproxy.yml
when:
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'haproxy'
tags:
- haproxy
# Reserve the NodePort range as local ports in the kernel so host
# processes cannot collide with NodePort services.
- name: Ensure nodePort range is reserved
ansible.posix.sysctl:
name: net.ipv4.ip_local_reserved_ports
value: "{{ kube_apiserver_node_port_range }}"
sysctl_set: true
sysctl_file: "{{ sysctl_file_path }}"
state: present
reload: true
ignoreerrors: "{{ sysctl_ignore_unknown_keys }}"
when: kube_apiserver_node_port_range is defined
tags:
- kube-proxy
# Check whether the br_netfilter kernel module exists; kube-proxy and
# the CNI need it. rc 1 (module absent) is tolerated here.
- name: Verify if br_netfilter module exists
command: "modinfo br_netfilter"
environment:
PATH: "{{ ansible_env.PATH }}:/sbin"
register: modinfo_br_netfilter
failed_when: modinfo_br_netfilter.rc not in [0, 1]
changed_when: false
check_mode: false
# Guarantee the directories that hold kernel-module configuration files.
- name: Verify br_netfilter module path exists
file:
path: "{{ item }}"
state: directory
mode: "0755"
loop:
- /etc/modules-load.d
- /etc/modprobe.d
# Load br_netfilter immediately.
- name: Enable br_netfilter module
community.general.modprobe:
name: br_netfilter
state: present
when: modinfo_br_netfilter.rc == 0
# Make br_netfilter load automatically at boot.
- name: Persist br_netfilter module
copy:
dest: /etc/modules-load.d/kubespray-br_netfilter.conf
content: br_netfilter
mode: "0644"
when: modinfo_br_netfilter.rc == 0
# Probe whether the bridge-nf sysctl key exists (some kernels expose
# it even without br_netfilter loaded).
- name: Check if bridge-nf-call-iptables key exists
command: "sysctl net.bridge.bridge-nf-call-iptables"
failed_when: false
changed_when: false
check_mode: false
register: sysctl_bridge_nf_call_iptables
# Pass bridged traffic through iptables/arptables/ip6tables — a core
# setting for Services and NetworkPolicy to behave correctly.
- name: Enable bridge-nf-call tables
ansible.posix.sysctl:
name: "{{ item }}"
state: present
sysctl_file: "{{ sysctl_file_path }}"
value: "1"
reload: true
ignoreerrors: "{{ sysctl_ignore_unknown_keys }}"
when: sysctl_bridge_nf_call_iptables.rc == 0
with_items:
- net.bridge.bridge-nf-call-iptables
- net.bridge.bridge-nf-call-arptables
- net.bridge.bridge-nf-call-ip6tables
# Load the kernel modules kube-proxy needs in IPVS mode.
- name: Modprobe Kernel Module for IPVS
community.general.modprobe:
name: "{{ item }}"
state: present
persistent: present
loop: "{{ kube_proxy_ipvs_modules }}"
when: kube_proxy_mode == 'ipvs'
tags:
- kube-proxy
# Also load conntrack modules in IPVS mode; multiple module names are
# tried to cover kernel/distro differences, so errors are ignored.
- name: Modprobe conntrack module
community.general.modprobe:
name: "{{ item }}"
state: present
persistent: present
register: modprobe_conntrack_module
ignore_errors: true
loop:
- nf_conntrack
- nf_conntrack_ipv4
when:
- kube_proxy_mode == 'ipvs'
- modprobe_conntrack_module is not defined or modprobe_conntrack_module is ansible.builtin.failed
tags:
- kube-proxy
# Load the kernel module kube-proxy needs in nftables mode.
- name: Modprobe Kernel Module for nftables
community.general.modprobe:
name: "nf_tables"
state: present
persistent: present
when: kube_proxy_mode == 'nftables'
tags:
- kube-proxy
# Configure the kubelet systemd service, generate its config, and
# prepare it to run.
- name: Install kubelet
import_tasks: kubelet.yml
tags:
- kubelet
- kubeadm
kubelet
root@k8s-ctr:~/kubespray# cat roles/kubernetes/node/tasks/kubelet.yml
---
# Pin the KubeletConfiguration API version used to select the
# templates rendered below.
- name: Set kubelet api version to v1beta1
set_fact:
kubeletConfig_api_version: v1beta1
tags:
- kubelet
- kubeadm
# Render the kubelet environment file consumed by the systemd unit;
# any change triggers a kubelet restart via the notify handler.
- name: Write kubelet environment config file (kubeadm)
template:
src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
dest: "{{ kube_config_dir }}/kubelet.env"
setype: "{{ (preinstall_selinux_state != 'disabled') | ternary('etc_t', omit) }}"
backup: true
mode: "0600"
notify: Node | restart kubelet
tags:
- kubelet
- kubeadm
# Render the KubeletConfiguration YAML.
- name: Write kubelet config file
template:
src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2"
dest: "{{ kube_config_dir }}/kubelet-config.yaml"
mode: "0600"
notify: Kubelet | restart kubelet
tags:
- kubelet
- kubeadm
# Install the kubelet systemd unit. The validate command only runs
# systemd-analyze when factory-reset.target exists (systemd >= 250).
- name: Write kubelet systemd init file
template:
src: "kubelet.service.j2"
dest: "/etc/systemd/system/kubelet.service"
backup: true
mode: "0600"
validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:kubelet.service'"
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
# Remove once we drop support for systemd < 250
notify: Node | restart kubelet
tags:
- kubelet
- kubeadm
# Apply pending restart handlers now so kubelet picks up new config.
- name: Flush_handlers and reload-systemd
meta: flush_handlers
# Ensure kubelet is enabled at boot and currently running.
- name: Enable kubelet
service:
name: kubelet
enabled: true
state: started
tags:
- kubelet
notify: Kubelet | restart kubelet
root@k8s-ctr:~/kubespray# cat /etc/kubernetes/kubelet-config.yaml
# KubeletConfiguration rendered by Kubespray (kubelet-config.v1beta1 template).
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
nodeStatusUpdateFrequency: "10s"
failSwapOn: True
# Reject anonymous requests; authenticate via webhook or x509 client certs.
authentication:
anonymous:
enabled: false
webhook:
enabled: True
x509:
clientCAFile: /etc/kubernetes/ssl/ca.crt
# Webhook mode delegates authorization decisions to the API server.
authorization:
mode: Webhook
staticPodPath: "/etc/kubernetes/manifests"
cgroupDriver: systemd
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
containerRuntimeEndpoint : unix:///var/run/containerd/containerd.sock
maxPods: 110
podPidsLimit: -1
address: "192.168.10.10"
# Unauthenticated read-only port disabled.
readOnlyPort: 0
healthzPort: 10248
healthzBindAddress: "127.0.0.1"
kubeletCgroups: /system.slice/kubelet.service
clusterDomain: cluster.local
protectKernelDefaults: true
rotateCertificates: true
clusterDNS:
- 10.233.0.3
# Resources set aside for Kubernetes daemons and for the OS itself.
kubeReserved:
cpu: "100m"
memory: "256Mi"
ephemeral-storage: "500Mi"
pid: "1000"
systemReserved:
cpu: "500m"
memory: "512Mi"
ephemeral-storage: "500Mi"
pid: "1000"
resolvConf: "/etc/resolv.conf"
eventRecordQPS: 50
# Graceful node shutdown: total window / portion reserved for critical pods.
shutdownGracePeriod: 60s
shutdownGracePeriodCriticalPods: 20s
maxParallelImagePulls: 1
control-plane
root@k8s-ctr:~/kubespray# tree roles/kubernetes/control-plane/
roles/kubernetes/control-plane/
βββ defaults
β βββ main
β βββ etcd.yml
β βββ kube-proxy.yml
β βββ kube-scheduler.yml
β βββ main.yml
βββ handlers
β βββ main.yml
βββ meta
β βββ main.yml
βββ tasks
β βββ check-api.yml
β βββ define-first-kube-control.yml
β βββ encrypt-at-rest.yml
β βββ kubeadm-backup.yml
β βββ kubeadm-etcd.yml
β βββ kubeadm-fix-apiserver.yml
β βββ kubeadm-secondary.yml
β βββ kubeadm-setup.yml
β βββ kubeadm-upgrade.yml
β βββ kubelet-fix-client-cert-rotation.yml
β βββ main.yml
β βββ pre-upgrade.yml
βββ templates
β βββ admission-controls.yaml.j2
β βββ apiserver-audit-policy.yaml.j2
β βββ apiserver-audit-webhook-config.yaml.j2
β βββ apiserver-tracing.yaml.j2
β βββ eventratelimit.yaml.j2
β βββ k8s-certs-renew.service.j2
β βββ k8s-certs-renew.sh.j2
β βββ k8s-certs-renew.timer.j2
β βββ kubeadm-config.v1beta3.yaml.j2
β βββ kubeadm-config.v1beta4.yaml.j2
β βββ kubeadm-controlplane.yaml.j2
β βββ kubescheduler-config.yaml.j2
β βββ podnodeselector.yaml.j2
β βββ podsecurity.yaml.j2
β βββ resourcequota.yaml.j2
β βββ secrets_encryption.yaml.j2
β βββ webhook-authorization-config.yaml.j2
β βββ webhook-token-auth-config.yaml.j2
βββ vars
βββ main.yaml
8 directories, 37 files
---
# Pre-upgrade housekeeping for the control plane (clears config
# conflicts, certificate issues, etc. ahead of a version change).
- name: Pre-upgrade control plane
import_tasks: pre-upgrade.yml
tags:
- k8s-pre-upgrade
# When kube-apiserver uses webhook-based token authentication,
# render the corresponding auth config file.
- name: Create webhook token auth config
template:
src: webhook-token-auth-config.yaml.j2
dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
mode: "0640"
when: kube_webhook_token_auth | default(false)
# When kube-apiserver uses webhook-based authorization, render the
# corresponding authz config file.
- name: Create webhook authorization config
template:
src: webhook-authorization-config.yaml.j2
dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
mode: "0640"
when: kube_webhook_authorization | default(false)
# Render the structured AuthorizationConfiguration file the apiserver
# reads directly when that mechanism is enabled.
- name: Create structured AuthorizationConfiguration file
copy:
content: "{{ authz_config | to_nice_yaml(indent=2, sort_keys=false) }}"
dest: "{{ kube_config_dir }}/apiserver-authorization-config-{{ kube_apiserver_authorization_config_api_version }}.yaml"
mode: "0640"
vars:
authz_config:
apiVersion: apiserver.config.k8s.io/{{ kube_apiserver_authorization_config_api_version }}
kind: AuthorizationConfiguration
authorizers: "{{ kube_apiserver_authorization_config_authorizers }}"
when: kube_apiserver_use_authorization_config_file
# Render the kube-scheduler configuration (scheduling policies,
# profiles) passed to kube-scheduler.
- name: Create kube-scheduler config
template:
src: kubescheduler-config.yaml.j2
dest: "{{ kube_config_dir }}/kubescheduler-config.yaml"
mode: "0644"
# Configure encryption-at-rest so Secrets are stored encrypted in etcd.
- name: Apply Kubernetes encrypt at rest config
import_tasks: encrypt-at-rest.yml
when:
- kube_encrypt_secret_data
tags:
- kube-apiserver
# Install the kubectl binary on the node for control-plane admin work.
- name: Install | Copy kubectl binary from download dir
copy:
src: "{{ downloads.kubectl.dest }}"
dest: "{{ bin_dir }}/kubectl"
mode: "0755"
remote_src: true
tags:
- kubectl
- upgrade
# Generate the kubectl bash completion script.
- name: Install kubectl bash completion
shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh"
when: ansible_os_family in ["Debian","RedHat", "Suse"]
tags:
- kubectl
ignore_errors: true
# Fix permissions on the completion script.
- name: Set kubectl bash completion file permissions
file:
path: /etc/bash_completion.d/kubectl.sh
owner: root
group: root
mode: "0755"
when: ansible_os_family in ["Debian","RedHat", "Suse"]
tags:
- kubectl
- upgrade
ignore_errors: true
# Define a shell alias for kubectl (e.g. k -> kubectl) with completion.
- name: Set bash alias for kubectl
blockinfile:
path: /etc/bash_completion.d/kubectl.sh
block: |-
alias {{ kubectl_alias }}=kubectl
if [[ $(type -t compopt) = "builtin" ]]; then
complete -o default -F __start_kubectl {{ kubectl_alias }}
else
complete -o default -o nospace -F __start_kubectl {{ kubectl_alias }}
fi
state: present
marker: "# Ansible entries {mark}"
when:
- ansible_os_family in ["Debian","RedHat", "Suse"]
- kubectl_alias is defined and kubectl_alias != ""
tags:
- kubectl
- upgrade
ignore_errors: true
# Identify nodes already joined to an existing cluster and pick the
# first control-plane node.
- name: Define nodes already joined to existing cluster and first_kube_control_plane
import_tasks: define-first-kube-control.yml
# kubeadm-based control-plane init/join logic.
- name: Include kubeadm setup
import_tasks: kubeadm-setup.yml
# Extra etcd tasks when etcd itself is managed by kubeadm.
- name: Include kubeadm etcd extra tasks
include_tasks: kubeadm-etcd.yml
when: etcd_deployment_type == "kubeadm"
# Fixes for secondary apiservers in multi-control-plane environments.
- name: Include kubeadm secondary server apiserver fixes
include_tasks: kubeadm-fix-apiserver.yml
# Remove AuthorizationConfiguration files for API versions not in use.
- name: Cleanup unused AuthorizationConfiguration file versions
file:
path: "{{ kube_config_dir }}/apiserver-authorization-config-{{ item }}.yaml"
state: absent
loop: "{{ ['v1alpha1', 'v1beta1', 'v1'] | reject('equalto', kube_apiserver_authorization_config_api_version) | list }}"
when: kube_apiserver_use_authorization_config_file
# Fixes for kubelet client certificate auto-rotation issues.
- name: Include kubelet client cert rotation fixes
include_tasks: kubelet-fix-client-cert-rotation.yml
when: kubelet_rotate_certificates
# Install the helper script that renews control-plane certificates.
- name: Install script to renew K8S control plane certificates
template:
src: k8s-certs-renew.sh.j2
dest: "{{ bin_dir }}/k8s-certs-renew.sh"
mode: "0755"
# Install the systemd service/timer for automatic certificate renewal.
# The validate command only runs systemd-analyze on systemd >= 250.
- name: Renew K8S control plane certificates monthly 1/2
template:
src: "{{ item }}.j2"
dest: "/etc/systemd/system/{{ item }}"
mode: "0644"
validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:{{item}}'"
with_items:
- k8s-certs-renew.service
- k8s-certs-renew.timer
register: k8s_certs_units
when: auto_renew_certificates
# Enable and start the renewal timer.
- name: Renew K8S control plane certificates monthly 2/2
systemd_service:
name: k8s-certs-renew.timer
enabled: true
state: started
daemon_reload: "{{ k8s_certs_units is changed }}"
when: auto_renew_certificates
kubeadm을 중심으로 컨트롤 플레인을 구성하고, 보안(인증/인가), 스케줄링, 인증서 수명 주기까지 포함하고 있어서 운영 가능한 상태의 Kubernetes 컨트롤 플레인을 구성한다.
client
root@k8s-ctr:~/kubespray# tree roles/kubernetes/client/
roles/kubernetes/client/
βββ defaults
β βββ main.yml
βββ tasks
βββ main.yml
3 directories, 2 files
root@k8s-ctr:~/kubespray#
cat roles/kubernetes/client/defaults/main.yml
---
# Fetch the admin kubeconfig to the Ansible control machine when true.
kubeconfig_localhost: false
# Use ansible_host of the first control-plane node as the external
# apiserver address in the fetched kubeconfig.
kubeconfig_localhost_ansible_host: false
# Also fetch the kubectl binary to the Ansible control machine.
kubectl_localhost: false
artifacts_dir: "{{ inventory_dir }}/artifacts"
kube_config_dir: "/etc/kubernetes"
---
# Determine the externally reachable kube-apiserver endpoint.
# Priority: explicit LB address -> ansible_host of the first
# control-plane node (when enabled) -> kube_apiserver_access_address.
- name: Set external kube-apiserver endpoint
set_fact:
external_apiserver_address: >-
{%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined -%}
{{ loadbalancer_apiserver.address }}
{%- elif kubeconfig_localhost_ansible_host is defined and kubeconfig_localhost_ansible_host -%}
{{ hostvars[groups['kube_control_plane'][0]].ansible_host }}
{%- else -%}
{{ kube_apiserver_access_address }}
{%- endif -%}
external_apiserver_port: >-
{%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and loadbalancer_apiserver.port is defined -%}
{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{%- else -%}
{{ kube_apiserver_port }}
{%- endif -%}
tags:
- facts
# Create the kubeconfig directory (~/.kube) for the current/become user.
- name: Create kube config dir for current/ansible become user
file:
path: "{{ ansible_env.HOME | default('/root') }}/.kube"
mode: "0700"
state: directory
# Copy the control-plane admin.conf into that user's home directory.
- name: Copy admin kubeconfig to current/ansible become user home
copy:
src: "{{ kube_config_dir }}/admin.conf"
dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
remote_src: true
mode: "0600"
backup: true
# Prepare the local artifacts directory when the kubeconfig is to be
# fetched to the Ansible control machine.
- name: Create kube artifacts dir
file:
path: "{{ artifacts_dir }}"
mode: "0750"
state: directory
delegate_to: localhost
connection: local
become: false
run_once: true
when: kubeconfig_localhost
# Safety check: wait until kube-apiserver actually answers before
# fetching the kubeconfig.
- name: Wait for k8s apiserver
wait_for:
host: "{{ kube_apiserver_access_address }}"
port: "{{ kube_apiserver_port }}"
timeout: 180
# Read admin.conf from the remote node (slurp returns base64 content).
- name: Get admin kubeconfig from remote host
slurp:
src: "{{ kube_config_dir }}/admin.conf"
run_once: true
register: raw_admin_kubeconfig
when: kubeconfig_localhost
# base64 -> YAML
- name: Convert kubeconfig to YAML
set_fact:
admin_kubeconfig: "{{ raw_admin_kubeconfig.content | b64decode | from_yaml }}"
when: kubeconfig_localhost
# Rewrite kubeconfig internals:
# - cluster name
# - context name
# - user name
# - apiserver address replaced with the external endpoint
- name: Override username in kubeconfig
set_fact:
final_admin_kubeconfig: "{{ admin_kubeconfig
| combine(override_cluster_name, recursive=true)
| combine(override_context, recursive=true)
| combine(override_user, recursive=true) }}"
vars:
cluster_infos: "{{ admin_kubeconfig['clusters'][0]['cluster'] }}"
user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}"
username: "kubernetes-admin-{{ cluster_name }}"
context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}"
override_cluster_name: >-
{{ {'clusters': [{
'cluster': (cluster_infos | combine({
'server': 'https://' + (external_apiserver_address | ansible.utils.ipwrap) + ':' + (external_apiserver_port | string)
})),
'name': cluster_name
}]} }}
override_context: >-
{{ {'contexts': [{
'context': {'user': username, 'cluster': cluster_name},
'name': context
}], 'current-context': context} }}
override_user: >-
{{ {'users': [{'name': username, 'user': user_certs}]} }}
when: kubeconfig_localhost
# Persist the final kubeconfig on the Ansible control machine.
- name: Write admin kubeconfig on ansible host
copy:
content: "{{ final_admin_kubeconfig | to_nice_yaml(indent=2) }}"
dest: "{{ artifacts_dir }}/admin.conf"
mode: "0600"
delegate_to: localhost
connection: local
become: false
run_once: true
when: kubeconfig_localhost
# Fetch the kubectl binary to the Ansible control machine.
- name: Copy kubectl binary to ansible host
fetch:
src: "{{ bin_dir }}/kubectl"
dest: "{{ artifacts_dir }}/kubectl"
flat: true
validate_checksum: false
register: copy_binary_result
until: copy_binary_result is not failed
retries: 20
become: false
run_once: true
when: kubectl_localhost
# Helper script so kubectl can be used locally right away:
# ./kubectl.sh get nodes
- name: Create helper script kubectl.sh on ansible host
copy:
content: |
#!/bin/bash
${BASH_SOURCE%/*}/kubectl --kubeconfig=${BASH_SOURCE%/*}/admin.conf "$@"
dest: "{{ artifacts_dir }}/kubectl.sh"
mode: "0755"
become: false
run_once: true
delegate_to: localhost
connection: local
when: kubectl_localhost and kubeconfig_localhost
클러스터를 누가, 어디서, 어떤 방식으로 관리할 것인지에 대한 내용을 정의한다.
cluster-roles
root@k8s-ctr:~/kubespray# tree roles/kubernetes-apps/cluster_roles/
roles/kubernetes-apps/cluster_roles/
βββ files
β βββ k8s-cluster-critical-pc.yml
βββ tasks
β βββ main.yml
βββ templates
βββ namespace.j2
βββ node-crb.yml.j2
βββ vsphere-rbac.yml.j2
4 directories, 5 files
---
# kube-apiserverκ° μ€μ λ‘ μ΄μμλμ§ νμΈ
# /healthz μλν¬μΈνΈκ° 200μ λ°νν λκΉμ§ λκΈ°
# → RBAC 리μμ€λ₯Ό μ μ©νκΈ° μ μ API μλ² μ€λΉ μ¬λΆλ₯Ό 보μ₯
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: false
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result
until: result.status == 200
retries: 10
delay: 6
# μ΅μ΄ control-plane λ
Έλμμλ§ μ€ν
when: inventory_hostname == groups['kube_control_plane'][0]
# Render the ClusterRoleBinding manifest that allows nodes to join the
# cluster: clients presenting a certificate with organization (O)
# system:nodes are admitted by the API server through this binding.
- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
  template:
    src: "node-crb.yml.j2"
    dest: "{{ kube_config_dir }}/node-crb.yml"
    mode: "0640"
  register: node_crb_manifest
  when:
    - rbac_enabled
    - inventory_hostname == groups['kube_control_plane'][0]
# Apply the ClusterRoleBinding rendered above.
# -> Workaround so kubelets holding a certificate with O=system:nodes are
#    not rejected by RBAC and can register with the cluster normally.
- name: Apply workaround to allow all nodes with cert O=system:nodes to register
  kube:
    name: "kubespray:system:node"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "clusterrolebinding"
    filename: "{{ kube_config_dir }}/node-crb.yml"
    state: latest
  register: result
  until: result is succeeded
  retries: 10
  delay: 6
  # Only apply when the manifest actually changed, and only on the first
  # control-plane node
  when:
    - rbac_enabled
    - node_crb_manifest.changed
    - inventory_hostname == groups['kube_control_plane'][0]
# Remove the node-webhook ClusterRole used by older versions.
# -> Cleans up permissions/configuration that are no longer needed.
- name: Kubernetes Apps | Remove old webhook ClusterRole
  kube:
    name: "system:node-webhook"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "clusterrole"
    state: absent
  when:
    - rbac_enabled
    - inventory_hostname == groups['kube_control_plane'][0]
  tags: node-webhook
# Also remove the paired ClusterRoleBinding that went with the ClusterRole
# deleted above.
- name: Kubernetes Apps | Remove old webhook ClusterRoleBinding
  kube:
    name: "system:node-webhook"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "clusterrolebinding"
    state: absent
  when:
    - rbac_enabled
    - inventory_hostname == groups['kube_control_plane'][0]
  tags: node-webhook
# Copy the PriorityClass manifest for cluster-critical components.
# This is the priority that the scheduler protects until the very last
# moment when evicting pods.
- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
  copy:
    src: k8s-cluster-critical-pc.yml
    dest: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
    mode: "0640"
  # Run only on the last control-plane node
  when: inventory_hostname == groups['kube_control_plane'] | last
# Create the k8s-cluster-critical PriorityClass.
# -> Ensures control-plane, networking, and core addon pods are not the
#    first to be killed under resource pressure.
- name: PriorityClass | Create k8s-cluster-critical
  kube:
    name: k8s-cluster-critical
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "PriorityClass"
    filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
    state: latest
  register: result
  until: result is succeeded
  retries: 10
  delay: 6
  when: inventory_hostname == groups['kube_control_plane'] | last
kube-apiserver가 준비되었는지 확인한 뒤, 노드 인증서(O=system:nodes)를 가진 kubelet들이 RBAC에 막히지 않고 클러스터에 정상적으로 등록될 수 있도록 ClusterRoleBinding을 보장한다
kubernetes-apps
root@k8s-ctr:~/kubespray# tree roles/kubernetes-apps/ -L 1
roles/kubernetes-apps/
├── ansible
├── argocd
├── cluster_roles
├── common_crds
├── container_engine_accelerator
├── container_runtimes
├── csi_driver
├── defaults
├── external_cloud_controller
├── external_provisioner
├── helm
├── ingress_controller
├── kubelet-csr-approver
├── meta
├── metallb
├── metrics_server
├── node_feature_discovery
├── persistent_volumes
├── policy_controller
├── registry
├── scheduler_plugins
├── snapshots
└── vars
24 directories, 0 files
이번 실습 때 설정한 app인 coredns, helm, metrics server를 살펴보면...
root@k8s-ctr:~/kubespray# kubectl get pod -n kube-system -l app.kubernetes.io/name=metrics-server
NAME READY STATUS RESTARTS AGE
metrics-server-7cd7f9897-d5qnw 1/1 Running 0 3d2h
root@k8s-ctr:~/kubespray# helm version
version.BuildInfo{Version:"v3.18.4", GitCommit:"d80839cf37d860c8aa9a0503fe463278f26cd5e2", GitTreeState:"clean", GoVersion:"go1.24.4"}
root@k8s-ctr:~/kubespray# kubectl get deployment -n kube-system coredns dns-autoscaler -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
coredns 1/1 1 1 3d2h coredns registry.k8s.io/coredns/coredns:v1.12.0 k8s-app=kube-dns
dns-autoscaler 1/1 1 1 3d2h autoscaler registry.k8s.io/cpa/cluster-proportional-autoscaler:v1.8.8 k8s-app=dns-autoscaler
를 확인할 수 있다.
Kubespray 실습을 따라해보면서 느낀 점은, 전체 구조가 굉장히 방대해서 문제 하나만 잡아도 디버깅에 꽤 시간이 들 수 있겠다는 생각이 먼저 들었다.
그리고 롤과 태스크가 잘게 쪼개져 있어서 kubespray_install.log 를 기준으로 태스크를 하나씩 따라가다 보면 어디서 무엇을 하는지는 결국 추적이 가능한 것은 좋은 것 같긴한데...
내가 짠 코드가 아니면 해석하기 좀 난해할 수도 있겠다는 생각이 들었다 ㅎㅎㅎ (뭔가 설치한 부분이 어느 코드에 있는지 따라가는 게 쪼끔 어려운 느낌..)
'Infra > 쿠버네티스' 카테고리의 다른 글
| [K8S Deploy] #6주차 Kubespray offline 설치하기 (0) | 2026.02.15 |
|---|---|
| [K8S Deploy] #5주차 Kubespray HA & Upgrade (0) | 2026.02.08 |
| [K8S Deploy] #3주차 kubeadm upgrade (2) (0) | 2026.01.25 |
| [K8S Deploy] #3주차 kubeadm (1) (0) | 2026.01.25 |
| [K8S Deploy] #2주차 Ansible (2) (1) | 2026.01.18 |