[K8S Deploy] #4μ£Όμ°¨ Kubespray λ™μž‘ 원리 μ΄ν•΄ν•˜κΈ°

kubespray

KubesprayλŠ” Ansible 기반의 Kubernetes 배포 ν”„λ ˆμž„μ›Œν¬λ‘œ λ‹¨μˆœν•œ μ„€μΉ˜ μŠ€ν¬λ¦½νŠΈκ°€ μ•„λ‹ˆλΌ K8sλ₯Ό 운영 ν™˜κ²½μ—μ„œ ν‘œμ€€ μ•„ν‚€ν…μ²˜λ₯Ό μ½”λ“œλ‘œ κ΅¬ν˜„ν•œ ν”„λ‘œμ νŠΈλ‹€.

 

git clone -b v2.29.1 https://github.com/kubernetes-sigs/kubespray.git /root/kubespray
cd /root/kubespray

 

kubespray μ†ŒμŠ€μ½”λ“œλ₯Ό 클둠 받은 ν›„ vscodeμ—μ„œ 컨트둀 ν”Œλ ˆμΈμ— μ ‘κ·Όν•˜μ—¬ μ•„λž˜ YAML νŒŒμΌμ„ μˆ˜μ •ν•˜μ—¬ 섀정을 μ§„ν–‰ν•œλ‹€.

 

root@k8s-ctr:~/kubespray# kubectl get node -owide
NAME      STATUS   ROLES           AGE     VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                        KERNEL-VERSION                  CONTAINER-RUNTIME
k8s-ctr   Ready    control-plane   2m38s   v1.33.3   192.168.10.10   <none>        Rocky Linux 10.0 (Red Quartz)   6.12.0-55.39.1.el10_0.aarch64   containerd://2.1.5
root@k8s-ctr:~/kubespray# kubectl get pod -A
NAMESPACE                NAME                                             READY   STATUS    RESTARTS   AGE
kube-system              coredns-5d784884df-rxdf5                         1/1     Running   0          2m7s
kube-system              dns-autoscaler-676999957f-kw2vz                  1/1     Running   0          2m7s
kube-system              kube-apiserver-k8s-ctr                           1/1     Running   1          2m40s
kube-system              kube-controller-manager-k8s-ctr                  1/1     Running   2          2m40s
kube-system              kube-flannel-ds-arm64-h2rgj                      1/1     Running   0          2m13s
kube-system              kube-proxy-9kx57                                 1/1     Running   0          2m13s
kube-system              kube-scheduler-k8s-ctr                           1/1     Running   1          2m40s
kube-system              metrics-server-7cd7f9897-d5qnw                   1/1     Running   0          118s
node-feature-discovery   node-feature-discovery-gc-6c9b8f4657-9zgqh       1/1     Running   0          112s
node-feature-discovery   node-feature-discovery-master-6989794b78-ccd5t   1/1     Running   0          112s
node-feature-discovery   node-feature-discovery-worker-xdv5s              1/1     Running   0          112s

 

requirements.txt

root@k8s-ctr:~/kubespray# pip3 install -r /root/kubespray/requirements.txt
Requirement already satisfied: ansible==10.7.0 in /usr/local/lib/python3.12/site-packages (from -r /root/kubespray/requirements.txt (line 1)) (10.7.0)
Requirement already satisfied: cryptography==46.0.2 in /usr/local/lib64/python3.12/site-packages (from -r /root/kubespray/requirements.txt (line 3)) (46.0.2)
Requirement already satisfied: jmespath==1.0.1 in /usr/local/lib/python3.12/site-packages (from -r /root/kubespray/requirements.txt (line 5)) (1.0.1)
Requirement already satisfied: netaddr==1.3.0 in /usr/local/lib/python3.12/site-packages (from -r /root/kubespray/requirements.txt (line 7)) (1.3.0)
Requirement already satisfied: ansible-core~=2.17.7 in /usr/local/lib/python3.12/site-packages (from ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (2.17.14)
Requirement already satisfied: cffi>=2.0.0 in /usr/local/lib64/python3.12/site-packages (from cryptography==46.0.2->-r /root/kubespray/requirements.txt (line 3)) (2.0.0)
Requirement already satisfied: jinja2>=3.0.0 in /usr/local/lib/python3.12/site-packages (from ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (3.1.6)
Requirement already satisfied: PyYAML>=5.1 in /usr/lib64/python3.12/site-packages (from ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (6.0.1)
Requirement already satisfied: packaging in /usr/lib/python3.12/site-packages (from ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (24.2)
Requirement already satisfied: resolvelib<1.1.0,>=0.5.3 in /usr/local/lib/python3.12/site-packages (from ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (1.0.1)
Requirement already satisfied: pycparser in /usr/local/lib/python3.12/site-packages (from cffi>=2.0.0->cryptography==46.0.2->-r /root/kubespray/requirements.txt (line 3)) (3.0)
Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib64/python3.12/site-packages (from jinja2>=3.0.0->ansible-core~=2.17.7->ansible==10.7.0->-r /root/kubespray/requirements.txt (line 1)) (3.0.3)
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv


root@k8s-ctr:~/kubespray# ansible --version
ansible [core 2.17.14]
  config file = /root/kubespray/ansible.cfg
  configured module search path = ['/root/kubespray/library']
  ansible python module location = /usr/local/lib/python3.12/site-packages/ansible
  ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
  executable location = /usr/local/bin/ansible
  python version = 3.12.9 (main, Aug 14 2025, 00:00:00) [GCC 14.2.1 20250110 (Red Hat 14.2.1-7)] (/usr/bin/python3)
  jinja version = 3.1.6
  libyaml = True
  
  
root@k8s-ctr:~/kubespray# pip list
Package                   Version
------------------------- -----------
ansible                   10.7.0
ansible-core              2.17.14
attrs                     23.2.0
cffi                      2.0.0
charset-normalizer        3.3.2
cockpit                   334.1
cryptography              46.0.2
dasbus                    1.7
dbus-python               1.3.2
distro                    1.9.0
dnf                       4.20.0
file-magic                0.4.0
idna                      3.7
Jinja2                    3.1.6
jmespath                  1.0.1
jsonschema                4.19.1
jsonschema-specifications 2023.11.2
libcomps                  0.1.21
libdnf                    0.73.1
MarkupSafe                3.0.3
netaddr                   1.3.0
nftables                  0.1
packaging                 24.2
perf                      0.1
pexpect                   4.9.0
pip                       23.3.2
ptyprocess                0.7.0
pycparser                 3.0
PyGObject                 3.46.0
pyinotify                 0.9.6
python-dateutil           2.9.0.post0
python-linux-procfs       0.7.3
pyudev                    0.24.1
PyYAML                    6.0.1
pyynl                     0.0.1
referencing               0.31.1
requests                  2.32.4
resolvelib                1.0.1
rpds-py                   0.17.1
rpm                       4.19.1.1
selinux                   3.8
sepolicy                  3.8
setools                   4.5.1
setroubleshoot            3.3.33
setuptools                69.0.3
six                       1.16.0
sos                       4.10.0
systemd-python            235
urllib3                   1.26.19

 

 

인벀토리 파일

root@k8s-ctr:~/kubespray# cp -rfp /root/kubespray/inventory/sample /root/kubespray/inventory/mycluster

root@k8s-ctr:~/kubespray# cat << EOF > /root/kubespray/inventory/mycluster/inventory.ini
k8s-ctr ansible_host=192.168.10.10 ip=192.168.10.10

[kube_control_plane]
k8s-ctr

[etcd:children]
kube_control_plane

[kube_node]
k8s-ctr
EOF
cat /root/kubespray/inventory/mycluster/inventory.ini
k8s-ctr ansible_host=192.168.10.10 ip=192.168.10.10

[kube_control_plane]
k8s-ctr

[etcd:children]
kube_control_plane

[kube_node]
k8s-ctr

 

μƒ˜ν”Œ 인벀토리 νŒŒμΌμ„ λ³΅μ‚¬ν•˜μ—¬ 인벀토리λ₯Ό μž‘μ„±ν•œλ‹€.

 

μ „μ—­ μ„€μ •

root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/all/all.yml
---
bin_dir: /usr/local/bin
loadbalancer_apiserver_port: 6443
loadbalancer_apiserver_healthcheck_port: 8081
no_proxy_exclude_workers: false
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
ntp_enabled: false
ntp_manage_config: false
ntp_servers:
  - "0.pool.ntp.org iburst"
  - "1.pool.ntp.org iburst"
  - "2.pool.ntp.org iburst"
  - "3.pool.ntp.org iburst"
unsafe_show_logs: false
allow_unsupported_distribution_setup: false

 

 

ν΄λŸ¬μŠ€ν„° μ„€μ •

root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
---
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
kube_cert_dir: "{{ kube_config_dir }}/ssl"
kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true
local_release_dir: "/tmp/releases"
retry_stagger: 5
kube_owner: kube
kube_cert_group: kube-cert
kube_log_level: 2
credentials_dir: "{{ inventory_dir }}/credentials"
kube_network_plugin: flannel
kube_network_plugin_multus: false
kube_service_addresses: 10.233.0.0/18
kube_pods_subnet: 10.233.64.0/18
kube_network_node_prefix: 24
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
kube_network_node_prefix_ipv6: 120
kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
kube_apiserver_port: 6443  # (https)
kube_proxy_mode: iptables
kube_proxy_strict_arp: false
kube_proxy_nodeport_addresses: >-
  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
  [{{ kube_proxy_nodeport_addresses_cidr }}]
  {%- else -%}
  []
  {%- endif -%}
kube_encrypt_secret_data: false
cluster_name: cluster.local
ndots: 2
dns_mode: coredns
enable_nodelocaldns: false
enable_nodelocaldns_secondary: false
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
nodelocaldns_second_health_port: 9256
nodelocaldns_bind_metrics_host_ip: false
nodelocaldns_secondary_skew_seconds: 5
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local
enable_coredns_k8s_endpoint_pod_names: false
resolvconf_mode: host_resolvconf
deploy_netchecker: false
skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
container_manager: containerd
kata_containers_enabled: false
kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
k8s_image_pull_policy: IfNotPresent
kubernetes_audit: false
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
volume_cross_zone_attachment: false
persistent_volumes_enabled: false
event_ttl_duration: "1h0m0s"
auto_renew_certificates: true
auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00"
kubeadm_patches_dir: "{{ kube_config_dir }}/patches"
kubeadm_patches: []
remove_anonymous_access: false

 

 

Flannel μΈν„°νŽ˜μ΄μŠ€ μ„€μ •

root@k8s-ctr:~/kubespray# sed -i 's|kube_network_plugin: calico|kube_network_plugin: flannel|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|kube_proxy_mode: ipvs|kube_proxy_mode: iptables|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|enable_nodelocaldns: true|enable_nodelocaldns: false|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|auto_renew_certificates: false|auto_renew_certificates: true|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|# auto_renew_certificates_systemd_calendar|auto_renew_certificates_systemd_calendar|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
grep -iE 'kube_network_plugin:|kube_proxy_mode|enable_nodelocaldns:|^auto_renew_certificates' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

kube_network_plugin: flannel
kube_proxy_mode: iptables
enable_nodelocaldns: false
auto_renew_certificates: true
auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00"



root@k8s-ctr:~/kubespray# cat inventory/mycluster/group_vars/k8s_cluster/kube_control_plane.yml 
# Reservation for control plane kubernetes components
# kube_memory_reserved: 512Mi
# kube_cpu_reserved: 200m
# kube_ephemeral_storage_reserved: 2Gi
# kube_pid_reserved: "1000"

# Reservation for control plane host system
# system_memory_reserved: 256Mi
# system_cpu_reserved: 250m
# system_ephemeral_storage_reserved: 2Gi
# system_pid_reserved: "1000"

 

Addon μ„€μ •

root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/k8s_cluster/addons.yml
---
helm_enabled: true
registry_enabled: false
metrics_server_enabled: true
local_path_provisioner_enabled: false
local_volume_provisioner_enabled: false
gateway_api_enabled: false
ingress_nginx_enabled: false
ingress_publish_status_address: ""
ingress_alb_enabled: false
cert_manager_enabled: false
metallb_enabled: false
metallb_speaker_enabled: "{{ metallb_enabled }}"
metallb_namespace: "metallb-system"
argocd_enabled: false
kube_vip_enabled: false
node_feature_discovery_enabled: true


# ν…ŒμŠ€νŠΈν•  κΈ°λŠ₯을 μˆ˜μ •
sed -i 's|helm_enabled: false|helm_enabled: true|g' inventory/mycluster/group_vars/k8s_cluster/addons.yml
sed -i 's|metrics_server_enabled: false|metrics_server_enabled: true|g' inventory/mycluster/group_vars/k8s_cluster/addons.yml
sed -i 's|node_feature_discovery_enabled: false|node_feature_discovery_enabled: true|g' inventory/mycluster/group_vars/k8s_cluster/addons.yml
grep -iE 'helm_enabled:|metrics_server_enabled:|node_feature_discovery_enabled:' inventory/mycluster/group_vars/k8s_cluster/addons.yml

etcd systemd unit

root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/all/etcd.yml
---
etcd_data_dir: /var/lib/etcd
etcd_deployment_type: host

 

etcdλ₯Ό static pod둜 κ΅¬λ™ν•˜λŠ” 것이 μ•„λ‹ˆλΌ systemd μ„œλΉ„μŠ€λ‘œ μ‹€ν–‰ν•œλ‹€.

 

containerd

root@k8s-ctr:~/kubespray# cat inventory/mycluster/group_vars/all/containerd.yml
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options

# containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0

# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"

# containerd_runc_runtime:
#   name: runc
#   type: "io.containerd.runc.v2"
#   engine: ""
#   root: ""

# containerd_additional_runtimes:
# Example for Kata Containers as additional runtime:
#   - name: kata
#     type: "io.containerd.kata.v2"
#     engine: ""
#     root: ""

# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216

# Containerd debug socket location: unix or tcp format
# containerd_debug_address: ""

# Containerd log level
# containerd_debug_level: "info"

# Containerd logs format, supported values: text, json
# containerd_debug_format: ""

# Containerd debug socket UID
# containerd_debug_uid: 0

# Containerd debug socket GID
# containerd_debug_gid: 0

# containerd_metrics_address: ""

# containerd_metrics_grpc_histogram: false

# Registries defined within containerd.
# containerd_registries_mirrors:
#  - prefix: docker.io
#    mirrors:
#     - host: https://registry-1.docker.io
#       capabilities: ["pull", "resolve"]
#       skip_verify: false
#       header:
#         Authorization: "Basic XXX"

# containerd_max_container_log_line_size: 16384

# containerd_registry_auth:
#   - registry: 10.0.0.2:5000
#     username: user
#     password: pass

μ•€μ„œλΈ” 배포

ansible-playbook -i inventory/mycluster/inventory.ini -v cluster.yml -e kube_version="1.33.3" --list-tasks

ANSIBLE_FORCE_COLOR=true ansible-playbook -i inventory/mycluster/inventory.ini -v cluster.yml -e kube_version="1.33.3" | tee kubespray_install.log
...
PLAY RECAP *********************************************************************
k8s-ctr                    : ok=571  changed=24   unreachable=0    failed=0    skipped=907  rescued=0    ignored=0   

Saturday 31 January 2026  23:41:07 +0900 (0:00:00.012)       0:01:54.385 ****** 
=============================================================================== 
network_plugin/flannel : Flannel | Wait for flannel subnet.env file presence --- 5.15s
system_packages : Manage packages --------------------------------------- 4.73s
kubernetes-apps/node_feature_discovery : Node Feature Discovery | Create manifests --- 2.93s
kubernetes-apps/node_feature_discovery : Node Feature Discovery | Apply manifests --- 2.18s
kubernetes-apps/ansible : Kubernetes Apps | CoreDNS --------------------- 2.14s
etcdctl_etcdutl : Extract_file | Unpacking archive ---------------------- 1.82s
kubernetes-apps/metrics_server : Metrics Server | Create manifests ------ 1.77s
download : Download_file | Download item -------------------------------- 1.62s
kubernetes-apps/helm : Download_file | Download item -------------------- 1.61s
kubernetes-apps/helm : Extract_file | Unpacking archive ----------------- 1.56s
network_plugin/cni : CNI | Copy cni plugins ----------------------------- 1.54s
container-engine/crictl : Extract_file | Unpacking archive -------------- 1.47s
container-engine/runc : Download_file | Download item ------------------- 1.45s
container-engine/containerd : Download_file | Download item ------------- 1.43s
container-engine/crictl : Download_file | Download item ----------------- 1.42s
container-engine/containerd : Containerd | Unpack containerd archive ---- 1.42s
etcdctl_etcdutl : Download_file | Download item ------------------------- 1.42s
container-engine/nerdctl : Download_file | Download item ---------------- 1.41s
network_plugin/cni : CNI | Copy cni plugins ----------------------------- 1.40s
etcdctl_etcdutl : Copy etcd binary -------------------------------------- 1.35s

 

ν”Œλ ˆμ΄λΆ λ°°ν¬κΉŒμ§€ μ•½ 5λΆ„ 정도가 μ†Œμš”λœλ‹€.

 

root@k8s-ctr:~/kubespray# kubectl get node -owide
NAME      STATUS   ROLES           AGE     VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                        KERNEL-VERSION                  CONTAINER-RUNTIME
k8s-ctr   Ready    control-plane   2d22h   v1.33.3   192.168.10.10   <none>        Rocky Linux 10.0 (Red Quartz)   6.12.0-55.39.1.el10_0.aarch64   containerd://2.1.5

 

 

roles/kubespray_defaults

 

배포 ν™˜κ²½ λΆ„μ„ν•˜κΈ°

μ„€μΉ˜ λ°”μ΄λ„ˆλ¦¬

root@k8s-ctr:~/kubespray# tree /usr/local/bin/
/usr/local/bin/
β”œβ”€β”€ ansible
β”œβ”€β”€ ansible-community
β”œβ”€β”€ ansible-config
β”œβ”€β”€ ansible-connection
β”œβ”€β”€ ansible-console
β”œβ”€β”€ ansible-doc
β”œβ”€β”€ ansible-galaxy
β”œβ”€β”€ ansible-inventory
β”œβ”€β”€ ansible-playbook
β”œβ”€β”€ ansible-pull
β”œβ”€β”€ ansible-test
β”œβ”€β”€ ansible-vault
β”œβ”€β”€ containerd
β”œβ”€β”€ containerd-shim-runc-v2
β”œβ”€β”€ containerd-stress
β”œβ”€β”€ crictl
β”œβ”€β”€ ctr
β”œβ”€β”€ etcd
β”œβ”€β”€ etcdctl
β”œβ”€β”€ etcdctl.sh
β”œβ”€β”€ etcd-scripts
β”‚   └── make-ssl-etcd.sh
β”œβ”€β”€ etcdutl
β”œβ”€β”€ helm
β”œβ”€β”€ jp.py
β”œβ”€β”€ k8s-certs-renew.sh
β”œβ”€β”€ k9s
β”œβ”€β”€ kubeadm
β”œβ”€β”€ kubectl
β”œβ”€β”€ kubelet
β”œβ”€β”€ kubernetes-scripts
β”œβ”€β”€ nerdctl
β”œβ”€β”€ netaddr
β”œβ”€β”€ __pycache__
β”‚   └── jp.cpython-312.pyc
└── runc

 

μ„€μΉ˜λœ 버전 확인

root@k8s-ctr:~/kubespray# helm version
version.BuildInfo{Version:"v3.18.4", GitCommit:"d80839cf37d860c8aa9a0503fe463278f26cd5e2", GitTreeState:"clean", GoVersion:"go1.24.4"}


root@k8s-ctr:~/kubespray# etcdctl version
etcdctl version: 3.5.25
API version: 3.5


root@k8s-ctr:~/kubespray# containerd --version
containerd github.com/containerd/containerd/v2 v2.1.5 fcd43222d6b07379a4be9786bda52438f0dd16a1


root@k8s-ctr:~/kubespray# kubeadm version -o yaml
clientVersion:
  buildDate: "2025-07-15T18:05:14Z"
  compiler: gc
  gitCommit: 80779bd6ff08b451e1c165a338a7b69351e9b0b8
  gitTreeState: clean
  gitVersion: v1.33.3
  goVersion: go1.24.4
  major: "1"
  minor: "33"
  platform: linux/arm64

 

 

kube 계정

root@k8s-ctr:~/kubespray# cat /etc/passwd | grep kube
kube:x:990:988:Kubernetes user:/home/kube:/sbin/nologin


root@k8s-ctr:~/kubespray# find / -user kube 2>/dev/null
/etc/cni
/etc/cni/net.d
/etc/kubernetes
/etc/kubernetes/manifests
/usr/libexec/kubernetes
/usr/libexec/kubernetes/kubelet-plugins
/usr/libexec/kubernetes/kubelet-plugins/volume
/usr/libexec/kubernetes/kubelet-plugins/volume/exec
/usr/local/bin/kubernetes-scripts
/opt/cni
/opt/cni/bin
/opt/cni/bin/README.md
/opt/cni/bin/static
/opt/cni/bin/host-device
/opt/cni/bin/ipvlan
/opt/cni/bin/dhcp
/opt/cni/bin/LICENSE
/opt/cni/bin/portmap
/opt/cni/bin/tap
/opt/cni/bin/host-local
/opt/cni/bin/vlan
/opt/cni/bin/loopback
/opt/cni/bin/sbr
/opt/cni/bin/firewall
/opt/cni/bin/bandwidth
/opt/cni/bin/bridge
/opt/cni/bin/vrf
/opt/cni/bin/macvlan
/opt/cni/bin/tuning
/opt/cni/bin/dummy
/opt/cni/bin/ptp
/opt/cni/bin/flannel

 

KubesprayλŠ” Kubernetes ꡬ성 μš”μ†Œμ˜ 파일 μ†Œμœ κΆŒκ³Ό μ‹€ν–‰ κΆŒν•œμ„ λΆ„λ¦¬ν•˜κΈ° μœ„ν•΄ μ „μš© μ‹œμŠ€ν…œ μœ μ €μΈ kubeλ₯Ό μƒμ„±ν•œλ‹€.

이 계정은 둜그인 λΆˆκ°€(/sbin/nologin) μ˜΅μ…˜μ΄ μ μš©λ˜μ–΄ 있으며, 일반 μ‚¬μš©μž λͺ©μ μ΄ μ•„λ‹Œ μ„œλΉ„μŠ€ μ „μš© 계정이닀.

λ˜ν•œ kubelet, kubeadm, μΈμ¦μ„œ, λ§€λ‹ˆνŽ˜μŠ€νŠΈ 파일의 μ†Œμœ  주체둜 λ™μž‘ν•œλ‹€.

 

λ§Œμ•½ cilium cni둜 kubesprayλ₯Ό μ§„ν–‰ν•  경우 이 λ•Œ kube_ownerλ₯Ό root둜 λ³€κ²½ν•΄μ•Όν•œλ‹€.

 

 

μΈμ¦μ„œ μžλ™ κ°±μ‹  λ™μž‘ 확인

root@k8s-ctr:~/kubespray# systemctl status k8s-certs-renew.timer --no-pager
● k8s-certs-renew.timer - Timer to renew K8S control plane certificates
     Loaded: loaded (/etc/systemd/system/k8s-certs-renew.timer; enabled; preset: disabled)
     Active: active (waiting) since Thu 2026-01-29 02:44:13 KST; 3 days ago
 Invocation: 831995db84f74a4b9de89d98e6b50e66
    Trigger: Mon 2026-02-02 03:09:32 KST; 22h left
   Triggers: ● k8s-certs-renew.service

Jan 29 02:44:13 k8s-ctr systemd[1]: Started k8s-certs-renew.timer - Timer to renew K8S control plane certificates.
Hint: Some lines were ellipsized, use -l to show in full.
root@k8s-ctr:~/kubespray# cat /etc/systemd/system/k8s-certs-renew.timer
[Unit]
Description=Timer to renew K8S control plane certificates

[Timer]
OnCalendar=Mon *-*-1,2,3,4,5,6,7 03:00:00
RandomizedDelaySec=10min
FixedRandomDelay=yes
Persistent=yes

[Install]
WantedBy=multi-user.target

 

sed -i 's|auto_renew_certificates: false|auto_renew_certificates: true|g' k8s-cluster.yml

μ˜΅μ…˜μ„ 톡해 μΈμ¦μ„œ μžλ™ 갱신을 ν™œμ„±ν™”ν•˜μ˜€λ‹€.

 

λ”°λΌμ„œ ν•΄λ‹Ή 슀크립트의 μ‹œμŠ€ν…œ 데λͺ¬ 파일둜 거슬러 μ˜¬λΌκ°€λ³΄λ©΄...

 

root@k8s-ctr:~/kubespray# cat /etc/systemd/system/k8s-certs-renew.service
[Unit]
Description=Renew K8S control plane certificates

[Service]
Type=oneshot
ExecStart=/usr/local/bin/k8s-certs-renew.sh


root@k8s-ctr:~/kubespray# cat /usr/local/bin/k8s-certs-renew.sh
#!/bin/bash
# Renew the kubeadm-managed control plane certificates when they are close
# to expiring, then restart the static control plane pods so they pick up
# the renewed certificates. Intended to be driven by k8s-certs-renew.timer.

echo "## Check Expiration before renewal ##"

/usr/local/bin/kubeadm certs check-expiration

days_buffer=7 # set a time margin, because we should not renew at the last moment
# systemd calendar expression used by k8s-certs-renew.timer.
# NOTE: must be quoted — unquoted, the shell would glob-expand the '*' and
# try to execute "*-*-1,..." as a command with calendar=Mon in its env.
calendar="Mon *-*-1,2,3,4,5,6,7 03:00:00"
next_time=$(systemctl show k8s-certs-renew.timer -p NextElapseUSecRealtime --value)

if [ -z "${next_time}" ]; then
        echo "## Skip expiry comparison due to fail to parse next elapse from systemd calendar,do renewal directly ##"
else
        current_time=$(date +%s)
        # threshold = (next timer run + buffer) - now: any cert whose
        # residual lifetime is below this would expire before the run after
        # the next one, so renew it now.
        target_time=$(date -d "${next_time} + ${days_buffer} days" +%s)
        expiry_threshold=$(( target_time - current_time ))
        expired_certs=$(/usr/local/bin/kubeadm certs check-expiration -o jsonpath="{.certificates[?(@.residualTime<${expiry_threshold}.0)]}")
        if [ -z "${expired_certs}" ]; then
                echo "## Skip cert renew and K8S container restart, since all residualTimes are beyond threshold ##"
                exit 0
        fi
fi

echo "## Renewing certificates managed by kubeadm ##"
/usr/local/bin/kubeadm certs renew all

echo "## Restarting control plane pods managed by kubeadm ##"
# Removing the pod sandboxes is enough: kubelet watches the static pod
# manifests and recreates the control plane pods immediately.
/usr/local/bin/crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs /usr/local/bin/crictl rmp -f

echo "## Updating /root/.kube/config ##"
cp /etc/kubernetes/admin.conf /root/.kube/config

echo "## Waiting for apiserver to be up again ##"
until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done

echo "## Expiration after renewal ##"
/usr/local/bin/kubeadm certs check-expiration

 

 

Kubernetes control plane이 static Pod둜 κ΅¬μ„±λœ 것을 κ΄€μ°°

root@k8s-ctr:~/kubespray# crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | xargs crictl rmp -f
Stopped sandbox 43f5e0ab6fa0ce006185fd7860322f4bfd7efc35f9c63e04af8413251a0188c1
Removed sandbox 43f5e0ab6fa0ce006185fd7860322f4bfd7efc35f9c63e04af8413251a0188c1
Stopped sandbox f2b4e38064731a0312538e72062e67966bfc06fe47cd64604512a8ae7eb17c4b
Removed sandbox f2b4e38064731a0312538e72062e67966bfc06fe47cd64604512a8ae7eb17c4b
Stopped sandbox 23a141bd60f188de2d3330f34832093ffcca67db3c24bef4c6f3a0e87e2b9388
Removed sandbox 23a141bd60f188de2d3330f34832093ffcca67db3c24bef4c6f3a0e87e2b9388

 

 

kube-system λ„€μž„μŠ€νŽ˜μ΄μŠ€μ— μ‘΄μž¬ν•˜λŠ” kube-apiserver, kube-controller-manager, kube-scheduler, etcd Pod sandboxλ₯Ό κ°•μ œλ‘œ μ‚­μ œν•΄λ³Έλ‹€.

control plane μ»΄ν¬λ„ŒνŠΈλŠ” Kubernetes API둜 κ΄€λ¦¬λ˜μ§€ μ•ŠμœΌλ©° /etc/kubernetes/manifests에 μ •μ˜λœ static Podλ₯Ό kubelet이 직접 κ°μ‹œλ˜λŠ”λ° μ»¨ν…Œμ΄λ„ˆ λŸ°νƒ€μž„μ—μ„œ κ°•μ œλ‘œ μ œκ±°ν•˜λ”λΌλ„ μ¦‰μ‹œ μž¬μƒμ„±λœλ‹€.
이 ꡬ쑰 덕뢄에 KubernetesλŠ” API Server μžμ²΄κ°€ μž₯μ• κ°€ λ‚˜λ”λΌλ„ λ…Έλ“œ λ ˆλ²¨μ—μ„œ control plane을 볡ꡬ할 수 μžˆλ‹€.

 

 

bootstrap_os

TASK [bootstrap_os : Fetch /etc/os-release] ************************************
TASK [bootstrap_os : Include tasks] ******************************************** ^[[0;36mincluded: /root/kubespray/roles/bootstrap_os/tasks/rocky.yml for k8s-ctr => (item=/root/kubespray/roles/bootstrap_os/tasks/rocky.yml)^[[0mSaturday 31 January 2026  23:39:14 +0900 (0:00:00.023)       0:00:01.775 ****** 
TASK [bootstrap_os : Gather host facts to get ansible_distribution_version ansible_distribution_major_version] ***^[[0;32mok: [k8s-ctr]^[[0mSaturday 31 January 2026  23:39:14 +0900 (0:00:00.428)       0:00:02.204 ******  TASK [bootstrap_os : Add proxy to yum.conf or dnf.conf if http_proxy is defined] ***^[[0;32mok: [k8s-ctr] => {"changed": false, "gid": 0, "group": "root", "mode": "0644", "msg": "OK@@@                                                                        
TASK [bootstrap_os : Check presence of fastestmirror.conf] *********************
^[[0;32mok: [k8s-ctr] => {"changed": false, "stat": {"exists": false}}^[[0m
Saturday 31 January 2026  23:39:15 +0900 (0:00:00.113)       0:00:02.489 ******
Saturday 31 January 2026  23:39:15 +0900 (0:00:00.005)       0:00:02.494 ******
TASK [bootstrap_os : Create remote_tmp for it is used by another module] *******^[[0;32mok: [k8s-ctr] => {"changed": false, "gid": 0, "group": "root", "mode": "0700", "owner": "root", "path": "/root/.ansible/tmp", "secontext": "unconfined_u:object_r:admin_home_t:s0", "size": 42, "state": "directory", "uid": 0}^[[0m
Saturday 31 January 2026  23:39:20 +0900 (0:00:00.107)       0:00:07.679 ******

TASK [bootstrap_os : Gather facts] *********************************************
^[[0;32mok: [k8s-ctr]^[[0m
Saturday 31 January 2026  23:39:20 +0900 (0:00:00.145)       0:00:07.824 ****** 

TASK [bootstrap_os : Assign inventory name to unconfigured hostnames (non-CoreOS, non-Flatcar, Suse and ClearLinux, non-Fedora)] ***
^[[0;32mok: [k8s-ctr] => {"ansible_facts": {"ansible_domain": "", "ansible_fqdn": "k8s-ctr", "ans@@@                                
TASK [bootstrap_os : Ensure bash_completion.d folder exists] *******************
^[[0;32mok: [k8s-ctr] => {"changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/etc/bash_completion.d/", "secontext": "system_u:object_r:etc_t:s0", "size": 86, "state": "directory", "uid": 0}^[[0m

 

λ…Έλ“œλ₯Ό μ»¨ν…Œμ΄λ„ˆ 싀행이 κ°€λŠ₯ν•˜λ„λ‘ OS μƒνƒœλ₯Ό μ…‹νŒ…ν•œλ‹€.

 

 

etcd

PLAY [Prepare for etcd install] ************************************************ 
Saturday 31 January 2026  23:39:22 +0900 (0:00:00.820)       0:00:09.879 ****** 

TASK [adduser : User | Create User Group] **************************************
^[[0;32mok: [k8s-ctr] => {"changed": false, "gid": 988, "name": "kube-cert", "state": "present", "system": true}^[[0m
Saturday 31 January 2026  23:39:22 +0900 (0:00:00.138)       0:00:10.018 ******

TASK [adduser : User | Create User] ********************************************
^[[0;32mok: [k8s-ctr] => {"append": false, "changed": false, "comment": "Kubernetes user", "group": 988, "home": "/home/kube", "move_home": false, "name": "kube", "shell": "/sbin/nologin", "state": "present", "uid": 990}^[[0m
Saturday 31 January 2026  23:39:22 +0900 (0:00:00.189)       0:00:10.208 ******

 

root@k8s-ctr:~/kubespray# cat /etc/passwd | tail -n 3
vboxadd:x:991:1::/var/run/vboxadd:/bin/false
kube:x:990:988:Kubernetes user:/home/kube:/sbin/nologin
etcd:x:989:987:Etcd user:/home/etcd:/sbin/nologin

root@k8s-ctr:~/kubespray# cat /etc/group | tail -n 3
vboxdrmipc:x:989:
kube-cert:x:988:
etcd:x:987:

root@k8s-ctr:~/kubespray# find / -user etcd 2>/dev/null
/etc/ssl/etcd
/etc/ssl/etcd/ssl
/etc/ssl/etcd/ssl/admin-k8s-ctr-key.pem
/etc/ssl/etcd/ssl/admin-k8s-ctr.pem
/etc/ssl/etcd/ssl/ca-key.pem
/etc/ssl/etcd/ssl/ca.pem
/etc/ssl/etcd/ssl/member-k8s-ctr-key.pem
/etc/ssl/etcd/ssl/member-k8s-ctr.pem
/etc/ssl/etcd/ssl/node-k8s-ctr-key.pem
/etc/ssl/etcd/ssl/node-k8s-ctr.pem

 

etcd CA 및 private keyλŠ” etcd 계정 μ†Œμœ λ‘œ κ΄€λ¦¬λœλ‹€.

 

 

sysctl

root@k8s-ctr:~/kubespray# grep "^[^#]" /etc/sysctl.conf
net.ipv4.ip_forward=1
kernel.keys.root_maxbytes=25000000
kernel.keys.root_maxkeys=1000000
kernel.panic=10
kernel.panic_on_oops=1
vm.overcommit_memory=1
vm.panic_on_oom=0
net.ipv4.ip_local_reserved_ports=30000-32767
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-arptables=1
net.bridge.bridge-nf-call-ip6tables=1

 

컀널 μ„ΈνŒ…μ„ μ •λ¦¬ν•˜λ©΄ μ•„λž˜μ™€ κ°™λ‹€.

 

λ„€νŠΈμ›Œν¬ net.ipv4.ip_forward 1 IPv4 νŒ¨ν‚· ν¬μ›Œλ”© ν™œμ„±ν™” Pod ↔ Pod, Pod ↔ Service λΌμš°νŒ…μ˜ μ „μ œ 쑰건
λ„€νŠΈμ›Œν¬ net.bridge.bridge-nf-call-iptables 1 λΈŒλ¦¬μ§€ νŠΈλž˜ν”½μ„ iptables둜 전달 Service NAT, NetworkPolicy 정상 λ™μž‘
λ„€νŠΈμ›Œν¬ net.bridge.bridge-nf-call-arptables 1 ARP νŒ¨ν‚·μ„ arptables둜 처리 λΈŒλ¦¬μ§€ 기반 CNI ARP 처리 μ•ˆμ •ν™”
λ„€νŠΈμ›Œν¬ net.bridge.bridge-nf-call-ip6tables 1 IPv6 λΈŒλ¦¬μ§€ νŠΈλž˜ν”½μ„ ip6tables둜 전달 IPv6 Pod λ„€νŠΈμ›Œν¬ λŒ€λΉ„
λ„€νŠΈμ›Œν¬ net.ipv4.ip_local_reserved_ports 30000-32767 둜컬 ephemeral 포트 μ˜ˆμ•½ NodePort 포트 좩돌 λ°©μ§€
λ©”λͺ¨λ¦¬ vm.overcommit_memory 1 λ©”λͺ¨λ¦¬ overcommit ν—ˆμš© etcd μ•ˆμ •μ„± 확보 (OOM 예방)
λ©”λͺ¨λ¦¬ vm.panic_on_oom 0 OOM μ‹œ 컀널 panic λ°©μ§€ Pod λ‹¨μœ„ μž₯μ• λ₯Ό λ…Έλ“œ μž₯μ• λ‘œ ν™•μ‚° λ°©μ§€
컀널 kernel.keys.root_maxkeys 1000000 컀널 key 개수 μ œν•œ ν™•μž₯ ServiceAccount, TLS ν‚€ 관리 μ•ˆμ •ν™”
컀널 kernel.keys.root_maxbytes 25000000 컀널 key 총 μš©λŸ‰ ν™•μž₯ 인증 정보 처리 λŒ€λΉ„
μž₯μ•  λŒ€μ‘ kernel.panic_on_oops 1 컀널 oops λ°œμƒ μ‹œ panic λΆˆμ•ˆμ • μƒνƒœ 지속 λ°©μ§€
μž₯μ•  λŒ€μ‘ kernel.panic 10 panic ν›„ μžλ™ μž¬λΆ€νŒ… μ‹œκ°„(초) λ…Έλ“œ μžλ™ 볡ꡬ μ „λž΅

 

 

preinstall

root@k8s-ctr:~/kubespray# tree roles/kubernetes/preinstall/tasks/
roles/kubernetes/preinstall/tasks/
β”œβ”€β”€ 0010-swapoff.yml
β”œβ”€β”€ 0020-set_facts.yml
β”œβ”€β”€ 0040-verify-settings.yml
β”œβ”€β”€ 0050-create_directories.yml
β”œβ”€β”€ 0060-resolvconf.yml
β”œβ”€β”€ 0061-systemd-resolved.yml
β”œβ”€β”€ 0062-networkmanager-unmanaged-devices.yml
β”œβ”€β”€ 0063-networkmanager-dns.yml
β”œβ”€β”€ 0080-system-configurations.yml
β”œβ”€β”€ 0081-ntp-configurations.yml
β”œβ”€β”€ 0100-dhclient-hooks.yml
β”œβ”€β”€ 0110-dhclient-hooks-undo.yml
└── main.yml

1 directory, 13 files

 

preinstall λ‹¨κ³„λŠ” Kubernetes ꡬ성 μš”μ†Œλ₯Ό μ„€μΉ˜ν•˜κΈ° 전에 μ„€μΉ˜ν•  λ…Έλ“œλ₯Ό μΏ λ²„λ„€ν‹°μŠ€κ°€ λ™μž‘ κ°€λŠ₯ν•œ μƒνƒœλ‘œ 사전 μ •λΉ„ν•˜λŠ” 단계이닀.

 

main.yml

# ------------------------------------------------------------
# Disable swap
# - kubelet cannot run on a node where swap is enabled.
# - Enforced only when kubelet_fail_swap_on is true
# ------------------------------------------------------------
- name: Disable swap
  import_tasks: 0010-swapoff.yml
  when:
    - not dns_late                # not the special late-DNS-configuration scenario
    - kubelet_fail_swap_on        # kubelet is configured to refuse swap


# ------------------------------------------------------------
# Gather node environment information (extend Ansible facts)
# - Collects OS type, network environment, systemd usage, etc.
# - Used as the basis for all subsequent 'when' conditions
# ------------------------------------------------------------
- name: Set facts
  import_tasks: 0020-set_facts.yml
  tags:
    - resolvconf
    - facts


# ------------------------------------------------------------
# Verify preconditions
# - Validates kernel modules, sysctl, and network requirements
# - Fails early on problems to block the later stages
# ------------------------------------------------------------
- name: Check settings
  import_tasks: 0040-verify-settings.yml
  when:
    - not dns_late
  tags:
    - asserts


# ------------------------------------------------------------
# Create Kubernetes-related directories
# - /etc/kubernetes, /etc/cni, /var/lib/kubelet, etc.
# - Prevents later failures when writing certs, manifests, configs
# ------------------------------------------------------------
- name: Create directories
  import_tasks: 0050-create_directories.yml
  when:
    - not dns_late


# ------------------------------------------------------------
# DNS configuration branch (resolvconf-based environments)
# - When neither systemd-resolved nor NetworkManager is in use
# - Traditional direct management of /etc/resolv.conf
# ------------------------------------------------------------
- name: Apply resolvconf settings
  import_tasks: 0060-resolvconf.yml
  when:
    - dns_mode != 'none'                          # DNS is in use
    - resolvconf_mode == 'host_resolvconf'        # manage the host resolv.conf
    - systemd_resolved_enabled.rc != 0            # systemd-resolved not in use
    - networkmanager_enabled.rc != 0              # NetworkManager not in use
  tags:
    - bootstrap_os
    - resolvconf

# ------------------------------------------------------------
# DNS μ„€μ • λΆ„κΈ° (systemd-resolved 기반 ν™˜κ²½)
# - Ubuntu κ³„μ—΄μ—μ„œ ν”ν•œ DNS 좩돌 문제λ₯Ό λ°©μ§€
# ------------------------------------------------------------
- name: Apply systemd-resolved settings
  import_tasks: 0061-systemd-resolved.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - systemd_resolved_enabled.rc == 0             # systemd-resolved μ‚¬μš© 쀑
  tags:
    - bootstrap_os
    - resolvconf


# ------------------------------------------------------------
# NetworkManagerκ°€ CNI μΈν„°νŽ˜μ΄μŠ€λ₯Ό κ±΄λ“œλ¦¬μ§€ λͺ»ν•˜λ„둝 차단
# - flannel, calico λ“±μ˜ λ„€νŠΈμ›Œν¬ μΈν„°νŽ˜μ΄μŠ€ 보호
# ------------------------------------------------------------
- name: Apply networkmanager unmanaged devices settings
  import_tasks: 0062-networkmanager-unmanaged-devices.yml
  when:
    - networkmanager_enabled.rc == 0
  tags:
    - bootstrap_os


# ------------------------------------------------------------
# NetworkManager의 DNS 관리 κΈ°λŠ₯ μ œμ–΄
# - kubelet / CoreDNS / Pod DNS 경둜 μ•ˆμ •ν™”
# ------------------------------------------------------------
- name: Apply networkmanager DNS settings
  import_tasks: 0063-networkmanager-dns.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - networkmanager_enabled.rc == 0
  tags:
    - bootstrap_os
    - resolvconf


# ------------------------------------------------------------
# 컀널 및 μ‹œμŠ€ν…œ μ„€μ • 적용
# - sysctl (ip_forward, bridge-nf-call-iptables λ“±)
# - Kubernetes λ„€νŠΈμ›Œν¬ λ™μž‘μ˜ μ „μ œ 쑰건
# ------------------------------------------------------------
- name: Apply system configurations
  import_tasks: 0080-system-configurations.yml
  when:
    - not dns_late
  tags:
    - bootstrap_os


# ------------------------------------------------------------
# NTP μ„€μ •
# - μΈμ¦μ„œ μœ νš¨μ„±
# - etcd ν•©μ˜ μ•ˆμ •μ„±
# - 둜그 νƒ€μž„λΌμΈ μ •ν•©μ„± 확보
# ------------------------------------------------------------
- name: Configure NTP
  import_tasks: 0081-ntp-configurations.yml
  when:
    - not dns_late
    - ntp_enabled
  tags:
    - bootstrap_os


# ------------------------------------------------------------
# DHCP κ°±μ‹  μ‹œ DNS / IP λ³€κ²½μœΌλ‘œ μΈν•œ μž₯μ•  λ°©μ§€
# - dhclient ν›… μΆ”κ°€
# ------------------------------------------------------------
- name: Configure dhclient
  import_tasks: 0100-dhclient-hooks.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - dhclientconffile is defined
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  tags:
    - bootstrap_os
    - resolvconf


# ------------------------------------------------------------
# ν™˜κ²½μ— 따라 DHCP ν›… λ‘€λ°±
# - λͺ¨λ“  ν™˜κ²½μ— λ™μΌν•œ 섀정을 κ°•μ œν•˜μ§€ μ•ŠκΈ° μœ„ν•œ μ•ˆμ „μž₯치
# ------------------------------------------------------------
- name: Configure dhclient dhclient hooks
  import_tasks: 0110-dhclient-hooks-undo.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode != 'host_resolvconf'
    - dhclientconffile is defined
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  tags:
    - bootstrap_os
    - resolvconf


# ------------------------------------------------------------
# λ„€νŠΈμ›Œν¬/DNS λ³€κ²½ 사항을 μ¦‰μ‹œ 반영
# - 이후 container runtime, kubelet μ„€μΉ˜ 전에 ν™˜κ²½μ„ κ³ μ •
# ------------------------------------------------------------
- name: Flush handlers
  meta: flush_handlers


# ------------------------------------------------------------
# Azure VM μ—¬λΆ€ 확인
# - ν΄λΌμš°λ“œ ν™˜κ²½ 특수 처리λ₯Ό μœ„ν•œ fact μˆ˜μ§‘
# ------------------------------------------------------------
- name: Check if we are running inside a Azure VM
  stat:
    path: /var/lib/waagent/
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: azure_check
  when:
    - not dns_late
  tags:
    - bootstrap_os


# ------------------------------------------------------------
# Calico 선택 μ‹œ μ „μš© 사전 검사
# - 컀널 λͺ¨λ“ˆ, iptables, λ„€νŠΈμ›Œν¬ μš”κ΅¬ 사항 검증
# ------------------------------------------------------------
- name: Run calico checks
  include_role:
    name: network_plugin/calico
    tasks_from: check
  when:
    - kube_network_plugin == 'calico'
    - not ignore_assert_errors

 

 

0050-create_directories.yml

Kubernetes ꡬ성 μš”μ†Œ, μΈμ¦μ„œ, λ„€νŠΈμ›Œν¬, μŠ€ν† λ¦¬μ§€μ˜ κΆŒν•œ 경계λ₯Ό 파일 μ‹œμŠ€ν…œ λ ˆλ²¨μ—μ„œ μ •μ˜ν•œλ‹€.

---
# ============================================================
# Pre-create the directory layout Kubernetes will use
# - Prevents directory-creation failures mid-install
# - Fixes owners/permissions up front so the security boundary
#   is explicit at the filesystem level
# ============================================================


# ------------------------------------------------------------
# Directories used directly by Kubernetes components
# - Owned by the kube account
# - Paths the kubelet and control-plane components must access
# ------------------------------------------------------------
- name: Create kubernetes directories
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ kube_owner }}"     # typically the 'kube' user
    mode: "0755"
  when: ('k8s_cluster' in group_names)
  become: true
  tags:
    - kubelet
    - kube-controller-manager
    - kube-apiserver
    - bootstrap_os
    - apps
    - network
    - control-plane
    - node
  with_items:
    - "{{ kube_config_dir }}"     # /etc/kubernetes : certificates, kubeconfig, configs
    - "{{ kube_manifest_dir }}"   # /etc/kubernetes/manifests : static Pod definitions
    - "{{ kube_script_dir }}"     # /usr/local/bin/kubernetes-scripts : Kubespray helper scripts
    - "{{ kubelet_flexvolumes_plugins_dir }}"  # kubelet legacy volume plugin path


# ------------------------------------------------------------
# Directories that must be owned by root
# - Areas the Kubernetes processes should NOT own, for security
# ------------------------------------------------------------
- name: Create other directories of root owner
  file:
    path: "{{ item }}"
    state: directory
    owner: root                  # owned by root only
    mode: "0755"
  when: ('k8s_cluster' in group_names)
  become: true
  tags:
    - kubelet
    - kube-controller-manager
    - kube-apiserver
    - bootstrap_os
    - apps
    - network
    - control-plane
    - node
  with_items:
    - "{{ kube_cert_dir }}"       # /etc/kubernetes/ssl : sensitive certificate path
    - "{{ bin_dir }}"             # /usr/local/bin : system binary path


# ------------------------------------------------------------
# Check the certificate path expected by kubeadm
# - Works around kubeadm hard-coding a specific path
# ------------------------------------------------------------
- name: Check if kubernetes kubeadm compat cert dir exists
  stat:
    path: "{{ kube_cert_compat_dir }}"
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: kube_cert_compat_dir_check
  when:
    - ('k8s_cluster' in group_names)
    - kube_cert_dir != kube_cert_compat_dir


# ------------------------------------------------------------
# Create the kubeadm-compatible cert dir (as a symlink)
# - Only one real copy of the certificates is kept
# - The path kubeadm expects is satisfied via a link
# ------------------------------------------------------------
- name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498)
  file:
    src: "{{ kube_cert_dir }}"
    dest: "{{ kube_cert_compat_dir }}"
    state: link
    mode: "0755"
  when:
    - ('k8s_cluster' in group_names)
    - kube_cert_dir != kube_cert_compat_dir
    - not kube_cert_compat_dir_check.stat.exists


# ------------------------------------------------------------
# Standard directories for CNI plugins
# - /etc/cni/net.d : CNI configuration files
# - /opt/cni/bin   : CNI binaries
# ------------------------------------------------------------
- name: Create cni directories
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ kube_owner }}"
    mode: "0755"
  with_items:
    - "/etc/cni/net.d"
    - "/opt/cni/bin"
  when:
    - kube_network_plugin in ["calico", "flannel", "cilium", "kube-ovn", "kube-router", "macvlan"]
    - ('k8s_cluster' in group_names)
  tags:
    - network
    - cilium
    - calico
    - kube-ovn
    - kube-router
    - bootstrap_os


# ------------------------------------------------------------
# Calico-specific data directory
# - Used for IPAM and state storage
# ------------------------------------------------------------
- name: Create calico cni directories
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ kube_owner }}"
    mode: "0755"
  with_items:
    - "/var/lib/calico"
  when:
    - kube_network_plugin == "calico"
    - ('k8s_cluster' in group_names)
  tags:
    - network
    - calico
    - bootstrap_os


# ------------------------------------------------------------
# Host directories for the Local Volume Provisioner
# - Paths used to expose host storage to Kubernetes as PVs
# ------------------------------------------------------------
- name: Create local volume provisioner directories
  file:
    path: "{{ local_volume_provisioner_storage_classes[item].host_dir }}"
    state: directory
    owner: root
    group: root
    mode: "{{ local_volume_provisioner_directory_mode }}"
  with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}"
  when:
    - ('k8s_cluster' in group_names)
    - local_volume_provisioner_enabled
  tags:
    - persistent_volumes

 

 

 

roles/kubernetes/preinstall/handlers/main.yml

Kubespray의 preinstall handlerλŠ” OS 레벨 DNS 변경이 kubelet, static Pod, 그리고 control planeκΉŒμ§€ μΌκ΄€λ˜κ²Œ λ°˜μ˜λ˜λ„λ‘ ν•œλ‹€.

---
# ============================================================
# Handlers for the Kubernetes preinstall stage
# - Propagate DNS / network / time configuration changes to the
#   actual system and to the Kubernetes components
# - Executed only when a task fires them via notify
# ============================================================


# ------------------------------------------------------------
# Apply resolvconf on Flatcar (Container Linux) systems
# - Flatcar does not manage /etc/resolv.conf the usual way;
#   DNS settings must be applied through cloud-init.
# ------------------------------------------------------------
- name: Preinstall | apply resolvconf cloud-init
  command: /usr/bin/coreos-cloudinit --from-file {{ resolveconf_cloud_init_conf }}
  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  listen: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk


# ------------------------------------------------------------
# Restart NetworkManager
# - Forces NM to re-read its cache/settings after a DNS change.
# ------------------------------------------------------------
- name: Preinstall | reload NetworkManager
  service:
    name: NetworkManager.service
    state: restarted
  listen: Preinstall | update resolvconf for networkmanager


# ------------------------------------------------------------
# Restart kubelet
# - kubelet reads /etc/resolv.conf at startup.
# - A restart is required to propagate DNS changes to
#   Pods / static Pods.
# ------------------------------------------------------------
- name: Preinstall | reload kubelet
  service:
    name: kubelet
    state: restarted
  notify:
    # After the kubelet restart, check the control-plane static
    # pods and bounce them if necessary
    - Preinstall | kube-controller configured
    - Preinstall | kube-apiserver configured
    - Preinstall | restart kube-controller-manager docker
    - Preinstall | restart kube-controller-manager crio/containerd
    - Preinstall | restart kube-apiserver docker
    - Preinstall | restart kube-apiserver crio/containerd
  when: not dns_early | bool
  listen:
    - Preinstall | propagate resolvconf to k8s components
    - Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
    - Preinstall | update resolvconf for networkmanager


# ------------------------------------------------------------
# Check whether the kube-apiserver static pod exists
# - Pre-check to decide whether it is a restart target after a
#   DNS change
# ------------------------------------------------------------
# FIXME: needs separate handling in kubeadm mode
- name: Preinstall | kube-apiserver configured
  stat:
    path: "{{ kube_manifest_dir }}/kube-apiserver.yaml"
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: kube_apiserver_set
  when:
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
  listen: Preinstall | propagate resolvconf to k8s components


# ------------------------------------------------------------
# Check whether the kube-controller-manager static pod exists
# ------------------------------------------------------------
# FIXME: needs separate handling in kubeadm mode
- name: Preinstall | kube-controller configured
  stat:
    path: "{{ kube_manifest_dir }}/kube-controller-manager.yaml"
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: kube_controller_set
  when:
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
  listen: Preinstall | propagate resolvconf to k8s components


# ------------------------------------------------------------
# Restart kube-controller-manager on the docker runtime
# - Deletes the static pod directly so the kubelet recreates it
# ------------------------------------------------------------
- name: Preinstall | restart kube-controller-manager docker
  shell: >
    set -o pipefail &&
    {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q |
    xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f
  args:
    executable: /bin/bash
  when:
    - container_manager == "docker"
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_controller_set.stat.exists
  listen: Preinstall | propagate resolvconf to k8s components


# ------------------------------------------------------------
# Restart kube-controller-manager on crio/containerd
# - Removes the static pod sandbox via crictl
# ------------------------------------------------------------
- name: Preinstall | restart kube-controller-manager crio/containerd
  shell: >
    set -o pipefail &&
    {{ bin_dir }}/crictl pods --name kube-controller-manager* -q |
    xargs -I% --no-run-if-empty bash -c
    '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'
  args:
    executable: /bin/bash
  register: preinstall_restart_controller_manager
  retries: 10
  delay: 1
  until: preinstall_restart_controller_manager.rc == 0
  when:
    - container_manager in ['crio', 'containerd']
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_controller_set.stat.exists
  listen: Preinstall | propagate resolvconf to k8s components


# ------------------------------------------------------------
# Restart kube-apiserver on the docker runtime
# ------------------------------------------------------------
- name: Preinstall | restart kube-apiserver docker
  shell: >
    set -o pipefail &&
    {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q |
    xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f
  args:
    executable: /bin/bash
  when:
    - container_manager == "docker"
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_apiserver_set.stat.exists
  listen: Preinstall | propagate resolvconf to k8s components


# ------------------------------------------------------------
# Restart kube-apiserver on crio/containerd
# ------------------------------------------------------------
- name: Preinstall | restart kube-apiserver crio/containerd
  shell: >
    set -o pipefail &&
    {{ bin_dir }}/crictl pods --name kube-apiserver* -q |
    xargs -I% --no-run-if-empty bash -c
    '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'
  args:
    executable: /bin/bash
  register: preinstall_restart_apiserver
  retries: 10
  until: preinstall_restart_apiserver.rc == 0
  delay: 1
  when:
    - container_manager in ['crio', 'containerd']
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_apiserver_set.stat.exists
  listen: Preinstall | propagate resolvconf to k8s components


# ------------------------------------------------------------
# Wait for the kube-apiserver health check in the dns_late
# scenario
# - For environments that apply DNS settings last
# ------------------------------------------------------------
- name: Preinstall | wait for the apiserver to be running
  uri:
    url: "{{ kube_apiserver_endpoint }}/healthz"
    validate_certs: false
  register: result
  until: result.status == 200
  retries: 60
  delay: 1
  when:
    - dns_late
    - ('kube_control_plane' in group_names)
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
    - not is_fedora_coreos
  listen: Preinstall | propagate resolvconf to k8s components


# ------------------------------------------------------------
# Restart systemd-resolved
# - Refreshes the DNS cache and stub resolver
# ------------------------------------------------------------
- name: Preinstall | Restart systemd-resolved
  service:
    name: systemd-resolved
    state: restarted


# ------------------------------------------------------------
# Restart the NTP service
# - Applies time-synchronization configuration changes
# ------------------------------------------------------------
- name: Preinstall | restart ntp
  service:
    name: "{{ ntp_service_name }}"
    state: restarted
  when: ntp_enabled

 

 

Container-engine

μ»¨ν…Œμ΄λ„ˆ λŸ°νƒ€μž„ μ„€μ • μ •μ˜λ‚΄μš©μ„ μ‚΄νŽ΄λ³Έλ‹€.

root@k8s-ctr:~/kubespray# tree roles/container-engine/
roles/container-engine/
β”œβ”€β”€ containerd
β”‚   β”œβ”€β”€ defaults
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ handlers
β”‚   β”‚   β”œβ”€β”€ main.yml
β”‚   β”‚   └── reset.yml
β”‚   β”œβ”€β”€ meta
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ molecule
β”‚   β”‚   └── default
β”‚   β”‚       β”œβ”€β”€ converge.yml
β”‚   β”‚       β”œβ”€β”€ molecule.yml
β”‚   β”‚       └── verify.yml
β”‚   β”œβ”€β”€ tasks
β”‚   β”‚   β”œβ”€β”€ main.yml
β”‚   β”‚   └── reset.yml
β”‚   └── templates
β”‚       β”œβ”€β”€ config.toml.j2
β”‚       β”œβ”€β”€ config-v1.toml.j2
β”‚       β”œβ”€β”€ containerd.service.j2
β”‚       β”œβ”€β”€ hosts.toml.j2
β”‚       └── http-proxy.conf.j2
β”œβ”€β”€ containerd-common
β”‚   β”œβ”€β”€ defaults
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ meta
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ tasks
β”‚   β”‚   └── main.yml
β”‚   └── vars
β”‚       β”œβ”€β”€ amazon.yml
β”‚       └── suse.yml
β”œβ”€β”€ crictl
β”‚   β”œβ”€β”€ handlers
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ tasks
β”‚   β”‚   β”œβ”€β”€ crictl.yml
β”‚   β”‚   └── main.yml
β”‚   └── templates
β”‚       └── crictl.yaml.j2
β”œβ”€β”€ cri-dockerd
β”‚   β”œβ”€β”€ defaults
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ handlers
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ meta
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ molecule
β”‚   β”‚   └── default
β”‚   β”‚       β”œβ”€β”€ converge.yml
β”‚   β”‚       β”œβ”€β”€ files
β”‚   β”‚       β”‚   β”œβ”€β”€ 10-mynet.conf
β”‚   β”‚       β”‚   β”œβ”€β”€ container.json
β”‚   β”‚       β”‚   └── sandbox.json
β”‚   β”‚       β”œβ”€β”€ molecule.yml
β”‚   β”‚       └── verify.yml
β”‚   β”œβ”€β”€ tasks
β”‚   β”‚   └── main.yml
β”‚   └── templates
β”‚       β”œβ”€β”€ cri-dockerd.service.j2
β”‚       └── cri-dockerd.socket.j2
β”œβ”€β”€ cri-o
β”‚   β”œβ”€β”€ defaults
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ handlers
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ meta
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ molecule
β”‚   β”‚   └── default
β”‚   β”‚       β”œβ”€β”€ converge.yml
β”‚   β”‚       β”œβ”€β”€ molecule.yml
β”‚   β”‚       └── verify.yml
β”‚   β”œβ”€β”€ tasks
β”‚   β”‚   β”œβ”€β”€ load_vars.yml
β”‚   β”‚   β”œβ”€β”€ main.yaml
β”‚   β”‚   β”œβ”€β”€ reset.yml
β”‚   β”‚   └── setup-amazon.yaml
β”‚   β”œβ”€β”€ templates
β”‚   β”‚   β”œβ”€β”€ config.json.j2
β”‚   β”‚   β”œβ”€β”€ crio.conf.j2
β”‚   β”‚   β”œβ”€β”€ http-proxy.conf.j2
β”‚   β”‚   β”œβ”€β”€ mounts.conf.j2
β”‚   β”‚   β”œβ”€β”€ registry.conf.j2
β”‚   β”‚   └── unqualified.conf.j2
β”‚   └── vars
β”‚       β”œβ”€β”€ v1.29.yml
β”‚       └── v1.31.yml
β”œβ”€β”€ crun
β”‚   └── tasks
β”‚       └── main.yml
β”œβ”€β”€ docker
β”‚   β”œβ”€β”€ defaults
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ files
β”‚   β”‚   └── cleanup-docker-orphans.sh
β”‚   β”œβ”€β”€ handlers
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ meta
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ tasks
β”‚   β”‚   β”œβ”€β”€ docker_plugin.yml
β”‚   β”‚   β”œβ”€β”€ main.yml
β”‚   β”‚   β”œβ”€β”€ pre-upgrade.yml
β”‚   β”‚   β”œβ”€β”€ reset.yml
β”‚   β”‚   β”œβ”€β”€ set_facts_dns.yml
β”‚   β”‚   └── systemd.yml
β”‚   β”œβ”€β”€ templates
β”‚   β”‚   β”œβ”€β”€ docker-dns.conf.j2
β”‚   β”‚   β”œβ”€β”€ docker-options.conf.j2
β”‚   β”‚   β”œβ”€β”€ docker-orphan-cleanup.conf.j2
β”‚   β”‚   β”œβ”€β”€ docker.service.j2
β”‚   β”‚   β”œβ”€β”€ fedora_docker.repo.j2
β”‚   β”‚   β”œβ”€β”€ http-proxy.conf.j2
β”‚   β”‚   └── rh_docker.repo.j2
β”‚   └── vars
β”‚       β”œβ”€β”€ amazon.yml
β”‚       β”œβ”€β”€ clearlinux.yml
β”‚       β”œβ”€β”€ debian.yml
β”‚       β”œβ”€β”€ fedora.yml
β”‚       β”œβ”€β”€ kylin.yml
β”‚       β”œβ”€β”€ openeuler.yml -> kylin.yml
β”‚       β”œβ”€β”€ redhat.yml
β”‚       β”œβ”€β”€ suse.yml
β”‚       β”œβ”€β”€ ubuntu.yml
β”‚       └── uniontech.yml
β”œβ”€β”€ gvisor
β”‚   β”œβ”€β”€ molecule
β”‚   β”‚   └── default
β”‚   β”‚       β”œβ”€β”€ converge.yml
β”‚   β”‚       β”œβ”€β”€ files
β”‚   β”‚       β”‚   β”œβ”€β”€ 10-mynet.conf
β”‚   β”‚       β”‚   β”œβ”€β”€ container.json
β”‚   β”‚       β”‚   └── sandbox.json
β”‚   β”‚       β”œβ”€β”€ molecule.yml
β”‚   β”‚       └── verify.yml
β”‚   └── tasks
β”‚       └── main.yml
β”œβ”€β”€ kata-containers
β”‚   β”œβ”€β”€ defaults
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ molecule
β”‚   β”‚   └── default
β”‚   β”‚       β”œβ”€β”€ converge.yml
β”‚   β”‚       β”œβ”€β”€ files
β”‚   β”‚       β”‚   β”œβ”€β”€ 10-mynet.conf
β”‚   β”‚       β”‚   β”œβ”€β”€ container.json
β”‚   β”‚       β”‚   └── sandbox.json
β”‚   β”‚       β”œβ”€β”€ molecule.yml
β”‚   β”‚       └── verify.yml
β”‚   β”œβ”€β”€ tasks
β”‚   β”‚   └── main.yml
β”‚   └── templates
β”‚       β”œβ”€β”€ configuration-qemu.toml.j2
β”‚       └── containerd-shim-kata-v2.j2
β”œβ”€β”€ meta
β”‚   └── main.yml
β”œβ”€β”€ molecule
β”‚   β”œβ”€β”€ files
β”‚   β”‚   └── 10-mynet.conf
β”‚   β”œβ”€β”€ prepare.yml
β”‚   β”œβ”€β”€ templates
β”‚   β”‚   β”œβ”€β”€ container.json.j2
β”‚   β”‚   └── sandbox.json.j2
β”‚   β”œβ”€β”€ test_cri.yml
β”‚   └── test_runtime.yml
β”œβ”€β”€ nerdctl
β”‚   β”œβ”€β”€ handlers
β”‚   β”‚   └── main.yml
β”‚   β”œβ”€β”€ tasks
β”‚   β”‚   └── main.yml
β”‚   └── templates
β”‚       └── nerdctl.toml.j2
β”œβ”€β”€ runc
β”‚   β”œβ”€β”€ defaults
β”‚   β”‚   └── main.yml
β”‚   └── tasks
β”‚       └── main.yml
β”œβ”€β”€ skopeo
β”‚   └── tasks
β”‚       └── main.yml
β”œβ”€β”€ validate-container-engine
β”‚   └── tasks
β”‚       └── main.yml
└── youki
    β”œβ”€β”€ defaults
    β”‚   └── main.yml
    β”œβ”€β”€ molecule
    β”‚   └── default
    β”‚       β”œβ”€β”€ converge.yml
    β”‚       β”œβ”€β”€ files
    β”‚       β”‚   β”œβ”€β”€ 10-mynet.conf
    β”‚       β”‚   β”œβ”€β”€ container.json
    β”‚       β”‚   └── sandbox.json
    β”‚       β”œβ”€β”€ molecule.yml
    β”‚       └── verify.yml
    └── tasks
        └── main.yml

79 directories, 120 files

 

λ‹€μ–‘ν•œ μ»¨ν…Œμ΄λ„ˆ λŸ°νƒ€μž„λ³„ 역할이 사전에 μ€€λΉ„λ˜μ–΄ μžˆλ‹€. μš°λ¦¬κ°€ μ‹€μŠ΅μ— μ μš©ν•œ containerd, runc μͺ½μ„ μ‚΄νŽ΄λ³΄λ„λ‘ ν•œλ‹€.

 

validate-container-engine/tasks/main.yml (및 runc/tasks/main.yml)

---
# ============================================================
# Container Runtime 검증 및 정리 단계
# - λ…Έλ“œμ— μ–΄λ–€ μ»¨ν…Œμ΄λ„ˆ λŸ°νƒ€μž„μ΄ μ„€μΉ˜/μ‹€ν–‰ 쀑인지 ν™•μΈν•œλ‹€
# - inventoryμ—μ„œ μ„ νƒν•œ container_manager μ™Έμ˜ λŸ°νƒ€μž„μ€ μ œκ±°ν•œλ‹€
# - kubelet + λŸ°νƒ€μž„ κ°„ 뢈일치둜 μΈν•œ μž₯μ• λ₯Ό 사전에 μ°¨λ‹¨ν•œλ‹€
# ============================================================


# ------------------------------------------------------------
# Fedora CoreOS / OSTree 기반 OS μ—¬λΆ€ 확인
# - OSTree 기반 OSλŠ” νŒ¨ν‚€μ§€ 관리 방식이 λ‹€λ₯΄λ―€λ‘œ
#   λŸ°νƒ€μž„ 제거/μ„€μΉ˜ λ‘œμ§μ„ λ‹€λ₯΄κ²Œ κ°€μ Έκ°€μ•Ό ν•œλ‹€
# ------------------------------------------------------------
- name: Validate-container-engine | check if fedora coreos
  stat:
    path: /run/ostree-booted
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: ostree
  tags:
    - facts


# ------------------------------------------------------------
# OSTree 기반 OS μ—¬λΆ€λ₯Ό fact둜 μ €μž₯
# ------------------------------------------------------------
- name: Validate-container-engine | set is_ostree
  set_fact:
    is_ostree: "{{ ostree.stat.exists }}"
  tags:
    - facts


# ------------------------------------------------------------
# kubelet systemd unit 쑴재 μ—¬λΆ€ 확인
# - λŸ°νƒ€μž„ 제거 전에 kubelet을 쀑지해야 ν•˜λŠ”μ§€ νŒλ‹¨
# ------------------------------------------------------------
- name: Ensure kubelet systemd unit exists
  stat:
    path: "/etc/systemd/system/kubelet.service"
  register: kubelet_systemd_unit_exists
  tags:
    - facts


# ------------------------------------------------------------
# systemd μ„œλΉ„μŠ€ μƒνƒœ μˆ˜μ§‘
# - containerd / docker / crio μ‹€ν–‰ μ—¬λΆ€ νŒλ‹¨μ— μ‚¬μš©
# ------------------------------------------------------------
- name: Populate service facts
  service_facts:
  tags:
    - facts


# ------------------------------------------------------------
# containerd μ„€μΉ˜ μ—¬λΆ€ 확인
# - systemd unit 파일 쑴재 μ—¬λΆ€ κΈ°μ€€
# ------------------------------------------------------------
- name: Check if containerd is installed
  find:
    file_type: file
    recurse: true
    use_regex: true
    patterns:
      - containerd.service$
    paths:
      - /lib/systemd
      - /etc/systemd
      - /run/systemd
  register: containerd_installed
  tags:
    - facts


# ------------------------------------------------------------
# docker μ„€μΉ˜ μ—¬λΆ€ 확인
# ------------------------------------------------------------
- name: Check if docker is installed
  find:
    file_type: file
    recurse: true
    use_regex: true
    patterns:
      - docker.service$
    paths:
      - /lib/systemd
      - /etc/systemd
      - /run/systemd
  register: docker_installed
  tags:
    - facts


# ------------------------------------------------------------
# CRI-O μ„€μΉ˜ μ—¬λΆ€ 확인
# ------------------------------------------------------------
- name: Check if crio is installed
  find:
    file_type: file
    recurse: true
    use_regex: true
    patterns:
      - crio.service$
    paths:
      - /lib/systemd
      - /etc/systemd
      - /run/systemd
  register: crio_installed
  tags:
    - facts


# ------------------------------------------------------------
# containerd 제거 둜직
# - inventoryμ—μ„œ containerdλ₯Ό μ„ νƒν•˜μ§€ μ•Šμ•˜κ³ 
# - docker도 μ‚¬μš© 쀑이 μ•„λ‹ˆλ©°
# - containerdκ°€ μ‹€μ œλ‘œ μ‹€ν–‰ 쀑일 λ•Œλ§Œ 제거
# ------------------------------------------------------------
- name: Uninstall containerd
  vars:
    service_name: containerd.service
  when:
    # OSTree/Flatcar 계열은 νŒ¨ν‚€μ§€ 제거 방식이 λ‹€λ₯΄λ―€λ‘œ μ œμ™Έ
    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
    # μ„ νƒλœ λŸ°νƒ€μž„μ΄ containerdκ°€ 아닐 λ•Œ
    - container_manager != "containerd"
    # dockerκ°€ μ„€μΉ˜λ˜μ–΄ μžˆμ§€ μ•Šμ„ λ•Œ
    - docker_installed.matched == 0
    # containerdκ°€ μ„€μΉ˜λ˜μ–΄ 있고
    - containerd_installed.matched > 0
    # μ‹€μ œλ‘œ μ‹€ν–‰ 쀑일 λ•Œλ§Œ 제거
    - ansible_facts.services[service_name]['state'] == 'running'
  block:
    # --------------------------------------------------------
    # λ…Έλ“œ Drain
    # - λŸ°νƒ€μž„ 제거 μ „ Podλ₯Ό μ•ˆμ „ν•˜κ²Œ λ‹€λ₯Έ λ…Έλ“œλ‘œ 이동
    # --------------------------------------------------------
    - name: Drain node
      include_role:
        name: remove_node/pre_remove
        apply:
          tags:
            - pre-remove
      when: kubelet_systemd_unit_exists.stat.exists

    # --------------------------------------------------------
    # kubelet 쀑지
    # - λŸ°νƒ€μž„ 제거 쀑 kubelet μž¬μ‹œμž‘/μ˜€μž‘λ™ λ°©μ§€
    # --------------------------------------------------------
    - name: Stop kubelet
      service:
        name: kubelet
        state: stopped
      when: kubelet_systemd_unit_exists.stat.exists

    # --------------------------------------------------------
    # containerd 제거
    # --------------------------------------------------------
    - name: Remove Containerd
      import_role:
        name: container-engine/containerd
        tasks_from: reset
        handlers_from: reset


# ------------------------------------------------------------
# docker 제거 둜직
# ------------------------------------------------------------
- name: Uninstall docker
  vars:
    service_name: docker.service
  when:
    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
    - container_manager != "docker"
    - docker_installed.matched > 0
    - ansible_facts.services[service_name]['state'] == 'running'
  block:
    - name: Drain node
      include_role:
        name: remove_node/pre_remove
        apply:
          tags:
            - pre-remove
      when: kubelet_systemd_unit_exists.stat.exists

    - name: Stop kubelet
      service:
        name: kubelet
        state: stopped
      when: kubelet_systemd_unit_exists.stat.exists

    - name: Remove Docker
      import_role:
        name: container-engine/docker
        tasks_from: reset


# ------------------------------------------------------------
# CRI-O removal logic
# - Runs only when crio is installed and its service is
#   running, but crio is NOT the selected container_manager
# - OSTree / Flatcar systems are excluded (different package
#   management model)
# ------------------------------------------------------------
- name: Uninstall crio
  vars:
    service_name: crio.service
  when:
    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
    - container_manager != "crio"
    - crio_installed.matched > 0
    - ansible_facts.services[service_name]['state'] == 'running'
  block:
    # Safely evict Pods to other nodes before removing the runtime
    - name: Drain node
      include_role:
        name: remove_node/pre_remove
        apply:
          tags:
            - pre-remove
      when: kubelet_systemd_unit_exists.stat.exists

    # Stop kubelet so it cannot restart containers mid-removal
    - name: Stop kubelet
      service:
        name: kubelet
        state: stopped
      when: kubelet_systemd_unit_exists.stat.exists

    # Delegate the actual cleanup to the cri-o role's reset tasks
    - name: Remove CRI-O
      import_role:
        name: container-engine/cri-o
        tasks_from: reset
# ------------------------------------------------------------
# Detect Fedora CoreOS / OSTree-based OS
# - OSTree-based systems manage packages differently, so the
#   runc removal/install logic must branch on this fact
# ------------------------------------------------------------
- name: Runc | check if fedora coreos
  stat:
    path: /run/ostree-booted
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: ostree


# ------------------------------------------------------------
# Persist the OSTree detection result as a fact
# ------------------------------------------------------------
- name: Runc | set is_ostree
  set_fact:
    is_ostree: "{{ ostree.stat.exists }}"


# ------------------------------------------------------------
# Remove the runc installed by the OS package manager
# - The distro-provided runc:
#   * may be outdated
#   * may not match the version containerd/CRI-O require
# - Kubespray does not trust this runc
# ------------------------------------------------------------
- name: Runc | Uninstall runc package managed by package manager
  package:
    name: "{{ runc_package_name }}"
    state: absent
  when:
    # OSTree / Flatcar families remove packages differently, so skip them
    - not (is_ostree or
           (ansible_distribution == "Flatcar Container Linux by Kinvolk") or
           (ansible_distribution == "Flatcar"))


# ------------------------------------------------------------
# Download the runc binary pinned by Kubespray
# - the exact version defined in checksums.yml
# - a runc verified for compatibility with containerd/Kubernetes
# ------------------------------------------------------------
- name: Runc | Download runc binary
  include_tasks: "../../../download/tasks/download_file.yml"
  vars:
    download: "{{ download_defaults | combine(downloads.runc) }}"


# ------------------------------------------------------------
# Place the downloaded runc binary at the configured path
# - typically /usr/local/bin/runc
# - grant execute permission (0755)
# ------------------------------------------------------------
- name: Copy runc binary from download dir
  copy:
    src: "{{ downloads.runc.dest }}"
    dest: "{{ runc_bin_dir }}/runc"
    mode: "0755"
    remote_src: true


# ------------------------------------------------------------
# Remove any stale runc left in the OS default path (/usr/bin)
# - prevents the container runtime from invoking the wrong runc
# ------------------------------------------------------------
- name: Runc | Remove orphaned binary
  file:
    path: /usr/bin/runc
    state: absent
  when: runc_bin_dir != "/usr/bin"
  ignore_errors: true  # another package may have removed it already

 

containerd

root@k8s-ctr:~/kubespray# tree roles/container-engine/containerd/
roles/container-engine/containerd/
β”œβ”€β”€ defaults
β”‚   └── main.yml
β”œβ”€β”€ handlers
β”‚   β”œβ”€β”€ main.yml
β”‚   └── reset.yml
β”œβ”€β”€ meta
β”‚   └── main.yml
β”œβ”€β”€ molecule
β”‚   └── default
β”‚       β”œβ”€β”€ converge.yml
β”‚       β”œβ”€β”€ molecule.yml
β”‚       └── verify.yml
β”œβ”€β”€ tasks
β”‚   β”œβ”€β”€ main.yml
β”‚   └── reset.yml
└── templates
    β”œβ”€β”€ config.toml.j2
    β”œβ”€β”€ config-v1.toml.j2
    β”œβ”€β”€ containerd.service.j2
    β”œβ”€β”€ hosts.toml.j2
    └── http-proxy.conf.j2

8 directories, 14 files

 

# ------------------------------------------------------------
# Download the containerd binary
# - only the "verified version" pinned in checksums.yml is used
# - distro repo versions are deliberately not trusted
# ------------------------------------------------------------
- name: Containerd | Download containerd
  include_tasks: "../../../download/tasks/download_file.yml"
  vars:
    download: "{{ download_defaults | combine(downloads.containerd) }}"


# ------------------------------------------------------------
# Unpack the containerd archive
# - into containerd_bin_dir (typically /usr/local/bin)
# - strip-components=1 drops the leading bin/ directory so the
#   binaries land directly in the target dir
# ------------------------------------------------------------
- name: Containerd | Unpack containerd archive
  unarchive:
    src: "{{ downloads.containerd.dest }}"
    dest: "{{ containerd_bin_dir }}"
    mode: "0755"
    remote_src: true
    extra_opts:
      - --strip-components=1
  notify: Restart containerd   # binary changed, so schedule a restart


# ------------------------------------------------------------
# Generate the containerd systemd unit
# - the OS-provided unit is not used
# - rendered directly from the Kubespray template
# - validate: `systemd-analyze verify` runs only when
#   factory-reset.target exists; otherwise the shell exits 0 and
#   the verification step is skipped entirely
# ------------------------------------------------------------
- name: Containerd | Generate systemd service for containerd
  template:
    src: containerd.service.j2
    dest: /etc/systemd/system/containerd.service
    mode: "0644"
    validate: >
      sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 &&
      systemd-analyze verify %s:containerd.service'
  notify: Restart containerd

  # This is where Kubespray's attention to detail shows:
  # - even the systemd environment is taken into account
  # - a broken unit file is caught before it can cause damage


# ------------------------------------------------------------
# Create containerd config and systemd drop-in directories
# ------------------------------------------------------------
- name: Containerd | Ensure containerd directories exist
  file:
    dest: "{{ item }}"
    state: directory
    mode: "0755"
    owner: root
    group: root
  with_items:
    - "{{ containerd_systemd_dir }}"   # /etc/systemd/system/containerd.service.d
    - "{{ containerd_cfg_dir }}"       # /etc/containerd


# ------------------------------------------------------------
# HTTP/HTTPS proxy settings
# - applied as a systemd drop-in
# - deliberately NOT injected into config.toml
# - only written when a proxy variable is actually defined
# ------------------------------------------------------------
- name: Containerd | Write containerd proxy drop-in
  template:
    src: http-proxy.conf.j2
    dest: "{{ containerd_systemd_dir }}/http-proxy.conf"
    mode: "0644"
  notify: Restart containerd
  when: http_proxy is defined or https_proxy is defined


# ------------------------------------------------------------
# Generate containerd's default OCI runtime spec
# - `ctr oci spec` produces the spec for the current environment
# - skipping this step leads to mismatched runtime options
# ------------------------------------------------------------
- name: Containerd | Generate default base_runtime_spec
  register: ctr_oci_spec
  command: "{{ containerd_bin_dir }}/ctr oci spec"
  check_mode: false
  changed_when: false


# ------------------------------------------------------------
# Store the generated OCI runtime spec as a fact
# - used later as the baseline for custom runtime specs
# ------------------------------------------------------------
- name: Containerd | Store generated default base_runtime_spec
  set_fact:
    containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}"


# ------------------------------------------------------------
# Write the runtime spec files
# - prepared for runc, kata, custom runtimes, etc.
# ------------------------------------------------------------
- name: Containerd | Write base_runtime_specs
  copy:
    content: "{{ item.value }}"
    dest: "{{ containerd_cfg_dir }}/{{ item.key }}"
    owner: "root"
    mode: "0644"
  with_dict: "{{ containerd_base_runtime_specs | default({}) }}"
  notify: Restart containerd


# ------------------------------------------------------------
# Render containerd's main config file (config.toml)
# - branches between containerd 2.x and 1.x templates
# - CRI, runc path, cgroup and registry settings are all
#   decided here
# ------------------------------------------------------------
- name: Containerd | Copy containerd config file
  template:
    src: "{{ 'config.toml.j2'
              if containerd_version is version('2.0.0', '>=')
              else 'config-v1.toml.j2' }}"
    dest: "{{ containerd_cfg_dir }}/config.toml"
    owner: "root"
    mode: "0640"
  notify: Restart containerd


# ------------------------------------------------------------
# Configure containerd registry mirrors
# - for private registries and proxied environments
# - no_log hides potentially sensitive registry details unless
#   unsafe_show_logs is set
# ------------------------------------------------------------
- name: Containerd | Configure containerd registries
  no_log: "{{ not (unsafe_show_logs | bool) }}"
  block:
    - name: Containerd | Create registry directories
      file:
        path: "{{ containerd_cfg_dir }}/certs.d/{{ item.prefix }}"
        state: directory
        mode: "0755"
      loop: "{{ containerd_registries_mirrors }}"

    - name: Containerd | Write hosts.toml file
      template:
        src: hosts.toml.j2
        dest: "{{ containerd_cfg_dir }}/certs.d/{{ item.prefix }}/hosts.toml"
        mode: "0640"
      loop: "{{ containerd_registries_mirrors }}"


# ------------------------------------------------------------
# Run pending handlers right here
# - guards against the case where all configuration steps have
#   completed but containerd fails to come up
# ------------------------------------------------------------
- name: Containerd | Flush handlers
  meta: flush_handlers


# ------------------------------------------------------------
# Enable and start the containerd service
# - daemon_reload is required since the unit was written manually
# ------------------------------------------------------------
- name: Containerd | Ensure containerd is started and enabled
  systemd_service:
    name: containerd
    daemon_reload: true
    enabled: true
    state: started

 

containerd μ‹œμŠ€ν…œ 데λͺ¬ νŒŒμΌμ„ μ‚΄νŽ΄λ³΄λ©΄...

 

root@k8s-ctr:~/kubespray# cat /etc/systemd/system/containerd.service
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target dbus.service

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd

Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
LimitMEMLOCK=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999
# Set the cgroup slice of the service so that kube reserved takes effect

[Install]
WantedBy=multi-user.target

 

μ΄λ ‡κ²Œ μ •μ˜λ˜μ–΄ μžˆλ‹€.

 

 

νŒŒλ“œ λ‚΄λΆ€ ulimit μ˜΅μ…˜ μ μš©ν•˜κΈ°

root@k8s-ctr:~/kubespray# cat /etc/containerd/cri-base.json | jq | grep rlimits -A 10
    "rlimits": [
      {
        "type": "RLIMIT_NOFILE",
        "hard": 65535,
        "soft": 65535
      }
    ],
    "noNewPrivileges": true
  },
  "root": {
    "path": "rootfs"

 

ν˜„μž¬ 컀널에 적용된 만큼의 리밋을 νŒŒλ“œμ—λ„ μ μš©ν•΄ 보도둝 ν•œλ‹€.

 

root@k8s-ctr:~/kubespray# cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: ubuntu
spec:
  containers:
  - name: ubuntu
    image: ubuntu
    command: ["sh", "-c", "sleep infinity"]
    securityContext:
      privileged: true
EOF
pod/ubuntu created
root@k8s-ctr:~/kubespray# kubectl exec -it ubuntu -- sh -c 'ulimit -a'
time(seconds)        unlimited
file(blocks)         unlimited
data(kbytes)         unlimited
stack(kbytes)        8192
coredump(blocks)     unlimited
memory(kbytes)       unlimited
locked memory(kbytes) unlimited
process              unlimited
nofiles              65535
vmemory(kbytes)      unlimited
locks                unlimited
rtprio               0

 

컀널 λ‹¨μœ„ 리밋

root@k8s-ctr:~/kubespray# sysctl fs.file-max
fs.file-max = 9223372036854775807
root@k8s-ctr:~/kubespray# cat /proc/sys/fs/file-max
9223372036854775807
root@k8s-ctr:~/kubespray# cat /proc/sys/fs/file-nr
2656    0       9223372036854775807

 

 

μ‚¬μš©μž 레벨 리밋

root@k8s-ctr:~/kubespray# grep "^[^#]" /etc/security/limits.conf
cat /etc/security/limits.conf
# /etc/security/limits.conf
#
#This file sets the resource limits for the users logged in via PAM.
#It does not affect resource limits of the system services.
#
#Also note that configuration files in /etc/security/limits.d directory,
#which are read in alphabetical order, override the settings in this
#file in case the domain is the same or more specific.
#That means, for example, that setting a limit for wildcard domain here
#can be overridden with a wildcard setting in a config file in the
#subdirectory, but a user specific setting here can be overridden only
#with a user specific setting in the subdirectory.
#
#Each line describes a limit for a user in the form:
#
#<domain>        <type>  <item>  <value>
#
#Where:
#<domain> can be:
#        - a user name
#        - a group name, with @group syntax
#        - the wildcard *, for default entry
#        - the wildcard %, can be also used with %group syntax,
#                 for maxlogin limit
#
#<type> can have the two values:
#        - "soft" for enforcing the soft limits
#        - "hard" for enforcing hard limits
#
#<item> can be one of the following:
#        - core - limits the core file size (KB)
#        - data - max data size (KB)
#        - fsize - maximum filesize (KB)
#        - memlock - max locked-in-memory address space (KB)
#        - nofile - max number of open file descriptors
#        - rss - max resident set size (KB)
#        - stack - max stack size (KB)
#        - cpu - max CPU time (MIN)
#        - nproc - max number of processes
#        - as - address space limit (KB)
#        - maxlogins - max number of logins for this user
#        - maxsyslogins - max number of logins on the system
#        - priority - the priority to run user process with
#        - locks - max number of file locks the user can hold
#        - sigpending - max number of pending signals
#        - msgqueue - max memory used by POSIX message queues (bytes)
#        - nice - max nice priority allowed to raise to values: [-20, 19]
#        - rtprio - max realtime priority
#
#<domain>      <type>  <item>         <value>
#

#*               soft    core            0
#*               hard    rss             10000
#@student        hard    nproc           20
#@faculty        soft    nproc           20
#@faculty        hard    nproc           50
#ftp             hard    nproc           0
#@student        -       maxlogins       4

# End of file
root@k8s-ctr:~/kubespray# ulimit -n
524288

 

 

Systemd μ„œλΉ„μŠ€ 레벨

root@k8s-ctr:~/kubespray# cat /proc/<PID>/limits | grep "Max open files"
bash: PID: No such file or directory
root@k8s-ctr:~/kubespray# systemctl show kubelet | grep LimitNOFILE
LimitNOFILE=524288
LimitNOFILESoft=1024
root@k8s-ctr:~/kubespray# cat /proc/$(pidof kubelet)/limits | grep open
Max open files            1000000              1000000              files     
root@k8s-ctr:~/kubespray# systemctl show containerd | grep LimitNOFILE
LimitNOFILE=1048576
LimitNOFILESoft=1048576
root@k8s-ctr:~/kubespray# cat /proc/$(pidof containerd)/limits | grep open
Max open files            1048576              1048576              files

 

 

μ„€μ • λ³€κ²½ ν›„ 적용 (ν”Œλ ˆμ΄λΆμœΌλ‘œ μ‹€ν–‰μ‹œ)

runcλŠ” 호슀트의 κΈ°λ³Έ ulimit을 κ·ΈλŒ€λ‘œ 상속받을 수 μžˆλ„λ‘ νŒ¨μΉ˜ν•œ ν›„ μ•€μ„œλΈ” ν”Œλ ˆμ΄λΆμ„ λ‹€μ‹œ λŒλ¦°λ‹€.

root@k8s-ctr:~/kubespray# cat << EOF >> inventory/mycluster/group_vars/all/containerd.yml
containerd_default_base_runtime_spec_patch:
  process:
    rlimits: []
EOF
root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/all/containerd.yml
---
containerd_default_base_runtime_spec_patch:
  process:
    rlimits: []
root@k8s-ctr:~/kubespray# ansible-playbook -i inventory/mycluster/inventory.ini -v cluster.yml --tags "container-engine" --limit k8s-ctr -e kube_version="1.33.3"

...

PLAY RECAP **************************************************************************************
k8s-ctr                    : ok=84   changed=5    unreachable=0    failed=0    skipped=196  rescued=0    ignored=0   

Sunday 01 February 2026  05:07:56 +0900 (0:


# μˆ˜λ™ 섀정은 μ•„λž˜ 방법 적용!
cat << EOF > /etc/containerd/cri-base.json
{"ociVersion": "1.2.1", "process": {"user": {"uid": 0, "gid": 0}, "cwd": "/", "capabilities": {"bounding": ["CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE"], "effective": ["CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE"], "permitted": ["CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE"]}, "noNewPrivileges": true}, "root": {"path": "rootfs"}, "mounts": [{"destination": "/proc", "type": "proc", "source": "proc", "options": ["nosuid", "noexec", "nodev"]}, {"destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": ["nosuid", "strictatime", "mode=755", "size=65536k"]}, {"destination": "/dev/pts", "type": "devpts", "source": "devpts", "options": ["nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"]}, {"destination": "/dev/shm", "type": "tmpfs", "source": "shm", "options": ["nosuid", "noexec", "nodev", "mode=1777", "size=65536k"]}, {"destination": "/dev/mqueue", "type": "mqueue", "source": "mqueue", "options": ["nosuid", "noexec", "nodev"]}, {"destination": "/sys", "type": "sysfs", "source": "sysfs", "options": ["nosuid", "noexec", "nodev", "ro"]}, {"destination": "/run", "type": "tmpfs", "source": "tmpfs", "options": ["nosuid", "strictatime", "mode=755", "size=65536k"]}], "linux": {"resources": {"devices": [{"allow": false, "access": "rwm"}]}, "cgroupsPath": "/default", "namespaces": [{"type": "pid"}, {"type": "ipc"}, {"type": "uts"}, {"type": "mount"}, {"type": "network"}], "maskedPaths": ["/proc/acpi", "/proc/asound", "/proc/kcore", 
"/proc/keys", "/proc/latency_stats", "/proc/timer_list", "/proc/timer_stats", "/proc/sched_debug", "/sys/firmware", "/sys/devices/virtual/powercap", "/proc/scsi"], "readonlyPaths": ["/proc/bus", "/proc/fs", "/proc/irq", "/proc/sys", "/proc/sysrq-trigger"]}}
EOF
cat /etc/containerd/cri-base.json | jq | grep rlimits
cat /etc/containerd/cri-base.json | jq

systemctl restart containerd.service
systemctl status containerd.service --no-pager

 

 

root@k8s-ctr:~/kubespray# kubectl delete pod ubuntu
pod "ubuntu" deleted



root@k8s-ctr:~/kubespray# 
root@k8s-ctr:~/kubespray# 
root@k8s-ctr:~/kubespray# 
root@k8s-ctr:~/kubespray# cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: ubuntu
spec:
  containers:
  - name: ubuntu
    image: ubuntu
    command: ["sh", "-c", "sleep infinity"]
    securityContext:
      privileged: true
EOF
pod/ubuntu created
root@k8s-ctr:~/kubespray# kubectl exec -it ubuntu -- sh -c 'ulimit -a'
time(seconds)        unlimited
file(blocks)         unlimited
data(kbytes)         unlimited
stack(kbytes)        8192
coredump(blocks)     unlimited
memory(kbytes)       unlimited
locked memory(kbytes) unlimited
process              unlimited
nofiles              1048576
vmemory(kbytes)      unlimited
locks                unlimited
rtprio               0

 

Pod의 ν”„λ‘œμ„ΈμŠ€ μ œν•œμ€ Kubernetes λ³΄μ•ˆ μ„€μ •μ΄λ‚˜ privileged μ—¬λΆ€λ‘œ κ²°μ •λ˜μ§€ μ•ŠλŠ”λ°, μ΄λŠ” μ»¨ν…Œμ΄λ„ˆ 생성 μ‹œμ μ— containerdκ°€ μ „λ‹¬ν•˜λŠ” OCI Runtime Spec에 μ˜ν•΄ κ²°μ •λ˜λ©°, μ΅œμ’…μ μœΌλ‘œ runcκ°€ 이λ₯Ό 컀널에 μ μš©ν•˜κ²Œ λ˜λŠ” 것이닀.

 

 

container-engine νƒœκ·ΈλŠ” μ‹€μ œλ‘œ μ–΄λ–€ μž‘μ—…λ“€μ„ μ‹€ν–‰ν• κΉŒ

containerd, runc, docker, cri-o, crictl, nerdctl, skopeo, kata, gvisorκ°€ μž‘μ—…μ— λͺ¨λ‘ ν¬ν•¨λ˜μ–΄ μžˆλŠ”λ°
Kubesprayμ—μ„œ container-engine에 ν•΄λ‹Ήν•˜λŠ” taskλŠ” containerd μ„€μΉ˜κ°€ μ•„λ‹ˆλΌ λ…Έλ“œμ˜ μ»¨ν…Œμ΄λ„ˆ μ‹€ν–‰ ν™˜κ²½ 전체λ₯Ό ν•œ 번 μ •λ¦¬ν•˜λŠ” 단계이닀.

root@k8s-ctr:~/kubespray# ansible-playbook \
  -i inventory/mycluster/inventory.ini \
  -v cluster.yml \
  --tags "container-engine" \
  --list-tasks
Using /root/kubespray/ansible.cfg as config file
[WARNING]: Could not match supplied host pattern, ignoring: bastion
[WARNING]: Could not match supplied host pattern, ignoring: k8s_cluster
[WARNING]: Could not match supplied host pattern, ignoring: calico_rr
[WARNING]: Could not match supplied host pattern, ignoring: _kubespray_needs_etcd

playbook: cluster.yml

  play #1 (all): Check Ansible version  TAGS: [always]
    tasks:
      Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}    TAGS: [always, check]
      Check that python netaddr is installed    TAGS: [always, check]
      Check that jinja is not too old (install via pip) TAGS: [always, check]

  play #2 (all): Inventory setup and validation TAGS: [always]
    tasks:
      dynamic_groups : Match needed groups by their old names or definition     TAGS: [always]
      validate_inventory : Stop if removed tags are used        TAGS: [always]
      validate_inventory : Stop if kube_control_plane group is empty    TAGS: [always]
      validate_inventory : Stop if etcd group is empty in external etcd mode    TAGS: [always]
      validate_inventory : Warn if `kube_network_plugin` is `none       TAGS: [always]
      validate_inventory : Stop if unsupported version of Kubernetes    TAGS: [always]
      validate_inventory : Stop if known booleans are set as strings (Use JSON format on CLI: -e "{'key': true }")       TAGS: [always]
      validate_inventory : Stop if even number of etcd hosts    TAGS: [always]
      validate_inventory : Guarantee that enough network address space is available for all podsTAGS: [always]
      validate_inventory : Stop if RBAC is not enabled when dashboard is enabled        TAGS: [always]
      validate_inventory : Check cloud_provider value   TAGS: [always]
      validate_inventory : Check external_cloud_provider value  TAGS: [always]
      validate_inventory : Check that kube_service_addresses is a network range TAGS: [always]
      validate_inventory : Check that kube_pods_subnet is a network range       TAGS: [always]
      validate_inventory : Check that kube_pods_subnet does not collide with kube_service_addresses      TAGS: [always]
      validate_inventory : Check that ipv4 IP range is enough for the nodes     TAGS: [always]
      validate_inventory : Check that kube_service_addresses_ipv6 is a network range    TAGS: [always]
      validate_inventory : Check that kube_pods_subnet_ipv6 is a network range  TAGS: [always]
      validate_inventory : Check that kube_pods_subnet_ipv6 does not collide with kube_service_addresses_ipv6    TAGS: [always]
      validate_inventory : Check that ipv6 IP range is enough for the nodes     TAGS: [always]
      validate_inventory : Stop if unsupported options selected TAGS: [always]
      validate_inventory : Warn if `enable_dual_stack_networks` is set  TAGS: [always]
      validate_inventory : Stop if download_localhost is enabled but download_run_once is not   TAGS: [always]
      validate_inventory : Stop if kata_containers_enabled is enabled when container_manager is docker   TAGS: [always]
      validate_inventory : Stop if gvisor_enabled is enabled when container_manager is not containerd    TAGS: [always]
      validate_inventory : Ensure minimum containerd version    TAGS: [always]
      validate_inventory : Stop if auto_renew_certificates is enabled when certificates are managed externally (kube_external_ca_mode is true)   TAGS: [always]

  play #3 (bastion[0]): Install bastion ssh config      TAGS: []
    tasks:

  play #4 (k8s_cluster:etcd:calico_rr): Bootstrap hosts for Ansible     TAGS: []
    tasks:

  play #5 (k8s_cluster:etcd:calico_rr): Gather facts    TAGS: [always]
    tasks:
      network_facts : Gather ansible_default_ipv4       TAGS: [always]
      network_facts : Set fallback_ip   TAGS: [always]
      network_facts : Gather ansible_default_ipv6       TAGS: [always]
      network_facts : Set fallback_ip6  TAGS: [always]
      network_facts : Set main access ip(access_ip based on ipv4_stack/ipv6_stack options).     TAGS: [always]
      network_facts : Set main ip(ip based on ipv4_stack/ipv6_stack options).   TAGS: [always]
      network_facts : Set main access ips(mixed ips for dualstack).     TAGS: [always]
      network_facts : Set main ips(mixed ips for dualstack).    TAGS: [always]
      network_facts : Set no_proxy to all assigned cluster IPs and hostnames    TAGS: [always]
      network_facts : Populates no_proxy to all hosts   TAGS: [always]
      Gather minimal facts      TAGS: [always]
      Gather necessary facts (network)  TAGS: [always]
      Gather necessary facts (hardware) TAGS: [always]

  play #6 (k8s_cluster:etcd): Prepare for etcd install  TAGS: []
    tasks:
      container-engine/validate-container-engine : Validate-container-engine | check if fedora coreos    TAGS: [container-engine, facts, validate-container-engine]
      container-engine/validate-container-engine : Validate-container-engine | set is_ostree    TAGS: [container-engine, facts, validate-container-engine]
      container-engine/validate-container-engine : Ensure kubelet systemd unit exists   TAGS: [container-engine, facts, validate-container-engine]
      container-engine/validate-container-engine : Populate service facts       TAGS: [container-engine, facts, validate-container-engine]
      container-engine/validate-container-engine : Check if containerd is installed     TAGS: [container-engine, facts, validate-container-engine]
      container-engine/validate-container-engine : Check if docker is installed TAGS: [container-engine, facts, validate-container-engine]
      container-engine/validate-container-engine : Check if crio is installed   TAGS: [container-engine, facts, validate-container-engine]
      Drain node        TAGS: [container-engine, validate-container-engine]
      container-engine/validate-container-engine : Stop kubelet TAGS: [container-engine, validate-container-engine]
      container-engine/containerd-common : Containerd-common | check if fedora coreos   TAGS: [container-engine, validate-container-engine]
      container-engine/containerd-common : Containerd-common | set is_ostree    TAGS: [container-engine, validate-container-engine]
      container-engine/containerd-common : Containerd-common | gather os specific variables     TAGS: [container-engine, facts, validate-container-engine]
      container-engine/runc : Runc | check if fedora coreos     TAGS: [container-engine, validate-container-engine]
      container-engine/runc : Runc | set is_ostree      TAGS: [container-engine, validate-container-engine]
      container-engine/runc : Runc | Uninstall runc package managed by package manager  TAGS: [container-engine, validate-container-engine]
      container-engine/runc : Runc | Download runc binary       TAGS: [container-engine, validate-container-engine]
      container-engine/runc : Copy runc binary from download dir        TAGS: [container-engine, validate-container-engine]
      container-engine/runc : Runc | Remove orphaned binary     TAGS: [container-engine, validate-container-engine]
      container-engine/crictl : Install crictl  TAGS: [container-engine, validate-container-engine]
      container-engine/nerdctl : Nerdctl | Download nerdctl     TAGS: [container-engine, validate-container-engine]
      container-engine/nerdctl : Nerdctl | Copy nerdctl binary from download dir        TAGS: [container-engine, validate-container-engine]
      container-engine/nerdctl : Nerdctl | Create configuration dir     TAGS: [container-engine, validate-container-engine]
      container-engine/nerdctl : Nerdctl | Install nerdctl configuration        TAGS: [container-engine, validate-container-engine]
      container-engine/containerd : Containerd | Stop containerd service        TAGS: [container-engine, reset_containerd, validate-container-engine]
      container-engine/containerd : Containerd | Remove configuration files     TAGS: [container-engine, reset_containerd, validate-container-engine]
      Drain node        TAGS: [container-engine, validate-container-engine]
      container-engine/validate-container-engine : Stop kubelet TAGS: [container-engine, validate-container-engine]
      container-engine/containerd-common : Containerd-common | check if fedora coreos   TAGS: [container-engine, validate-container-engine]
      container-engine/containerd-common : Containerd-common | set is_ostree    TAGS: [container-engine, validate-container-engine]
      container-engine/containerd-common : Containerd-common | gather os specific variables     TAGS: [container-engine, facts, validate-container-engine]
      container-engine/docker : Docker | Get package facts      TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | Find docker packages   TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | Stop all running container     TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Reset | remove all containers   TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | Stop docker service    TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | Remove dpkg hold       TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | Remove docker package  TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | ensure docker-ce repository is removed TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | Remove docker repository on Fedora     TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | Remove docker repository on RedHat/CentOS/Oracle/AlmaLinux Linux        TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | Remove docker configuration files      TAGS: [container-engine, validate-container-engine]
      container-engine/docker : Docker | systemctl daemon-reload        TAGS: [container-engine, validate-container-engine]
      Drain node        TAGS: [container-engine, validate-container-engine]
      container-engine/validate-container-engine : Stop kubelet TAGS: [container-engine, validate-container-engine]
      container-engine/crictl : Install crictl  TAGS: [container-engine, validate-container-engine]
      container-engine/skopeo : Skopeo | check if fedora coreos TAGS: [container-engine, validate-container-engine]
      container-engine/skopeo : Skopeo | set is_ostree  TAGS: [container-engine, validate-container-engine]
      container-engine/skopeo : Skopeo | Uninstall skopeo package managed by package manager    TAGS: [container-engine, validate-container-engine]
      container-engine/skopeo : Skopeo | Download skopeo binary TAGS: [container-engine, validate-container-engine]
      container-engine/skopeo : Copy skopeo binary from download dir    TAGS: [container-engine, validate-container-engine]
      container-engine/cri-o : Cri-o | include vars/v1.29.yml   TAGS: [container-engine, validate-container-engine]
      container-engine/cri-o : Cri-o | include vars/v1.31.yml   TAGS: [container-engine, validate-container-engine]
      container-engine/cri-o : CRI-O | Kubic repo name for debian os family     TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Remove kubic apt repo    TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Remove cri-o apt repo    TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Remove CRI-O kubic yum repo      TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Remove CRI-O kubic yum repo      TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Run yum-clean-metadata   TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Remove crictl    TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Stop crio service        TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Remove CRI-O configuration files TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Remove CRI-O binaries    TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/cri-o : CRI-O | Remove CRI-O libexec     TAGS: [container-engine, reset_crio, validate-container-engine]
      container-engine/kata-containers : Kata-containers | Download kata binary TAGS: [container-engine, kata-containers]
      container-engine/kata-containers : Kata-containers | Copy kata-containers binary  TAGS: [container-engine, kata-containers]
      container-engine/kata-containers : Kata-containers | Create config directory      TAGS: [container-engine, kata-containers]
      container-engine/kata-containers : Kata-containers | Set configuration    TAGS: [container-engine, kata-containers]
      container-engine/kata-containers : Kata-containers | Set containerd bin   TAGS: [container-engine, kata-containers]
      container-engine/kata-containers : Kata-containers | Load vhost kernel modules    TAGS: [container-engine, kata-containers]
      container-engine/kata-containers : Kata-containers | Persist vhost kernel modules TAGS: [container-engine, kata-containers]
      container-engine/gvisor : GVisor | Download runsc binary  TAGS: [container-engine, gvisor]
      container-engine/gvisor : GVisor | Download containerd-shim-runsc-v1 binary       TAGS: [container-engine, gvisor]
      container-engine/gvisor : GVisor | Copy binaries  TAGS: [container-engine, gvisor]
      container-engine/crun : Crun | Download crun binary       TAGS: [container-engine, crun]
      container-engine/crun : Copy crun binary from download dir        TAGS: [container-engine, crun]
      container-engine/youki : Youki | Download youki   TAGS: [container-engine, youki]
      container-engine/youki : Youki | Copy youki binary from download dir      TAGS: [container-engine, youki]
      container-engine/crictl : Install crictl  TAGS: [container-engine, crio]
      container-engine/skopeo : Skopeo | check if fedora coreos TAGS: [container-engine, crio]
      container-engine/skopeo : Skopeo | set is_ostree  TAGS: [container-engine, crio]
      container-engine/skopeo : Skopeo | Uninstall skopeo package managed by package manager    TAGS: [container-engine, crio]
      container-engine/skopeo : Skopeo | Download skopeo binary TAGS: [container-engine, crio]
      container-engine/skopeo : Copy skopeo binary from download dir    TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | include vars/v1.29.yml   TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | include vars/v1.31.yml   TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | check if fedora coreos   TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | set is_ostree    TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | get ostree version       TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | Download cri-o   TAGS: [container-engine, crio]
      container-engine/cri-o : Check that amzn2-extras.repo exists      TAGS: [container-engine, crio]
      container-engine/cri-o : Find docker repo in amzn2-extras.repo file       TAGS: [container-engine, crio]
      container-engine/cri-o : Remove docker repository TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | build a list of crio runtimes with Katacontainers runtimesTAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | build a list of crio runtimes with runc runtime  TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | build a list of crio runtimes with youki runtime TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | Stop kubelet service if running  TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | Get all pods     TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | Stop and remove pods not on host network TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | Stop and remove all remaining pods       TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | stop crio service if running     TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | make sure needed folders exist in the system     TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | install cri-o config     TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | install config.json      TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | copy binaries    TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | create directory for libexec     TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | copy libexec     TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | copy service file        TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | configure crio to use kube reserved cgroups      TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | update the bin dir for crio.service file TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | copy default policy      TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | copy mounts.conf TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | create directory for oci hooks   TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | set overlay driver       TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | set metacopy mount options correctly     TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | create directory registries configs      TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | write registries configs TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | configure unqualified registry settings  TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | write cri-o proxy drop-in        TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | configure the uid/gid space for user namespaces  TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | ensure crio service is started and enabled       TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | trigger service restart only when needed TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | verify that crio is running      TAGS: [container-engine, crio]
      container-engine/cri-o : Cri-o | ensure kubelet service is started if present and stopped TAGS: [container-engine, crio]
      container-engine/containerd-common : Containerd-common | check if fedora coreos   TAGS: [container-engine, containerd]
      container-engine/containerd-common : Containerd-common | set is_ostree    TAGS: [container-engine, containerd]
      container-engine/containerd-common : Containerd-common | gather os specific variables     TAGS: [container-engine, containerd, facts]
      container-engine/runc : Runc | check if fedora coreos     TAGS: [container-engine, containerd]
      container-engine/runc : Runc | set is_ostree      TAGS: [container-engine, containerd]
      container-engine/runc : Runc | Uninstall runc package managed by package manager  TAGS: [container-engine, containerd]
      container-engine/runc : Runc | Download runc binary       TAGS: [container-engine, containerd]
      container-engine/runc : Copy runc binary from download dir        TAGS: [container-engine, containerd]
      container-engine/runc : Runc | Remove orphaned binary     TAGS: [container-engine, containerd]
      container-engine/crictl : Install crictl  TAGS: [container-engine, containerd]
      container-engine/nerdctl : Nerdctl | Download nerdctl     TAGS: [container-engine, containerd]
      container-engine/nerdctl : Nerdctl | Copy nerdctl binary from download dir        TAGS: [container-engine, containerd]
      container-engine/nerdctl : Nerdctl | Create configuration dir     TAGS: [container-engine, containerd]
      container-engine/nerdctl : Nerdctl | Install nerdctl configuration        TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Download containerd    TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Unpack containerd archive      TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Generate systemd service for containerd        TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Ensure containerd directories exist    TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Write containerd proxy drop-in TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Generate default base_runtime_spec     TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Store generated default base_runtime_spec      TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Write base_runtime_specs       TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Copy containerd config file    TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Create registry directories    TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Write hosts.toml file  TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Flush handlers TAGS: [container-engine, containerd]
      container-engine/containerd : Containerd | Ensure containerd is started and enabled       TAGS: [container-engine, containerd]
      container-engine/containerd-common : Containerd-common | check if fedora coreos   TAGS: [container-engine, docker]
      container-engine/containerd-common : Containerd-common | set is_ostree    TAGS: [container-engine, docker]
      container-engine/containerd-common : Containerd-common | gather os specific variables     TAGS: [container-engine, docker, facts]
      container-engine/docker : Check if fedora coreos  TAGS: [container-engine, docker]
      container-engine/docker : Set is_ostree   TAGS: [container-engine, docker]
      container-engine/docker : Gather os specific variables    TAGS: [container-engine, docker, facts]
      container-engine/docker : Warn about Docker version on SUSE       TAGS: [container-engine, docker]
      container-engine/docker : Gather DNS facts        TAGS: [container-engine, docker, facts]
      container-engine/docker : Remove legacy docker repo file  TAGS: [container-engine, docker]
      container-engine/docker : Ensure old versions of Docker are not installed. | Debian       TAGS: [container-engine, docker]
      container-engine/docker : Ensure podman not installed. | RedHat   TAGS: [container-engine, docker]
      container-engine/docker : Ensure old versions of Docker are not installed. | RedHat       TAGS: [container-engine, docker]
      container-engine/docker : Ensure docker-ce repository public key is installed     TAGS: [container-engine, docker]
      container-engine/docker : Convert -backports sources to archive.debian.org for bullseye and older  TAGS: [container-engine, docker]
      container-engine/docker : Ensure docker-ce repository is enabled  TAGS: [container-engine, docker]
      container-engine/docker : Configure docker repository on Fedora   TAGS: [container-engine, docker]
      container-engine/docker : Configure docker repository on RedHat/CentOS/OracleLinux/AlmaLinux/KylinLinux    TAGS: [container-engine, docker]
      container-engine/docker : Remove dpkg hold        TAGS: [container-engine, docker]
      container-engine/docker : Ensure docker packages are installed    TAGS: [container-engine, docker]
      container-engine/docker : Tell Debian hosts not to change the docker version with apt upgrade      TAGS: [container-engine, docker]
      container-engine/docker : Ensure service is started if docker packages are already presentTAGS: [container-engine, docker]
      container-engine/docker : Flush handlers so we can wait for docker to come up     TAGS: [container-engine, docker]
      container-engine/docker : Install docker plugin   TAGS: [container-engine, docker]
      container-engine/docker : Create docker service systemd directory if it doesn't exist     TAGS: [container-engine, docker]
      container-engine/docker : Write docker proxy drop-in      TAGS: [container-engine, docker]
      container-engine/docker : Write docker.service systemd file       TAGS: [container-engine, docker]
      container-engine/docker : Write docker options systemd drop-in    TAGS: [container-engine, docker]
      container-engine/docker : Write docker dns systemd drop-in        TAGS: [container-engine, docker]
      container-engine/docker : Copy docker orphan clean up script to the node  TAGS: [container-engine, docker]
      container-engine/docker : Write docker orphan clean up systemd drop-in    TAGS: [container-engine, docker]
      container-engine/docker : Flush handlers  TAGS: [container-engine, docker]
      container-engine/docker : Ensure docker service is started and enabled    TAGS: [container-engine, docker]
      container-engine/crictl : Install crictl  TAGS: [container-engine, docker]
      container-engine/cri-dockerd : Runc | Download cri-dockerd binary TAGS: [container-engine, docker]
      container-engine/cri-dockerd : Copy cri-dockerd binary from download dir  TAGS: [container-engine, docker]
      container-engine/cri-dockerd : Generate cri-dockerd systemd unit files    TAGS: [container-engine, docker]
      container-engine/cri-dockerd : Flush handlers     TAGS: [container-engine, docker]

  play #7 (kube_node): Add worker nodes to the etcd play if needed      TAGS: []
    tasks:

  play #8 (etcd:kube_control_plane:_kubespray_needs_etcd): Install etcd TAGS: []
    tasks:

  play #9 (k8s_cluster): Install Kubernetes nodes       TAGS: []
    tasks:

  play #10 (kube_control_plane): Install the control plane      TAGS: []
    tasks:

  play #11 (k8s_cluster): Invoke kubeadm and install a CNI      TAGS: []
    tasks:
      helm-apps : Validating arguments against arg spec 'main' - Install a list of Helm charts. TAGS: [always, custom_cni, network]

  play #12 (calico_rr): Install Calico Route Reflector  TAGS: []
    tasks:

  play #13 (kube_control_plane[0]): Patch Kubernetes for Windows        TAGS: []
    tasks:

  play #14 (kube_control_plane): Install Kubernetes apps        TAGS: []
    tasks:
      helm-apps : Validating arguments against arg spec 'main' - Install a list of Helm charts. TAGS: [always, apps, kubelet-csr-approver]

  play #15 (k8s_cluster): Apply resolv.conf changes now that cluster DNS is up  TAGS: []
    tasks:

 

특히 흐름을 μžμ„Ένžˆ 보면, κ·Έλƒ₯ μ„€μΉ˜λ§Œ ν•˜λŠ” 게 μ•„λ‹ˆλΌ 이미 κΉ”λ € μžˆμ„ μˆ˜λ„ μžˆλŠ” λŸ°νƒ€μž„λ“€μ„ μ „λΆ€ μ κ²€ν•˜κ³ ,
μ„ νƒλ˜μ§€ μ•Šμ€ λŸ°νƒ€μž„μ€ λ…Έλ“œ drain → kubelet 쀑지 → λŸ°νƒ€μž„ 제거λ₯Ό μˆ˜ν–‰ν•œλ‹€.

 

kubeadm λ°”μ΄λ„ˆλ¦¬ / μ»¨ν…Œμ΄λ„ˆ 이미지 λ‹€μš΄λ‘œλ“œ

kubeadm이 ν•„μš”λ‘œ ν•˜λŠ” 컨트둀 ν”Œλ ˆμΈ 이미지 λͺ©λ‘μ„ 미리 κ³„μ‚°ν•˜λŠ” Kubespray의 λ‹€μš΄λ‘œλ“œ νŒŒμ΄ν”„λΌμΈμ„ μ‚΄νŽ΄λ³΄λ„λ‘ ν•œλ‹€.

root@k8s-ctr:~/kubespray# cat roles/download/tasks/prep_kubeadm_images.yml
---
- name: Prep_kubeadm_images | Download kubeadm binary
  include_tasks: "download_file.yml"
  vars:
    download: "{{ download_defaults | combine(downloads.kubeadm) }}"
  when:
    - not skip_downloads | default(false)
    - downloads.kubeadm.enabled

- name: Prep_kubeadm_images | Copy kubeadm binary from download dir to system path
  copy:
    src: "{{ downloads.kubeadm.dest }}"
    dest: "{{ bin_dir }}/kubeadm"
    mode: "0755"
    remote_src: true

- name: Prep_kubeadm_images | Create kubeadm config
  template:
    src: "kubeadm-images.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
    mode: "0644"
    validate: "{{ kubeadm_config_validate_enabled | ternary(bin_dir + '/kubeadm config validate --config %s', omit) }}"
  when:
    - not skip_kubeadm_images | default(false)

- name: Prep_kubeadm_images | Generate list of required images
  shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'"
  args:
    executable: /bin/bash
  register: kubeadm_images_raw
  run_once: true
  changed_when: false
  when:
    - not skip_kubeadm_images | default(false)

- name: Prep_kubeadm_images | Parse list of images
  vars:
    kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
  set_fact:
    kubeadm_image:
      key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*', '')).split(':')[0] }}"
      value:
        enabled: true
        container: true
        repo: "{{ item | regex_replace('^(.*):.*$', '\\1') }}"
        tag: "{{ item | regex_replace('^.*:(.*)$', '\\1') }}"
        groups:
          - k8s_cluster
  loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
  register: kubeadm_images_cooked
  run_once: true
  when:
    - not skip_kubeadm_images | default(false)

- name: Prep_kubeadm_images | Convert list of images to dict for later use
  set_fact:
    kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
  run_once: true
  when:
    - not skip_kubeadm_images | default(false)

 

kubeadm이 좜λ ₯ν•œ 이미지 λͺ©λ‘μ€ λ‹¨μˆœν•œ λ¬Έμžμ—΄ 리슀트 ν˜•νƒœμ΄κΈ° λ•Œλ¬Έμ—, κ·ΈλŒ€λ‘œλŠ” Kubespray의 λ‹€μš΄λ‘œλ“œ λ‘œμ§μ—μ„œ μ‚¬μš©ν•  수 μ—†λ‹€. κ·Έλž˜μ„œ KubesprayλŠ” 이 리슀트λ₯Ό ν•˜λ‚˜μ”© κΊΌλ‚΄μ„œ κ³΅ν†΅μ μœΌλ‘œ μ‚¬μš©ν•˜λŠ” downloads ꡬ쑰에 맞게 λ‹€μ‹œ κ°€κ³΅ν•œλ‹€.

이 κ³Όμ •μ—μ„œ 각 μ΄λ―Έμ§€λŠ” 이미지 이름을 κΈ°μ€€μœΌλ‘œ ν•œ key κ°’, μ‹€μ œ μ €μž₯μ†Œ μ£Όμ†Œ(repo), νƒœκ·Έ(tag), μ»¨ν…Œμ΄λ„ˆ 이미지 μ—¬λΆ€, 그리고 μ–΄λ–€ λ…Έλ“œ κ·Έλ£Ήμ—μ„œ μ‚¬μš©ν•  것인지(k8s_cluster) 같은 λ©”νƒ€λ°μ΄ν„°λ‘œ λΆ„ν•΄λœλ‹€.

 

λ§Œμ•½ kubeadm이 ν•„μš”λ‘œ ν•˜λŠ” 이미지라 ν•˜λ”λΌλ„ Kubespray μž…μž₯μ—μ„œλŠ” κ²°κ΅­ λ‹€μš΄λ‘œλ“œν•΄μ•Ό ν•  μ•„ν‹°νŒ©νŠΈ 쀑 ν•˜λ‚˜μΌ 뿐이기 λ•Œλ¬Έμ— kubeadm 이미지 μ—­μ‹œ etcd μ΄λ―Έμ§€λ‚˜ CNI 이미지, containerd λ°”μ΄λ„ˆλ¦¬μ™€ λ™μΌν•œ λ‹€μš΄λ‘œλ“œ νŒŒμ΄ν”„λΌμΈμ„ νƒ€κ²Œ 되고, 결과적으둜 μ„€μΉ˜ κ³Όμ • μ „λ°˜μ—μ„œ 이미지 관리 방식이 μΌκ΄€λ˜κ²Œ μœ μ§€λœλ‹€.

 

 

 

install_etcd.yml

root@k8s-ctr:~/kubespray# cat playbooks/install_etcd.yml 
---
- name: Add worker nodes to the etcd play if needed
  hosts: kube_node
  roles:
    - { role: kubespray_defaults }
  tasks:
    - name: Check if nodes needs etcd client certs (depends on network_plugin)
      group_by:
        key: "_kubespray_needs_etcd"
      when:
        - kube_network_plugin in ["flannel", "canal", "cilium"] or
          (cilium_deploy_additionally | default(false)) or
          (kube_network_plugin == "calico" and calico_datastore == "etcd")
        - etcd_deployment_type != "kubeadm"
      tags: etcd

- name: Install etcd
  hosts: etcd:kube_control_plane:_kubespray_needs_etcd
  gather_facts: false
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray_defaults }
    - role: etcd
      tags: etcd
      when: etcd_deployment_type != "kubeadm"

 

install_etcd.yml은 ν΄λŸ¬μŠ€ν„° μ•ˆμ—μ„œ λˆ„κ°€ etcd에 μ ‘κ·Όν•΄μ•Ό ν•˜λŠ”μ§€ μ •μ˜ν•˜κ³ , 그에 λ§žλŠ” μ„€μΉ˜ λ²”μœ„λ₯Ό κ²°μ •ν•œλ‹€.

root@k8s-ctr:~/kubespray# tree ~/kubespray/roles/etcd
/root/kubespray/roles/etcd
β”œβ”€β”€ handlers
β”‚   β”œβ”€β”€ backup_cleanup.yml
β”‚   β”œβ”€β”€ backup.yml
β”‚   └── main.yml
β”œβ”€β”€ meta
β”‚   └── main.yml
β”œβ”€β”€ tasks
β”‚   β”œβ”€β”€ check_certs.yml
β”‚   β”œβ”€β”€ configure.yml
β”‚   β”œβ”€β”€ gen_certs_script.yml
β”‚   β”œβ”€β”€ gen_nodes_certs_script.yml
β”‚   β”œβ”€β”€ install_docker.yml
β”‚   β”œβ”€β”€ install_host.yml
β”‚   β”œβ”€β”€ join_etcd-events_member.yml
β”‚   β”œβ”€β”€ join_etcd_member.yml
β”‚   β”œβ”€β”€ main.yml
β”‚   β”œβ”€β”€ refresh_config.yml
β”‚   └── upd_ca_trust.yml
└── templates
    β”œβ”€β”€ etcd-docker.service.j2
    β”œβ”€β”€ etcd.env.j2
    β”œβ”€β”€ etcd-events-docker.service.j2
    β”œβ”€β”€ etcd-events.env.j2
    β”œβ”€β”€ etcd-events-host.service.j2
    β”œβ”€β”€ etcd-events.j2
    β”œβ”€β”€ etcd-host.service.j2
    β”œβ”€β”€ etcd.j2
    β”œβ”€β”€ make-ssl-etcd.sh.j2
    └── openssl.conf.j2

5 directories, 25 files

 

root@k8s-ctr:~/kubespray# systemctl status etcd.service --no-pager
● etcd.service - etcd
     Loaded: loaded (/etc/systemd/system/etcd.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-29 02:43:57 KST; 3 days ago
 Invocation: 3c7c280979f44467b4109035c356415a
   Main PID: 17852 (etcd)
      Tasks: 12 (limit: 24792)
     Memory: 318.6M (peak: 335.5M)
        CPU: 39min 7.945s
     CGroup: /system.slice/etcd.service
             └─17852 /usr/local/bin/etcd

Feb 01 05:07:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:07:53.601930+0900…32085}
Feb 01 05:12:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:12:53.618206+0900…32929}
Feb 01 05:12:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:12:53.628062+0900","cal…
Feb 01 05:12:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:12:53.628129+0900…32502}
Feb 01 05:17:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:17:53.625719+0900…33362}
Feb 01 05:17:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:17:53.634790+0900","cal…
Feb 01 05:17:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:17:53.634866+0900…32929}
Feb 01 05:22:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:22:53.632116+0900…33780}
Feb 01 05:22:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:22:53.639231+0900","cal…
Feb 01 05:22:53 k8s-ctr etcd[17852]: {"level":"info","ts":"2026-02-01T05:22:53.639292+0900…33362}
Hint: Some lines were ellipsized, use -l to show in full.
root@k8s-ctr:~/kubespray# cat /etc/systemd/system/etcd.service
[Unit]
Description=etcd
After=network.target

[Service]
Type=notify
User=root
EnvironmentFile=/etc/etcd.env
ExecStart=/usr/local/bin/etcd
NotifyAccess=all
Restart=always
RestartSec=10s
LimitNOFILE=40000

[Install]
WantedBy=multi-user.target
root@k8s-ctr:~/kubespray# 
cat /etc/etcd.env 
# Environment file for etcd 3.5.25
ETCD_DATA_DIR=/var/lib/etcd
ETCD_ADVERTISE_CLIENT_URLS=https://192.168.10.10:2379
ETCD_INITIAL_ADVERTISE_PEER_URLS=https://192.168.10.10:2380
ETCD_INITIAL_CLUSTER_STATE=existing
ETCD_METRICS=basic
ETCD_LISTEN_CLIENT_URLS=https://192.168.10.10:2379,https://127.0.0.1:2379
ETCD_ELECTION_TIMEOUT=5000
ETCD_HEARTBEAT_INTERVAL=250
ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=https://192.168.10.10:2380
ETCD_NAME=etcd1
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER=etcd1=https://192.168.10.10:2380
ETCD_AUTO_COMPACTION_RETENTION=8
ETCD_SNAPSHOT_COUNT=100000
ETCD_QUOTA_BACKEND_BYTES=2147483648
ETCD_MAX_REQUEST_BYTES=1572864
ETCD_LOG_LEVEL=info
ETCD_MAX_SNAPSHOTS=5
ETCD_MAX_WALS=5
# Flannel need etcd v2 API
ETCD_ENABLE_V2=true

# TLS settings
ETCD_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.pem
ETCD_CERT_FILE=/etc/ssl/etcd/ssl/member-k8s-ctr.pem
ETCD_KEY_FILE=/etc/ssl/etcd/ssl/member-k8s-ctr-key.pem
ETCD_CLIENT_CERT_AUTH=true

ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.pem
ETCD_PEER_CERT_FILE=/etc/ssl/etcd/ssl/member-k8s-ctr.pem
ETCD_PEER_KEY_FILE=/etc/ssl/etcd/ssl/member-k8s-ctr-key.pem
ETCD_PEER_CLIENT_CERT_AUTH=True



# CLI settings
ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
ETCDCTL_CACERT=/etc/ssl/etcd/ssl/ca.pem
ETCDCTL_KEY=/etc/ssl/etcd/ssl/admin-k8s-ctr-key.pem
ETCDCTL_CERT=/etc/ssl/etcd/ssl/admin-k8s-ctr.pem

# ETCD 3.5.x issue
# https://groups.google.com/a/kubernetes.io/g/dev/c/B7gJs88XtQc/m/rSgNOzV2BwAJ?utm_medium=email&utm_source=footer
ETCD_EXPERIMENTAL_INITIAL_CORRUPT_CHECK=True


ETCD_EXPERIMENTAL_WATCH_PROGRESS_NOTIFY_INTERVAL=5s

 

데λͺ¬ ν˜•νƒœλ‘œ 기동 쀑인 etcd μ„œλΉ„μŠ€μ΄λ‹€.

 

etcdctl 확인

root@k8s-ctr:~/kubespray# ss -tnlp | grep etcd
LISTEN 0      4096   192.168.10.10:2380       0.0.0.0:*    users:(("etcd",pid=17852,fd=6))                          
LISTEN 0      4096   192.168.10.10:2379       0.0.0.0:*    users:(("etcd",pid=17852,fd=8))                          
LISTEN 0      4096       127.0.0.1:2379       0.0.0.0:*    users:(("etcd",pid=17852,fd=7)) 


root@k8s-ctr:~/kubespray# etcdctl.sh member list -w table
+------------------+---------+-------+----------------------------+----------------------------+------------+
|        ID        | STATUS  | NAME  |         PEER ADDRS         |        CLIENT ADDRS        | IS LEARNER |
+------------------+---------+-------+----------------------------+----------------------------+------------+
| a997582217e26c7f | started | etcd1 | https://192.168.10.10:2380 | https://192.168.10.10:2379 |      false |
+------------------+---------+-------+----------------------------+----------------------------+------------+
root@k8s-ctr:~/kubespray# cat /usr/local/bin/etcdctl.sh
#!/bin/bash
# Ansible managed
# example invocation: etcdctl.sh get --keys-only --from-key ""

etcdctl \
  --cacert /etc/ssl/etcd/ssl/ca.pem \
  --cert /etc/ssl/etcd/ssl/admin-k8s-ctr.pem \
  --key /etc/ssl/etcd/ssl/admin-k8s-ctr-key.pem "$@"
root@k8s-ctr:~/kubespray# tree /etc/ssl/etcd
/etc/ssl/etcd
β”œβ”€β”€ openssl.conf
└── ssl
    β”œβ”€β”€ admin-k8s-ctr-key.pem
    β”œβ”€β”€ admin-k8s-ctr.pem
    β”œβ”€β”€ ca-key.pem
    β”œβ”€β”€ ca.pem
    β”œβ”€β”€ member-k8s-ctr-key.pem
    β”œβ”€β”€ member-k8s-ctr.pem
    β”œβ”€β”€ node-k8s-ctr-key.pem
    └── node-k8s-ctr.pem

2 directories, 9 files
root@k8s-ctr:~/kubespray# cat /etc/ssl/etcd/openssl.conf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

[ ssl_client ]
extendedKeyUsage = clientAuth, serverAuth
basicConstraints = CA:FALSE
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer
subjectAltName = @alt_names

[ v3_ca ]
basicConstraints = CA:TRUE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
authorityKeyIdentifier=keyid:always,issuer

[alt_names]
DNS.1 = localhost
DNS.2 = k8s-ctr
DNS.3 = lb-apiserver.kubernetes.local
DNS.4 = etcd.kube-system.svc.cluster.local
DNS.5 = etcd.kube-system.svc
DNS.6 = etcd.kube-system
DNS.7 = etcd
IP.1 = 192.168.10.10
IP.2 = 127.0.0.1
IP.3 = ::1

 

 

 

nodes

kubeletκ³Ό kube-proxyκ°€ 문제 없이 λ™μž‘ν•  수 μžˆλŠ” OS μƒνƒœλ₯Ό λ¨Όμ € λ§Œλ“ λ‹€.

---
# λ…Έλ“œμ—μ„œ μ‚¬μš©ν•  각쒅 fact(λ„€νŠΈμ›Œν¬, OS, λŸ°νƒ€μž„ 정보 λ“±)λ₯Ό μˆ˜μ§‘
# 이후 쑰건 뢄기와 μ„€μ • κ°’ κ³„μ‚°μ˜ 기반이 됨
- name: Fetch facts
  import_tasks: facts.yml
  tags:
    - facts
    - kubelet

# CNIκ°€ μ‚¬μš©ν•˜λŠ” μƒνƒœ 디렉터리 보μž₯
# λ„€νŠΈμ›Œν¬ ν”ŒλŸ¬κ·ΈμΈμ΄ 정상 λ™μž‘ν•˜κΈ° μœ„ν•œ ν•„μˆ˜ 디렉터리
- name: Ensure /var/lib/cni exists
  file:
    path: /var/lib/cni
    state: directory
    mode: "0755"

# kubelet λ°”μ΄λ„ˆλ¦¬ μ„€μΉ˜
# μ‹€μ œ kubelet μ‹€ν–‰ νŒŒμΌμ„ λ…Έλ“œμ— 배치
- name: Install kubelet binary
  import_tasks: install.yml
  tags:
    - kubelet

# control-plane λ…Έλ“œμ—μ„œ kube-vip을 μ‚¬μš©ν•˜λŠ” 경우
# API Server VIP μ œκ³΅μ„ μœ„ν•œ kube-vip μ„€μΉ˜
- name: Install kube-vip
  import_tasks: loadbalancer/kube-vip.yml
  when:
    - ('kube_control_plane' in group_names)
    - kube_vip_enabled
  tags:
    - kube-vip

# μ›Œμ»€ λ…Έλ“œ λ˜λŠ” νŠΉμ • IPv6 μ‘°κ±΄μ—μ„œ
# localhost 기반 API Server 접근을 μœ„ν•œ nginx ν”„λ‘μ‹œ μ„€μΉ˜
- name: Install nginx-proxy
  import_tasks: loadbalancer/nginx-proxy.yml
  when:
    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'nginx'
  tags:
    - nginx

# nginx λŒ€μ‹  haproxyλ₯Ό μ„ νƒν•œ 경우
# λ™μΌν•œ λͺ©μ (API Server μ ‘κ·Ό ν”„λ‘μ‹œ)으둜 haproxy μ„€μΉ˜
- name: Install haproxy
  import_tasks: loadbalancer/haproxy.yml
  when:
    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'haproxy'
  tags:
    - haproxy

# NodePort둜 μ‚¬μš©ν•˜λŠ” 포트 λ²”μœ„λ₯Ό
# μ»€λ„μ—μ„œ 둜컬 포트둜 μ˜ˆμ•½ν•΄μ„œ 좩돌 λ°©μ§€
- name: Ensure nodePort range is reserved
  ansible.posix.sysctl:
    name: net.ipv4.ip_local_reserved_ports
    value: "{{ kube_apiserver_node_port_range }}"
    sysctl_set: true
    sysctl_file: "{{ sysctl_file_path }}"
    state: present
    reload: true
    ignoreerrors: "{{ sysctl_ignore_unknown_keys }}"
  when: kube_apiserver_node_port_range is defined
  tags:
    - kube-proxy

# br_netfilter 컀널 λͺ¨λ“ˆ 쑴재 μ—¬λΆ€ 확인
# kube-proxy, CNIμ—μ„œ ν•„μˆ˜
- name: Verify if br_netfilter module exists
  command: "modinfo br_netfilter"
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"
  register: modinfo_br_netfilter
  failed_when: modinfo_br_netfilter.rc not in [0, 1]
  changed_when: false
  check_mode: false

# 컀널 λͺ¨λ“ˆ μ„€μ • νŒŒμΌμ„ μ €μž₯ν•  디렉터리 보μž₯
- name: Verify br_netfilter module path exists
  file:
    path: "{{ item }}"
    state: directory
    mode: "0755"
  loop:
    - /etc/modules-load.d
    - /etc/modprobe.d

# br_netfilter λͺ¨λ“ˆμ„ μ¦‰μ‹œ λ‘œλ“œ
- name: Enable br_netfilter module
  community.general.modprobe:
    name: br_netfilter
    state: present
  when: modinfo_br_netfilter.rc == 0

# λΆ€νŒ… μ‹œμ—λ„ br_netfilter μžλ™ λ‘œλ“œλ˜λ„λ‘ μ„€μ •
- name: Persist br_netfilter module
  copy:
    dest: /etc/modules-load.d/kubespray-br_netfilter.conf
    content: br_netfilter
    mode: "0644"
  when: modinfo_br_netfilter.rc == 0

# 컀널에 bridge-nf sysctl ν‚€κ°€ μžˆλŠ”μ§€ 확인
# (일뢀 μ»€λ„μ—μ„œλŠ” br_netfilter 없이도 쑴재)
- name: Check if bridge-nf-call-iptables key exists
  command: "sysctl net.bridge.bridge-nf-call-iptables"
  failed_when: false
  changed_when: false
  check_mode: false
  register: sysctl_bridge_nf_call_iptables

# λΈŒλ¦¬μ§€ νŠΈλž˜ν”½μ„ iptables/arptables/ip6tables둜 전달
# Service, NetworkPolicy 정상 λ™μž‘μ„ μœ„ν•œ 핡심 μ„€μ •
- name: Enable bridge-nf-call tables
  ansible.posix.sysctl:
    name: "{{ item }}"
    state: present
    sysctl_file: "{{ sysctl_file_path }}"
    value: "1"
    reload: true
    ignoreerrors: "{{ sysctl_ignore_unknown_keys }}"
  when: sysctl_bridge_nf_call_iptables.rc == 0
  with_items:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-arptables
    - net.bridge.bridge-nf-call-ip6tables

# kube-proxyκ°€ IPVS λͺ¨λ“œμΌ 경우 ν•„μš”ν•œ 컀널 λͺ¨λ“ˆ λ‘œλ“œ
- name: Modprobe Kernel Module for IPVS
  community.general.modprobe:
    name: "{{ item }}"
    state: present
    persistent: present
  loop: "{{ kube_proxy_ipvs_modules }}"
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

# IPVS λͺ¨λ“œμ—μ„œ conntrack λͺ¨λ“ˆ λ‘œλ“œ
# 컀널/배포판 차이λ₯Ό κ³ λ €ν•΄ μ—¬λŸ¬ λͺ¨λ“ˆ μ‹œλ„
- name: Modprobe conntrack module
  community.general.modprobe:
    name: "{{ item }}"
    state: present
    persistent: present
  register: modprobe_conntrack_module
  ignore_errors: true
  loop:
    - nf_conntrack
    - nf_conntrack_ipv4
  when:
    - kube_proxy_mode == 'ipvs'
    - modprobe_conntrack_module is not defined or modprobe_conntrack_module is ansible.builtin.failed
  tags:
    - kube-proxy

# kube-proxyκ°€ nftables λͺ¨λ“œμΌ 경우 ν•„μš”ν•œ 컀널 λͺ¨λ“ˆ λ‘œλ“œ
- name: Modprobe Kernel Module for nftables
  community.general.modprobe:
    name: "nf_tables"
    state: present
    persistent: present
  when: kube_proxy_mode == 'nftables'
  tags:
    - kube-proxy

# kubelet systemd μ„œλΉ„μŠ€ μ„€μ •, config 생성 및 μ‹€ν–‰ μ€€λΉ„
- name: Install kubelet
  import_tasks: kubelet.yml
  tags:
    - kubelet
    - kubeadm

 

kubelet

root@k8s-ctr:~/kubespray# cat roles/kubernetes/node/tasks/kubelet.yml
---
- name: Set kubelet api version to v1beta1
  set_fact:
    kubeletConfig_api_version: v1beta1
  tags:
    - kubelet
    - kubeadm

- name: Write kubelet environment config file (kubeadm)
  template:
    src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
    dest: "{{ kube_config_dir }}/kubelet.env"
    setype: "{{ (preinstall_selinux_state != 'disabled') | ternary('etc_t', omit) }}"
    backup: true
    mode: "0600"
  notify: Node | restart kubelet
  tags:
    - kubelet
    - kubeadm

- name: Write kubelet config file
  template:
    src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubelet-config.yaml"
    mode: "0600"
  notify: Kubelet | restart kubelet
  tags:
    - kubelet
    - kubeadm

- name: Write kubelet systemd init file
  template:
    src: "kubelet.service.j2"
    dest: "/etc/systemd/system/kubelet.service"
    backup: true
    mode: "0600"
    validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:kubelet.service'"
    # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
    # Remove once we drop support for systemd < 250
  notify: Node | restart kubelet
  tags:
    - kubelet
    - kubeadm

- name: Flush_handlers and reload-systemd
  meta: flush_handlers

- name: Enable kubelet
  service:
    name: kubelet
    enabled: true
    state: started
  tags:
    - kubelet
  notify: Kubelet | restart kubelet
root@k8s-ctr:~/kubespray# cat /etc/kubernetes/kubelet-config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
nodeStatusUpdateFrequency: "10s"
failSwapOn: True
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: True
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.crt
authorization:
  mode: Webhook
staticPodPath: "/etc/kubernetes/manifests"
cgroupDriver: systemd
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
containerRuntimeEndpoint : unix:///var/run/containerd/containerd.sock
maxPods: 110
podPidsLimit: -1
address: "192.168.10.10"
readOnlyPort: 0
healthzPort: 10248
healthzBindAddress: "127.0.0.1"
kubeletCgroups: /system.slice/kubelet.service
clusterDomain: cluster.local
protectKernelDefaults: true
rotateCertificates: true
clusterDNS:
- 10.233.0.3
kubeReserved:
  cpu: "100m"
  memory: "256Mi"
  ephemeral-storage: "500Mi"
  pid: "1000"
systemReserved:
  cpu: "500m"
  memory: "512Mi"
  ephemeral-storage: "500Mi"
  pid: "1000"
resolvConf: "/etc/resolv.conf"
eventRecordQPS: 50
shutdownGracePeriod: 60s
shutdownGracePeriodCriticalPods: 20s
maxParallelImagePulls: 1

 

 

control-plane

root@k8s-ctr:~/kubespray# tree roles/kubernetes/control-plane/
roles/kubernetes/control-plane/
β”œβ”€β”€ defaults
β”‚   └── main
β”‚       β”œβ”€β”€ etcd.yml
β”‚       β”œβ”€β”€ kube-proxy.yml
β”‚       β”œβ”€β”€ kube-scheduler.yml
β”‚       └── main.yml
β”œβ”€β”€ handlers
β”‚   └── main.yml
β”œβ”€β”€ meta
β”‚   └── main.yml
β”œβ”€β”€ tasks
β”‚   β”œβ”€β”€ check-api.yml
β”‚   β”œβ”€β”€ define-first-kube-control.yml
β”‚   β”œβ”€β”€ encrypt-at-rest.yml
β”‚   β”œβ”€β”€ kubeadm-backup.yml
β”‚   β”œβ”€β”€ kubeadm-etcd.yml
β”‚   β”œβ”€β”€ kubeadm-fix-apiserver.yml
β”‚   β”œβ”€β”€ kubeadm-secondary.yml
β”‚   β”œβ”€β”€ kubeadm-setup.yml
β”‚   β”œβ”€β”€ kubeadm-upgrade.yml
β”‚   β”œβ”€β”€ kubelet-fix-client-cert-rotation.yml
β”‚   β”œβ”€β”€ main.yml
β”‚   └── pre-upgrade.yml
β”œβ”€β”€ templates
β”‚   β”œβ”€β”€ admission-controls.yaml.j2
β”‚   β”œβ”€β”€ apiserver-audit-policy.yaml.j2
β”‚   β”œβ”€β”€ apiserver-audit-webhook-config.yaml.j2
β”‚   β”œβ”€β”€ apiserver-tracing.yaml.j2
β”‚   β”œβ”€β”€ eventratelimit.yaml.j2
β”‚   β”œβ”€β”€ k8s-certs-renew.service.j2
β”‚   β”œβ”€β”€ k8s-certs-renew.sh.j2
β”‚   β”œβ”€β”€ k8s-certs-renew.timer.j2
β”‚   β”œβ”€β”€ kubeadm-config.v1beta3.yaml.j2
β”‚   β”œβ”€β”€ kubeadm-config.v1beta4.yaml.j2
β”‚   β”œβ”€β”€ kubeadm-controlplane.yaml.j2
β”‚   β”œβ”€β”€ kubescheduler-config.yaml.j2
β”‚   β”œβ”€β”€ podnodeselector.yaml.j2
β”‚   β”œβ”€β”€ podsecurity.yaml.j2
β”‚   β”œβ”€β”€ resourcequota.yaml.j2
β”‚   β”œβ”€β”€ secrets_encryption.yaml.j2
β”‚   β”œβ”€β”€ webhook-authorization-config.yaml.j2
β”‚   └── webhook-token-auth-config.yaml.j2
└── vars
    └── main.yaml

8 directories, 37 files
---
# 컨트둀 ν”Œλ ˆμΈ μ—…κ·Έλ ˆμ΄λ“œ 전에 ν•„μš”ν•œ 사전 μž‘μ—…
# (버전 차이둜 μΈν•œ μ„€μ • 좩돌, cert 문제 등을 미리 정리)
- name: Pre-upgrade control plane
  import_tasks: pre-upgrade.yml
  tags:
    - k8s-pre-upgrade

# kube-apiserverμ—μ„œ webhook 기반 토큰 인증을 μ‚¬μš©ν•  경우
# ν•΄λ‹Ή 인증 μ„€μ • 파일 생성
- name: Create webhook token auth config
  template:
    src: webhook-token-auth-config.yaml.j2
    dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
    mode: "0640"
  when: kube_webhook_token_auth | default(false)

# kube-apiserverμ—μ„œ webhook 기반 인가(authorization)λ₯Ό μ‚¬μš©ν•  경우
# 인가 μ„€μ • 파일 생성
- name: Create webhook authorization config
  template:
    src: webhook-authorization-config.yaml.j2
    dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
    mode: "0640"
  when: kube_webhook_authorization | default(false)

# Kubernetes의 κ΅¬μ‘°ν™”λœ AuthorizationConfiguration μ‚¬μš© μ‹œ
# apiserverκ°€ 직접 μ½λŠ” 인가 μ„€μ • 파일 생성
- name: Create structured AuthorizationConfiguration file
  copy:
    content: "{{ authz_config | to_nice_yaml(indent=2, sort_keys=false) }}"
    dest: "{{ kube_config_dir }}/apiserver-authorization-config-{{ kube_apiserver_authorization_config_api_version }}.yaml"
    mode: "0640"
  vars:
    authz_config:
      apiVersion: apiserver.config.k8s.io/{{ kube_apiserver_authorization_config_api_version }}
      kind: AuthorizationConfiguration
      authorizers: "{{ kube_apiserver_authorization_config_authorizers }}"
  when: kube_apiserver_use_authorization_config_file

# kube-scheduler μ„€μ • 파일 생성
# μŠ€μΌ€μ€„λ§ μ •μ±…, ν”„λ‘œνŒŒμΌ 등을 kube-scheduler에 전달
- name: Create kube-scheduler config
  template:
    src: kubescheduler-config.yaml.j2
    dest: "{{ kube_config_dir }}/kubescheduler-config.yaml"
    mode: "0644"

# Kubernetes Secret을 etcd에 μ•”ν˜Έν™”ν•΄μ„œ μ €μž₯ν•˜κΈ° μœ„ν•œ μ„€μ •
# (Encrypt at Rest)
- name: Apply Kubernetes encrypt at rest config
  import_tasks: encrypt-at-rest.yml
  when:
    - kube_encrypt_secret_data
  tags:
    - kube-apiserver

# kubectl λ°”μ΄λ„ˆλ¦¬λ₯Ό λ…Έλ“œμ— μ„€μΉ˜
# 컨트둀 ν”Œλ ˆμΈ λ…Έλ“œμ—μ„œ 관리 μž‘μ—… μˆ˜ν–‰ μš©λ„
- name: Install | Copy kubectl binary from download dir
  copy:
    src: "{{ downloads.kubectl.dest }}"
    dest: "{{ bin_dir }}/kubectl"
    mode: "0755"
    remote_src: true
  tags:
    - kubectl
    - upgrade

# kubectl bash μžλ™μ™„μ„± 슀크립트 생성
- name: Install kubectl bash completion
  shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh"
  when: ansible_os_family in ["Debian","RedHat", "Suse"]
  tags:
    - kubectl
  ignore_errors: true

# μžλ™μ™„μ„± 슀크립트 κΆŒν•œ μ„€μ •
- name: Set kubectl bash completion file permissions
  file:
    path: /etc/bash_completion.d/kubectl.sh
    owner: root
    group: root
    mode: "0755"
  when: ansible_os_family in ["Debian","RedHat", "Suse"]
  tags:
    - kubectl
    - upgrade
  ignore_errors: true

# kubectl에 λŒ€ν•œ 별칭(alias)을 μ„€μ •
# ex) k → kubectl
- name: Set bash alias for kubectl
  blockinfile:
    path: /etc/bash_completion.d/kubectl.sh
    block: |-
      alias {{ kubectl_alias }}=kubectl
      if [[ $(type -t compopt) = "builtin" ]]; then
        complete -o default -F __start_kubectl {{ kubectl_alias }}
      else
        complete -o default -o nospace -F __start_kubectl {{ kubectl_alias }}
      fi
    state: present
    marker: "# Ansible entries {mark}"
  when:
    - ansible_os_family in ["Debian","RedHat", "Suse"]
    - kubectl_alias is defined and kubectl_alias != ""
  tags:
    - kubectl
    - upgrade
  ignore_errors: true

# κΈ°μ‘΄ ν΄λŸ¬μŠ€ν„°μ— 이미 쑰인된 λ…Έλ“œμ™€
# 졜초 control-plane λ…Έλ“œλ₯Ό 식별
- name: Define nodes already joined to existing cluster and first_kube_control_plane
  import_tasks: define-first-kube-control.yml

# kubeadm을 μ΄μš©ν•œ 컨트둀 ν”Œλ ˆμΈ μ΄ˆκΈ°ν™”/쑰인 둜직
- name: Include kubeadm setup
  import_tasks: kubeadm-setup.yml

# etcdλ₯Ό kubeadm이 κ΄€λ¦¬ν•˜λŠ” ꡬ쑰일 경우
# kubeadm 기반 etcd μΆ”κ°€ μž‘μ—… μˆ˜ν–‰
- name: Include kubeadm etcd extra tasks
  include_tasks: kubeadm-etcd.yml
  when: etcd_deployment_type == "kubeadm"

# λ©€ν‹° 컨트둀 ν”Œλ ˆμΈ ν™˜κ²½μ—μ„œ
# secondary apiserver κ΄€λ ¨ 문제 보정
- name: Include kubeadm secondary server apiserver fixes
  include_tasks: kubeadm-fix-apiserver.yml

# μ‚¬μš©ν•˜μ§€ μ•ŠλŠ” AuthorizationConfiguration 버전 파일 정리
- name: Cleanup unused AuthorizationConfiguration file versions
  file:
    path: "{{ kube_config_dir }}/apiserver-authorization-config-{{ item }}.yaml"
    state: absent
  loop: "{{ ['v1alpha1', 'v1beta1', 'v1'] | reject('equalto', kube_apiserver_authorization_config_api_version) | list }}"
  when: kube_apiserver_use_authorization_config_file

# kubelet client μΈμ¦μ„œ μžλ™ νšŒμ „ κ΄€λ ¨ 이슈 보정
- name: Include kubelet client cert rotation fixes
  include_tasks: kubelet-fix-client-cert-rotation.yml
  when: kubelet_rotate_certificates

# 컨트둀 ν”Œλ ˆμΈ μΈμ¦μ„œ μˆ˜λ™ κ°±μ‹ μš© 슀크립트 μ„€μΉ˜
- name: Install script to renew K8S control plane certificates
  template:
    src: k8s-certs-renew.sh.j2
    dest: "{{ bin_dir }}/k8s-certs-renew.sh"
    mode: "0755"

# μΈμ¦μ„œ μžλ™ κ°±μ‹ μš© systemd service / timer μ„€μΉ˜
- name: Renew K8S control plane certificates monthly 1/2
  template:
    src: "{{ item }}.j2"
    dest: "/etc/systemd/system/{{ item }}"
    mode: "0644"
    validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:{{item}}'"
  with_items:
    - k8s-certs-renew.service
    - k8s-certs-renew.timer
  register: k8s_certs_units
  when: auto_renew_certificates

# μΈμ¦μ„œ μžλ™ κ°±μ‹  타이머 ν™œμ„±ν™”
- name: Renew K8S control plane certificates monthly 2/2
  systemd_service:
    name: k8s-certs-renew.timer
    enabled: true
    state: started
    daemon_reload: "{{ k8s_certs_units is changed }}"
  when: auto_renew_certificates

 

kubeadm을 μ€‘μ‹¬μœΌλ‘œ 컨트둀 ν”Œλ ˆμΈμ„ κ΅¬μ„±ν•˜κ³ , λ³΄μ•ˆ(인증/인가), μŠ€μΌ€μ€„λ§, μΈμ¦μ„œ 수λͺ…μ£ΌκΈ°κΉŒμ§€ ν¬ν•¨ν•΄μ„œ 운영 κ°€λŠ₯ν•œ μƒνƒœμ˜ Kubernetes 컨트둀 ν”Œλ ˆμΈμ„ κ΅¬μ„±ν•œλ‹€.

 

client

root@k8s-ctr:~/kubespray# tree roles/kubernetes/client/
roles/kubernetes/client/
β”œβ”€β”€ defaults
β”‚   └── main.yml
└── tasks
    └── main.yml

3 directories, 2 files
root@k8s-ctr:~/kubespray# 
cat roles/kubernetes/client/defaults/main.yml 
---
kubeconfig_localhost: false
kubeconfig_localhost_ansible_host: false
kubectl_localhost: false
artifacts_dir: "{{ inventory_dir }}/artifacts"

kube_config_dir: "/etc/kubernetes"
---
# μ™ΈλΆ€μ—μ„œ μ ‘κ·Όν•  kube-apiserver μ—”λ“œν¬μΈνŠΈλ₯Ό κ²°μ •
# μš°μ„ μˆœμœ„:
# 1) λͺ…μ‹œμ μΈ LB μ£Όμ†Œ
# 2) ansible_host 기반 μ£Όμ†Œ (μ˜΅μ…˜ ν™œμ„±ν™” μ‹œ)
# 3) κΈ°λ³Έ kube_apiserver_access_address
- name: Set external kube-apiserver endpoint
  set_fact:
    external_apiserver_address: >-
      {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined -%}
      {{ loadbalancer_apiserver.address }}
      {%- elif kubeconfig_localhost_ansible_host is defined and kubeconfig_localhost_ansible_host -%}
      {{ hostvars[groups['kube_control_plane'][0]].ansible_host }}
      {%- else -%}
      {{ kube_apiserver_access_address }}
      {%- endif -%}
    external_apiserver_port: >-
      {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and loadbalancer_apiserver.port is defined -%}
      {{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
      {%- else -%}
      {{ kube_apiserver_port }}
      {%- endif -%}
  tags:
    - facts

# ν˜„μž¬ μ‚¬μš©μž(λ˜λŠ” become된 μ‚¬μš©μž)의 kubeconfig 디렉터리 생성
# 기본적으둜 ~/.kube
- name: Create kube config dir for current/ansible become user
  file:
    path: "{{ ansible_env.HOME | default('/root') }}/.kube"
    mode: "0700"
    state: directory

# 컨트둀 ν”Œλ ˆμΈμ— μƒμ„±λœ admin.confλ₯Ό
# ν˜„μž¬ μ‚¬μš©μž ν™ˆ λ””λ ‰ν„°λ¦¬λ‘œ 볡사
- name: Copy admin kubeconfig to current/ansible become user home
  copy:
    src: "{{ kube_config_dir }}/admin.conf"
    dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
    remote_src: true
    mode: "0600"
    backup: true

# kubeconfigλ₯Ό 둜컬(Ansible μ‹€ν–‰ λ¨Έμ‹ )둜 κ°€μ Έμ˜¬ 경우
# μ‚°μΆœλ¬Ό 디렉터리 생성
- name: Create kube artifacts dir
  file:
    path: "{{ artifacts_dir }}"
    mode: "0750"
    state: directory
  delegate_to: localhost
  connection: local
  become: false
  run_once: true
  when: kubeconfig_localhost

# kube-apiserverκ°€ μ‹€μ œλ‘œ 열릴 λ•ŒκΉŒμ§€ λŒ€κΈ°
# kubeconfigλ₯Ό κ°€μ Έμ˜€κΈ° 전에 μ•ˆμ „μž₯치
- name: Wait for k8s apiserver
  wait_for:
    host: "{{ kube_apiserver_access_address }}"
    port: "{{ kube_apiserver_port }}"
    timeout: 180

# admin.conf νŒŒμΌμ„ 원격 λ…Έλ“œμ—μ„œ μ½μ–΄μ™€μ„œ
# base64 ν˜•νƒœλ‘œ 둜컬 λ³€μˆ˜μ— μ €μž₯
- name: Get admin kubeconfig from remote host
  slurp:
    src: "{{ kube_config_dir }}/admin.conf"
  run_once: true
  register: raw_admin_kubeconfig
  when: kubeconfig_localhost

# base64 → YAML λ³€ν™˜
- name: Convert kubeconfig to YAML
  set_fact:
    admin_kubeconfig: "{{ raw_admin_kubeconfig.content | b64decode | from_yaml }}"
  when: kubeconfig_localhost

# kubeconfig λ‚΄λΆ€ 정보 μˆ˜μ •
# - cluster name
# - context name
# - user name
# - apiserver μ£Όμ†Œλ₯Ό μ™ΈλΆ€ μ ‘κ·Ό μ£Όμ†Œλ‘œ ꡐ체
- name: Override username in kubeconfig
  set_fact:
    final_admin_kubeconfig: "{{ admin_kubeconfig
      | combine(override_cluster_name, recursive=true)
      | combine(override_context, recursive=true)
      | combine(override_user, recursive=true) }}"
  vars:
    cluster_infos: "{{ admin_kubeconfig['clusters'][0]['cluster'] }}"
    user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}"
    username: "kubernetes-admin-{{ cluster_name }}"
    context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}"
    override_cluster_name: >-
      {{ {'clusters': [{
          'cluster': (cluster_infos | combine({
            'server': 'https://' + (external_apiserver_address | ansible.utils.ipwrap) + ':' + (external_apiserver_port | string)
          })),
          'name': cluster_name
      }]} }}
    override_context: >-
      {{ {'contexts': [{
          'context': {'user': username, 'cluster': cluster_name},
          'name': context
      }], 'current-context': context} }}
    override_user: >-
      {{ {'users': [{'name': username, 'user': user_certs}]} }}
  when: kubeconfig_localhost

# μ΅œμ’… kubeconfigλ₯Ό Ansible μ‹€ν–‰ 머신에 μ €μž₯
- name: Write admin kubeconfig on ansible host
  copy:
    content: "{{ final_admin_kubeconfig | to_nice_yaml(indent=2) }}"
    dest: "{{ artifacts_dir }}/admin.conf"
    mode: "0600"
  delegate_to: localhost
  connection: local
  become: false
  run_once: true
  when: kubeconfig_localhost

# kubectl λ°”μ΄λ„ˆλ¦¬λ₯Ό 둜컬(Ansible 호슀트)둜 볡사
- name: Copy kubectl binary to ansible host
  fetch:
    src: "{{ bin_dir }}/kubectl"
    dest: "{{ artifacts_dir }}/kubectl"
    flat: true
    validate_checksum: false
  register: copy_binary_result
  until: copy_binary_result is not failed
  retries: 20
  become: false
  run_once: true
  when: kubectl_localhost

# λ‘œμ»¬μ—μ„œ λ°”λ‘œ kubectl을 μ“Έ 수 μžˆλ„λ‘ 헬퍼 슀크립트 생성
# ./kubectl.sh get nodes ν˜•νƒœλ‘œ μ‚¬μš©
- name: Create helper script kubectl.sh on ansible host
  copy:
    content: |
      #!/bin/bash
      ${BASH_SOURCE%/*}/kubectl --kubeconfig=${BASH_SOURCE%/*}/admin.conf "$@"
    dest: "{{ artifacts_dir }}/kubectl.sh"
    mode: "0755"
  become: false
  run_once: true
  delegate_to: localhost
  connection: local
  when: kubectl_localhost and kubeconfig_localhost

 

ν΄λŸ¬μŠ€ν„°λ₯Ό λˆ„κ°€, μ–΄λ””μ„œ, μ–΄λ–€ λ°©μ‹μœΌλ‘œ 관리할 것인지에 λŒ€ν•œ λ‚΄μš©μ„ μ •μ˜ν•œλ‹€.

 

 

cluster-roles

root@k8s-ctr:~/kubespray# tree roles/kubernetes-apps/cluster_roles/
roles/kubernetes-apps/cluster_roles/
β”œβ”€β”€ files
β”‚   └── k8s-cluster-critical-pc.yml
β”œβ”€β”€ tasks
β”‚   └── main.yml
└── templates
    β”œβ”€β”€ namespace.j2
    β”œβ”€β”€ node-crb.yml.j2
    └── vsphere-rbac.yml.j2

4 directories, 5 files
---
# kube-apiserverκ°€ μ‹€μ œλ‘œ μ‚΄μ•„μžˆλŠ”μ§€ 확인
# /healthz μ—”λ“œν¬μΈνŠΈκ°€ 200을 λ°˜ν™˜ν•  λ•ŒκΉŒμ§€ λŒ€κΈ°
# → RBAC λ¦¬μ†ŒμŠ€λ₯Ό μ μš©ν•˜κΈ° 전에 API μ„œλ²„ μ€€λΉ„ μ—¬λΆ€λ₯Ό 보μž₯
- name: Kubernetes Apps | Wait for kube-apiserver
  uri:
    url: "{{ kube_apiserver_endpoint }}/healthz"
    validate_certs: false
    client_cert: "{{ kube_apiserver_client_cert }}"
    client_key: "{{ kube_apiserver_client_key }}"
  register: result
  until: result.status == 200
  retries: 10
  delay: 6
  # 졜초 control-plane λ…Έλ“œμ—μ„œλ§Œ μ‹€ν–‰
  when: inventory_hostname == groups['kube_control_plane'][0]


# Nodeκ°€ ν΄λŸ¬μŠ€ν„°μ— join될 수 μžˆλ„λ‘ ClusterRoleBinding λ§€λ‹ˆνŽ˜μŠ€νŠΈ 생성
# system:nodes 쑰직(O)을 κ°€μ§„ μΈμ¦μ„œλ₯Ό κ°€μ§„ λ…Έλ“œλ“€μ΄
# API μ„œλ²„μ— 등둝(admit)될 수 있게 ν•΄μ£ΌλŠ” 바인딩
- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
  template:
    src: "node-crb.yml.j2"
    dest: "{{ kube_config_dir }}/node-crb.yml"
    mode: "0640"
  register: node_crb_manifest
  when:
    - rbac_enabled
    - inventory_hostname == groups['kube_control_plane'][0]


# μœ„μ—μ„œ μƒμ„±ν•œ ClusterRoleBinding을 μ‹€μ œλ‘œ 적용
# → kubelet μΈμ¦μ„œ(O=system:nodes)λ₯Ό κ°€μ§„ λ…Έλ“œλ“€μ΄
#   RBAC에 μ˜ν•΄ κ±°λΆ€λ˜μ§€ μ•Šκ³  μ •μƒμ μœΌλ‘œ λ“±λ‘λ˜λ„λ‘ ν•˜λŠ” μ›Œν¬μ–΄λΌμš΄λ“œ
- name: Apply workaround to allow all nodes with cert O=system:nodes to register
  kube:
    name: "kubespray:system:node"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "clusterrolebinding"
    filename: "{{ kube_config_dir }}/node-crb.yml"
    state: latest
  register: result
  until: result is succeeded
  retries: 10
  delay: 6
  when:
    - rbac_enabled
    - node_crb_manifest.changed
    - inventory_hostname == groups['kube_control_plane'][0]


# κ³Όκ±° λ²„μ „μ—μ„œ μ‚¬μš©λ˜λ˜ node webhook κ΄€λ ¨ ClusterRole 제거
# → 더 이상 ν•„μš” μ—†λŠ” κΆŒν•œ/ꡬ성 정리
- name: Kubernetes Apps | Remove old webhook ClusterRole
  kube:
    name: "system:node-webhook"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "clusterrole"
    state: absent
  when:
    - rbac_enabled
    - inventory_hostname == groups['kube_control_plane'][0]
  tags: node-webhook


# μœ„μ™€ 짝을 μ΄λ£¨λŠ” ClusterRoleBinding도 ν•¨κ»˜ 제거
- name: Kubernetes Apps | Remove old webhook ClusterRoleBinding
  kube:
    name: "system:node-webhook"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "clusterrolebinding"
    state: absent
  when:
    - rbac_enabled
    - inventory_hostname == groups['kube_control_plane'][0]
  tags: node-webhook


# ν΄λŸ¬μŠ€ν„° 핡심 μ»΄ν¬λ„ŒνŠΈμš© PriorityClass λ§€λ‹ˆνŽ˜μŠ€νŠΈ 볡사
# schedulerκ°€ νŒŒλ“œλ₯Ό λ°€μ–΄λ‚Ό λ•Œ κ°€μž₯ λ§ˆμ§€λ§‰κΉŒμ§€ λ³΄ν˜Έλ°›λŠ” μš°μ„ μˆœμœ„
- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
  copy:
    src: k8s-cluster-critical-pc.yml
    dest: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
    mode: "0640"
  # control-plane 쀑 λ§ˆμ§€λ§‰ λ…Έλ“œμ—μ„œλ§Œ μ‹€ν–‰
  when: inventory_hostname == groups['kube_control_plane'] | last


# k8s-cluster-critical PriorityClass 생성
# → control-plane, λ„€νŠΈμ›Œν¬, 핡심 μ• λ“œμ˜¨λ“€μ΄
#   λ¦¬μ†ŒμŠ€ μ••λ°• μƒν™©μ—μ„œλ„ λ¨Όμ € μ£½μ§€ μ•Šλ„λ‘ 보μž₯
- name: PriorityClass | Create k8s-cluster-critical
  kube:
    name: k8s-cluster-critical
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "PriorityClass"
    filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
    state: latest
  register: result
  until: result is succeeded
  retries: 10
  delay: 6
  when: inventory_hostname == groups['kube_control_plane'] | last

 

kube-apiserverκ°€ μ€€λΉ„λ˜μ—ˆλŠ”μ§€ ν™•μΈν•œ λ’€, λ…Έλ“œ μΈμ¦μ„œ(O=system:nodes)λ₯Ό κ°€μ§„ kubelet듀이 RBAC에 λ§‰νžˆμ§€ μ•Šκ³  ν΄λŸ¬μŠ€ν„°μ— μ •μƒμ μœΌλ‘œ 등둝될 수 μžˆλ„λ‘ ClusterRoleBinding을 λ³΄μ™„ν•œλ‹€.

 

 

kubernetes-apps

root@k8s-ctr:~/kubespray# tree roles/kubernetes-apps/ -L 1
roles/kubernetes-apps/
β”œβ”€β”€ ansible
β”œβ”€β”€ argocd
β”œβ”€β”€ cluster_roles
β”œβ”€β”€ common_crds
β”œβ”€β”€ container_engine_accelerator
β”œβ”€β”€ container_runtimes
β”œβ”€β”€ csi_driver
β”œβ”€β”€ defaults
β”œβ”€β”€ external_cloud_controller
β”œβ”€β”€ external_provisioner
β”œβ”€β”€ helm
β”œβ”€β”€ ingress_controller
β”œβ”€β”€ kubelet-csr-approver
β”œβ”€β”€ meta
β”œβ”€β”€ metallb
β”œβ”€β”€ metrics_server
β”œβ”€β”€ node_feature_discovery
β”œβ”€β”€ persistent_volumes
β”œβ”€β”€ policy_controller
β”œβ”€β”€ registry
β”œβ”€β”€ scheduler_plugins
β”œβ”€β”€ snapshots
└── vars

24 directories, 0 files

 

이번 μ‹€μŠ΅ λ•Œ μ„€μ •ν•œ app인 coredns, helm, metrics serverλ₯Ό μ‚΄νŽ΄λ³΄λ©΄...

 

root@k8s-ctr:~/kubespray# kubectl get pod -n kube-system -l app.kubernetes.io/name=metrics-server
NAME                             READY   STATUS    RESTARTS   AGE
metrics-server-7cd7f9897-d5qnw   1/1     Running   0          3d2h
root@k8s-ctr:~/kubespray# helm version
version.BuildInfo{Version:"v3.18.4", GitCommit:"d80839cf37d860c8aa9a0503fe463278f26cd5e2", GitTreeState:"clean", GoVersion:"go1.24.4"}
root@k8s-ctr:~/kubespray# kubectl get deployment -n kube-system coredns dns-autoscaler -o wide
NAME             READY   UP-TO-DATE   AVAILABLE   AGE    CONTAINERS   IMAGES                                                       SELECTOR
coredns          1/1     1            1           3d2h   coredns      registry.k8s.io/coredns/coredns:v1.12.0                      k8s-app=kube-dns
dns-autoscaler   1/1     1            1           3d2h   autoscaler   registry.k8s.io/cpa/cluster-proportional-autoscaler:v1.8.8   k8s-app=dns-autoscaler

 

λ₯Ό 확인할 수 μžˆλ‹€.

 

Kubespray μ‹€μŠ΅μ„ λ”°λΌν•΄λ³΄λ©΄μ„œ λŠλ‚€ 점은 전체 ꡬ쑰가 ꡉμž₯히 λ°©λŒ€ν•΄μ„œ 문제 ν•˜λ‚˜λ§Œ μž‘μ•„λ„ 디버깅에 κ½€ μ‹œκ°„μ΄ λ“€ 수 μžˆκ² λ‹€λŠ” 생각이 λ¨Όμ € λ“€μ—ˆλ‹€.
그리고 λ‘€κ³Ό νƒœμŠ€ν¬κ°€ μ΄˜μ΄˜ν•˜κ²Œ μͺΌκ°œμ Έ μžˆμ–΄μ„œ kubespray_install.log λ₯Ό κΈ°μ€€μœΌλ‘œ νƒœμŠ€ν¬λ₯Ό ν•˜λ‚˜μ”© 따라가닀 보면 μ–΄λ””μ„œ 무엇을 ν•˜λŠ”μ§€λŠ” κ²°κ΅­ 좔적이 κ°€λŠ₯ν•œ 것은 쒋은 것 κ°™κΈ΄ν•œλ°...

λ‚΄κ°€ μ§  μ½”λ“œκ°€ μ•„λ‹ˆλ©΄ ν•΄μ„ν•˜κΈ° μ’€ λ‚œν•΄ν•  μˆ˜λ„ μžˆκ² λ‹€λŠ” 생각이 λ“€μ—ˆλ‹€ γ…Žγ……γ…Ž (λ­”κ°€ μ„€μΉ˜ν•  뢀뢄이 μ–΄λŠ μ½”λ“œμ— μžˆλŠ”μ§€ λ”°λΌκ°€λŠ”κ²Œ μͺΌνΌ μ–΄λ €μš΄ λŠλ‚Œ..)