Installing Kubernetes on LXC
This example shows an installation on top of LXC. For any even remotely serious use this is completely unacceptable: you need KVM plus additional security measures, and on top of that, installing on KVM is much, much simpler. LXC does not provide adequate isolation here, because correct operation requires mounting /sys and /proc from the host, and those are visible to every LXC instance. In addition, this example requires a PRIVILEGED LXC container. LXC does, however, make it relatively easy to set up a test infrastructure without the overhead of KVM, so some parts of this example are specific to running under LXC.
Host
First, let's prepare the host system.
nano /etc/modules
overlay
nf_nat
br_netfilter
xt_conntrack
rbd
fuse
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
iptable_nat
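The /etc/modules entries are only loaded at boot. To load the same modules on the running host right away, something along these lines works (module names taken from the list above):
# load the modules listed above without rebooting
for m in overlay nf_nat br_netfilter xt_conntrack rbd fuse ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh iptable_nat; do
    modprobe "$m"
done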
nano /etc/sysctl.d/35-lxc-kubernetes.conf
kernel.dmesg_restrict = 0
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
net.bridge.bridge-nf-call-iptables = 1
# --conntrack-max-per-core Default: 32768 * N
net.netfilter.nf_conntrack_max= 131072
# net.bridge.bridge-nf-call-arptables
# kernel.pid_max=100000
# user.max_user_namespaces=15000
vm.compact_memory = 1
vm.overcommit_memory = 1
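These sysctl values are likewise applied at boot; to load them immediately (br_netfilter must already be loaded, otherwise the net.bridge.* keys do not exist):
# re-read all sysctl configuration, including the new file
sysctl --system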
During the installation we will disable swap; you can re-enable it later, once all the containers are set up.
swapoff -a
Installing LXC
LANG=C MIRROR=https://deb.debian.org/debian lxc-create \
    -n kubemaster -t debian -- -r trixie
nano /var/lib/lxc/kubemaster/config
lxc.net.0.type = veth
lxc.net.0.flags = up
lxc.net.0.link = lxcbr11
lxc.net.0.name = eth121
lxc.net.0.veth.pair = veth121
lxc.net.0.ipv4.address = 10.11.11.121/24
lxc.net.0.hwaddr = 00:16:a9:7d:99:33
lxc.net.0.ipv4.gateway = 10.11.11.1
lxc.apparmor.profile = unconfined
lxc.apparmor.allow_nesting = 1
lxc.cap.drop =
lxc.cgroup.devices.allow = a
lxc.mount.auto = proc:rw sys:rw
lxc.cgroup2.devices.allow = c 1:11 rwm
lxc.mount.entry = /dev/kmsg dev/kmsg none defaults,bind,create=file
lxc.cgroup2.devices.allow = c 10:200 rwm
limits.memory.swap = true
linux.kernel_modules = ip_tables,ip6_tables,nf_nat,overlay,br_netfilter
lxc.mount.entry = /proc/sys/vm/overcommit_memory proc/sys/vm/overcommit_memory none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic proc/sys/kernel/panic none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic_on_oops proc/sys/kernel/panic_on_oops none defaults,bind,create=file
lxc.rootfs.path = dir:/var/lib/lxc/kubemaster/rootfs
# Common configuration
lxc.include = /usr/share/lxc/config/debian.common.conf
lxc.include = /usr/share/lxc/config/debian.userns.conf
# Container specific configuration
lxc.tty.max = 4
lxc.uts.name = kubemaster
lxc.arch = amd64
lxc.pty.max = 1024
lxc.start.order = 5
lxc.start.auto = 1
lxc.start.delay = 25
security.nesting = true
security.privileged = true
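Note that the config above assumes the host already has the lxcbr11 bridge carrying the 10.11.11.0/24 network. If your LXC setup does not create it for you, a minimal manual sketch (names and addresses taken from this example) would be:
# create the bridge used by the container configs above
ip link add name lxcbr11 type bridge
ip addr add 10.11.11.1/24 dev lxcbr11
ip link set lxcbr11 up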
iptables
# lxc kubemaster
iptables -t nat -A POSTROUTING -s 10.11.11.121/32 ! -d 10.11.11.0/24 -o eth0 -j MASQUERADE
iptables -A FORWARD -s 10.11.11.121/32 -i lxcbr1 -o eth0 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.121/32 -i eth0 -o lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -s 10.11.11.121/32 -i lxcbr1 -o lxcbr1 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.121/32 -o lxcbr1 -i lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
# lxc kubenode1
iptables -t nat -A POSTROUTING -s 10.11.11.125/32 ! -d 10.11.11.0/24 -o eth0 -j MASQUERADE
iptables -A FORWARD -s 10.11.11.125/32 -i lxcbr1 -o eth0 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.125/32 -i eth0 -o lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -s 10.11.11.125/32 -i lxcbr1 -o lxcbr1 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.125/32 -o lxcbr1 -i lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
# lxc kubenode2
iptables -t nat -A POSTROUTING -s 10.11.11.129/32 ! -d 10.11.11.0/24 -o eth0 -j MASQUERADE
iptables -A FORWARD -s 10.11.11.129/32 -i lxcbr1 -o eth0 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.129/32 -i eth0 -o lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -s 10.11.11.129/32 -i lxcbr1 -o lxcbr1 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.129/32 -o lxcbr1 -i lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
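These iptables rules live only in memory and are lost on a host reboot. One way to persist them, assuming the iptables-persistent package is installed, is to dump the current ruleset:
# save the current ruleset so it is restored on boot
iptables-save > /etc/iptables/rules.v4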
lxc-start -n kubemaster
Inside kubemaster
Now let's switch into the container.
lxc-attach -n kubemaster
nano /etc/apt/sources.list
# trixie
deb https://deb.debian.org/debian/ trixie main contrib non-free non-free-firmware
deb http://security.debian.org/debian-security/ trixie-security main contrib non-free non-free-firmware
deb https://deb.debian.org/debian/ trixie-updates main contrib non-free non-free-firmware
apt update
apt --no-install-recommends --no-install-suggests install \
iputils-ping net-tools htop mc nano curl wget \
ca-certificates apt-transport-https gpg locales \
binutils bash-completion dirmngr kmod ebtables ethtool
dpkg-reconfigure locales
Configuring hostname and hosts
printf 'kubemaster.web.webart4.me\n' > /etc/hostname
nano /etc/hosts
127.0.0.1 localhost
127.0.1.1 kubemaster
10.11.11.121 kubemaster.web.webart4.me
10.11.11.125 kubenode1.web.webart4.me
10.11.11.129 kubenode2.web.webart4.me
Installing containerd
cd /usr/local
wget https://github.com/containerd/containerd/releases/download/v2.2.0/containerd-2.2.0-linux-amd64.tar.gz
wget https://github.com/containerd/containerd/releases/download/v2.2.0/containerd-2.2.0-linux-amd64.tar.gz.sha256sum
sha256sum --check containerd-2.2.0-linux-amd64.tar.gz.sha256sum
tar -xzv -f containerd-2.2.0-linux-amd64.tar.gz
wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
Since the overlay module is already loaded at the host level, we disable loading it in the systemd unit.
nano containerd.service
[Service]
# ExecStartPre=-/sbin/modprobe overlay
mkdir -p /usr/local/lib/systemd/system/
mv containerd.service /usr/local/lib/systemd/system/
systemctl daemon-reload
systemctl enable --now containerd
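A quick check that the daemon is actually running, using the ctr client shipped in the same containerd tarball:
# both client and server versions should be printed
ctr version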
Installing runc - the container spawning and running tool
Download:
wget https://github.com/opencontainers/runc/releases/download/v1.3.3/runc.amd64
wget https://github.com/opencontainers/runc/releases/download/v1.3.3/runc.amd64.asc
wget https://github.com/opencontainers/runc/releases/download/v1.3.3/runc.sha256sum
gpg --verify runc.amd64.asc
gpg: Signature made Wed 05 Nov 2025 09:12:15 UTC
gpg:                using EDDSA key B64E4955B29FA3D463F2A9062897FAD2B7E9446F
gpg: Can't check signature: No public key
Import the missing public key:
gpg --keyserver keys.gnupg.net \
--search-keys B64E4955B29FA3D463F2A9062897FAD2B7E9446F
gpg: data source: http://185.125.188.26:11371
(1) Aleksa Sarai <cyphar@cyphar.com>
      263 bit EDDSA key 34401015D1D2D386, created: 2019-06-21
Keys 1-1 of 1 for "B64E4955B29FA3D463F2A9062897FAD2B7E9446F".  Enter number(s), N)ext, or Q)uit > 1
gpg: Total number processed: 1
gpg:               imported: 1
Verifying the signature:
gpg --verify runc.amd64.asc
gpg: assuming signed data in 'runc.amd64'
gpg: Signature made Wed 05 Nov 2025 09:12:15 UTC
gpg:                using EDDSA key B64E4955B29FA3D463F2A9062897FAD2B7E9446F
gpg: Good signature from "Aleksa Sarai <cyphar@cyphar.com>" [unknown]
gpg: Signature notation: manu=2,2.5+1.11,2,2
gpg: WARNING: This key is not certified with a trusted signature!
gpg:          There is no indication that the signature belongs to the owner.
Primary key fingerprint: C9C3 70B2 46B0 9F6D BCFC 744C 3440 1015 D1D2 D386
     Subkey fingerprint: B64E 4955 B29F A3D4 63F2 A906 2897 FAD2 B7E9 446F
Verify the checksum and install:
sha256sum --check runc.sha256sum
runc.amd64: OK
install -m 755 runc.amd64 /usr/local/sbin/runc
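Quick check that the installed binary works:
runc --version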
Installing CNI plugins
wget https://github.com/containernetworking/plugins/releases/download/v1.8.0/cni-plugins-linux-amd64-v1.8.0.tgz
wget https://github.com/containernetworking/plugins/releases/download/v1.8.0/cni-plugins-linux-amd64-v1.8.0.tgz.sha512
sha512sum --check cni-plugins-linux-amd64-v1.8.0.tgz.sha512
cni-plugins-linux-amd64-v1.8.0.tgz: OK
mkdir -p /opt/cni/bin
tar -xzv -f cni-plugins-linux-amd64-v1.8.0.tgz -C /opt/cni/bin
Configuring containerd
systemctl status containerd
mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml
Open the file: nano /etc/containerd/config.toml
and change the following two settings.
Edit the line: SystemdCgroup = true
[plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.runc.options]
  BinaryName = ''
  CriuImagePath = ''
  CriuWorkPath = ''
  IoGid = 0
  IoUid = 0
  NoNewKeyring = false
  Root = ''
  ShimCgroup = ''
  # SystemdCgroup = false
  SystemdCgroup = true
Add the line: sandbox_image = 'registry.k8s.io/pause:3.10'
[plugins.'io.containerd.grpc.v1.cri']
  disable_tcp_service = true
  stream_server_address = '127.0.0.1'
  stream_server_port = '0'
  stream_idle_timeout = '4h0m0s'
  enable_tls_streaming = false
  sandbox_image = 'registry.k8s.io/pause:3.10'
systemctl restart containerd
systemctl status containerd
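Optionally, you can confirm that the CRI plugins loaded after the restart (ctr comes from the containerd tarball installed earlier):
# the cri-related plugins should be listed with status "ok"
ctr plugins ls | grep cri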
Installing Kubernetes: kubelet, kubeadm, kubectl
mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key | \
    gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] \
https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /" | \
    tee /etc/apt/sources.list.d/kubernetes.list
apt update
apt install kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
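Verify the installed versions:
kubeadm version -o short
kubelet --version
kubectl version --client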
Temporarily switch to the HOST system and copy the kernel config file:
cp /boot/config-`uname -r` \
    /var/lib/lxc/kubemaster/rootfs/boot/config-`uname -r`
Now back inside the kubemaster container.
nano /etc/default/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
nano /etc/rc.local
#!/bin/sh -e
mount --make-rshared /
chmod +x /etc/rc.local
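The rc.local entry only takes effect on the next boot; to apply the shared mount propagation in the running container right away and verify it:
mount --make-rshared /
# the root mount should show "shared" propagation
findmnt -o TARGET,PROPAGATION /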
Removing temporary files and cleaning the cache
rm -f /usr/local/cni-plugins-linux-amd64-v1.8.0.tgz
rm -f /usr/local/cni-plugins-linux-amd64-v1.8.0.tgz.sha512
rm -f /usr/local/containerd-2.2.0-linux-amd64.tar.gz
rm -f /usr/local/containerd-2.2.0-linux-amd64.tar.gz.sha256sum
rm -f /usr/local/runc.amd64
rm -f /usr/local/runc.amd64.asc
rm -f /usr/local/runc.sha256sum
apt clean
Host:
lxc-stop -n kubemaster
Cloning and backup
cd /var/lib/lxc
time /usr/bin/tar \
    --use-compress-program="/usr/bin/pzstd -15 --keep --processes 3" \
    -pcvf kubemaster_clean.zstd kubemaster
# 254M kubemaster_clean.zstd
mkdir kubenode1
mkdir kubenode2
chmod ug=rwX,o-rwX kubenode1 kubenode2
cd /var/lib/lxc/kubemaster
find ./ -xdev -print0 | cpio -pa0V /var/lib/lxc/kubenode1
find ./ -xdev -print0 | cpio -pa0V /var/lib/lxc/kubenode2
nano /var/lib/lxc/kubenode1/config
A sample kubenode1 config is given at the end of the article.
nano /var/lib/lxc/kubenode2/config
A sample kubenode2 config is given at the end of the article.
kubenode1
lxc-attach -n kubenode1
printf 'kubenode1.web.webart4.me\n' > /etc/hostname
nano /etc/hosts
127.0.0.1 localhost
127.0.1.1 kubenode1
10.11.11.121 kubemaster.web.webart4.me
10.11.11.125 kubenode1.web.webart4.me
10.11.11.129 kubenode2.web.webart4.me
kubenode2
lxc-attach -n kubenode2
printf 'kubenode2.web.webart4.me\n' > /etc/hostname
nano /etc/hosts
127.0.0.1 localhost
127.0.1.1 kubenode2
10.11.11.121 kubemaster.web.webart4.me
10.11.11.125 kubenode1.web.webart4.me
10.11.11.129 kubenode2.web.webart4.me
Restart
lxc-stop --reboot --name kubemaster
lxc-stop --reboot --name kubenode1
lxc-stop --reboot --name kubenode2 [--nowait | --kill]
Initializing the cluster on kubemaster
lxc-attach -n kubemaster
kubeadm init --control-plane-endpoint=10.11.11.121:6443 --apiserver-advertise-address=10.11.11.121 --pod-network-cidr=10.244.0.0/16
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
  export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 10.11.11.121:6443 --token qejrqw.uwer98qy6v7xt599 \
--discovery-token-ca-cert-hash sha256:52d72e6e39aee3127d9a649aeff25fff17c6ff5be2c3f43f6bc126631718bf1e \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.11.11.121:6443 --token qejrqw.uwer98qy6v7xt599 \
--discovery-token-ca-cert-hash sha256:52d72e6e39aee3127d9a649aeff25fff17c6ff5be2c3f43f6bc126631718bf1e
Disable the swap error and configure swap usage.
Again, this makes sense for the LXC setup, because all the containers inherit the swap state from the host,
and I would rather not disable it on the host.
nano /var/lib/kubelet/config.yaml
failSwapOn: false
featureGates:
  NodeSwap: true
memorySwap:
  swapBehavior: LimitedSwap
# memorySwap: {}
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) -R $HOME/.kube
chmod -R ug=rx,o-rwX $HOME/.kube
Installing the Pod network - Flannel
cd /usr/src/
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
nano kube-flannel.yml
# add the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT variables below POD_NAMESPACE
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: KUBERNETES_SERVICE_HOST
          value: '10.11.11.121'
        - name: KUBERNETES_SERVICE_PORT
          value: '6443'
kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
serviceaccount/flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
systemctl enable --now kubelet
systemctl restart kubelet
systemctl status kubelet
kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubemaster.web.webart4.me Ready control-plane 7m35s v1.34.2
ifconfig -a
eth121: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.11.11.121  netmask 255.255.255.0  broadcast 0.0.0.255
        inet6 fe80::216:3eff:fe7d:ba33  prefixlen 64  scopeid 0x20<link>
        ether 00:16:3e:7d:ba:33  txqueuelen 1000  (Ethernet)
        RX packets 28035  bytes 63635564 (60.6 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 14864  bytes 1100501 (1.0 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.244.0.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::f46a:59ff:feee:f2a0  prefixlen 64  scopeid 0x20<link>
        ether f6:6a:59:ee:f2:a0  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 12  overruns 0  carrier 0  collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 293302  bytes 68241882 (65.0 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 293302  bytes 68241882 (65.0 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
kubenode1
lxc-attach -n kubenode1
ping 10.11.11.121
kubeadm join 10.11.11.121:6443 \
--token qejrqw.uwer98qy6v7xt599 \
--discovery-token-ca-cert-hash sha256:52d72e6e39aee3127d9a649aeff25fff17c6ff5be2c3f43f6bc126631718bf1e
[preflight] Running pre-flight checks
[preflight] Reading configuration from the "kubeadm-config" ConfigMap in namespace "kube-system"...
[preflight] Use 'kubeadm init phase upload-config kubeadm --config your-config-file' to re-upload it.
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 501.132212ms
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Open the file nano /var/lib/kubelet/config.yaml
and add the same settings as on kubemaster.
systemctl enable --now kubelet
systemctl restart kubelet
systemctl status kubelet
kubenode2
lxc-attach -n kubenode2
ping 10.11.11.121
kubeadm join 10.11.11.121:6443 \
--token qejrqw.uwer98qy6v7xt599 \
--discovery-token-ca-cert-hash sha256:a1da2fa2fe3aa3df252c183040810924a119f5ea051850d6a91882af6bf5ccf7
Same here.
nano /var/lib/kubelet/config.yaml
Add the same lines as on kubemaster.
systemctl enable --now kubelet
systemctl restart kubelet
systemctl status kubelet
Checking the joined nodes and cluster status
kubemaster
kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubemaster.web.webart4.me Ready control-plane 54m v1.34.2
kubenode1.web.webart4.me Ready <none> 9m55s v1.34.2
kubenode2.web.webart4.me Ready <none> 6m46s v1.34.2
kubectl get pods --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-flannel kube-flannel-ds-nvrht 1/1 Running 10 ( 24m ago) 105m 10.11.11.125 kubenode1.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-t95hr 1/1 Running 1 ( 14m ago) 17m 10.11.11.129 kubenode2.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-zdml5 1/1 Running 4 ( 22m ago) 130m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system coredns-66bc5c9577-qkjpk 1/1 Running 8 ( 24m ago) 132m 10.244.1.18 kubenode1.web.webart4.me <none> <none>
kube-system coredns-66bc5c9577-w2fh6 1/1 Running 8 ( 24m ago) 132m 10.244.1.19 kubenode1.web.webart4.me <none> <none>
kube-system etcd-kubemaster.web.webart4.me 1/1 Running 11 ( 22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-apiserver-kubemaster.web.webart4.me 1/1 Running 11 ( 22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-controller-manager-kubemaster.web.webart4.me 1/1 Running 11 ( 22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-proxy-76qnk 1/1 Running 1 ( 14m ago) 17m 10.11.11.129 kubenode2.web.webart4.me <none> <none>
kube-system kube-proxy-9mcc4 1/1 Running 47 ( 24m ago) 105m 10.11.11.125 kubenode1.web.webart4.me <none> <none>
kube-system kube-proxy-p8pkc 1/1 Running 39 ( 22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-scheduler-kubemaster.web.webart4.me 1/1 Running 11 ( 22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
Checking the Kubernetes cluster
Cluster taints
kubectl describe node | grep -E "NoSchedule|Taints|Name\:"
Name: kubemaster.web.webart4.me
Taints: node-role.kubernetes.io/control-plane:NoSchedule
node.kubernetes.io/disk-pressure:NoSchedule
Name: kubenode1.web.webart4.me
Taints: node.kubernetes.io/disk-pressure:NoSchedule
Name: kubenode2.web.webart4.me
Taints: node.kubernetes.io/disk-pressure:NoSchedule
nano /var/lib/kubelet/config.yaml
evictionSoft:
  memory.available: "100Mi"
  nodefs.available: "100Mi"
  nodefs.inodesFree: "1%"
  imagefs.available: "100Mi"
  imagefs.inodesFree: "1%"
evictionHard:
  memory.available: "100Mi"
  nodefs.available: "100Mi"
  nodefs.inodesFree: "1%"
  imagefs.available: "100Mi"
  imagefs.inodesFree: "1%"
evictionSoftGracePeriod:
  memory.available: 2m
  nodefs.available: 2m
  nodefs.inodesFree: 2m
  imagefs.available: 2m
  imagefs.inodesFree: 2m
systemctl restart kubelet
Correct cluster state
kubectl describe node | grep -E "NoSchedule|Taints|Name\:"
Name: kubemaster.web.webart4.me
Taints: node-role.kubernetes.io/control-plane:NoSchedule
Name: kubenode1.web.webart4.me
Taints: <none>
Name: kubenode2.web.webart4.me
Taints: <none>
First deployment on the new cluster
nano nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
kubectl apply -f nginx-deployment.yaml
kubectl rollout status deployment/nginx-deployment
deployment "nginx-deployment" successfully rolled out
kubectl get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 1/1 1 1 20s
kubectl get pods --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
default nginx-deployment-bf744486c-5zfqz 1/1 Running 0 14m 10.244.2.2 kubenode2.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-nvrht 1/1 Running 11 ( 3h2m ago) 5h44m 10.11.11.125 kubenode1.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-t95hr 1/1 Running 2 ( 3h2m ago) 4h16m 10.11.11.129 kubenode2.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-zdml5 1/1 Running 5 ( 3h2m ago) 6h9m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system coredns-66bc5c9577-cmtz5 1/1 Running 0 29m 10.244.1.23 kubenode1.web.webart4.me <none> <none>
kube-system coredns-66bc5c9577-j5l49 1/1 Running 0 29m 10.244.1.22 kubenode1.web.webart4.me <none> <none>
kube-system etcd-kubemaster.web.webart4.me 1/1 Running 12 ( 3h2m ago) 6h11m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-apiserver-kubemaster.web.webart4.me 1/1 Running 12 ( 3h2m ago) 6h11m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-controller-manager-kubemaster.web.webart4.me 1/1 Running 12 ( 3h2m ago) 6h11m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-proxy-76qnk 1/1 Running 2 ( 3h2m ago) 4h16m 10.11.11.129 kubenode2.web.webart4.me <none> <none>
kube-system kube-proxy-9mcc4 1/1 Running 48 ( 3h2m ago) 5h44m 10.11.11.125 kubenode1.web.webart4.me <none> <none>
kube-system kube-proxy-p8pkc 1/1 Running 40 ( 3h2m ago) 6h10m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-scheduler-kubemaster.web.webart4.me 1/1 Running 12 ( 3h2m ago) 6h11m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
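Before removing the test deployment you can optionally check that nginx actually answers. A minimal sketch (the NodePort is assigned automatically and will differ in your case):
kubectl expose deployment nginx-deployment --type=NodePort --port=80
kubectl get svc nginx-deployment
# then curl http://NODE_IP:ASSIGNED_NODEPORT from any of the containers
kubectl delete svc nginx-deployment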
Deleting the test deployment
kubectl delete deployment nginx-deployment --namespace default
The cherry on top - Helm
lxc-attach -n kubemaster
cd /usr/src/
wget https://get.helm.sh/helm-v4.0.1-linux-amd64.tar.gz
sha256sum -c <(printf 'e0365548f01ed52a58a1181ad310b604a3244f59257425bb1739499372bdff60  helm-v4.0.1-linux-amd64.tar.gz')
tar -xzv -f helm-v4.0.1-linux-amd64.tar.gz
install -m 755 linux-amd64/helm /usr/local/bin/helm
helm version
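A small, purely illustrative Helm example - adding a chart repository and searching it (the bitnami repository is just an example, it is not used anywhere in this setup):
# add a public chart repo and look for nginx charts
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm search repo nginx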
A few more commands
kubectl get nodes
kubectl get svc --all-namespaces
kubectl get deploy --all-namespaces
kubectl get rs --all-namespaces
kubectl get pods --all-namespaces
kubectl config view
kubectl config current-context
kubeadm token list
kubeadm token create
kubeadm token create --print-join-command
How to get the discovery-token-ca-cert-hash string
cat /etc/kubernetes/pki/ca.crt | \
openssl x509 -pubkey | \
openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'
kubenode1/config
lxc.net.0.type = veth
lxc.net.0.flags = up
lxc.net.0.link = lxcbr11
lxc.net.0.name = eth125
lxc.net.0.veth.pair = veth125
lxc.net.0.ipv4.address = 10.11.11.125/24
lxc.net.0.hwaddr = 00:16:a9:7d:99:25
lxc.net.0.ipv4.gateway = 10.11.11.1
lxc.apparmor.profile = unconfined
lxc.apparmor.allow_nesting = 1
lxc.cap.drop =
lxc.cgroup.devices.allow = a
lxc.mount.auto = proc:rw sys:rw
lxc.cgroup2.devices.allow = c 1:11 rwm
lxc.mount.entry = /dev/kmsg dev/kmsg none defaults,bind,create=file
lxc.cgroup2.devices.allow = c 10:200 rwm
limits.memory.swap = true
linux.kernel_modules = ip_tables,ip6_tables,nf_nat,overlay,br_netfilter
lxc.mount.entry = /proc/sys/vm/overcommit_memory proc/sys/vm/overcommit_memory none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic proc/sys/kernel/panic none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic_on_oops proc/sys/kernel/panic_on_oops none defaults,bind,create=file
lxc.rootfs.path = dir:/var/lib/lxc/kubenode1/rootfs
# Common configuration
lxc.include = /usr/share/lxc/config/debian.common.conf
lxc.include = /usr/share/lxc/config/debian.userns.conf
# Container specific configuration
lxc.tty.max = 4
lxc.uts.name = kubenode1
lxc.arch = amd64
lxc.pty.max = 1024
lxc.start.order = 5
lxc.start.auto = 1
lxc.start.delay = 25
security.nesting = true
security.privileged = true
kubenode2/config
lxc.net.0.type = veth
lxc.net.0.flags = up
lxc.net.0.link = lxcbr11
lxc.net.0.name = eth129
lxc.net.0.veth.pair = veth129
lxc.net.0.ipv4.address = 10.11.11.129/24
lxc.net.0.hwaddr = 00:16:a9:7d:99:29
lxc.net.0.ipv4.gateway = 10.11.11.1
lxc.apparmor.profile = unconfined
lxc.apparmor.allow_nesting = 1
lxc.cap.drop =
lxc.cgroup.devices.allow = a
lxc.mount.auto = proc:rw sys:rw
lxc.cgroup2.devices.allow = c 1:11 rwm
lxc.mount.entry = /dev/kmsg dev/kmsg none defaults,bind,create=file
lxc.cgroup2.devices.allow = c 10:200 rwm
limits.memory.swap = true
linux.kernel_modules = ip_tables,ip6_tables,nf_nat,overlay,br_netfilter
lxc.mount.entry = /proc/sys/vm/overcommit_memory proc/sys/vm/overcommit_memory none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic proc/sys/kernel/panic none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic_on_oops proc/sys/kernel/panic_on_oops none defaults,bind,create=file
lxc.rootfs.path = dir:/var/lib/lxc/kubenode2/rootfs
# Common configuration
lxc.include = /usr/share/lxc/config/debian.common.conf
lxc.include = /usr/share/lxc/config/debian.userns.conf
# Container specific configuration
lxc.tty.max = 4
lxc.uts.name = kubenode2
lxc.arch = amd64
lxc.pty.max = 1024
lxc.start.order = 5
lxc.start.auto = 1
lxc.start.delay = 25
security.nesting = true
security.privileged = true
2025-11-21 21:25 +0000