Installing Kubernetes on LXC

This example shows an installation on LXC. For any remotely serious use this approach is unacceptable: you should use KVM with additional security measures, and a KVM installation is also much simpler. LXC cannot provide adequate security here, because the setup requires mounting /sys and /proc from the host, which are then reachable from the LXC instances, and it requires a PRIVILEGED container. What LXC does give you is an easy way to stand up a test infrastructure without the overhead of KVM, so some steps in this example are specific to running under LXC.
On the HOST system

First, let's make preparations on the host system.
nano /etc/modules
overlay
nf_nat
br_netfilter
xt_conntrack
rbd
fuse
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
iptable_nat
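Note that /etc/modules is only read at boot. To load the modules right away without rebooting the host, a minimal sketch (plain modprobe, nothing exotic):
# Load the same modules immediately
for m in overlay nf_nat br_netfilter xt_conntrack rbd fuse \
         ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh iptable_nat; do
    modprobe "$m"
done
# Quick check that the key ones are present
lsmod | grep -E 'overlay|br_netfilter|ip_vs'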
nano /etc/sysctl.d/35-lxc-kubernetes.conf
kernel.dmesg_restrict = 0
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
net.bridge.bridge-nf-call-iptables = 1
# --conntrack-max-per-core Default: 32768 * N
net.netfilter.nf_conntrack_max = 131072
# net.bridge.bridge-nf-call-arptables
# kernel.pid_max=100000
# user.max_user_namespaces=15000
vm.compact_memory = 1
vm.overcommit_memory = 1
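To apply the new values without a reboot (note that the net.bridge.* keys only exist once br_netfilter is loaded):
# Reload all sysctl configuration fragments, including the file created above
sysctl --system
# Or apply only this file
sysctl -p /etc/sysctl.d/35-lxc-kubernetes.conf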
During installation, we’ll disable swap; you can turn it back on later after setting up all the virtual machines.
swapoff -a
Setup LXC
LANG=C MIRROR=https://deb.debian.org/debian lxc-create \
  -n kubemaster -t debian -- -r trixie
nano /var/lib/lxc/kubemaster/config
lxc.net.0.type = veth
lxc.net.0.flags = up
lxc.net.0.link = lxcbr11
lxc.net.0.name = eth121
lxc.net.0.veth.pair = veth121
lxc.net.0.ipv4.address = 10.11.11.121/24
lxc.net.0.hwaddr = 00:16:a9:7d:99:33
lxc.net.0.ipv4.gateway = 10.11.11.1
lxc.apparmor.profile = unconfined
lxc.apparmor.allow_nesting = 1
lxc.cap.drop =
lxc.cgroup.devices.allow = a
lxc.mount.auto = proc:rw sys:rw
lxc.cgroup2.devices.allow = c 1:11 rwm
lxc.mount.entry = /dev/kmsg dev/kmsg none defaults,bind,create=file
lxc.cgroup2.devices.allow = c 10:200 rwm
limits.memory.swap = true
linux.kernel_modules = ip_tables,ip6_tables,nf_nat,overlay,br_netfilter
lxc.mount.entry = /proc/sys/vm/overcommit_memory proc/sys/vm/overcommit_memory none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic proc/sys/kernel/panic none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic_on_oops proc/sys/kernel/panic_on_oops none defaults,bind,create=file
lxc.rootfs.path = dir:/var/lib/lxc/kubemaster/rootfs
# Common configuration
lxc.include = /usr/share/lxc/config/debian.common.conf
lxc.include = /usr/share/lxc/config/debian.userns.conf
# Container specific configuration
lxc.tty.max = 4
lxc.uts.name = kubemaster
lxc.arch = amd64
lxc.pty.max = 1024
lxc.start.order = 5
lxc.start.auto = 1
lxc.start.delay = 25
security.nesting = true
security.privileged = true
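The container config above assumes a host bridge lxcbr11 with address 10.11.11.1/24 already exists. If it does not, a minimal sketch with iproute2 (the names and addresses only mirror this example, adjust them to your network; the commands are not persistent across host reboots):
# Create the bridge the containers attach to
ip link add name lxcbr11 type bridge
ip addr add 10.11.11.1/24 dev lxcbr11
ip link set lxcbr11 up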
iptables
# lxc kubemaster
iptables -t nat -A POSTROUTING -s 10.11.11.121/32 ! -d 10.11.11.0/24 -o eth0 -j MASQUERADE
iptables -A FORWARD -s 10.11.11.121/32 -i lxcbr1 -o eth0 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.121/32 -i eth0 -o lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -s 10.11.11.121/32 -i lxcbr1 -o lxcbr1 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.121/32 -o lxcbr1 -i lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
# lxc kubenode1
iptables -t nat -A POSTROUTING -s 10.11.11.125/32 ! -d 10.11.11.0/24 -o eth0 -j MASQUERADE
iptables -A FORWARD -s 10.11.11.125/32 -i lxcbr1 -o eth0 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.125/32 -i eth0 -o lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -s 10.11.11.125/32 -i lxcbr1 -o lxcbr1 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.125/32 -o lxcbr1 -i lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
# lxc kubenode2
iptables -t nat -A POSTROUTING -s 10.11.11.129/32 ! -d 10.11.11.0/24 -o eth0 -j MASQUERADE
iptables -A FORWARD -s 10.11.11.129/32 -i lxcbr1 -o eth0 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.129/32 -i eth0 -o lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -s 10.11.11.129/32 -i lxcbr1 -o lxcbr1 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -d 10.11.11.129/32 -o lxcbr1 -i lxcbr1 -m state --state RELATED,ESTABLISHED -j ACCEPT
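These rules live only in the running kernel; to have them restored after a host reboot, one option on Debian is the iptables-persistent package (a sketch, use whatever persistence mechanism you already have in place):
apt install iptables-persistent
# Save the current ruleset so it is loaded again at boot
iptables-save > /etc/iptables/rules.v4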
lxc-start -n kubemaster
Inside kubemaster

Now let's switch into the container.
lxc-attach -n kubemaster
nano /etc/apt/sources.list
# trixie
deb https://deb.debian.org/debian/ trixie main contrib non-free non-free-firmware
deb http://security.debian.org/debian-security/ trixie-security main contrib non-free non-free-firmware
deb https://deb.debian.org/debian/ trixie-updates main contrib non-free non-free-firmware
apt update
apt --no-install-recommends --no-install-suggests install \
iputils-ping net-tools htop mc nano curl wget \
ca-certificates apt-transport-https gpg locales \
binutils bash-completion dirmngr kmod ebtables ethtool
dpkg-reconfigure locales
Setting up hostname & hosts

printf 'kubemaster.web.webart4.me\n' > /etc/hostname
nano /etc/hosts
127.0.0.1 localhost
127.0.1.1 kubemaster
10.11.11.121 kubemaster.web.webart4.me
10.11.11.125 kubenode1.web.webart4.me
10.11.11.129 kubenode2.web.webart4.me
Install containerd
cd /usr/local
wget https://github.com/containerd/containerd/releases/download/v2.2.0/containerd-2.2.0-linux-amd64.tar.gz
wget https://github.com/containerd/containerd/releases/download/v2.2.0/containerd-2.2.0-linux-amd64.tar.gz.sha256sum
sha256sum --check containerd-2.2.0-linux-amd64.tar.gz.sha256sum
tar -xzv -f containerd-2.2.0-linux-amd64.tar.gz
wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
Since the overlay module is already loaded at the host level, we disable its loading in the systemd unit.
nano containerd.service
[Service]
# ExecStartPre=-/sbin/modprobe overlay
mkdir -p /usr/local/lib/systemd/system/
mv containerd.service /usr/local/lib/systemd/system/
systemctl daemon-reload
systemctl enable --now containerd
Install runc - a tool for spawning and running containers

Download:
wget https://github.com/opencontainers/runc/releases/download/v1.3.3/runc.amd64
wget https://github.com/opencontainers/runc/releases/download/v1.3.3/runc.amd64.asc
wget https://github.com/opencontainers/runc/releases/download/v1.3.3/runc.sha256sum
gpg --verify runc.amd64.asc
gpg: Signature made Wed 05 Nov 2025 09:12:15 UTC
gpg: using EDDSA key B64E4955B29FA3D463F2A9062897FAD2B7E9446F
gpg: Can't check signature: No public key
Import the public key:
gpg --keyserver keys.gnupg.net \
--search-keys B64E4955B29FA3D463F2A9062897FAD2B7E9446F
gpg: data source: http://185.125.188.26:11371
(1)     Aleksa Sarai <cyphar@cyphar.com>
          263 bit EDDSA key 34401015D1D2D386, created: 2019-06-21
Keys 1-1 of 1 for "B64E4955B29FA3D463F2A9062897FAD2B7E9446F".  Enter number(s), N)ext, or Q)uit > 1
gpg: Total number processed: 1
gpg: imported: 1
Verify:
gpg --verify runc.amd64.asc
gpg: assuming signed data in 'runc.amd64'
gpg: Signature made Wed 05 Nov 2025 09:12:15 UTC
gpg: using EDDSA key B64E4955B29FA3D463F2A9062897FAD2B7E9446F
gpg: Good signature from "Aleksa Sarai <cyphar@cyphar.com>" [unknown]
gpg: Signature notation: manu = 2,2.5+1.11,2,2
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
Primary key fingerprint: C9C3 70B2 46B0 9F6D BCFC 744C 3440 1015 D1D2 D386
Subkey fingerprint: B64E 4955 B29F A3D4 63F2 A906 2897 FAD2 B7E9 446F
Check and install:
sha256sum --check runc.sha256sum
runc.amd64: OK
install -m 755 runc.amd64 /usr/local/sbin/runc
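A quick sanity check that the binary landed in the PATH:
runc --version
# Expect it to report version 1.3.3 if the release downloaded above was used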
Install CNI plugins
wget https://github.com/containernetworking/plugins/releases/download/v1.8.0/cni-plugins-linux-amd64-v1.8.0.tgz
wget https://github.com/containernetworking/plugins/releases/download/v1.8.0/cni-plugins-linux-amd64-v1.8.0.tgz.sha512
sha512sum --check cni-plugins-linux-amd64-v1.8.0.tgz.sha512
cni-plugins-linux-amd64-v1.8.0.tgz: OK
mkdir -p /opt/cni/bin
tar -xzv -f cni-plugins-linux-amd64-v1.8.0.tgz -C /opt/cni/bin
containerd config

systemctl status containerd
mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml
Open the file with nano /etc/containerd/config.toml
and change the following two settings.

Change the line SystemdCgroup = true:
[plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.runc.options]
BinaryName = ''
CriuImagePath = ''
CriuWorkPath = ''
IoGid = 0
IoUid = 0
NoNewKeyring = false
Root = ''
ShimCgroup = ''
# SystemdCgroup = false
SystemdCgroup = true
Add the line sandbox_image = 'registry.k8s.io/pause:3.10':
[plugins.'io.containerd.grpc.v1.cri']
disable_tcp_service = true
stream_server_address = '127.0.0.1'
stream_server_port = '0'
stream_idle_timeout = '4h0m0s'
enable_tls_streaming = false
sandbox_image = 'registry.k8s.io/pause:3.10'
systemctl restart containerd
systemctl status containerd
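To double-check that both edits are in place, grep the config file for the two changed settings (a simple verification, not part of the official procedure):
grep -nE 'SystemdCgroup|sandbox_image' /etc/containerd/config.toml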
Install Kubernetes: kubelet, kubeadm, kubectl
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key | \
gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] \
https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /" | \
tee /etc/apt/sources.list.d/kubernetes.list
apt update
apt install kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
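Confirm the installed versions and that the packages are really on hold:
kubeadm version -o short
kubectl version --client
kubelet --version
apt-mark showhold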
Temporarily switch to HOST system and copy kernel config file:
cp /boot/config-$(uname -r) \
  /var/lib/lxc/kubemaster/rootfs/boot/config-$(uname -r)
Now back to lxc kubemaster.
nano /etc/default/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
nano /etc/rc.local
#!/bin/sh -e
mount --make-rshared /
chmod +x /etc/rc.local
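The mount --make-rshared / line is there because kubelet expects shared mount propagation on the root mount, which an LXC container does not always get by default. After the next boot (or after running the command by hand) it can be verified with:
# PROPAGATION should say "shared"
findmnt -o TARGET,PROPAGATION /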
Remove temporary files and clear the cache

rm -f /usr/local/cni-plugins-linux-amd64-v1.8.0.tgz
rm -f /usr/local/cni-plugins-linux-amd64-v1.8.0.tgz.sha512
rm -f /usr/local/containerd-2.2.0-linux-amd64.tar.gz
rm -f /usr/local/containerd-2.2.0-linux-amd64.tar.gz.sha256sum
rm -f /usr/local/runc.amd64
rm -f /usr/local/runc.amd64.asc
rm -f /usr/local/runc.sha256sum
apt clean

On the host:
lxc-stop -n kubemaster
Clone and Backup
cd /var/lib/lxc
time /usr/bin/tar \
  --use-compress-program="/usr/bin/pzstd -15 --keep --processes 3" \
  -pcvf kubemaster_clean.zstd kubemaster
# 254M kubemaster_clean.zstd
mkdir kubenode1
mkdir kubenode2
chmod ug=rwX,o-rwX kubenode1 kubenode2
cd /var/lib/lxc/kubemaster
find ./ -xdev -print0 | cpio -pa0V /var/lib/lxc/kubenode1
find ./ -xdev -print0 | cpio -pa0V /var/lib/lxc/kubenode2
nano /var/lib/lxc/kubenode1/config
An example kubenode1 config is at the bottom of the article.
nano /var/lib/lxc/kubenode2/config
An example kubenode2 config is at the bottom of the article.
kubenode1

lxc-attach -n kubenode1
printf 'kubenode1.web.webart4.me\n' > /etc/hostname
nano /etc/hosts
127.0.0.1 localhost
127.0.1.1 kubenode1
10.11.11.121 kubemaster.web.webart4.me
10.11.11.125 kubenode1.web.webart4.me
10.11.11.129 kubenode2.web.webart4.me
kubenode2

lxc-attach -n kubenode2
printf 'kubenode2.web.webart4.me\n' > /etc/hostname
nano /etc/hosts
127.0.0.1 localhost
127.0.1.1 kubenode2
10.11.11.121 kubemaster.web.webart4.me
10.11.11.125 kubenode1.web.webart4.me
10.11.11.129 kubenode2.web.webart4.me
Restart

lxc-stop --reboot --name kubemaster
lxc-stop --reboot --name kubenode1
lxc-stop --reboot --name kubenode2 [--nowait | --kill]
Initialize the Cluster in kubemaster

lxc-attach -n kubemaster
kubeadm init --control-plane-endpoint=10.11.11.121:6443 --apiserver-advertise-address=10.11.11.121 --pod-network-cidr=10.244.0.0/16
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 10.11.11.121:6443 --token qejrqw.uwer98qy6v7xt599 \
--discovery-token-ca-cert-hash sha256:52d72e6e39aee3127d9a649aeff25fff17c6ff5be2c3f43f6bc126631718bf1e \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.11.11.121:6443 --token qejrqw.uwer98qy6v7xt599 \
--discovery-token-ca-cert-hash sha256:52d72e6e39aee3127d9a649aeff25fff17c6ff5be2c3f43f6bc126631718bf1e
Disable the swap error and configure the ability to use swap.
Again, this only makes sense in the LXC setup, because every container inherits the host's swap state, and I wouldn't want to disable swap on the host.
nano /var/lib/kubelet/config.yaml
failSwapOn: false
featureGates:
NodeSwap: true
memorySwap:
swapBehavior: LimitedSwap
# memorySwap: {}
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) -R $HOME/.kube
chmod -R ug=rx,o-rwxX $HOME/.kube
Install Pod Network - Flannel
cd /usr/src/
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
nano kube-flannel.yml
# add KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT variables under POD_NAMESPACE
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: KUBERNETES_SERVICE_HOST
          value: '10.11.11.121'
        - name: KUBERNETES_SERVICE_PORT
          value: '6443'
kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
serviceaccount/flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
systemctl enable --now kubelet
systemctl restart kubelet
systemctl status kubelet

kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubemaster.web.webart4.me Ready control-plane 7m35s v1.34.2
ifconfig -a
eth121: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.11.11.121  netmask 255.255.255.0  broadcast 0.0.0.255
        inet6 fe80::216:3eff:fe7d:ba33  prefixlen 64  scopeid 0x20<link>
        ether 00:16:3e:7d:ba:33  txqueuelen 1000  (Ethernet)
        RX packets 28035  bytes 63635564 (60.6 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 14864  bytes 1100501 (1.0 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.244.0.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::f46a:59ff:feee:f2a0  prefixlen 64  scopeid 0x20<link>
        ether f6:6a:59:ee:f2:a0  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 12  overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 293302  bytes 68241882 (65.0 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 293302  bytes 68241882 (65.0 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
kubenode1

lxc-attach -n kubenode1
ping 10.11.11.121
kubeadm join 10.11.11.121:6443 \
--token qejrqw.uwer98qy6v7xt599 \
--discovery-token-ca-cert-hash sha256:52d72e6e39aee3127d9a649aeff25fff17c6ff5be2c3f43f6bc126631718bf1e
[preflight] Running pre-flight checks
[preflight] Reading configuration from the "kubeadm-config" ConfigMap in namespace "kube-system"...
[preflight] Use 'kubeadm init phase upload-config kubeadm --config your-config-file' to re-upload it.
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 501.132212ms
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Open /var/lib/kubelet/config.yaml (nano /var/lib/kubelet/config.yaml) and add the same settings as on kubemaster.
systemctl enable --now kubelet
systemctl restart kubelet
systemctl status kubelet
kubenode2

lxc-attach -n kubenode2
ping 10.11.11.121
kubeadm join 10.11.11.121:6443 \
--token qejrqw.uwer98qy6v7xt599 \
--discovery-token-ca-cert-hash sha256:52d72e6e39aee3127d9a649aeff25fff17c6ff5be2c3f43f6bc126631718bf1e
It’s the same here.
nano /var/lib/kubelet/config.yaml
Add the same lines as on kubemaster.
systemctl enable --now kubelet
systemctl restart kubelet
systemctl status kubelet
Checking connected nodes and kubemaster cluster status

kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubemaster.web.webart4.me Ready control-plane 54m v1.34.2
kubenode1.web.webart4.me Ready <none> 9m55s v1.34.2
kubenode2.web.webart4.me Ready <none> 6m46s v1.34.2
kubectl get pods --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-flannel kube-flannel-ds-nvrht 1/1 Running 10 (24m ago) 105m 10.11.11.125 kubenode1.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-t95hr 1/1 Running 1 (14m ago) 17m 10.11.11.129 kubenode2.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-zdml5 1/1 Running 4 (22m ago) 130m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system coredns-66bc5c9577-qkjpk 1/1 Running 8 (24m ago) 132m 10.244.1.18 kubenode1.web.webart4.me <none> <none>
kube-system coredns-66bc5c9577-w2fh6 1/1 Running 8 (24m ago) 132m 10.244.1.19 kubenode1.web.webart4.me <none> <none>
kube-system etcd-kubemaster.web.webart4.me 1/1 Running 11 (22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-apiserver-kubemaster.web.webart4.me 1/1 Running 11 (22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-controller-manager-kubemaster.web.webart4.me 1/1 Running 11 (22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-proxy-76qnk 1/1 Running 1 (14m ago) 17m 10.11.11.129 kubenode2.web.webart4.me <none> <none>
kube-system kube-proxy-9mcc4 1/1 Running 47 (24m ago) 105m 10.11.11.125 kubenode1.web.webart4.me <none> <none>
kube-system kube-proxy-p8pkc 1/1 Running 39 (22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-scheduler-kubemaster.web.webart4.me 1/1 Running 11 (22m ago) 132m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
Testing Kubernetes cluster

Cluster taints

kubectl describe node | grep -E "NoSchedule|Taints|Name\:"
Name: kubemaster.web.webart4.me
Taints: node-role.kubernetes.io/control-plane:NoSchedule
node.kubernetes.io/disk-pressure:NoSchedule
Name: kubenode1.web.webart4.me
Taints: node.kubernetes.io/disk-pressure:NoSchedule
Name: kubenode2.web.webart4.me
Taints: node.kubernetes.io/disk-pressure:NoSchedule
The disk-pressure taints most likely appear because, under LXC, the nodes see the host's filesystem usage and cross the default kubelet eviction thresholds, so we relax those thresholds:

nano /var/lib/kubelet/config.yaml
evictionSoft:
memory.available: "100Mi"
nodefs.available: "100Mi"
nodefs.inodesFree: "1%"
imagefs.available: "100Mi"
imagefs.inodesFree: "1%"
evictionHard:
memory.available: "100Mi"
nodefs.available: "100Mi"
nodefs.inodesFree: "1%"
imagefs.available: "100Mi"
imagefs.inodesFree: "1%"
evictionSoftGracePeriod:
memory.available: 2m
nodefs.available: 2m
nodefs.inodesFree: 2m
imagefs.available: 2m
imagefs.inodesFree: 2m
systemctl restart kubelet
Correct cluster state

kubectl describe node | grep -E "NoSchedule|Taints|Name\:"
Name: kubemaster.web.webart4.me
Taints: node-role.kubernetes.io/control-plane:NoSchedule
Name: kubenode1.web.webart4.me
Taints: <none>
Name: kubenode2.web.webart4.me
Taints: <none>
First deployment on a new cluster

nano nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
kubectl apply -f nginx-deployment.yaml
kubectl rollout status deployment/nginx-deployment
deployment "nginx-deployment" successfully rolled out
kubectl get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 1/1 1 1 20s
kubectl get pods --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
default nginx-deployment-bf744486c-5zfqz 1/1 Running 0 14m 10.244.2.2 kubenode2.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-nvrht 1/1 Running 11 (3h2m ago) 5h44m 10.11.11.125 kubenode1.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-t95hr 1/1 Running 2 (3h2m ago) 4h16m 10.11.11.129 kubenode2.web.webart4.me <none> <none>
kube-flannel kube-flannel-ds-zdml5 1/1 Running 5 (3h2m ago) 6h9m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system coredns-66bc5c9577-cmtz5 1/1 Running 0 29m 10.244.1.23 kubenode1.web.webart4.me <none> <none>
kube-system coredns-66bc5c9577-j5l49 1/1 Running 0 29m 10.244.1.22 kubenode1.web.webart4.me <none> <none>
kube-system etcd-kubemaster.web.webart4.me 1/1 Running 12 (3h2m ago) 6h11m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-apiserver-kubemaster.web.webart4.me 1/1 Running 12 (3h2m ago) 6h11m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-controller-manager-kubemaster.web.webart4.me 1/1 Running 12 (3h2m ago) 6h11m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-proxy-76qnk 1/1 Running 2 (3h2m ago) 4h16m 10.11.11.129 kubenode2.web.webart4.me <none> <none>
kube-system kube-proxy-9mcc4 1/1 Running 48 (3h2m ago) 5h44m 10.11.11.125 kubenode1.web.webart4.me <none> <none>
kube-system kube-proxy-p8pkc 1/1 Running 40 (3h2m ago) 6h10m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
kube-system kube-scheduler-kubemaster.web.webart4.me 1/1 Running 12 (3h2m ago) 6h11m 10.11.11.121 kubemaster.web.webart4.me <none> <none>
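Before removing the test deployment, it can optionally be exposed as a NodePort service and fetched over the network to confirm that pod networking works end to end (a sketch; the service name simply follows the deployment, the port is whatever the cluster assigns):
kubectl expose deployment nginx-deployment --port=80 --type=NodePort
NODEPORT=$(kubectl get svc nginx-deployment -o jsonpath='{.spec.ports[0].nodePort}')
# Any node IP should answer on the NodePort
curl -sI http://10.11.11.129:${NODEPORT}
# Remove the test service again
kubectl delete svc nginx-deployment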
Delete test deployment

kubectl delete deployment nginx-deployment --namespace default
The icing on the cake - Helm

lxc-attach -n kubemaster
cd /usr/src/
wget https://get.helm.sh/helm-v4.0.1-linux-amd64.tar.gz
sha256sum -c <( printf 'e0365548f01ed52a58a1181ad310b604a3244f59257425bb1739499372bdff60 helm-v4.0.1-linux-amd64.tar.gz' )
tar -xzv -f helm-v4.0.1-linux-amd64.tar.gz
install -m 755 linux-amd64/helm /usr/local/bin/helm
helm version
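As a quick smoke test (assuming outbound HTTPS access; the Bitnami repository is only an example, any chart repository will do):
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm search repo bitnami/nginx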
A few more commands

kubectl get nodes
kubectl get svc --all-namespaces
kubectl get deploy --all-namespaces
kubectl get rs --all-namespaces
kubectl get pods --all-namespaces
kubectl config view
kubectl config current-context
kubeadm token list
kubeadm token create
kubeadm token create --print-join-command
How to get a discovery-token-ca-cert-hash string
cat /etc/kubernetes/pki/ca.crt | \
openssl x509 -pubkey | \
openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'
kubenode1/config
lxc.net.0.type = veth
lxc.net.0.flags = up
lxc.net.0.link = lxcbr11
lxc.net.0.name = eth125
lxc.net.0.veth.pair = veth125
lxc.net.0.ipv4.address = 10.11.11.125/24
lxc.net.0.hwaddr = 00:16:a9:7d:99:25
lxc.net.0.ipv4.gateway = 10.11.11.1
lxc.apparmor.profile = unconfined
lxc.apparmor.allow_nesting = 1
lxc.cap.drop =
lxc.cgroup.devices.allow = a
lxc.mount.auto = proc:rw sys:rw
lxc.cgroup2.devices.allow = c 1:11 rwm
lxc.mount.entry = /dev/kmsg dev/kmsg none defaults,bind,create=file
lxc.cgroup2.devices.allow = c 10:200 rwm
limits.memory.swap = true
linux.kernel_modules = ip_tables,ip6_tables,nf_nat,overlay,br_netfilter
lxc.mount.entry = /proc/sys/vm/overcommit_memory proc/sys/vm/overcommit_memory none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic proc/sys/kernel/panic none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic_on_oops proc/sys/kernel/panic_on_oops none defaults,bind,create=file
lxc.rootfs.path = dir:/var/lib/lxc/kubenode1/rootfs
# Common configuration
lxc.include = /usr/share/lxc/config/debian.common.conf
lxc.include = /usr/share/lxc/config/debian.userns.conf
# Container specific configuration
lxc.tty.max = 4
lxc.uts.name = kubenode1
lxc.arch = amd64
lxc.pty.max = 1024
lxc.start.order = 5
lxc.start.auto = 1
lxc.start.delay = 25
security.nesting = true
security.privileged = true
kubenode2/config
lxc.net.0.type = veth
lxc.net.0.flags = up
lxc.net.0.link = lxcbr11
lxc.net.0.name = eth129
lxc.net.0.veth.pair = veth129
lxc.net.0.ipv4.address = 10.11.11.129/24
lxc.net.0.hwaddr = 00:16:a9:7d:99:29
lxc.net.0.ipv4.gateway = 10.11.11.1
lxc.apparmor.profile = unconfined
lxc.apparmor.allow_nesting = 1
lxc.cap.drop =
lxc.cgroup.devices.allow = a
lxc.mount.auto = proc:rw sys:rw
lxc.cgroup2.devices.allow = c 1:11 rwm
lxc.mount.entry = /dev/kmsg dev/kmsg none defaults,bind,create=file
lxc.cgroup2.devices.allow = c 10:200 rwm
limits.memory.swap = true
linux.kernel_modules = ip_tables,ip6_tables,nf_nat,overlay,br_netfilter
lxc.mount.entry = /proc/sys/vm/overcommit_memory proc/sys/vm/overcommit_memory none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic proc/sys/kernel/panic none defaults,bind,create=file
lxc.mount.entry = /proc/sys/kernel/panic_on_oops proc/sys/kernel/panic_on_oops none defaults,bind,create=file
lxc.rootfs.path = dir:/var/lib/lxc/kubenode2/rootfs
# Common configuration
lxc.include = /usr/share/lxc/config/debian.common.conf
lxc.include = /usr/share/lxc/config/debian.userns.conf
# Container specific configuration
lxc.tty.max = 4
lxc.uts.name = kubenode2
lxc.arch = amd64
lxc.pty.max = 1024
lxc.start.order = 5
lxc.start.auto = 1
lxc.start.delay = 25
security.nesting = true
security.privileged = true
2025-11-21 21:25 +0000