The illustration below assumes that a local RAID mount is being added to a worker node because it lacks sufficient local storage to run the kubelet and Docker containers
# On the K8s controller, cordon/drain the worker, then remove it from the cluster.
# NOTE: 'kubectl drain' takes the node name directly (no 'node' subcommand),
# and the flag is --ignore-daemonsets (DaemonSet pods cannot be evicted).
kubectl drain linux03 --ignore-daemonsets
kubectl delete node linux03
# On the worker node, uninstall docker & kubelet.
# -y answers prompts automatically, matching the non-interactive '-qy' reinstall later.
sudo apt-get remove -y docker-ce docker-ce-cli containerd.io kubelet
# Check the health of its RAID mount /dev/md0.
# mdadm --detail requires root to query the array, hence sudo.
sudo mdadm --detail /dev/md0
# Sample expected output:
Version : 1.2
Creation Time : Fri Aug 13 23:46:13 2021
Raid Level : raid10
Array Size : 1953257472 (1862.77 GiB 2000.14 GB)
Used Dev Size : 976628736 (931.39 GiB 1000.07 GB)
Raid Devices : 4
Total Devices : 4
Persistence : Superblock is persistent
Intent Bitmap : Internal
Update Time : Sat Aug 28 23:39:08 2021
State : clean
Active Devices : 4
Working Devices : 4
Failed Devices : 0
Spare Devices : 0
Layout : near=2
Chunk Size : 512K
Consistency Policy : bitmap
Name : linux03:0 (local to host linux03)
UUID :
Events : 1750
Number Major Minor RaidDevice State
0 8 97 0 active sync set-A /dev/sdg1
1 8 81 1 active sync set-B /dev/sdf1
2 8 17 2 active sync set-A /dev/sdb1
3 8 1 3 active sync set-B /dev/sda1
# Check the logical mount backing the RAID array.
# Quote the expansion so the command is safe even if the path contains spaces.
mount=/nfs-share
df -hT -P "$mount"
# Sample expected output:
root@linux03:/home/kimconnect# df -hT -P $mount
Filesystem Type Size Used Avail Use% Mounted on
/dev/md0 ext4 1.8T 77M 1.7T 1% /nfs-share
# Prepare docker & kubelet redirected-link targets on the RAID-backed share.
source1=/nfs-share/linux03/docker
source2=/nfs-share/linux03/kubelet
destinationdirectory=/var/lib/
# -p creates parent directories as needed; quoting guards against word-splitting.
sudo mkdir -p "$source1" "$source2"
# Optional: remove existing docker & kubelet directories.
# These live under /var/lib and are root-owned, so sudo is required
# (consistent with the other privileged commands in this procedure).
sudo rm -rf /var/lib/kubelet
sudo rm -rf /var/lib/docker
# Create links: -s symbolic, -f replace any existing link, -n treat an
# existing symlink-to-directory as a file (don't descend into it).
sudo ln -sfn "$source1" "$destinationdirectory"
sudo ln -sfn "$source2" "$destinationdirectory"
# Verify the symlinks now point at the RAID-backed share
ls -la /var/lib
# Expected output:
root@linux03:/home/kim# ls /var/lib -la
total 180
drwxr-xr-x 45 root root 4096 Aug 28 00:38 .
drwxr-xr-x 13 root root 4096 Feb 1 2021 ..
drwxr-xr-x 4 root root 4096 Feb 1 2021 AccountsService
drwxr-xr-x 5 root root 4096 Aug 28 00:24 apt
drwxr-xr-x 2 root root 4096 Sep 10 2020 boltd
drwxr-xr-x 2 root root 4096 Aug 27 21:21 calico
drwxr-xr-x 8 root root 4096 Aug 28 00:34 cloud
drwxr-xr-x 4 root root 4096 Aug 27 23:52 cni
drwxr-xr-x 2 root root 4096 Aug 27 19:38 command-not-found
drwx--x--x 11 root root 4096 Aug 27 20:24 containerd
drwxr-xr-x 2 root root 4096 Aug 27 19:57 dbus
drwxr-xr-x 2 root root 4096 Apr 10 2020 dhcp
lrwxrwxrwx 1 root root 25 Aug 27 23:24 docker -> /nfs-share/linux03/docker
drwxr-xr-x 3 root root 4096 Aug 27 21:15 dockershim
drwxr-xr-x 7 root root 4096 Aug 28 00:24 dpkg
drwxr-xr-x 3 root root 4096 Feb 1 2021 fwupd
drwxr-xr-x 2 root root 4096 Apr 20 2020 git
drwxr-xr-x 4 root root 4096 Aug 27 19:39 grub
drwxr-xr-x 2 root root 4096 Aug 27 19:51 initramfs-tools
lrwxrwxrwx 1 root root 26 Aug 28 00:38 kubelet -> /nfs-share/linux03/kubelet
### truncated for brevity ###
# Reinstall docker & kubernetes, pinning kubeadm/kubelet/kubectl to a known version.
# sudo added for consistency with the earlier apt-get remove; $version is quoted.
version=1.20.10-00
sudo apt-get install -qy --allow-downgrades --allow-change-held-packages kubeadm="$version" kubelet="$version" kubectl="$version" docker-ce docker-ce-cli containerd.io nfs-common
# Hold the pinned packages so routine upgrades don't change the cluster version
sudo apt-mark hold kubeadm kubelet kubectl
I may make another illustration for NFS mounts, though it may not be necessary, as the instructions would be mostly similar. The main difference is that one must ensure the worker node automatically remounts the NFS share upon reboot (e.g., via /etc/fstab). The commands to create the symbolic links would be the same.
Categories: