Step 1: Creating an NFS Server
A. Create NFS Share on File Server
There are many ways to perform this task. Here’s an illustration of a manual method of enabling a standard Ubuntu server to serve as an NFS server.
Here’s a related blog with updated instructions: https://blog.kimconnect.com/how-to-install-nfs-server-on-ubuntu-21-04/
# Install prerequisites (on Ubuntu the server package is nfs-kernel-server):
sudo apt install nfs-kernel-server
# Create nfs share:
shareName=/export/kubernetes
sudo mkdir -p $shareName
sudo chown -R nobody:nogroup $shareName
sudo systemctl enable nfs-server
sudo systemctl start nfs-server
sudo vim /etc/exports
### Add this line
/export/kubernetes *(rw,sync,no_subtree_check,no_root_squash,no_all_squash,insecure)
###
sudo exportfs -rav
sudo exportfs -v
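To confirm the export is actually visible over the network, it can be queried from any host with the NFS client tools installed. A quick check, using the server IP from the examples below:
# Query the export list from a remote host
showmount -e 192.168.100.21
# Expected output:
# Export list for 192.168.100.21:
# /export/kubernetes *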
B. Testing access from a client
# Install prerequisite
sudo apt install nfs-common
# Mount, create/delete a file, and unmount
# Set variables
nfsShare=kubernetes # assuming that the 'kubernetes' share has already been created on the server
nfsServer=192.168.100.21 # assuming the NAS server name resolves to its correct IP
sharePath=/volume1/$nfsShare # adjust to match the server's export path (e.g. /export/$nfsShare for the server configured above)
mountPoint=/mnt/$nfsShare
sudo mkdir $mountPoint
sudo mount -t nfs $nfsServer:$sharePath $mountPoint # Test mounting
sudo mount | grep $nfsShare
touch $mountPoint/test.txt
ls $mountPoint
rm $mountPoint/test.txt
ls $mountPoint
sudo umount -f -l $mountPoint # or sudo umount $mountPoint
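If the client should remount the share automatically at boot, an /etc/fstab entry can be added. A sketch using the same example values; adjust the server, export path, and mount point as needed:
# /etc/fstab entry for a persistent NFS mount (values are examples)
192.168.100.21:/volume1/kubernetes /mnt/kubernetes nfs defaults,_netdev 0 0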
Step 2a: Install Dynamic NFS Provisioner Using Helm
# Check current helm repo
kim@linux01:~$ helm repo list
NAME                URL
bitnami             https://charts.bitnami.com/bitnami
ingress-nginx       https://kubernetes.github.io/ingress-nginx/
rancher-stable      https://releases.rancher.com/server-charts/stable
jetstack            https://charts.jetstack.io
k8s-at-home         https://k8s-at-home.com/charts/
nextcloud           https://nextcloud.github.io/helm/
chrisingenhaag      https://chrisingenhaag.github.io/helm/
wiremind            https://wiremind.github.io/wiremind-helm-charts/
# The easy way: add the repo and install the chart
nfsServer=192.168.100.21
nfsShare=/volume1/k8s
helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
  --set nfs.server=$nfsServer \
  --set nfs.path=$nfsShare
# Sample output
NAME: nfs-subdir-external-provisioner
LAST DEPLOYED: Sun Aug 1 21:16:05 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
# Possible error:
Error: chart requires kubeVersion: >=1.9.0-0 <1.20.0-0 which is incompatible with Kubernetes v1.20.2
# Workaround: downgrade Kubernetes - not recommended!
version=1.20.0-00
sudo apt install -qy kubeadm=$version kubectl=$version kubelet=$version kubernetes-cni=$version --allow-downgrades
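A gentler workaround is to relax the chart's version constraint locally instead of downgrading the cluster. This is a sketch, assuming the constraint lives in the pulled chart's Chart.yaml; newer chart releases have since widened the range, so running helm repo update first may be all that's needed:
# Pull the chart locally, loosen the kubeVersion constraint, and install from disk
helm repo update
helm pull nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --untar
sed -i 's/^kubeVersion:.*/kubeVersion: ">=1.9.0-0"/' nfs-subdir-external-provisioner/Chart.yaml
helm install nfs-subdir-external-provisioner ./nfs-subdir-external-provisioner \
  --set nfs.server=$nfsServer \
  --set nfs.path=$nfsShare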
# If everything works out, storage class 'nfs-client' will become available ('k' below is an alias for kubectl)
kim@linux01:~$ k get storageclasses.storage.k8s.io
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-class kubernetes.io/nfs Retain Immediate true 181d
nfs-client cluster.local/nfs-subdir-external-provisioner Delete Immediate true 25m
# set default storage class
defaultStorageClassName=nfs-client
kubectl patch storageclass $defaultStorageClassName -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
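If another class (such as nfs-class above) was previously marked as default, it's worth clearing that annotation so only one default remains; a cluster with two default storage classes behaves unpredictably. A sketch, with the class name as an example:
# Remove the default flag from the old storage class
kubectl patch storageclass nfs-class -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'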
# Check storage classes for the suffix '(default)'
kim@linux01:~$ kubectl get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-class kubernetes.io/nfs Retain Immediate true 181d
nfs-client (default) cluster.local/nfs-subdir-external-provisioner Delete Immediate true 42m
# Test creating nfs claim
cat > test-pvc.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs-pv1
spec:
  storageClassName: nfs-client # must match the storage class created by the helm chart above
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Mi
EOF
kubectl apply -f test-pvc.yaml
# Check result
kim@linux01:~$ k get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-8ed4fc70-71c4-48c7-85a9-57175cfc21e7 500Mi RWX Delete Bound default/pvc-nfs-pv1 nfs-client 10s
kim@linux01:~$ k get pvc pvc-nfs-pv1
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc-nfs-pv1 Bound pvc-8ed4fc70-71c4-48c7-85a9-57175cfc21e7 500Mi RWX nfs-client 91s
kim@linux01:~$ k delete -f test-pvc.yaml
persistentvolumeclaim "pvc-nfs-pv1" deleted
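Behind the scenes, the provisioner creates one subdirectory per volume on the NFS share, named after the namespace, claim, and volume; with archiveOnDelete enabled, deleted volumes are renamed with an archived- prefix rather than removed. A quick way to see this on the NFS server while a claim exists (paths and names are illustrative):
# On the NFS server: each PV maps to a subdirectory named <namespace>-<pvcName>-<pvName>
ls /volume1/k8s
# default-pvc-nfs-pv1-pvc-8ed4fc70-71c4-48c7-85a9-57175cfc21e7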
Step 2b: Manual Installation of Dynamic NFS Provisioner
# Pull the source code
workingDirectory=~/nfs-dynamic-provisioner
mkdir $workingDirectory && cd $workingDirectory
git clone https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner
cd nfs-subdir-external-provisioner/deploy
# Deploying the service accounts, accepting defaults
k create -f rbac.yaml
# Editing storage class
vim class.yaml
##############################################
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-ssd # set this value
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # or choose another name; must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "true" # "true" means data is archived (renamed) on the share when its PVC is deleted, rather than removed
allowVolumeExpansion: true # top-level boolean field; doesn't exist by default
##############################################
# Deploying storage class
k create -f class.yaml
# Sample output
stoic@masternode:~/nfs-dynamic-provisioner/nfs-subdir-external-provisioner/deploy$ k get storageclasses.storage.k8s.io
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
managed-nfs-ssd k8s-sigs.io/nfs-subdir-external-provisioner Delete Immediate false 33s
nfs-class kubernetes.io/nfs Retain Immediate true 193d
nfs-client (default) cluster.local/nfs-subdir-external-provisioner Delete Immediate true 12d
# Example of patching an applied object
kubectl patch storageclass managed-nfs-ssd -p '{"allowVolumeExpansion":true}'
kubectl patch storageclass managed-nfs-ssd -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' # Set storage class as default
# Editing deployment of dynamic nfs provisioning service pod
vim deployment.yaml
##############################################
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: X.X.X.X # change this value
            - name: NFS_PATH
              value: /nfs-share # change this value
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.100.93 # change this value
            path: /nfs-share # change this value
##############################################
# Creating nfs provisioning service pod
k create -f deployment.yaml
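Once created, the provisioner pod should come up within a few seconds; a quick sanity check, assuming the default namespace:
# Verify the provisioner pod is running and inspect its logs
k get pods -l app=nfs-client-provisioner
k logs -l app=nfs-client-provisioner --tail=20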
# Troubleshooting: example where the deployment was stuck because the service account from rbac.yaml had not yet been created
stoic@masternode: $ k describe deployments.apps nfs-client-provisioner
Name: nfs-client-provisioner
Namespace: default
CreationTimestamp: Sat, 14 Aug 2021 00:09:24 +0000
Labels: app=nfs-client-provisioner
Annotations: deployment.kubernetes.io/revision: 1
Selector: app=nfs-client-provisioner
Replicas: 1 desired | 0 updated | 0 total | 0 available | 1 unavailable
StrategyType: Recreate
MinReadySeconds: 0
Pod Template:
Labels: app=nfs-client-provisioner
Service Account: nfs-client-provisioner
Containers:
nfs-client-provisioner:
Image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
Port: <none>
Host Port: <none>
Environment:
PROVISIONER_NAME: k8s-sigs.io/nfs-subdir-external-provisioner
NFS_SERVER: X.X.X.X
NFS_PATH: /nfs-share
Mounts:
/persistentvolumes from nfs-client-root (rw)
Volumes:
nfs-client-root:
Type: NFS (an NFS mount that lasts the lifetime of a pod)
Server: X.X.X.X
Path: /nfs-share
ReadOnly: false
Conditions:
Type Status Reason
---- ------ ------
Progressing True NewReplicaSetCreated
Available False MinimumReplicasUnavailable
ReplicaFailure True FailedCreate
OldReplicaSets: <none>
NewReplicaSet: nfs-client-provisioner-7768c6dfb4 (0/1 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 3m47s deployment-controller Scaled up replica set nfs-client-provisioner-7768c6dfb4 to 1
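The FailedCreate condition above traces back to the missing service account; applying rbac.yaml and letting the ReplicaSet retry resolves it. A sketch:
# Create the missing RBAC objects, then wait for the deployment to become available
k apply -f rbac.yaml
k rollout status deployment nfs-client-provisioner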
# Get the default nfs storage class
echo $(kubectl get sc -o=jsonpath='{range .items[?(@.metadata.annotations.storageclass\.kubernetes\.io/is-default-class=="true")]}{@.metadata.name}{"\n"}{end}')
##### OLD NOTES: Feel free to ignore the below chicken scratch #######
# The less-easy way: manually install the provisioner
git clone https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/
cd nfs-subdir-external-provisioner/deploy
NS=$(kubectl config get-contexts|grep -e "^\*" |awk '{print $5}')
NAMESPACE=${NS:-default}
sed -i'' "s/namespace:.*/namespace: $NAMESPACE/g" ./rbac.yaml ./deployment.yaml
kubectl apply -f ./rbac.yaml
vim deployment.yaml
###
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-storage
            - name: NFS_SERVER
              value: 192.168.100.21
            - name: NFS_PATH
              value: /kubernetes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.100.21
            path: /kubernetes
###
k apply -f deployment.yaml
vim class.yaml
######
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: nfs-storage # or choose another name; must match the deployment's env PROVISIONER_NAME
parameters:
  pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" # waits for the nfs.io/storage-path annotation; if not specified, it is accepted as an empty string
  onDelete: delete
######
# Create Persistent Volume Claim (save as test-claim.yaml)
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    nfs.io/storage-path: "test-path" # not required, depending on whether this annotation was shown in the storage class description
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
k apply -f class.yaml
kubectl create -f test-claim.yaml
kubectl create -f test-pod.yaml
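test-pod.yaml is not reproduced above; a minimal version, modeled on the example shipped in the upstream nfs-subdir-external-provisioner repo's deploy directory, mounts the claim and writes a SUCCESS marker file:
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  restartPolicy: "Never"
  containers:
    - name: test-pod
      image: busybox:stable
      command: ["/bin/sh"]
      args: ["-c", "touch /mnt/SUCCESS && exit 0 || exit 1"] # writes a marker file to the NFS-backed volume
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim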