Kubernetes pod placement can be controlled by labeling nodes and setting node selectors or node affinity (note that this governs scheduling, not the pods’ Quality of Service class, which is determined by resource requests and limits). Here are some quick commands to perform this task:

# List nodes
kubectl get nodes

# Assign label to node
ssdNode=linux03
kubectl label nodes $ssdNode disktype=ssd

# Check node labels
kubectl get nodes --show-labels
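
The label can also be used as a selector to quickly confirm which nodes carry it:

# List only the nodes that carry the label
kubectl get nodes -l disktype=ssd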

# Add these values to a Helm release (bitnami/postgresql example)
helm upgrade nextcloud-db bitnami/postgresql \
  --set primary.nodeSelector.disktype=ssd \
  --set postgresqlPassword=PASSWORDHERE \
  --set persistence.existingClaim=nextcloud-claim \
  --set persistence.size=100Gi \
  --set persistence.subPath='postgres' \
  --set resources.requests.memory=8Gi \
  --set resources.requests.cpu=3500m
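
To confirm the value was applied, inspect the rendered workload; the StatefulSet name nextcloud-db-postgresql below is an assumption based on the release name and may differ:

# Show the nodeSelector rendered into the StatefulSet's pod template
kubectl get statefulset nextcloud-db-postgresql -o jsonpath='{.spec.template.spec.nodeSelector}'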

# Alternatively, a nodeSelector can be added to the pod spec
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disktype: ssd
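
Assuming the manifest above is saved as nginx-pod.yaml (filename is arbitrary), apply it and verify the pod landed on an ssd-labeled node:

# Apply the manifest and check pod placement
kubectl apply -f nginx-pod.yaml
kubectl get pod nginx -o wide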

Another example of setting a node selector via the Helm --set command:

# Add node label
nodename=worker02
label=disktype
value=ssd
kubectl label nodes $nodename $label=$value

# Remove node label
nodename=worker02
label=disktype
kubectl label nodes $nodename $label-

# Show current labels
kubectl get nodes --show-labels

# How to use the label with Helm
appName=nextcloud
appRepo=nextcloud/nextcloud
helm upgrade $appName $appRepo \
  --set nodeSelector.disktype=ssd
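
Label keys that contain dots (for example kubernetes.io/hostname) must have the dots escaped when passed through --set; a sketch reusing the variables above, with worker02 as a hypothetical target host:

# Pin to a specific node by hostname label; note the escaped dot in the key
helm upgrade $appName $appRepo \
  --set nodeSelector."kubernetes\.io/hostname"=worker02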

# How to use the label in a pod YAML manifest
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disktype: ssd
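
If no node satisfies the nodeSelector, the pod will remain in Pending; the scheduler's reasoning appears as a FailedScheduling event:

# Inspect scheduling events for a stuck pod
kubectl describe pod nginx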

# How to use the label in a Deployment manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: streamer-v4
  labels:
    app: streamer-v4
spec:
  replicas: 2
  selector:
    matchLabels:
      app: streamer-v4
  template:
    metadata:
      labels:
        app: streamer-v4
    spec:
      containers:
      - name: streamer-v4
        image: nginx
        ports:
        - containerPort: 8880
      nodeSelector:
        disktype: ssd
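
Assuming the manifest is saved as streamer-v4.yaml (filename is arbitrary), apply it and confirm both replicas are running on ssd-labeled nodes:

# Apply, wait for the rollout, then check pod placement
kubectl apply -f streamer-v4.yaml
kubectl rollout status deployment/streamer-v4
kubectl get pods -l app=streamer-v4 -o wide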

# Generally, add this node affinity fragment to the pod template spec of YAML manifests
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype
            operator: In
            values:
            - ssd
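
The fragment above is a hard requirement; a soft preference that still allows scheduling onto other nodes when no ssd node is available would look like this:

spec:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: disktype
            operator: In
            values:
            - ssd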

How to set node affinity as map or list values via the Helm --set command:

appName=kimconnect
domainName=kimconnect.com
wordpressusername=kimconnect
wordpressPassword=SOMEVERYCOMPLEXPASSWORD
rootPassword=SOMEVERYCOMPLEXPASSWORD
nodeAffinity=k8-node003
storageClass=managed-nfs-ssd
helm upgrade $appName bitnami/wordpress \
  --set readinessProbe.enabled=false \
  --set image.tag=latest \
  --set persistence.accessMode=ReadWriteMany \
  --set persistence.storageClass=$storageClass \
  --set persistence.size=1Ti \
  --set mariadb.primary.persistence.storageClass=$storageClass \
  --set mariadb.primary.persistence.size=100Gi \
  --set wordpressUsername=$wordpressusername \
  --set wordpressPassword=$wordpressPassword \
  --set mariadb.auth.rootPassword=$rootPassword \
  --set mariadb.auth.password=$rootPassword \
  --set ingress.enabled=true,ingress.hostname=$domainName \
  --set volumePermissions.enabled=true \
  --set volumePermissions.securityContext.runAsUser=Null \
  --set allowEmptyPassword=false \
  --set service.externalTrafficPolicy=Local \
  --set nodeAffinityPreset.type=hard \
  --set nodeAffinityPreset.key=kubernetes.io/hostname \
  --set nodeAffinityPreset.values[0]=$nodeAffinity
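
To confirm the preset rendered into an affinity rule, inspect the running pod's spec; the app.kubernetes.io/name=wordpress selector below assumes the Bitnami chart's standard labels:

# Show the affinity block of the deployed WordPress pod
kubectl get pod -l app.kubernetes.io/name=wordpress -o jsonpath='{.items[0].spec.affinity}'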

A manual method of moving a pod to an available node:

# Mark certain nodes as unschedulable
admin@k8-master:~$ k cordon k8-worker02
node/k8-worker02 cordoned
admin@k8-master:~$ k cordon k8-worker03
node/k8-worker03 cordoned

# Delete the pod so that it is re-created on the only remaining schedulable node
# Deletion
admin@k8-master:~$ k delete pod kimconnect-mariadb-0
pod "kimconnect-mariadb-0" deleted
# Checking for recreation
admin@k8-master:~$ k get pod -o wide
NAME                   READY   STATUS    RESTARTS   AGE   IP            NODE          NOMINATED NODE   READINESS GATES
kimconnect-mariadb-0   0/1     Running   0          4s    172.16.0.84   k8-worker01   <none>           <none>
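
Once the pod has landed on the intended node, re-enable scheduling on the cordoned nodes:

# Mark the nodes as schedulable again
admin@k8-master:~$ k uncordon k8-worker02
node/k8-worker02 uncordoned
admin@k8-master:~$ k uncordon k8-worker03
node/k8-worker03 uncordoned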