Step 0: Create NFS Share

Ensure that the NFS share has been created with these settings (an example /etc/exports entry is shown after this list):
– NFS share name: pihole
– Client Access: world accessible (*) or allowed ingress from the Kubernetes nodes' IPs and/or subnets (e.g. x.x.x.x/netmask)
– Privilege: Read/Write (rw)
– Other options: sync,no_subtree_check,no_root_squash,no_all_squash,insecure
– Shared directory ACL: 755 with entity ‘nobody:www-data‘ as owner of the pihole share & files. As long as the NFS options are set correctly, it’s unnecessary to manually configure ACLs.
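For reference, on a typical Linux NFS server these settings roughly correspond to an /etc/exports entry like the one below (the /export/pihole path matches the OpenMediaVault layout used later in this article; the 192.168.1.0/24 subnet is only an example - substitute your nodes' subnet):

# Example /etc/exports entry for the 'pihole' share
/export/pihole 192.168.1.0/24(rw,sync,no_subtree_check,no_root_squash,no_all_squash,insecure)
# Reload the export table after editing
sudo exportfs -ra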

Step 1: Validate that NFS is accessible

# Install the NFS client
sudo apt -y install nfs-common

# Set variables
nfsShare=pihole # assuming that the 'pihole' share has already been created on the server
nfsServer=192.168.1.21 # assuming NAS servername is resolved to its correct IP
mountPoint=/mnt/$nfsShare

# Test Mount, create/delete a file, and unmount
sudo mkdir -p $mountPoint
sudo mount -t nfs $nfsServer:/$nfsShare $mountPoint # Test mounting
sudo touch $mountPoint/test.txt
ls $mountPoint
sudo rm $mountPoint/test.txt
ls $mountPoint
sudo umount -f -l $mountPoint # Unmounting
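If the test mount fails, it can be quicker to first confirm what the server is actually exporting to this client; showmount is included with nfs-common:

# Optional: list the exports visible to this node
showmount -e $nfsServer
# Optional: while the share is mounted, confirm the negotiated NFS options
mount | grep "$mountPoint"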

Step 2: Create Storage Class ONLY if It Does Not Already Exist

# Create custom storage class - if it doesn't exist
class=nfs
storageClassName=$class-class # nfs-class
nfsStorageClassExists=$(kubectl get storageclass $storageClassName --ignore-not-found)
if [ -z "$nfsStorageClassExists" ]
then
cat > $storageClassName.yaml <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: $storageClassName
provisioner: kubernetes.io/$class
reclaimPolicy: Retain
allowVolumeExpansion: true
EOF
kubectl apply -f $storageClassName.yaml
else
    echo "Storage class $storageClassName already exists."
fi
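A quick sanity check that the class is now registered:

# Verify the storage class
kubectl get storageclass $storageClassName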

Step 3: Create a Persistent Volume

# Set variables
appName=pihole
pv1=$appName-pv
pv1Label=$appName
pv1Size=1.5Gi
storageClassName=nfs-class
nfsServer=192.168.1.21
nfs1=$appName

# Create pv
cat > $pv1.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: $pv1
  labels:
    directory: $pv1Label
spec:
  storageClassName: $storageClassName
  nfs: 
    path: /$nfs1 
    server: $nfsServer
  persistentVolumeReclaimPolicy: Retain
  capacity:
    storage: $pv1Size 
  accessModes:
  - ReadWriteMany 
EOF
kubectl apply -f $pv1.yaml

Step 4: Create a Persistent Volume Claim

appName=pihole
pvc1Label=$appName
pvc1=$appName-claim
pvc1Size=1.5Gi
storageClassName=nfs-class

# Create pvc
cat > $pvc1.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: $pvc1
spec:
  storageClassName: $storageClassName
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: $pvc1Size
  selector:
    matchLabels:
      directory: $pvc1Label      
EOF
kubectl apply -f $pvc1.yaml
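The claim should bind to the volume almost immediately; both should report a 'Bound' status (names assume the variables set above):

# Verify binding status of the volume and claim
kubectl get pv $pv1
kubectl get pvc $pvc1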

Step 5: Implement MetalLB Load Balancer – If It Does Not Already Exist

# Set strictARP, ipvs mode
kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | sed -e "s/mode: \"\"/mode: \"ipvs\"/" | \
kubectl apply -f - -n kube-system
 
# Apply the manifests provided by the author, David Anderson (https://www.dave.tf/) - an awesome dude
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.5/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.5/manifests/metallb.yaml
# On first install only
kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
 
# Customize for this system
ipRange=192.168.1.2-192.168.1.99
loadBalancerFile=metallb-config.yaml
cat > $loadBalancerFile << EOF
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $ipRange
EOF
kubectl apply -f $loadBalancerFile
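Note: the ConfigMap-based configuration above matches the v0.9.5 manifests used here. Newer MetalLB releases (v0.13 and later) configure address pools via CRDs instead; should a newer release be installed, a roughly equivalent definition (same IP range) would be:

cat > metallb-ipaddresspool.yaml << EOF
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
  - $ipRange
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default
EOF
kubectl apply -f metallb-ipaddresspool.yaml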

Step 6: Define Services

The services defined in this section depend on the MetalLB load balancer plugin. Moreover, sticky sessions are important for HTTP services. There are two methods of setting sticky sessions: (1) ‘service.spec.sessionAffinity: ClientIP’ and (2) Ingress session affinity based on a cookie, which follows this sequence:

Create NGINX controller deployment
Create NGINX service
Create Ingress
Redirect hostname to the NGINX service external IP

For simplicity's sake, we will use option (1) as the preferred method to achieve client-to-pod consistency of client sessions.

# Set variables
appName=pihole
serviceName=$appName-service
externalIp=192.168.1.50

# Generate tcp & udp services for pihole
cat > pihole-svc-udp.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: $appName-svc-udp
  annotations:
    metallb.universe.tf/address-pool: default
    metallb.universe.tf/allow-shared-ip: psk
spec:
  type: LoadBalancer
  loadBalancerIP: $externalIp
  # sessionAffinity: ClientIP
  # externalTrafficPolicy: Local # This is to preserve the client source IP
  ports:
    - port: 53
      protocol: UDP
      targetPort: dns-udp
      name: dns-udp
  selector:
    app: $appName
EOF
cat > pihole-svc-tcp.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: $appName-svc-tcp
  annotations:
    metallb.universe.tf/address-pool: default
    metallb.universe.tf/allow-shared-ip: psk
spec:
  type: LoadBalancer
  loadBalancerIP: $externalIp
  sessionAffinity: ClientIP # This is necessary for multi-replica deployments
  # externalTrafficPolicy: Local # This is to preserve the client source IP
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
    - port: 53
      targetPort: dns-tcp
      protocol: TCP
      name: dns-tcp
  selector:
    app: $appName
EOF
kubectl apply -f pihole-svc-tcp.yaml
kubectl apply -f pihole-svc-udp.yaml
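Both services should be granted the same shared external IP; if one remains '<pending>', re-check that the metallb.universe.tf/allow-shared-ip values match:

# Verify that both services received the shared external IP
kubectl get service $appName-svc-tcp $appName-svc-udp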

Step 7: Pihole Config Map

cat > piholeConfigMap.yml <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: pihole-env
  namespace: default
data:
  02-lan: |
    addn-hosts=/etc/pihole/lan.list
  adlist: |
    https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
    http://sysctl.org/cameleon/hosts
    https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
    https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
    http://phishing.mailscanner.info/phishing.bad.sites.conf
    https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-blocklist.txt
    https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt
    https://zerodot1.gitlab.io/CoinBlockerLists/list.txt
    https://zerodot1.gitlab.io/CoinBlockerLists/list_browser.txt
    https://zerodot1.gitlab.io/CoinBlockerLists/list_optional.txt
    https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt
    https://raw.githubusercontent.com/w13d/adblockListABP-PiHole/master/Spotify.txt
    https://smokingwheels.github.io/Pi-hole/allhosts
  reglist: |
    ^(.+[-.])??adse?rv(er?|ice)?s?[0-9][-.]
    ^analytics?[-.]
    ^banners?[-.]
    ^count(ers?)?[0-9][-.]
    ^pixels?[-.]
    ^beacons?[0-9][-.]
    ^stat(s|istics)?[0-9][-.]
    ^telemetry[-.]
    ^track(ers?|ing)?[0-9]*[-.]
    ^traff(ic)?[-.]
    ^adim(age|g)s?[0-9][-.]
    ^adtrack(er|ing)?[0-9][-.]
    ^advert(s|is(ing|ements?))?[0-9][-_.]
    ^aff(iliat(es?|ion))?[-.]
    ^(.+[-.])??m?ad[sxv]?[0-9][-.]
    (^r[[:digit:]]+(.|-+)[[:alnum:]]+-+[[:alnum:]]+-+[[:alnum:]]+.)(googlevideo|gvt1).com$
EOF
kubectl apply -f piholeConfigMap.yml
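The keys in this ConfigMap only take effect if the corresponding volumeMounts are uncommented in the deployment of Step 8. To inspect the map, and to rebuild gravity once the adlists are mounted (with a recent kubectl, 'deploy/pihole' selects one of the deployment's pods):

# Inspect the ConfigMap
kubectl describe configmap pihole-env
# After the lists are mounted (Step 8), rebuild gravity so Pihole ingests the new adlists
kubectl exec deploy/pihole -- pihole -g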

Step 8: Deployment Plan

# Set variables
appName=pihole
replicas=1
imageSource=pihole/pihole:latest
pv1=$appName-volume
pvc1=$appName-claim
dns1=8.8.8.8
dns2=192.168.1.1
setIp=192.168.1.50
hostAlias1=pihole.kimconnect.com
hostAlias2=pihole
timeZone=America/Los_Angeles
adminPassword=nopassword # this only applies to first time deployment

# Create deployment file
cat > $appName.yaml << EOF
kind: Deployment
apiVersion: apps/v1
metadata:
  name: $appName
  labels:
    app: $appName
spec:
  replicas: $replicas
  strategy:
    type: Recreate
  selector: 
    matchLabels:
      app: $appName # This must match the pod template label below
  template:    
    metadata:
      labels:
        app: $appName
        # name: $appName
    spec:
      # securityContext:
      #   runAsUser: 0
      #   runAsGroup: 0
      #   fsGroup: 0
      # hostNetwork: true
      affinity:
        podAntiAffinity: # this is an important constraint to ensure that each pod is scheduled on a different node to avoid problems with 'ports in use'
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: "app"
                  operator: In
                  values:
                  - $appName
            topologyKey: "kubernetes.io/hostname"
      restartPolicy: Always
      # hostAliases:
      #   - ip: $setIp
      #     hostnames:
      #     - "$hostAlias1"
      #   - ip: 127.0.0.1
      #     hostnames:
      #     - "$appName"
      # hostname: $appName
      containers:
      - name: $appName
        image: $imageSource
        securityContext:
          allowPrivilegeEscalation: true
          privileged: true
          capabilities:
            add:
              - NET_ADMIN
              - CHOWN
        imagePullPolicy: Always
        ports:
          - containerPort: 80
            name: http
          - containerPort: 53
            protocol: TCP
            name: dns-tcp
          - containerPort: 53
            protocol: UDP
            name: dns-udp
        lifecycle:
          postStart:
            exec:
              command: ["/bin/sh", "-c", "sleep 30 && chown pihole:www-data /etc/pihole/gravity.db"]
        env:
        - name: 'DNS1'
          value: '$dns1'
        - name: 'DNS2'
          value: '$dns2'  
        - name: TZ
          value: "$timeZone"
        - name: WEBPASSWORD
          value: "$adminPassword"
        volumeMounts:
        - name: $pv1
          mountPath: "/etc/pihole"
          subPath: "pihole"
        - name: $pv1
          mountPath: "/etc/dnsmasq.d"
          subPath: "dnsmasq"
        # - name: adlist
        #   mountPath: "/etc/pihole/adlists.list"
        #   subPath: "adlists.list"
        # - name: reglist
        #   mountPath: "/etc/pihole/regex.list"
        #   subPath: "regex.list"
        # - name: 02-lan
        #   mountPath: "/etc/dnsmasq.d/02-lan.conf"
        #   subPath: "02-lan.conf"
      volumes:
      - name: $pv1
        persistentVolumeClaim:
          claimName: $pvc1
      # - name: reglist
      #   configMap:
      #     name: pihole-env
      #     items:
      #     - key: reglist
      #       path: regex.list
      # - name: adlist
      #   configMap:
      #     name: pihole-env
      #     items:
      #     - key: adlist
      #       path: adlists.list
      # - name: 02-lan
      #   configMap:
      #     name: pihole-env
      #     items:
      #     - key: 02-lan
      #       path: 02-lan.conf
EOF
kubectl apply -f $appName.yaml
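To monitor the rollout and confirm where the pod landed:

# Watch the rollout, then confirm pod placement and startup logs
kubectl rollout status deployment/$appName
kubectl get pods -l app=$appName -o wide
kubectl logs -l app=$appName --tail=20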

How to Change Pihole Password in Kubernetes

brucelee@controller:~$ k exec --stdin --tty pihole-75684d64cb-mzmq2 -- /bin/bash
root@pihole-75684d64cb-mzmq2:/# pihole -a -p
Enter New Password (Blank for no password): 
Confirm Password: 
  [✓] New password set

How to Scale Up or Down Replicas

# Note: pihole database file currently isn't meant for multi-access; thus, it's not advisable to set replicas higher than 1
brucelee@controller:~$ kubectl scale deployment pihole --replicas=1
deployment.apps/pihole scaled
brucelee@controller:~$ k get pod -o wide
NAME                           READY   STATUS        RESTARTS   AGE   IP              NODE      NOMINATED NODE   READINESS GATES
pihole-75684d64cb-mzmq2        1/1     Running       0          34h   172.16.90.221   linux03   <none>           <none>
pihole-75684d64cb-tnv74        0/1     Terminating   0          34h   <none>          linux02   <none>           <none>

Troubleshooting

# This setting has caused the error 'dnsmasq: failed to create listening socket for port 53: Address already in use' - it's not recommended for K8s clusters running the MetalLB load balancer.
deployment.spec.template.spec.hostNetwork: true
# How to view logs of a container in a pod: kubectl logs {podname} -c {containername}
# How to view logs of a previously terminated container in a pod: kubectl logs {podname} -c {containername} --previous
dragoncoin@controller:~$ kubectl logs pihole-7d96dc7986-jc4tj -c pihole --previous
[s6-init] making user provided files available at /var/run/s6/etc...exited 0.
[s6-init] ensuring user provided files have correct perms...exited 0.
[fix-attrs.d] applying ownership & permissions fixes...
[fix-attrs.d] 01-resolver-resolv: applying... 
[fix-attrs.d] 01-resolver-resolv: exited 0.
[fix-attrs.d] done.
[cont-init.d] executing container initialization scripts...
[cont-init.d] 20-start.sh: executing... 
 ::: Starting docker specific checks & setup for docker pihole/pihole
Assigning random password: 7y_qSVCx

  [i] Installing configs from /etc/.pihole...
  [i] Existing dnsmasq.conf found... it is not a Pi-hole file, leaving alone!
  [✓] Copying 01-pihole.conf to /etc/dnsmasq.d/01-pihole.conf
chown: changing ownership of '/etc/pihole/pihole-FTL.conf': Operation not permitted
chown: cannot access '': No such file or directory
chmod: cannot access '': No such file or directory
chown: changing ownership of '/etc/pihole': Operation not permitted
chown: cannot access '/etc/pihole/dhcp.leases': No such file or directory
Converting DNS1 to PIHOLE_DNS_
Converting DNS2 to PIHOLE_DNS_
Setting DNS servers based on PIHOLE_DNS_ variable
::: Pre existing WEBPASSWORD found
DNSMasq binding to default interface: eth0
Added ENV to php:
			"PHP_ERROR_LOG" => "/var/log/lighttpd/error.log",
			"ServerIP" => "0.0.0.0",
			"VIRTUAL_HOST" => "0.0.0.0",
Using IPv4 and IPv6
::: Preexisting ad list /etc/pihole/adlists.list detected ((exiting setup_blocklists early))
https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts

dnsmasq: failed to create listening socket for port 53: Address already in use
::: Testing pihole-FTL DNS: [cont-init.d] 20-start.sh: exited 1.
[cont-finish.d] executing container finish scripts...
[cont-finish.d] done.
[s6-finish] waiting for services.
[s6-finish] sending all processes the TERM signal.
[s6-finish] sending all processes the KILL signal and exiting.

# Resolutions of errors above:
1. dnsmasq: failed to create listening socket for port 53: Address already in use => remove 'hostNetwork: true' from the deployment (see the patch sketch below)
2. chown: changing ownership of '/etc/pihole/pihole-FTL.conf': Operation not permitted => chmod 777 for all files on the NFS directory, recursively
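For resolution 1, the cleanest path is to delete the hostNetwork line from the deployment manifest and re-apply it; a patch sketch such as the one below would also work, assuming the field is currently present in the live spec:

kubectl patch deployment pihole --type=json \
  -p='[{"op":"remove","path":"/spec/template/spec/hostNetwork"}]'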
# Error when pihole cannot own gravity.db
Error, something went wrong!
While executing INSERT OR IGNORE: attempt to write a readonly database
Added 0 out of 1 domains

# View the ACL of files on the NAS
root@ovm:/export# stat -c "%U:%G %a %n" pihole/pihole/*
nobody:nogroup 600 pihole/pihole/custom.list
nobody:nogroup 644 pihole/pihole/dns-servers.conf
nobody:nogroup 644 pihole/pihole/GitHubVersions
nobody:nogroup 664 pihole/pihole/gravity.db
nobody:nogroup 644 pihole/pihole/list.1.raw.githubusercontent.com.domains
nobody:nogroup 644 pihole/pihole/localbranches
nobody:nogroup 644 pihole/pihole/local.list
nobody:nogroup 644 pihole/pihole/localversions
nobody:nogroup 777 pihole/pihole/migration_backup
nobody:nogroup 644 pihole/pihole/pihole-FTL.conf
nobody:nogroup 644 pihole/pihole/pihole-FTL.db
nobody:nogroup 666 pihole/pihole/setupVars.conf
nobody:nogroup 666 pihole/pihole/setupVars.conf.update.bak

# Check the logs again on the K8s controller
dragoncoin@controller:~$ k logs pihole-7d584d94b8-bnzcv -c pihole
chown: changing ownership of '/etc/pihole/pihole-FTL.conf': Operation not permitted
chown: cannot access '': No such file or directory
chmod: cannot access '': No such file or directory
chown: changing ownership of '/etc/pihole': Operation not permitted
chown: cannot access '/etc/pihole/dhcp.leases': No such file or directory
chown: changing ownership of '/etc/pihole/gravity.db': Operation not permitted

# Solution:
The proper fix is to set correct permissions on pihole files as shown below:
root    root      644  /etc/pihole/adlists.list
root    root      644  /etc/pihole/adlists.list.old
root    root      644  /etc/pihole/black.list
root    root      644  /etc/pihole/blacklist.txt
pihole  pihole    644  /etc/pihole/dhcp.leases
root    root      777  /etc/pihole/dnsmasq.d
root    root      644  /etc/pihole/dns-servers.conf
root    root      644  /etc/pihole/GitHubVersions
root    root      644  /etc/pihole/gravity.list
root    root      644  /etc/pihole/install.log
root    root      600  /etc/pihole/list.0.raw.githubusercontent.com.domains
root    root      600  /etc/pihole/list.1.mirror1.malwaredomains.com.domains
root    root      600  /etc/pihole/list.2.sysctl.org.domains
root    root      600  /etc/pihole/list.3.zeustracker.abuse.ch.domains
root    root      600  /etc/pihole/list.4.s3.amazonaws.com.domains
root    root      600  /etc/pihole/list.5.s3.amazonaws.com.domains
root    root      600  /etc/pihole/list.6.hosts-file.net.domains
root    root      600  /etc/pihole/list.7.dehakkelaar.nl.domains
root    root      600  /etc/pihole/list.8.gitlab.com.domains
root    root      644  /etc/pihole/list.preEventHorizon
root    root      644  /etc/pihole/localbranches
root    root      644  /etc/pihole/local.list
root    root      644  /etc/pihole/localversions
root    root      644  /etc/pihole/logrotate
root    root      644  /etc/pihole/macvendor.db
pihole  pihole    664  /etc/pihole/pihole-FTL.conf
pihole  pihole    644  /etc/pihole/pihole-FTL.db
root    root      644  /etc/pihole/pihole-FTL.db.bak
pihole  www-data  664  /etc/pihole/regex.list
root    root      644  /etc/pihole/setupVars.conf
root    root      644  /etc/pihole/setupVars.conf.update.bak
root    root      644  /etc/pihole/whitelist.txt

Unfortunately, the username 'pihole' with uid 999 may not exist on the NFS server - or that uid may be associated with a different username there. Also, by default, shares are owned by root upon container instantiation; in the case of the pihole container, root automatically chmods /etc/pihole/gravity.db to 644.
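To check how uid 999 (the in-container 'pihole' account) maps on the NAS, and what the exported files currently look like, something like this can be run on the NFS server (paths follow the OpenMediaVault layout shown elsewhere in this article):

# On the NFS server: does uid 999 map to a local account?
getent passwd 999
# Inspect current ownership/permissions of the exported pihole files
stat -c "%U:%G %a %n" /export/pihole/pihole/*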

# workaround option (a) - Manual
# This is the previously improvised workaround, which is a manual process:
piholeShare=/export/pihole # OpenMediaVault share named 'pihole' would be mounted here
chmod 777 $piholeShare/pihole/gravity.db
# chmod 777 -R $piholeShare # Recursively set all files/folders with read/write permissions for everyone
# chown nobody:www-data -R $piholeShare # Set object owner as 'nobody' as it's the account used to masquerade the NFS service

# workaround option (b) - Manual
# Enter the running container
containerName=pihole-5b68f98875-p7wgl
kubectl exec --stdin --tty $containerName -- /bin/bash

root@pihole:/# ls -la /etc/pihole/gravity.db
-rwxrwxrwx 1 pihole pihole 164777984 Feb  6 14:30 /etc/pihole/gravity.db
root@pihole:/# id pihole
uid=999(pihole) gid=999(pihole) groups=999(pihole)
root@pihole:/# chmod 777 /etc/pihole/gravity.db

# workaround option (c) - Automatic
Update: a better fix for this issue is to add a lifecycle hook to the pod spec in the deployment plan so that a command is executed after the pod has been created. Note that the sleep timer delays execution of this command so that it runs after the processes specified by the container 'entrypoint'; this is a workaround for the asynchronous execution of entrypoint and lifecycle processes.
      containers:
        lifecycle:
          postStart:
            exec:
              command: ["/bin/sh", "-c", "sleep 30 && chown pihole:www-data /etc/pihole/gravity.db"]
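An alternative (not used in the deployment above) is an initContainer that fixes ownership on the NFS-backed path before the main container starts. A minimal sketch, assuming the volume name pihole-volume and claim from Step 8, uid 999 for 'pihole' (as shown above), and gid 33 for 'www-data' in the Debian-based image:

      initContainers:
      - name: fix-permissions
        image: busybox:1.36
        # chown the NFS-backed Pihole directory before the main container starts
        command: ["sh", "-c", "chown -R 999:33 /etc/pihole || true"]
        volumeMounts:
        - name: pihole-volume
          mountPath: /etc/pihole
          subPath: pihole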
# How to test name resolution toward the new Pihole DNS server
user1@workstation:~$ dig @192.168.1.50 google.com

; <<>> DiG 9.16.1-Ubuntu <<>> @192.168.1.50 google.com
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 12494
;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;google.com.			IN	A

;; ANSWER SECTION:
google.com.		246	IN	A	142.250.68.110

;; Query time: 4 msec
;; SERVER: 192.168.1.50#53(192.168.1.50)
;; WHEN: Mon Feb 01 23:49:40 PST 2021
;; MSG SIZE  rcvd: 55

Issue: A DNS record pointing to the Pihole IP address resolves as 'Manually Blacklisted by Wildcard'

Resolution:
- A Pihole A-record (e.g. pihole.kimconnect.com) pointing directly to the IP address of Pihole isn't allowed; Pihole will instead display a block page prompting the user to continue to the admin console. Hence, users could browse directly to the admin panel URL (e.g. http://pihole.kimconnect.com/admin).
- Alternatively, a hard-coded edit to the file /etc/lighttpd/lighttpd.conf with this content would suffice:
  url.redirect = ("^/$" => "/admin")

Alternative Pihole Configuration – Retain Client Source IPs

Currently, MetalLB cannot combine TCP and UDP in the same service. Hence, we had to create 2 services as detailed previously. Those 2 services share the same loadBalancerIP by invoking these properties: metallb.universe.tf/allow-shared-ip: $appName and externalTrafficPolicy: Local. Note that the allow-shared-ip value ($appName) must match between the 2 services, and both services must select the same pods (the deployment's app selector), for this to work. Otherwise, one of the services will not be granted an external (shared) IP.

appName=pihole
piholeWebIp=192.168.1.51
piholeDnsIp=192.168.1.50

cat > $appName-svc-udp.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: $appName-svc-udp
  annotations:
    #metallb.universe.tf/address-pool: default
    metallb.universe.tf/allow-shared-ip: $appName
  labels:
    app: $appName
  namespace: default
spec:
  type: LoadBalancer
  loadBalancerIP: $piholeDnsIp
  externalTrafficPolicy: Local # preserves the client source IP
  ports:
  - port: 53
    protocol: UDP
    targetPort: dns-udp
    name: dns-udp
  selector:
    app: $appName
  sessionAffinity: None
EOF
kubectl apply -f $appName-svc-udp.yaml

cat > $appName-svc-tcp.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: $appName-svc-tcp
  annotations:
    #metallb.universe.tf/address-pool: default
    metallb.universe.tf/allow-shared-ip: $appName
spec:
  type: LoadBalancer
  loadBalancerIP: $piholeDnsIp
  sessionAffinity: ClientIP # This is necessary for multi-replica deployments
  externalTrafficPolicy: Local
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
    - port: 53 # DNS responses larger than 512 bytes (zone transfers, DNSSEC, large TXT records) fall back to TCP, so 53/TCP is required as well
      targetPort: dns-tcp
      protocol: TCP
      name: dns-tcp      
  selector:
    app: $appName
EOF
kubectl apply -f $appName-svc-tcp.yaml
# Sample Result:
brucelee@controller:~$ kubectl get service
NAME             TYPE           CLUSTER-IP       EXTERNAL-IP      PORT(S)                     AGE
kubernetes       ClusterIP      10.96.0.1        <none>           443/TCP                     19d
pihole-svc-tcp   LoadBalancer   10.109.87.95     192.168.1.50     80:32607/TCP,53:32411/TCP   16h
pihole-svc-udp   LoadBalancer   10.109.175.203   192.168.1.50     53:31293/UDP                16h