Step 1: Preparing All Nodes
# Installing prerequisites

# Update before installation
sudo su
yum -y update

# Pre-emptively resolve the following dependency problem:
#Error:
# Problem: package docker-ce-3:19.03.11-3.el7.x86_64 requires containerd.io >= 1.2.2-3, but none of the providers can be installed
#  - cannot install the best candidate for the job
#  - package containerd.io-1.2.10-3.2.el7.x86_64 is excluded
#  - package containerd.io-1.2.13-3.1.el7.x86_64 is excluded
#  - package containerd.io-1.2.13-3.2.el7.x86_64 is excluded
#  - package containerd.io-1.2.2-3.3.el7.x86_64 is excluded
#  - package containerd.io-1.2.2-3.el7.x86_64 is excluded
#  - package containerd.io-1.2.4-3.1.el7.x86_64 is excluded
#  - package containerd.io-1.2.5-3.1.el7.x86_64 is excluded
#  - package containerd.io-1.2.6-3.3.el7.x86_64 is excluded
#(try to add '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages)
# 
# Red Hat excluded Docker CE from CentOS 8 in favor of Podman (Red Hat's
# replacement for Docker), so it is necessary to work around this gap by
# manually installing Docker's containerd.io dependency.
# Check this URL for the latest version of containerd: https://download.docker.com/linux/centos/7/x86_64/stable/Packages/
containerdFile=https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.13-3.2.el7.x86_64.rpm
dnf install $containerdFile -y
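# Optionally verify that the containerd.io package installed correctly
rpm -q containerd.io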

# Install Docker
dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo
dnf install docker-ce docker-ce-cli -y

# Configure the Docker daemon to use the systemd cgroup driver
systemctl start docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
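# Optionally confirm Docker now reports the systemd cgroup driver; this should
# print "systemd", which avoids the IsDockerSystemdCheck preflight warning shown further below
docker info -f '{{.CgroupDriver}}'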

# Prepare Kubernetes
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubectl kubelet kubeadm
systemctl enable kubelet
systemctl start kubelet
systemctl daemon-reload
systemctl restart kubelet
# Note: kubelet will crash-loop until 'kubeadm init' or 'kubeadm join' supplies
# its configuration; this is expected at this stage.

# Turn off swap
swapoff -a
sed '/^#/! {/swap/ s/^/#/}' -i /etc/fstab
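# Optionally verify that swap is now disabled (no output means no active swap)
swapon --show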

# Extraneous information:
# These errors would occur if Docker isn't running
#error execution phase preflight: [preflight] Some fatal errors occurred:
#        [ERROR CRI]: container runtime is not running: output: Client:
# Debug Mode: false
#Server:
#ERROR: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
#errors pretty printing info
#, error: exit status 1
#        [ERROR Service-Docker]: docker service is not active, please run 'systemctl start docker.service'
#        [ERROR IsDockerSystemdCheck]: cannot execute 'docker info -f {{.CgroupDriver}}': exit status 2
#        [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
#       [ERROR Swap]: running with swap on is not supported. Please disable swap
#        [ERROR SystemVerification]: error verifying Docker info: "Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
# Sample preflight output with warnings about firewalld and the cgroup driver, plus a fatal swap error
#W0607 13:10:49.101317   15446 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
#[init] Using Kubernetes version: v1.18.3
#[preflight] Running pre-flight checks
#        [WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
#        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/production-environment/container-runtimes/
#error execution phase preflight: [preflight] Some fatal errors occurred:
#        [ERROR Swap]: running with swap on is not supported. Please disable swap
#[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
#To see the stack trace of this error execute with --v=5 or higher
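
# The FileContent--proc-sys-net-bridge-bridge-nf-call-iptables error above can be
# pre-empted by loading the br_netfilter module and enabling the bridge sysctls,
# a common kubeadm preparation step:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system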

# Open firewall ports
firewall-cmd --permanent --add-port=6443/tcp # Kubernetes API server
firewall-cmd --permanent --add-port=2379-2380/tcp # etcd server client API
firewall-cmd --permanent --add-port=10250/tcp # Kubelet API
firewall-cmd --permanent --add-port=10251/tcp # kube-scheduler
firewall-cmd --permanent --add-port=10252/tcp # kube-controller-manager
firewall-cmd --permanent --add-port=10255/tcp # Kubelet read-only API
firewall-cmd --reload
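# Optionally confirm the permanent rules took effect
firewall-cmd --list-ports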

# Disable SELinux by setting it to permissive or disabled mode.
# This is the easy way to let Kubernetes resolve DNS internally;
# the alternative is to configure SELinux manually, which is out of scope for this document.
# setenforce 0 # set to permissive or run command shown below to disable it
sed -i --follow-symlinks 's/SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux
reboot

# OPTIONAL: reverse changes to selinux, as part of an uninstall process
setenforce 1
sed -i 's/^SELINUX=.*/SELINUX=enforcing/' /etc/selinux/config
Step 2: Initialize the Cluster

Part A: Master Node

# Initialize the Master node
# Generate networking variables ('route' and 'ifconfig' require the net-tools package on CentOS 8)
defaultInterface=$(route | grep '^default' | grep -o '[^ ]*$')
thisIp=$(ifconfig $defaultInterface | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p')
# Set the private pod network for Kubernetes (it must not overlap an existing subnet)
k8network='192.168.1.0/24'
# Initialize the master node with the given variables
kubeadm init --apiserver-advertise-address=$thisIp --pod-network-cidr=$k8network
# Sample output of a successful init:
#Your Kubernetes control-plane has initialized successfully!
#To start using your cluster, you need to run the following as a regular user:
#
#  mkdir -p $HOME/.kube
#  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
#  sudo chown $(id -u):$(id -g) $HOME/.kube/config
#
#You should now deploy a pod network to the cluster.
#Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
#  https://kubernetes.io/docs/concepts/cluster-administration/addons/
#
#Then you can join any number of worker nodes by running the following on each as root:
#
#kubeadm join 5.5.5.5:6443 --token nzr0zi.6nio7trfm6uewx4g \
#    --discovery-token-ca-cert-hash sha256:906bc45150c123fb82816ef0572fc04f39a8160dc3e464a1c6f52e7d679d3811

# OPTIONAL: How to reset the previous setup as part of an uninstall process
[root@localhost ~]# kubeadm reset
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y

# Configure kubectl for the current user (as instructed by kubeadm above)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
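
# Optionally verify that kubectl can reach the API server
kubectl cluster-info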

# Check the status of nodes
[root@mango ~]# kubectl get nodes
NAME                   STATUS     ROLES    AGE   VERSION
chili.kimconnect.com   NotReady   <none>   17m   v1.18.3
mango                  NotReady   master   19m   v1.18.3

# Install Calico network plugin
kubectl apply -f https://calico-v3-25.netlify.app/archive/v3.25/manifests/calico.yaml

# Validate the nodes are now 'ready'
[root@mango ~]# kubectl get nodes
NAME                   STATUS   ROLES    AGE   VERSION
chili.kimconnect.com   Ready    <none>   35m   v1.18.3
mango                  Ready    master   36m   v1.18.3

# Monitor the statuses of all pods in real time
watch kubectl get pods --all-namespaces

Part B: Worker Nodes

# Join each worker node to the master with these commands
sudo firewall-cmd --zone=public --add-port=6443/tcp --permanent
sudo firewall-cmd --zone=public --add-port=10250/tcp --permanent
sudo firewall-cmd --reload
swapoff -a
sed '/^#/! {/swap/ s/^/#/}' -i /etc/fstab
masternodeIp=5.5.5.5
token=mcy7ou.cocacola
hash=sha256:hahahihihuhu
kubeadm join $masternodeIp:6443 --token $token --discovery-token-ca-cert-hash $hash
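
# Note: join tokens expire after 24 hours by default. If the token has expired,
# generate a fresh join command on the master node:
#   kubeadm token create --print-join-command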

Part C: Validation

# Show that all nodes are operational
[root@mango ~]# kubectl get nodes -o wide
NAME                   STATUS   ROLES    AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
chili.kimconnect.com   Ready    <none>   63m   v1.18.3   500.500.500.5   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.11
mango                  Ready    master   64m   v1.18.3   500.500.500.6   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.11

[root@mango ~]# watch kubectl get pods --all-namespaces
Every 2.0s: kubectl get pods --all-namespaces                              web02.kimconnect.com: Sun Jun  7 16:56:51 2020

NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-76d4774d89-dhk7r   1/1     Running   0          37m
kube-system   calico-node-qpkz4                          0/1     Running   0          37m
kube-system   calico-node-xh58j                          0/1     Running   0          37m
kube-system   coredns-66bff467f8-vvqf9                   1/1     Running   0          71m
kube-system   coredns-66bff467f8-wnrlt                   1/1     Running   0          71m
kube-system   etcd-web02                                 1/1     Running   0          72m
kube-system   kube-apiserver-web02                       1/1     Running   0          72m
kube-system   kube-controller-manager-web02              1/1     Running   0          72m
kube-system   kube-proxy-2kkcc                           1/1     Running   0          71m
kube-system   kube-proxy-s9858                           1/1     Running   0          71m
kube-system   kube-scheduler-web02                       1/1     Running   0          72m

# Press Ctrl+C to exit

Extraneous:

# Find kube-related shell variables (e.g. KUBECONFIG)
set | grep -i kube

# List all resources (pods, services, deployments, etc.) in the current namespace
kubectl get all

# Set a short alias for kubectl
alias k=kubectl

cat <<EOF > ~/ubuntu-xrdp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ubuntu-xrdp
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      bb: ubuntu-xrdp
  template:
    metadata:
      labels:
        bb: ubuntu-xrdp
    spec:
      containers:
      - name: ubuntu-xrdp
        image: danielguerra/ubuntu-xrdp
---
apiVersion: v1
kind: Service
metadata:
  name: ubuntu-xrdp-entrypoint
  namespace: default
spec:
  type: NodePort
  selector:
    bb: ubuntu-xrdp
  ports:
  - port: 3389
    targetPort: 3389
    nodePort: 30000
EOF

# Apply the deployment and service manifest
kubectl apply -f ./ubuntu-xrdp.yaml
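
# Optionally verify the deployment and its NodePort service as defined in the yaml above
kubectl get deployment ubuntu-xrdp
kubectl get service ubuntu-xrdp-entrypoint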

# Alternatively, expose an existing deployment to the outside world as a
# NodePort service (this example assumes a deployment named tomcat-deployment)
kubectl expose deployment tomcat-deployment --type=NodePort

# Find the service URL (the minikube command below applies to minikube clusters only)
minikube service tomcat-deployment --url

# Check the service
curl https://127.0.0.1:portnumber

Password-less Authentication
# Generate key
ssh-keygen 
# Display the public key so it can be copied to other nodes
cat ~/.ssh/id_rsa.pub
# Paste the key into each remote machine's authorized_keys file
vim ~/.ssh/authorized_keys
# Validate password-less login
ssh $masternode
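
# Alternatively, ssh-copy-id automates the copy-and-paste steps above,
# assuming password authentication is still enabled on the target node
ssh-copy-id root@$masternode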

# Set the hosts file on every machine to bypass DNS
sudo vim /etc/hosts
# Enter the IPs and hostnames of all nodes in the cluster
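# Example /etc/hosts entries (the IPs below are placeholders; substitute your own):
cat <<EOF >> /etc/hosts
5.5.5.5   mango
5.5.5.6   chili.kimconnect.com
EOF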