Step 1: Scan all SCSI host controllers

sudo su # run as root
scsiPath=/sys/class/scsi_host # specify the scsi host path
for host in "$scsiPath"/host*; do # loop through all host controllers
    echo "${host##*/}" # print the controller name (e.g. host0)
    echo "- - -" > "$host/scan" # trigger a scan; '- - -' is a wildcard for channel, target, and LUN
done
lsblk # list all block devices
# fdisk -l # alternatively, use the partition table tool to list all disks and their partitions
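
# To rescan a single controller, or to confirm the kernel actually registered
# the new device, something like the following should work (host0 is an
# assumption - adjust to your system):
echo "- - -" > /sys/class/scsi_host/host0/scan # rescan only the first controller
dmesg | grep -i scsi | tail # check the kernel log for the newly attached disk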

# Example output: lsblk
root@linux02:/sys/class/scsi_host# lsblk
NAME                      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
loop0                       7:0    0 67.8M  1 loop /snap/lxd/18150
loop1                       7:1    0   55M  1 loop /snap/core18/1880
loop2                       7:2    0 55.4M  1 loop /snap/core18/1944
loop3                       7:3    0 31.1M  1 loop /snap/snapd/10707
loop4                       7:4    0 69.8M  1 loop /snap/lxd/19032
loop5                       7:5    0 29.9M  1 loop /snap/snapd/8542
sda                         8:0    0  100G  0 disk 
├─sda1                      8:1    0    1M  0 part 
├─sda2                      8:2    0    1G  0 part /boot
└─sda3                      8:3    0   99G  0 part 
  └─ubuntu--vg-ubuntu--lv 253:0    0 49.5G  0 lvm  /
sdb                         8:16   0  250G  0 disk

# Example output: fdisk -l
root@linux02:/sys/class/scsi_host# fdisk -l
Disk /dev/loop0: 67.77 MiB, 71041024 bytes, 138752 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/loop1: 54.98 MiB, 57626624 bytes, 112552 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/loop2: 55.39 MiB, 58073088 bytes, 113424 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/loop3: 31.09 MiB, 32600064 bytes, 63672 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/loop4: 69.78 MiB, 73150464 bytes, 142872 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/loop5: 29.88 MiB, 31334400 bytes, 61200 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/sda: 100 GiB, 107374182400 bytes, 209715200 sectors
Disk model: Virtual disk    
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: 0A6734B7-0284-442B-9193-D3E4AA880673

Device       Start       End   Sectors Size Type
/dev/sda1     2048      4095      2048   1M BIOS boot
/dev/sda2     4096   2101247   2097152   1G Linux filesystem
/dev/sda3  2101248 209713151 207611904  99G Linux filesystem

Disk /dev/mapper/ubuntu--vg-ubuntu--lv: 49.51 GiB, 53150220288 bytes, 103809024 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/sdb: 250 GiB, 268435456000 bytes, 524288000 sectors
Disk model: Virtual disk    
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Step 2: Add the New Disk as an LVM Logical Volume

Part 2a – create a logical volume (the LVM equivalent of a partition) on a physical volume (the underlying device or disk)

# Define new disk
newDisk=/dev/sdb

# Zero the first 512 bytes to wipe any existing MBR and its partition table
dd if=/dev/zero of=$newDisk bs=512 count=1

root@linux02:/sys/class/scsi_host# dd if=/dev/zero of=$newDisk bs=512 count=1
1+0 records in
1+0 records out
512 bytes copied, 0.000172954 s, 3.0 MB/s
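
# Note: zeroing the first sector only clears an MBR; GPT disks keep a backup
# header at the end of the disk. If the disk may carry an old GPT label or
# filesystem signatures, wipefs is a more thorough way to clean it:
wipefs -a $newDisk # erase all known filesystem, RAID, and partition-table signatures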

# Initialize the device as an LVM2 physical volume
pvcreate $newDisk

root@linux02:/sys/class/scsi_host# pvcreate $newDisk
  Physical volume "/dev/sdb" successfully created.

# View existing physical volumes
root@linux02:/sys/class/scsi_host# pvscan -v
  PV /dev/sda3   VG ubuntu-vg       lvm2 [<99.00 GiB / <49.50 GiB free]
  PV /dev/sdb                       lvm2 [250.00 GiB]
  Total: 2 [<349.00 GiB] / in use: 1 [<99.00 GiB] / in no VG: 1 [250.00 GiB]

# Check for existing volume groups
root@linux02:/sys/class/scsi_host# vgscan
  Found volume group "ubuntu-vg" using metadata type lvm2
root@linux02:/sys/class/scsi_host# vgdisplay
  --- Volume group ---
  VG Name               ubuntu-vg
  System ID             
  Format                lvm2
  Metadata Areas        1
  Metadata Sequence No  2
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                1
  Open LV               1
  Max PV                0
  Cur PV                1
  Act PV                1
  VG Size               <99.00 GiB
  PE Size               4.00 MiB
  Total PE              25343
  Alloc PE / Size       12672 / 49.50 GiB
  Free  PE / Size       12671 / <49.50 GiB
  VG UUID               gOK9jE-s7j6-8YKt-NYb5-GZeC-b0kf-aofXlf
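
# For a more compact view of the same information, the pvs, vgs, and lvs
# summary commands are handy:
pvs # one line per physical volume
vgs # one line per volume group
lvs -a -o +devices # logical volumes plus the devices backing them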

Alternative 1

# Alternative 1a: create a new volume group with the new volume as first member
newDisk=/dev/sdb
newVolumeGroup=data
vgcreate $newVolumeGroup $newDisk

# Alternative 1b: create a new logical volume spanning all free space in the group - no RAID
volumeName=data
newVolumeGroup=data
lvcreate --name $volumeName -l 100%FREE $newVolumeGroup

root@linux02:/sys/class/scsi_host# lvcreate --name $volumeName -l 100%FREE $newVolumeGroup
  Logical volume "data" created.
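
# A quick sanity check of the new logical volume (the path follows from the
# variables above):
lvdisplay /dev/$newVolumeGroup/$volumeName # show size, UUID, and status of the new volume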

Alternative 2

# Alternative 2a: add new volume into an existing volume group
newDisk=/dev/sdb
existingVolumeGroup=ubuntu-vg
vgextend $existingVolumeGroup $newDisk

# Alternative 2b: create a RAID 10 logical volume
# Note: raid10 with 2 stripes mirrored once (-i 2 -m 1) needs at least 4 physical volumes in the group
size=50G
raidType=raid10
mirrors=1
stripes=2
existingVolumeGroup=ubuntu-vg
volumeName=data
lvcreate --type $raidType -m $mirrors -i $stripes -L $size -n $volumeName $existingVolumeGroup
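
# To confirm the RAID layout after creation, list the segment type and backing
# devices (field names per standard lvs reporting options):
lvs -a -o name,segtype,devices $existingVolumeGroup # segtype should read raid10; devices shows each image's disk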

Part 2b – format the new logical volume

# Format logical volume
volumeGroupName=data
volumeName=data
volumePath=/dev/$volumeGroupName/$volumeName
mkfs.ext4 $volumePath # ext4 is a safe default and handles many small files well (max file size 16 TiB with 4 KiB blocks); XFS suits very large filesystems and large files (enterprise distros commonly support it into the hundreds of TB)

root@linux02:/sys/class/scsi_host# mkfs.ext4 $volumePath
mke2fs 1.45.5 (07-Jan-2020)
Discarding device blocks: done                            
Creating filesystem with 65534976 4k blocks and 16384000 inodes
Filesystem UUID: e771b030-e639-46ca-8341-edd44ea45e32
Superblock backups stored on blocks: 
	32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208, 
	4096000, 7962624, 11239424, 20480000, 23887872
Allocating group tables: done                            
Writing inode tables: done
Creating journal (262144 blocks): done
Writing superblocks and filesystem accounting information: done 
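
# Before mounting, it may be worth confirming the filesystem and recording its
# UUID for a persistent mount later:
blkid $volumePath # prints the filesystem type and the UUID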

Part 2c – mount the new logical volume

# Mount the LV at the /data directory
volumeGroupName=data
volumeName=data
volumePath=/dev/$volumeGroupName/$volumeName
mkdir /$volumeName # create the mount point prior to mounting
mount $volumePath /$volumeName # mount the logical volume at the new directory

# Possible error if mounting to a directory that doesn't yet exist
root@linux02:/sys/class/scsi_host# mount $volumePath /$volumeName/
mount: /data/: mount point does not exist.

# Verify results of the new volume mount
root@linux02:/sys/class/scsi_host# df /$volumeName -h
Filesystem             Size  Used Avail Use% Mounted on
/dev/mapper/data-data  246G   61M  233G   1% /data
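
# The mount above does not survive a reboot. A minimal sketch of a persistent
# mount, using the filesystem UUID that mkfs.ext4 reported earlier (substitute
# your own UUID):
echo 'UUID=e771b030-e639-46ca-8341-edd44ea45e32 /data ext4 defaults 0 2' >> /etc/fstab # register the mount
mount -a # verify the fstab entry parses and mounts cleanly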

How To Undo the Test Volume Group

Case 1: Retain Data and Volume Group While Removing Only One Disk

volumeGroupToEdit=data
deviceToRemove=/dev/sdb
sudo pvmove $deviceToRemove # migrate allocated extents (data) off the device onto the remaining disks
sudo vgreduce $volumeGroupToEdit $deviceToRemove # remove the device from the volume group
sudo pvremove $deviceToRemove # wipe the LVM label from the disk
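
# Afterwards, pvs should no longer list /dev/sdb, while the volume group and
# its data remain intact:
sudo pvs # confirm the device has left the volume group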

Case 2: Remove Volume Group and Disk – DESTROY data!

# Remove mount
mountPoint=/data
sudo umount $mountPoint
sudo rmdir $mountPoint # note the '$' - remove the now-empty mount point

# Check Volume Groups
kim@linux03:~$ sudo vgscan
  Found volume group "data" using metadata type lvm2
  Found volume group "ubuntu-vg" using metadata type lvm2

# Remove Volume Group
volumeGroupToRemove=data
sudo vgremove $volumeGroupToRemove
# Sample output
kim@linux03:~$ sudo vgremove $volumeGroupToRemove
Do you really want to remove volume group "data" containing 1 logical volumes? [y/n]: y
Do you really want to remove and DISCARD active logical volume data/data? [y/n]: y
  Logical volume "data" successfully removed
  Volume group "data" successfully removed

# Check LVM disks
kim@linux03:~$ sudo pvscan -v
  PV /dev/sda3   VG ubuntu-vg       lvm2 [<99.00 GiB / <49.50 GiB free]
  PV /dev/sdb                       lvm2 [250.00 GiB]
  Total: 2 [<349.00 GiB] / in use: 1 [<99.00 GiB] / in no VG: 1 [250.00 GiB]

# Remove a device from a volume group - unnecessary here because the whole volume group was already removed above; shown only to illustrate the commands and their errors
volumeGroupToEdit=data
deviceToRemove=/dev/sdb
sudo pvmove $deviceToRemove # migrate allocated extents (data) off the device onto the remaining disks
sudo vgreduce $volumeGroupToEdit $deviceToRemove # remove the device from the volume group
sudo pvremove $deviceToRemove # wipe the LVM label from the disk

# Output if there's no data to redistribute
# Note: if there's data, it will take time for the command to complete processing
kim@linux03:~$ sudo pvmove $deviceToRemove
[sudo] password for kim: 
  No extents available for allocation.

kim@linux03:~$ sudo vgreduce $volumeGroupToEdit $deviceToRemove
  Volume group "data" not found
  Cannot process volume group data
  Failed to find physical volume "/dev/sdb".

kim@linux03:~$ sudo pvremove $deviceToRemove
  Labels on physical volume "/dev/sdb" successfully wiped.
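
# At this point the disk carries no LVM metadata and can be detached or
# repurposed. A final check (assuming the disk is still /dev/sdb):
lsblk -f /dev/sdb # the FSTYPE column should now be empty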