Step 1: Prepare to configure RAID by checking the system
In the example below, we're using a test Linux machine running Ubuntu 21.04 (the hirsute release shown in the apt output below). It has 4 x 1TB Samsung SSDs: two 860 EVO and two 870 QVO models. The intention of this exercise is to set up a RAID-10 array (a stripe of mirrored pairs). The output shows that sda & sdc are one model, while sdb & sdg are the other. To avoid 'lowest common denominator' issues, we're ensuring that each set of the stripe consists of drives with similar performance.
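As a quick alternative to walking the /dev/disk/by-id tree shown further below, lsblk can print the model and serial of each physical disk directly (an optional shortcut, not part of the original session):
# Optional: list physical disks with their model and serial numbers
lsblk -d -o NAME,MODEL,SERIAL,SIZE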
# Verify whether the system has the software RAID package installed
kimconnect@devlinux02:~$ apt list -a mdadm
Listing... Done
mdadm/hirsute,now 4.1-10ubuntu3 amd64 [installed,automatic]
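If the package were not already present, it could be installed from the standard Ubuntu repositories:
# Install the software RAID tooling if it is missing
sudo apt install -y mdadm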
# Check the partitions
root@devlinux02:/home/kimconnect# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
loop0 7:0 0 68.8M 1 loop /snap/lxd/20037
loop1 7:1 0 55.4M 1 loop /snap/core18/1997
loop2 7:2 0 32.3M 1 loop /snap/snapd/11588
sda 8:0 0 931.5G 0 disk
└─sda1 8:1 0 931.5G 0 part
sdb 8:16 0 931.5G 0 disk
└─sdb1 8:17 0 931.5G 0 part
sdc 8:32 0 931.5G 0 disk
└─sdc1 8:33 0 931.5G 0 part
sdd 8:48 1 29.8G 0 disk
├─sdd1 8:49 1 512M 0 part /boot/efi
├─sdd2 8:50 1 1G 0 part /boot
└─sdd3 8:51 1 28.3G 0 part
└─ubuntu--vg-ubuntu--lv 253:0 0 28.3G 0 lvm /
sdg 8:96 0 931.5G 0 disk
└─sdg1 8:97 0 931.5G 0 part
# Verify disk specs
ls -lF /dev/disk/by-id/
root@devlinux02:/home/kimconnect# ls -lF /dev/disk/by-id/|grep Samsung
lrwxrwxrwx 1 root root 9 Aug 13 00:38 ata-Samsung_SSD_860_EVO_1TB_S3Z8NB0KB46308J -> ../../sdb
lrwxrwxrwx 1 root root 10 Aug 13 00:46 ata-Samsung_SSD_860_EVO_1TB_S3Z8NB0KB46308J-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 9 Aug 13 00:10 ata-Samsung_SSD_860_EVO_1TB_S5B3NDFN912396N -> ../../sdg
lrwxrwxrwx 1 root root 10 Aug 13 00:46 ata-Samsung_SSD_860_EVO_1TB_S5B3NDFN912396N-part1 -> ../../sdg1
lrwxrwxrwx 1 root root 9 Aug 13 00:10 ata-Samsung_SSD_870_QVO_1TB_S5VSNG0NA05357H -> ../../sda
lrwxrwxrwx 1 root root 10 Aug 13 00:46 ata-Samsung_SSD_870_QVO_1TB_S5VSNG0NA05357H-part1 -> ../../sda1
lrwxrwxrwx 1 root root 9 Aug 13 00:38 ata-Samsung_SSD_870_QVO_1TB_S5VSNJ0NC00894D -> ../../sdc
lrwxrwxrwx 1 root root 10 Aug 13 00:46 ata-Samsung_SSD_870_QVO_1TB_S5VSNJ0NC00894D-part1 -> ../../sdc1
lrwxrwxrwx 1 root root 9 Aug 13 00:38 scsi-0ATA_Samsung_SSD_860_S3Z8NB0KB46308J -> ../../sdb
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-0ATA_Samsung_SSD_860_S3Z8NB0KB46308J-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 9 Aug 13 00:10 scsi-0ATA_Samsung_SSD_860_S5B3NDFN912396N -> ../../sdg
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-0ATA_Samsung_SSD_860_S5B3NDFN912396N-part1 -> ../../sdg1
lrwxrwxrwx 1 root root 9 Aug 13 00:10 scsi-0ATA_Samsung_SSD_870_S5VSNG0NA05357H -> ../../sda
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-0ATA_Samsung_SSD_870_S5VSNG0NA05357H-part1 -> ../../sda1
lrwxrwxrwx 1 root root 9 Aug 13 00:38 scsi-0ATA_Samsung_SSD_870_S5VSNJ0NC00894D -> ../../sdc
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-0ATA_Samsung_SSD_870_S5VSNJ0NC00894D-part1 -> ../../sdc1
lrwxrwxrwx 1 root root 9 Aug 13 00:38 scsi-1ATA_Samsung_SSD_860_EVO_1TB_S3Z8NB0KB46308J -> ../../sdb
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-1ATA_Samsung_SSD_860_EVO_1TB_S3Z8NB0KB46308J-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 9 Aug 13 00:10 scsi-1ATA_Samsung_SSD_860_EVO_1TB_S5B3NDFN912396N -> ../../sdg
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-1ATA_Samsung_SSD_860_EVO_1TB_S5B3NDFN912396N-part1 -> ../../sdg1
lrwxrwxrwx 1 root root 9 Aug 13 00:10 scsi-1ATA_Samsung_SSD_870_QVO_1TB_S5VSNG0NA05357H -> ../../sda
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-1ATA_Samsung_SSD_870_QVO_1TB_S5VSNG0NA05357H-part1 -> ../../sda1
lrwxrwxrwx 1 root root 9 Aug 13 00:38 scsi-1ATA_Samsung_SSD_870_QVO_1TB_S5VSNJ0NC00894D -> ../../sdc
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-1ATA_Samsung_SSD_870_QVO_1TB_S5VSNJ0NC00894D-part1 -> ../../sdc1
lrwxrwxrwx 1 root root 9 Aug 13 00:38 scsi-SATA_Samsung_SSD_860_S3Z8NB0KB46308J -> ../../sdb
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-SATA_Samsung_SSD_860_S3Z8NB0KB46308J-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 9 Aug 13 00:10 scsi-SATA_Samsung_SSD_860_S5B3NDFN912396N -> ../../sdg
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-SATA_Samsung_SSD_860_S5B3NDFN912396N-part1 -> ../../sdg1
lrwxrwxrwx 1 root root 9 Aug 13 00:10 scsi-SATA_Samsung_SSD_870_S5VSNG0NA05357H -> ../../sda
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-SATA_Samsung_SSD_870_S5VSNG0NA05357H-part1 -> ../../sda1
lrwxrwxrwx 1 root root 9 Aug 13 00:38 scsi-SATA_Samsung_SSD_870_S5VSNJ0NC00894D -> ../../sdc
lrwxrwxrwx 1 root root 10 Aug 13 00:46 scsi-SATA_Samsung_SSD_870_S5VSNJ0NC00894D-part1 -> ../../sdc1
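Optionally, drive identity and health can be spot-checked before building the array; this assumes the smartmontools package is installed (sda is used here only as an example member):
# Optional: confirm model, firmware, and SMART health of a member disk
sudo apt install -y smartmontools
smartctl -i /dev/sda      # identity: model, serial, firmware
smartctl -H /dev/sda      # overall SMART health assessment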
Step 2: Set up RAID
# Initialize disks as RAID capable
diskArray=(a b c g)
partitionType=msdos
fileSystemType=ext4
for item in "${diskArray[@]}"
do
  device=/dev/sd$item
  if grep -q "$device" /proc/mounts
  then
    echo "Disk $device is currently mounted. Skipping it..."
  else
    echo "Configuring $device"
    parted -a optimal -s "$device" mklabel $partitionType                   # write a new (empty) partition table
    parted -a optimal -s "$device" mkpart primary $fileSystemType 0% 100%   # one partition spanning the disk
    parted -a optimal -s "$device" set 1 raid on                            # flag partition 1 as a RAID member
    parted -a optimal -s "$device" print                                    # display the resulting partition table
  fi
done
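If any of these disks were previously members of another array (see the warning in the next block), the stale metadata can be cleared first; a minimal sketch:
# Optional: clear leftover RAID superblocks from prior arrays (this destroys any existing RAID metadata on these partitions)
for item in a b c g
do
  mdadm --zero-superblock /dev/sd${item}1
done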
# Create the RAID array
disksCount=4
diskList=[abcg]      # shell glob: /dev/sd${diskList}1 expands to /dev/sda1 /dev/sdb1 /dev/sdc1 /dev/sdg1
raidMount=/dev/md0
raidLevel=10         # options are: linear, raid0, 0, stripe, raid1, 1, mirror, raid4, 4, raid5, 5, raid6, 6, raid10, 10, multipath
# Warning: this command will destroy data on disks that have previously been members of other RAID arrays
# yes | mdadm --create $raidMount --level=$raidLevel --raid-devices=$disksCount /dev/sd${diskList}1   # 'yes' pre-answers the confirmation prompt
mdadm --create $raidMount --level=$raidLevel --raid-devices=$disksCount /dev/sd${diskList}1
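The initial sync of 1TB members can take a while; its progress can be watched with:
# Monitor the initial resync progress
cat /proc/mdstat
# or refresh continuously:
watch cat /proc/mdstat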
Optional: Troubleshooting
# How to remove disks from the RAID array (a member must be marked as failed before it can be removed)
raidMount=/dev/md0
diskArray=(a b c g)
for item in "${diskArray[@]}"
do
  device=/dev/sd${item}1      # the array members are the partitions, not the whole disks
  mdadm $raidMount --fail $device --remove $device
done
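Conversely, a member that was failed or removed can be added back to the array; a brief sketch, using /dev/sda1 only as an example partition:
# How to re-add a previously removed member partition
raidMount=/dev/md0
mdadm $raidMount -a /dev/sda1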
# How to stop the RAID array (as a prerequisite to rebuilding or re-configuring)
raidMount=/dev/md0
mdadm -S $raidMount
root@devlinux02:/home/kimconnect# mdadm -S /dev/md0
mdadm: stopped /dev/md0
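A stopped array can be brought back without recreating it:
# How to re-assemble a stopped RAID array
mdadm --assemble /dev/md0 /dev/sd[abcg]1
# or let mdadm discover it from the superblocks / mdadm.conf:
mdadm --assemble --scan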
# Verify the new RAID array
root@devlinux02:/home/kimconnect# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
loop0 7:0 0 55.4M 1 loop /snap/core18/1997
loop1 7:1 0 61.8M 1 loop /snap/core20/1081
loop2 7:2 0 55.4M 1 loop /snap/core18/2128
loop3 7:3 0 68.3M 1 loop /snap/lxd/21260
loop4 7:4 0 68.8M 1 loop /snap/lxd/20037
loop5 7:5 0 32.3M 1 loop /snap/snapd/12704
loop6 7:6 0 32.3M 1 loop /snap/snapd/11588
sda 8:0 0 931.5G 0 disk
└─sda1 8:1 0 931.5G 0 part
└─md0 9:0 0 1.8T 0 raid10
sdb 8:16 0 931.5G 0 disk
└─sdb1 8:17 0 931.5G 0 part
└─md0 9:0 0 1.8T 0 raid10
sdc 8:32 0 931.5G 0 disk
└─sdc1 8:33 0 931.5G 0 part
└─md0 9:0 0 1.8T 0 raid10
sdd 8:48 1 29.8G 0 disk
├─sdd1 8:49 1 512M 0 part /boot/efi
├─sdd2 8:50 1 1G 0 part /boot
└─sdd3 8:51 1 28.3G 0 part
└─ubuntu--vg-ubuntu--lv 253:0 0 28.3G 0 lvm /
sdg 8:96 0 931.5G 0 disk
└─sdg1 8:97 0 931.5G 0 part
└─md0 9:0 0 1.8T 0 raid10
# Show RAID status
root@devlinux02:/home/kimconnect# mdadm --detail /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Fri Aug 13 00:46:13 2021
Raid Level : raid10
Array Size : 1953257472 (1862.77 GiB 2000.14 GB)
Used Dev Size : 976628736 (931.39 GiB 1000.07 GB)
Raid Devices : 4
Total Devices : 4
Persistence : Superblock is persistent
Intent Bitmap : Internal
Update Time : Fri Aug 13 01:05:18 2021
State : clean, resyncing
Active Devices : 4
Working Devices : 4
Failed Devices : 0
Spare Devices : 0
Layout : near=2
Chunk Size : 512K
Consistency Policy : bitmap
Resync Status : 11% complete
Name : devlinux02:0 (local to host devlinux02)
UUID : 3287cfe9:7a213a3f:381214bb:05564dd4
Events : 200
Number Major Minor RaidDevice State
0 8 1 0 active sync set-A /dev/sda1
1 8 17 1 active sync set-B /dev/sdb1
2 8 33 2 active sync set-A /dev/sdc1
3 8 97 3 active sync set-B /dev/sdg1
Step 3: Create a file system on the new RAID array
# Create a file system (volume) on the RAID array
mkfs.ext4 /dev/md0
root@devlinux02:/home/kimconnect# mkfs.ext4 /dev/md0
mke2fs 1.45.7 (28-Jan-2021)
Discarding device blocks: done
Creating filesystem with 488314368 4k blocks and 122085376 inodes
Filesystem UUID: 29e6572f-a9f1-486a-85a5-874b7bf1ff9d
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000, 7962624, 11239424, 20480000, 23887872, 71663616, 78675968,
102400000, 214990848
Allocating group tables: done
Writing inode tables: done
Creating journal (262144 blocks): done
Writing superblocks and filesystem accounting information: done
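Optionally, on a dedicated data volume the default 5% of blocks reserved for root can be reduced to reclaim usable space:
# Optional: lower the reserved-block percentage on the new file system
tune2fs -m 1 /dev/md0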
# Mount the volume
mount=/nfs-share
mkdir $mount
raidMount=/dev/md0
mount $raidMount $mount
# Check the mount
df -hT -P $mount
root@devlinux02:/home/kimconnect# df -hT -P $mount
Filesystem Type Size Used Avail Use% Mounted on
/dev/md0 ext4 1.8T 77M 1.7T 1% /mnt/raid10
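A quick, rough write test can confirm that the new volume is usable (the test file name is arbitrary):
# Optional: rough sequential write test on the mounted volume
dd if=/dev/zero of=$mount/testfile bs=1M count=1024 oflag=direct
rm $mount/testfile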
# Check the running RAID config
root@devlinux02:/home/kimconnect# mdadm --detail --scan
ARRAY /dev/md0 metadata=1.2 name=devlinux02:0 UUID=3287cfe9:7a213a3f:381214bb:05564dd4
# Read the contents of the persistent RAID array config
cat /etc/mdadm/mdadm.conf
# Append the running config to the persistent config file
mdadm --detail --scan >> /etc/mdadm/mdadm.conf
# Update the initramfs so the array is assembled at boot
update-initramfs -u
# Search for the new RAID device to retrieve its UUID
root@devlinux02:~$ ls -la /dev/disk/by-uuid/
total 0
drwxr-xr-x 2 root root 120 Aug 13 03:58 .
drwxr-xr-x 7 root root 140 Aug 13 03:58 ..
lrwxrwxrwx 1 root root 9 Aug 13 03:58 29e6572f-a9f1-486a-85a5-874b7bf1ff9d -> ../../md0
lrwxrwxrwx 1 root root 10 Aug 13 03:58 322fa7a9-da0a-4d01-9b97-1d4878852f07 -> ../../dm-0
lrwxrwxrwx 1 root root 10 Aug 13 03:58 73C5-267E -> ../../sdd1
lrwxrwxrwx 1 root root 10 Aug 13 03:58 e2d8ca2f-df9b-4b6c-af98-d66e3723e459 -> ../../sdd2
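Alternatively, blkid reports the same UUID directly from the device:
# Alternative: query the UUID of the RAID device directly
blkid /dev/md0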
# Enable the mount to persist across reboots
mount=/nfs-share
fileSystemType=ext4
uuid=29e6572f-a9f1-486a-85a5-874b7bf1ff9d
echo "/dev/disk/by-uuid/$uuid $mount $fileSystemType defaults 0 1" >> /etc/fstab
The illustration above uses the ext4 file system rather than btrfs, xfs, zfs, etc. At the time of this writing, ext4 is the default for most Ubuntu machines. It generally yields faster transfer speeds than btrfs, with the downside of lacking checksums, snapshots, and other modern file system features. Still, ext4 is known for its stability, while some of the alternatives are considered closer to the bleeding edge.
Finally, the last step of configuring a file system on a RAID array is to reboot the machine and verify that the newly mounted volume persists across restarts.