127 lines
4.3 KiB
Bash
Executable File
127 lines
4.3 KiB
Bash
Executable File
#!/bin/bash
################################################################################
# ZFS_HEALTH_CHECK.SH
# -------------------
# This script checks the ZFS health and reports any problems on stdout
#
# Inspired by https://gist.github.com/petervanderdoes/bd6660302404ed5b094d
#
# Author: Robin Meier - robin@meier.si
################################################################################

# Resolve the directory this script lives in, so the config, log and helper
# paths below work no matter where the script is invoked from.
script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

# Load configuration. allexport exports every variable assigned by the config
# file (e.g. MONITORED_DATASETS) to this shell and any child processes.
set -o allexport
# shellcheck source=/dev/null
source "${script_dir}/config/zfs_health_check"
set +o allexport

# Import logging functionality. The sourced file provides log() and
# log_echo(), which use $logfile and $log_identifier set here.
logfile="${script_dir}/log/zfs_health_check.log"
log_identifier="ZFS"
# shellcheck source=/dev/null
source "${script_dir}/functions/logging.sh"
|
|
|
|
# Tracks whether any check below found an issue; 0 = healthy so far.
problems=0

log "Starting ZFS Health Check"

# Pool Status - scan `zpool status` for any keyword that indicates an
# unhealthy pool, device or dataset. `grep -E` replaces the deprecated
# `egrep` alias.
zpool_status=$(/sbin/zpool status | grep -E -i '(DEGRADED|FAULTED|OFFLINE|UNAVAIL|REMOVED|FAIL|DESTROYED|corrupt|cannot|unrecover)')
if [ "${zpool_status}" ]; then
  log_echo "[ERROR] !!! BAD ZFS HEALTH !!!"
  # Timestamp every line of the full status output (via moreutils' ts) and
  # append it to the log file while also echoing it to stdout.
  /sbin/zpool status | ts "[%Y-%m-%d %H:%M:%S] $log_identifier" | tee -a "$logfile"
  problems=1
fi
|
|
|
|
# Capacity - flag any pool whose used capacity is at or above the threshold.
# The threshold defaults to 50% but can be overridden by setting MAX_CAPACITY
# in the config file (loaded above with allexport).
maxCapacity=${MAX_CAPACITY:-50}
if [ "${problems}" -eq 0 ]; then
  capacity=$(/sbin/zpool list -H -o capacity)
  # ${capacity//%/} strips the trailing '%' from every value; word-splitting
  # on the unquoted expansion then yields one number per pool.
  for line in ${capacity//%/}
  do
    if [ "$line" -ge "$maxCapacity" ]; then
      problems=1
    fi
  done
  if [ "${problems}" -eq 1 ]; then
    log_echo "[ERROR] !!! BAD ZFS CAPACITY !!!"
    /sbin/zpool list -o name,cap,free | ts "[%Y-%m-%d %H:%M:%S] $log_identifier" | tee -a "$logfile"
  fi
fi
|
|
|
|
# Errors - Check the columns for READ, WRITE and CKSUM (checksum) drive errors
# on all volumes and all drives using "zpool status". If any non-zero errors
# are reported an email will be sent out. You should then look to replace the
# faulty drive and run "zpool scrub" on the affected volume after resilvering.
if [ "${problems}" -eq 0 ]; then
  # Device lines (state ONLINE) carry READ/WRITE/CKSUM in fields 3-5; the
  # concatenation is "000" when all three counters are zero.
  # NOTE(review): `grep -v 000` is a substring match, so a count like "1000"
  # would also be filtered out - verify against real zpool output.
  errors=$(/sbin/zpool status | grep ONLINE | grep -v state | awk '{print $3 $4 $5}' | grep -v 000)
  if [ "${errors}" ]; then
    log_echo "[ERROR] !!! ZFS ERRORS FOUND !!!"
    /sbin/zpool status | ts "[%Y-%m-%d %H:%M:%S] $log_identifier" | tee -a "$logfile"
    # Typo fixed: "shoud" -> "should".
    log_echo "[ERROR] You should replace the faulty drive and run \"zpool scrub\" after resilvering"
    problems=1
  fi
fi
|
|
|
|
# Scrub Expired - Check if all volumes have been scrubbed recently. The
# general guide is to scrub volumes on desktop quality drives once a week
# and volumes on enterprise class drives once a month. You can always use
# cron to schedule "zpool scrub" in off hours. We scrub our volumes every
# Sunday morning for example.
#
# Scrubbing traverses all the data in the pool once and verifies all blocks
# can be read. Scrubbing proceeds as fast as the devices allows, though the
# priority of any I/O remains below that of normal calls. This operation
# might negatively impact performance, but the file system will remain
# usable and responsive while scrubbing occurs. To initiate an explicit
# scrub, use the "zpool scrub" command.
#
# scrubExpire is in seconds. 1382400 = 16 days * 24 hours * 3600 seconds.
# (The previously used 8-day window would be 691200.)
scrubExpire=1382400
if [ "${problems}" -eq 0 ]; then
  currentDate=$(date +%s)
  zfsVolumes=$(/sbin/zpool list -H -o name)
  for volume in ${zfsVolumes}
  do
    # A pool that was never scrubbed has no completion date to parse.
    # 'continue' (not 'break') so the remaining pools are still checked.
    if [ "$(/sbin/zpool status "$volume" | grep -E -c "none requested")" -ge 1 ]; then
      log_echo "ERROR: You need to run \"zpool scrub $volume\" before this script can monitor the scrub expiration time."
      continue
    fi
    # Skip pools that are currently scrubbing or resilvering - the status
    # line has no completion date yet.
    if [ "$(/sbin/zpool status "$volume" | grep -E -c "scrub in progress|resilver")" -ge 1 ]; then
      continue
    fi
    # The completion timestamp sits in fields 11-15 of the scrub status
    # line, e.g. "... on Sun Oct  8 09:23:01 2023".
    # NOTE(review): field positions differ between ZFS versions - confirm
    # against the local `zpool status` output format.
    scrubRawDate=$(/sbin/zpool status "$volume" | grep scrub | awk '{print $11" "$12" " $13" " $14" "$15}')
    scrubDate=$(date -d "$scrubRawDate" +%s)
    if [ $((currentDate - scrubDate)) -ge "$scrubExpire" ]; then
      problems=1
      log_echo "[ERROR] Pool: $volume needs scrub!"
    fi
  done
fi
|
|
|
|
# Unmounted datasets - emit a log warning (without flagging a problem) when
# any dataset reports mounted=no.
unmounted_flags=$(/sbin/zfs list -o mounted | grep no)
if [ -n "${unmounted_flags}" ]; then
  log "[WARN] THERE ARE UNMOUNTED DATASETS"
fi
|
|
|
|
# Warnable unmounted datasets - every dataset listed in MONITORED_DATASETS
# (space-separated, from the config file) must be mounted, otherwise the
# check fails.
for dataset in $MONITORED_DATASETS
do
  if ! mount_state=$(/sbin/zfs list "$dataset" -o mounted); then
    # A monitored dataset that cannot even be listed (e.g. it was destroyed
    # or renamed) previously passed silently - treat it as a problem.
    log_echo "[MON] Monitored dataset $dataset could not be listed"
    problems=1
    continue
  fi
  if printf '%s\n' "$mount_state" | grep -q no; then
    log_echo "[MON] Monitored dataset $dataset is not mounted"
    problems=1
  fi
done
|
|
|
|
# Finish - record the overall outcome of this run in the log.
case "${problems}" in
  0) log "ZFS Health Check Successful" ;;
  *) log "ZFS Health Check Found Problems" ;;
esac
|