From cc81a4f1a8387e6991eb447383cae05220a65d15 Mon Sep 17 00:00:00 2001
From: "Alan J. Pippin"
Date: Thu, 11 Oct 2012 21:26:25 -0600
Subject: [PATCH] Major changes from nexenta versions to support running under
 ubuntu. Added ability to replicate from local pool to local backup pool.
 Added new option to allow import/export of backup pool prior to/after
 replication.
---
 zfs-autosnap          |   2 +-
 zfs-replicate         | 107 ++++++++++++++++++++++--------------------
 zfs-replicate-all     |  39 +++++++++++----
 zfs-restore           |  20 +++++---
 zfs-restore-all       |   9 ++--
 zfs-rm-all-snaps      |  34 ++++++++++++++
 zfs-scripts.conf.dist |  36 ++++++++------
 zfs-scrub             |   6 +--
 8 files changed, 164 insertions(+), 89 deletions(-)
 create mode 100755 zfs-rm-all-snaps

diff --git a/zfs-autosnap b/zfs-autosnap
index da650f5..09e1a62 100755
--- a/zfs-autosnap
+++ b/zfs-autosnap
@@ -79,7 +79,7 @@ fi
 # Check to see if this zfs filesystem has a scrub being performed on it now.
 # If it does, we cannot perform any snapshot create or destroy operations.
 if [ -z "$SNAP_UNDER_TEST" ]; then
-  zpool status $pool | grep scrub: | grep "in progress" > /dev/null 2>&1
+  zpool status $pool | grep scan: | grep "in progress" > /dev/null 2>&1
   if [ $? == 0 ]; then
     echo "-W- The zfs pool '$pool' is currently being scrubbed. Skipping all snapshot operations."
     exit 0
diff --git a/zfs-replicate b/zfs-replicate
index 8b53d1f..174d6dd 100755
--- a/zfs-replicate
+++ b/zfs-replicate
@@ -27,26 +27,24 @@ cleanup() {
   if [[ -e "$remote_list" ]]; then
     rm -f $remote_list
   fi
-  if [[ -n "$remote" ]]; then
-    ssh $remote ls -d "$lockdir" > /dev/null 2>&1
-    if [[ $? == 0 ]]; then
-      ssh $remote rm -rf "$lockdir"
-    fi
+  $ssh $remote ls -d "$lockdir" > /dev/null 2>&1
+  if [[ $? == 0 ]]; then
+    $ssh $remote rm -rf "$lockdir"
   fi
 }
 fatal_and_exit() {
   echo -e 2>&1 "$1"
   # Destroy the backup markers on the local filesystem if they exist
   if [[ -n "$current_backup_marker" ]]; then
-    zfs list -t snapshot ${local_pool}/${current_backup_marker} > /dev/null 2>&1
+    zfs list -t snapshot ${backup_pool}/${current_backup_marker} > /dev/null 2>&1
     if [ $? == 0 ]; then
-      $zfs destroy ${local_pool}/${current_backup_marker}
+      $zfs destroy ${backup_pool}/${current_backup_marker}
     fi
   fi
   if [[ -n "$previous_backup_marker" ]]; then
-    zfs list -t snapshot ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
+    zfs list -t snapshot ${backup_pool}/${previous_backup_marker} > /dev/null 2>&1
     if [ $? == 0 ]; then
-      $zfs destroy ${local_pool}/${previous_backup_marker}
+      $zfs destroy ${backup_pool}/${previous_backup_marker}
     fi
   fi
   # send email notification
@@ -64,26 +62,32 @@ if [[ -z "$remote" ]] || [[ -z "$remote_fs" ]]; then
   fatal_and_exit "Usage: $0 "
 fi
 
-# Make sure the local pool and local receiving filesystem exist, or print some errors
-zpool list -H "$local_pool" >/dev/null 2>&1
+# check for localhost
+if [[ $remote = "localhost" ]]; then
+  remote=""
+  ssh=""
+fi
+
+# Make sure the local backup pool and local receiving filesystem exist, or print some errors
+zpool list -H "$backup_pool" >/dev/null 2>&1
 if [ $? != 0 ]; then
-  fatal_and_exit "-E- The local pool, '$local_pool' doesn't seem to exist." $mailto
+  fatal_and_exit "-E- The local backup pool, '$backup_pool' doesn't seem to exist." $mailto
 fi
-zfs list "$local_pool/$remote_pool" >/dev/null 2>&1
+zfs list "$backup_pool/$remote_pool" >/dev/null 2>&1
 if [ $? != 0 ]; then
-  echo >&2 "-I- The local filesystem for the remote pool, '$local_pool/$remote_pool' doesn't seem to exist."
-  echo >&2 "    Creating the local filesystem to receive the remote pool into: $local_pool/$remote_pool"
-  $zfs create $local_pool/$remote_pool
+  echo >&2 "-I- The local filesystem for the remote pool, '$backup_pool/$remote_pool' doesn't seem to exist."
+  echo >&2 "    Creating the local filesystem to receive the remote pool into: $backup_pool/$remote_pool"
+  $zfs create $backup_pool/$remote_pool
   if [ $? != 0 ]; then
     fatal_and_exit "-E- remote $zfs create command failed" $mailto
   fi
 fi
 
-# Obtain the zpool guid for the local pool
-local_pool_guid=`zpool get guid $local_pool 2>&1 | grep $local_pool | awk '{ print $3 }'`
-zpool get guid $local_pool > /dev/null 2>&1
+# Obtain the zpool guid for the local backup pool
+backup_pool_guid=`zpool get guid $backup_pool 2>&1 | grep $backup_pool | awk '{ print $3 }'`
+zpool get guid $backup_pool > /dev/null 2>&1
 if [ $? != 0 ]; then
-  fatal_and_exit "-E- Unable to extract the guid for the local pool: $local_pool" $mailto
+  fatal_and_exit "-E- Unable to extract the guid for the local backup pool: $backup_pool" $mailto
 fi
 
 # Turn on shell verbosity
@@ -95,7 +99,7 @@ maxsleeptime=60
 maxattempts=500
 attempts=0
 while true; do
-  ssh $remote mkdir "$lockdir" >/dev/null 2>&1
+  $ssh $remote mkdir "$lockdir" >/dev/null 2>&1
   if [ $? != 0 ]; then
     # Another zfs admin tool is running.
    # Wait a random amount of time and try again
@@ -109,18 +113,18 @@ while true; do
   if [[ $attempts -gt $maxattempts ]]; then
     # We've exceeded our maximum while loop count
     echo "-E- The zfs filesystem has been locked down. Skipping replicate operation."
-    fail_msg=`ssh $remote ls -ld $lockdir 2>&1`
+    fail_msg=`$ssh $remote ls -ld $lockdir 2>&1`
     fatal_and_exit "zfs-replicate-all unable to obtain zfs admin lock:\n$fail_msg" $mailto
   fi
 done
 
 # Setup our backup marker names
-current_backup_marker=${remote_fs}@current-backup-${local_pool_guid}
-previous_backup_marker=${remote_fs}@previous-backup-${local_pool_guid}
+current_backup_marker=${remote_fs}@current-backup-${backup_pool_guid}
+previous_backup_marker=${remote_fs}@previous-backup-${backup_pool_guid}
 
 # List the snapshots on the remote machine.
 remote_list=$(mktemp /tmp/replicate.XXXXXX)
-ssh $remote \
+$ssh $remote \
   $zfs list -H -t snapshot | grep ^${remote_fs}@ | awk '{print$1}' > $remote_list
@@ -133,9 +137,9 @@ fi
 # If you do, it can mess up the common finding algorithm below.
 local_list=$(mktemp /tmp/replicate.XXXXXX)
 $zfs list -H -t snapshot |
-    grep ^${local_pool}/${remote_fs}@ |
-    grep -v ^${local_pool}/${current_backup_marker} |
-    awk "{gsub(/^$local_pool./,\"\",\$1); print\$1}" > $local_list
+    grep ^${backup_pool}/${remote_fs}@ |
+    grep -v ^${backup_pool}/${current_backup_marker} |
+    awk "{gsub(/^$backup_pool./,\"\",\$1); print\$1}" > $local_list
 if [ $? != 0 ]; then
   fatal_and_exit "-E- local $zfs list command failed" $mailto
 fi
@@ -143,14 +147,14 @@ fi
 # Destroy the current backup marker snapshot on the remote system if it exists
 grep -q ${current_backup_marker} $remote_list
 if [ $? == 0 ]; then
-  ssh $remote $zfs destroy ${current_backup_marker}
+  $ssh $remote $zfs destroy ${current_backup_marker}
   if [ $? != 0 ]; then
     fatal_and_exit "-E- remote $zfs destroy command failed" $mailto
   fi
 fi
 
 # Create the current backup marker snapshot on the remote system
-ssh $remote $zfs snapshot ${current_backup_marker}
+$ssh $remote $zfs snapshot ${current_backup_marker}
 if [ $? != 0 ]; then
   fatal_and_exit "-E- remote $zfs snapshot command failed" $mailto
 fi
@@ -168,21 +172,21 @@ no_markers=$(($no_markers || $?))
 
 if [ $no_markers == 0 ]; then
   # We found backup markers, incrementally send the new snaps
-  # First, rollback the local pool to the previous backup marker in case the previous
+  # First, rollback the local backup pool to the previous backup marker in case the previous
   # backup was interrupted for some reason. If we don't do this, the zfs send -R command
   # below may complain about snaps already existing as it tries to resend from the
   # previous backup marker again from a previously interrupted replicate.
-  $zfs rollback -r ${local_pool}/${previous_backup_marker}
+  $zfs rollback -r ${backup_pool}/${previous_backup_marker}
   if [ $? != 0 ]; then
     fatal_and_exit "-E- remote incremental $zfs rollback command failed" $mailto
   fi
   # Now it should be safe to send the snaps
   if [[ $throttle_enable == 1 && -e $throttle ]]; then
-    ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
-      $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
+    $ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
+      $throttle $throttle_opt | $zfs receive -vF -d ${backup_pool}/${remote_pool}
   else
-    ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
-      $zfs receive -vF -d ${local_pool}/${remote_pool}
+    $ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
+      $zfs receive -vF -d ${backup_pool}/${remote_pool}
   fi
   if [ $? != 0 ]; then
     fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
@@ -203,11 +207,11 @@ else
   if [[ -n "$common" ]]; then
     # We found a common snapshot, incrementally send the new snaps
     if [[ $throttle_enable == 1 && -e $throttle ]]; then
-      ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
-        $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
+      $ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
+        $throttle $throttle_opt | $zfs receive -vF -d ${backup_pool}/${remote_pool}
     else
-      ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
-        $zfs receive -vF -d ${local_pool}/${remote_pool}
+      $ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
+        $zfs receive -vF -d ${backup_pool}/${remote_pool}
     fi
     if [ $? != 0 ]; then
       fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
@@ -216,26 +220,26 @@ else
     # We did not find any markers or a common snapshot
     # At this point, we'll have to send the entire filesystem
    # Destroy the local filesystem if it exists before receving the full replicate
-    zfs list ${local_pool}/${remote_fs} > /dev/null 2>&1
+    zfs list ${backup_pool}/${remote_fs} > /dev/null 2>&1
     if [ $? == 0 ]; then
       if [[ $destroy_local_filesystem_on_full_replicate == 1 ]]; then
-        $zfs destroy -r ${local_pool}/${remote_fs}
+        $zfs destroy -r ${backup_pool}/${remote_fs}
         if [ $? != 0 ]; then
           fatal_and_exit "-E- remote full $zfs destroy command failed" $mailto
         fi
       else
         echo "-W- We need to destroy a local filesystem before receiving a full stream."
         echo "    However, since the option is set to prevent this, skipping replicate operation."
- fatal_and_exit "unable to destroy local filesystem:\n$zfs destroy -r ${local_pool}/${remote_fs} not able to run" $mailto + fatal_and_exit "unable to destroy local filesystem:\n$zfs destroy -r ${backup_pool}/${remote_fs} not able to run" $mailto fi fi # Send the full filesystem if [[ $throttle_enable == 1 && -e $throttle ]]; then - ssh $remote $zfs send -R ${current_backup_marker} | - $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool} + $ssh $remote $zfs send -R ${current_backup_marker} | + $throttle $throttle_opt | $zfs receive -vF -d ${backup_pool}/${remote_pool} else - ssh $remote $zfs send -R ${current_backup_marker} | - $zfs receive -vF -d ${local_pool}/${remote_pool} + $ssh $remote $zfs send -R ${current_backup_marker} | + $zfs receive -vF -d ${backup_pool}/${remote_pool} fi if [ $? != 0 ]; then fatal_and_exit "-E- remote full $zfs send command failed" $mailto @@ -245,16 +249,15 @@ fi # destroy the previous backup markers now that we've replicated past them # don't check the return codes here because these may not exist, and that is ok -$zfs destroy ${local_pool}/${previous_backup_marker} > /dev/null 2>&1 -ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1 +$zfs destroy ${backup_pool}/${previous_backup_marker} > /dev/null 2>&1 +$ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1 # Rename the current backup marker to be the previous backup marker -$zfs rename ${local_pool}/${current_backup_marker} ${local_pool}/${previous_backup_marker} +$zfs rename ${backup_pool}/${current_backup_marker} ${backup_pool}/${previous_backup_marker} if [ $? != 0 ]; then fatal_and_exit "-E- local $zfs rename command failed" $mailto fi -ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker} +$ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker} if [ $? != 0 ]; then fatal_and_exit "-E- remote $zfs rename command failed" $mailto fi - diff --git a/zfs-replicate-all b/zfs-replicate-all index 6f105d6..c47b86f 100755 --- a/zfs-replicate-all +++ b/zfs-replicate-all @@ -61,7 +61,7 @@ replicate() { # This function obtains the date a given snapshot was created in epoch seconds snapshot_age() { - snapshot=${local_pool}/${1}${previous_backup_marker} + snapshot=${backup_pool}/${1}${previous_backup_marker} $zfs list -t snapshot ${snapshot} > /dev/null 2>&1 if [ $? == 0 ]; then $zfs get creation ${snapshot} > /dev/null 2>&1 @@ -77,17 +77,32 @@ snapshot_age() { fi } -# Obtain the zpool guid for the local pool -local_pool_guid=`zpool get guid $local_pool 2>&1 | grep $local_pool | awk '{ print $3 }'` -zpool get guid $local_pool > /dev/null 2>&1 +# Mount the local backup pool if needed and the option is given to do so, else error out +zpool list -H "$backup_pool" >/dev/null 2>&1 if [ $? != 0 ]; then - echo >&2 "-E- Unable to extract the guid for the local pool: $local_pool" + if [[ $import_export_backup_pool == 1 ]]; then + zpool import $backup_pool + if [ $? != 0 ]; then + echo "-E- unable to import the backup pool $backup_pool" + exit 1 + fi + else + echo "-E- The local backup pool, '$backup_pool' doesn't seem to exist." $mailto + exit 1 + fi +fi + +# Obtain the zpool guid for the local backup pool +backup_pool_guid=`zpool get guid $backup_pool 2>&1 | grep $backup_pool | awk '{ print $3 }'` +zpool get guid $backup_pool > /dev/null 2>&1 +if [ $? 
!= 0 ]; then + echo >&2 "-E- Unable to extract the guid for the local backup pool: $backup_pool" exit 1 fi # Setup our backup marker names -current_backup_marker=@current-backup-${local_pool_guid} -previous_backup_marker=@previous-backup-${local_pool_guid} +current_backup_marker=@current-backup-${backup_pool_guid} +previous_backup_marker=@previous-backup-${backup_pool_guid} # Auto snapshot every zfs filesystem on the system specified below echo "$date ZFS replicate started" >> $logfile @@ -104,10 +119,18 @@ rm -f $tmpfile # Replicate the sorted filesystems for filesystem in $sorted_filesystems; do - echo "-> Replicating $remote:$filesystem to ${local_pool}/${filesystem}" | tee -a $mylogfile + echo "-> Replicating $remote:$filesystem to ${backup_pool}/${filesystem}" | tee -a $mylogfile replicate $remote $filesystem done +# Export the local pool if told to do so +if [[ $import_export_backup_pool == 1 ]]; then + zpool export $backup_pool + if [ $? != 0 ]; then + fatal_and_exit "-E- unable to export the local backup pool $backup_pool" + fi +fi + # All done echo `date` ZFS replicate complete >> $logfile echo `date` ZFS replicate complete | tee -a $mylogfile diff --git a/zfs-restore b/zfs-restore index b3c809a..84de61e 100755 --- a/zfs-restore +++ b/zfs-restore @@ -34,6 +34,12 @@ fi date=`date` echo "$date ZFS restore started: $src_pool/$src_fs -> $dst_hostname:$dst_pool/$dst_fs" +# check for localhost +if [[ $dst_hostname = "localhost" ]]; then + dst_hostname="" + ssh="" +fi + # Make sure the src pool and src filesystem exist, or print some errors zpool list -H "$src_pool" >/dev/null 2>&1 if [ $? != 0 ]; then @@ -61,33 +67,33 @@ if [ $? != 0 ]; then fi # Check to make sure the dst pool exists -ssh $dst_hostname "$zfs list ${dst_pool}" > /dev/null 2>&1 +$ssh $dst_hostname $zfs list ${dst_pool} > /dev/null 2>&1 if [ $? != 0 ]; then fatal_and_exit "-E- The destination pool '$dst_pool' does not exist. Create the pool '$dst_pool' and try again." fi # Check to make sure the dst filesystem does not exist -ssh $dst_hostname "$zfs list ${dst_pool}/${dst_fs}" > /dev/null 2>&1 +$ssh $dst_hostname $zfs list ${dst_pool}/${dst_fs} > /dev/null 2>&1 if [ $? == 0 ]; then fatal_and_exit "-E- The destination pool/filesystem '$dst_pool/$dst_fs' already exists. Destroy the filesystem '$dst_fs' and try again." fi # Now send the src filesystem if [[ -n "$SCRIPT_UNDER_TEST" ]]; then - echo "$zfs send -R $src_pool/$last_backup_marker | ssh $dst_hostname $zfs recv -dv $dst_pool" + echo "$zfs send -R $src_pool/$last_backup_marker | $ssh $dst_hostname $zfs recv -dv $dst_pool" else if [[ $throttle_enable == 1 && -e $throttle ]]; then - $zfs send -R "$src_pool/$last_backup_marker" | $throttle $throttle_opt | ssh $dst_hostname "$zfs recv -dv $dst_pool" + $zfs send -R "$src_pool/$last_backup_marker" | $throttle $throttle_opt | $ssh $dst_hostname $zfs recv -dv $dst_pool else - $zfs send -R "$src_pool/$last_backup_marker" | ssh $dst_hostname "$zfs recv -dv $dst_pool" + $zfs send -R "$src_pool/$last_backup_marker" | $ssh $dst_hostname $zfs recv -dv $dst_pool fi fi # Now rename the dst filesystem (move it into place) if [[ -n "$SCRIPT_UNDER_TEST" ]]; then - echo "$dst_hostname $zfs rename $dst_pool/$src_fs $dst_pool/$dst_fs" + echo "$ssh $dst_hostname $zfs rename $dst_pool/$src_fs $dst_pool/$dst_fs" else - ssh $dst_hostname "$zfs rename $dst_pool/$src_fs $dst_pool/$dst_fs" + $ssh $dst_hostname $zfs rename $dst_pool/$src_fs $dst_pool/$dst_fs fi # All done! 
diff --git a/zfs-restore-all b/zfs-restore-all
index 7d174b4..3931190 100755
--- a/zfs-restore-all
+++ b/zfs-restore-all
@@ -22,7 +22,6 @@ cleanup_and_exit() {
 }
 
 trap cleanup_and_exit INT
-
 # See if the user has a specific pool to restore in mind
 restore_pool=$1
 
@@ -34,13 +33,17 @@ for filesystem in $filesystems_to_replicate; do
   dst_pool=${filesystem%%/*}
   dst_fs=${filesystem#*/}
   # Check to make sure the dst filesystem does not exist
-  ssh $remote "$zfs list ${dst_pool}/${dst_fs}" > /dev/null 2>&1
+  if [[ $remote = "localhost" ]]; then
+    $zfs list ${dst_pool}/${dst_fs} > /dev/null 2>&1
+  else
+    $ssh $remote $zfs list ${dst_pool}/${dst_fs} > /dev/null 2>&1
+  fi
   if [ $? != 0 ]; then
     echo "$filesystem" | grep -q "$restore_pool"
     if [ $? == 0 ]; then
       # This filesystem matches our restore pool pattern
       echo `date` Restoring $filesystem to $remote
-      zfs-restore $local_pool $filesystem $dst_pool $dst_fs $remote
+      zfs-restore $backup_pool $filesystem $dst_pool $dst_fs $remote
     fi
   else
     echo "-I- Filesystem already exists on destination. Skipping: $filesystem"
diff --git a/zfs-rm-all-snaps b/zfs-rm-all-snaps
new file mode 100755
index 0000000..c65c5ba
--- /dev/null
+++ b/zfs-rm-all-snaps
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Author: Alan Pippin
+# Description: This script will remove all snaps containing a given snapshot pattern
+#              across all filesystems on a given pool.
+# Usage: zfs-rm-all-snaps <pool> <snap pattern>
+
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
+
+# command line arg parsing
+zfs_pool=$1
+snap_pattern=$2
+
+if [[ -z "$zfs_pool" ]] || [[ -z "$snap_pattern" ]]; then
+  echo "Usage: $0 <pool> <snap pattern>"
+  exit 1
+fi
+
+echo "-> Deleting all snapshots on pool '$zfs_pool' with pattern '$snap_pattern'"
+for snap in `zfs list -t snapshot 2>/dev/null | grep "^$zfs_pool" | grep "$snap_pattern" | awk '{print $1}'`; do
+  echo "   removing snapshot: $snap"
+done
+
+echo -e "\nPress Enter to remove these snapshots, or Ctrl-C to abort";
+read
+
+for snap in `zfs list -t snapshot 2>/dev/null | grep "^$zfs_pool" | grep "$snap_pattern" | awk '{print $1}'`; do
+  zfs destroy "$snap" 2>/dev/null
+done
+
+echo "-> Snapshots removed"
\ No newline at end of file
diff --git a/zfs-scripts.conf.dist b/zfs-scripts.conf.dist
index 956e697..7b021f6 100644
--- a/zfs-scripts.conf.dist
+++ b/zfs-scripts.conf.dist
@@ -1,15 +1,12 @@
 ###########################################################################
-# This is a shell config script included by the other ZFS scripts.
-# You must copy this file and modify these values to match your system.
-# Save the resulting file as "zfs-scripts.conf" in this directory.
-# This will prevent your settings from being overwritten by future updates.
+# local configuration file for ZFS scripts
 ###########################################################################
 
 # setup your path here to find all the binaries the ZFS scripts call
 PATH=/usr/sbin:/sbin:/etc/bin:.:$PATH
 
 # specify the path to the zfs binary
-zfs=/usr/sbin/zfs
+zfs=/sbin/zfs
 
 # specify the path to your command line mailer
 mailx=/usr/bin/mailx
@@ -21,31 +18,40 @@ logdir="/var/log/zfs"
 lockdir="/tmp/zfs-admin-lock"
 
 # specify the user to send email reports to
-mailto=
+mailto="user@domain"
 
-# specify the name of the remote server to pull snapshots from to backup locally
+# specify the ssh command and any options needed for accessing a remote server
+ssh="ssh"
+
+# specify the fqdn of the remote server to pull snapshots from to back up locally
+# specify "localhost" if the remote server is the local machine
 remote=
 
-# specify the name of the local pool to store remotely pulled (backup) snapshots to
-local_pool=
+# specify the name of the local pool to store the $remote's snapshots into
+backup_pool=
+
+# import backup pool before replication and export backup pool after replication
+import_export_backup_pool=0
 
 # when this variable is set, local filesystems will be destroyed
 # before receiving a full streams into them from the remote source.
+# if it needs to do this, and this option is set to 0, it aborts.
 destroy_local_filesystem_on_full_replicate=0
 
 # set this to 1 if you want the snapshot script to run in "test" mode (not really take any snaps)
+# leave blank if not under test
 SNAP_UNDER_TEST=
 
 # Specify the maximum run time in minutes that the replicate script can run for (0=no limit)
 maxruntime=0
 
 # Setup throttling related parameters that will rate limit the zfs send | zfs receive pipe
+# The maximum limit a local backup can handle to avoid excessive CPU load is 10MB/s
 throttle_enable=0
-throttle_opt="-v -M 1"
-throttle=/usr/local/bin/throttle
+throttle_opt="-v -M 10"
 
-# Specify the list of filesystems to replicate from the remote to the local_pool (1 per line)
-# (enter 1 filesystem per line)
-filesystems_to_replicate='
-'
+# Specify the list of filesystems to replicate from the $remote to the $backup_pool (1 per line)
+# The format of each line should be: pool/filesystem
+filesystems_to_replicate="
+"
diff --git a/zfs-scrub b/zfs-scrub
index 939c798..462d979 100755
--- a/zfs-scrub
+++ b/zfs-scrub
@@ -26,7 +26,7 @@ do
   # Check to see if any zfs filesystem has a scrub being performed on it now.
   # If it does, we cannot perform more than one scrub operation at a time.
   while true; do
-    zpool status | grep scrub: | grep "in progress" > /dev/null 2>&1
+    zpool status | grep scan: | grep "in progress" > /dev/null 2>&1
     if [ $? == 0 ]; then
       # Another zpool scrub operation is already running
       # Wait until it is done before continuing
@@ -44,7 +44,7 @@ do
 
   # Wait until the scrub completes, and check for any errors
   while true; do
-    zpool status $i | grep scrub: | grep "in progress" > /dev/null 2>&1
+    zpool status $i | grep scan: | grep "in progress" > /dev/null 2>&1
     if [ $? == 0 ]; then
       # Our zpool scrub operation is still running
       # Wait until it is done before continuing
@@ -60,7 +60,7 @@ do
   echo "$date: Scrub completed for zfs pool $i"
 
   # Check for any scrub errors
-  zpool status $i | grep scrub: | grep "with 0 errors" > /dev/null 2>&1
+  zpool status $i | grep scan: | grep "with 0 errors" > /dev/null 2>&1
   if [ $? != 0 ]; then
     # The scrub found errors
     zpool status $i | $mailx -s "zpool scrub $i found errors" $mailto
-- 
2.34.1
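
A note on the localhost dispatch this patch threads through zfs-replicate and zfs-restore: every remote invocation is written as "$ssh $remote <command>", so clearing both variables collapses the ssh round-trip into a plain local command. A minimal standalone sketch of the pattern ($ssh, $remote, and $zfs mirror the variables in zfs-scripts.conf.dist; not part of the patch itself):

#!/bin/bash
# Sketch: localhost dispatch, as used by the patched scripts.
ssh="ssh"
zfs="/sbin/zfs"
remote="$1"

# When the "remote" end is really this machine, empty both variables so
# the unquoted "$ssh $remote $zfs ..." word-splits down to just "$zfs ...".
if [[ $remote = "localhost" ]]; then
  remote=""
  ssh=""
fi

# The same invocation now covers both cases:
#   remote host:  ssh <host> /sbin/zfs list -H -t snapshot
#   localhost:    /sbin/zfs list -H -t snapshot
$ssh $remote $zfs list -H -t snapshot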
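
The new import_export_backup_pool option in zfs-replicate-all brackets the replication pass with zpool import/export, so the backup pool only needs to be attached while a backup is actually running. A sketch of that flow under the same config variables (replicate_sorted_filesystems is a hypothetical stand-in for the per-filesystem loop):

#!/bin/bash
backup_pool="backup"              # from zfs-scripts.conf (assumed value)
import_export_backup_pool=1       # from zfs-scripts.conf

# Import the backup pool on demand, or fail if it is simply missing.
zpool list -H "$backup_pool" >/dev/null 2>&1
if [ $? != 0 ]; then
  if [[ $import_export_backup_pool == 1 ]]; then
    zpool import "$backup_pool" || exit 1
  else
    echo "-E- The local backup pool, '$backup_pool' doesn't seem to exist."
    exit 1
  fi
fi

replicate_sorted_filesystems      # hypothetical stand-in for the main loop

# Detach the pool again once the replication pass is done.
if [[ $import_export_backup_pool == 1 ]]; then
  zpool export "$backup_pool" || exit 1
fi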
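
For reference, the backup markers that zfs-replicate renames at the end of each run are what make the next run incremental: the destination pool's GUID embedded in the snapshot name ties a marker pair to one backup pool, and zfs send -R -I replays only what lies between the two markers. A condensed sketch of one successful pass (error handling and throttling elided; variables as in zfs-replicate):

# Marker names embed the backup pool's GUID, so several backup pools can
# replicate the same source filesystem without colliding.
current_backup_marker=${remote_fs}@current-backup-${backup_pool_guid}
previous_backup_marker=${remote_fs}@previous-backup-${backup_pool_guid}

$ssh $remote $zfs snapshot ${current_backup_marker}

# Send everything between the previous and current markers, then retire
# the current marker to "previous" on both ends for the next run.
$ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
  $zfs receive -vF -d ${backup_pool}/${remote_pool}

$zfs destroy ${backup_pool}/${previous_backup_marker} > /dev/null 2>&1
$ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1
$zfs rename ${backup_pool}/${current_backup_marker} ${backup_pool}/${previous_backup_marker}
$ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}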