#!/bin/bash
# Author: Carl Baldwin & Alan Pippin
-# Description: This script replicates a given zfs filesystem to a given zfs pool.
+# Description: This script replicates a remote zfs filesystem to a local zfs pool.
# This script will keep all snapshots in sync, removing the ones
# that have been deleted since the last replicate was performed.
# This script will only send the new, or missing, snapshots since
# the last replicate was performed.
+# Usage: replicate <hostname> <zfs filesystem>
# source our configuration (site overrides in zfs-scripts.conf win over the
# shipped defaults in zfs-scripts.conf.dist); paths are quoted so a script
# location containing spaces cannot word-split the source target
config="${0%/*}/zfs-scripts.conf"
[ -e "${config}.dist" ] && . "${config}.dist"
[ -e "${config}" ] && . "${config}"

# command line arg parsing
remote=$1                 # hostname of the machine we replicate from
remote_fs=$2              # zfs filesystem (pool/fs) on that machine
remote_pool=${2%%/*}      # pool component of the remote filesystem
hostname=$(hostname)      # local host name, used in notification emails
# Setup our cleanup and exit trap
# Removes the temporary snapshot listing files and releases the remote zfs
# admin lock directory.
# NOTE(review): this runs on every EXIT, so it removes $lockdir even if this
# process never acquired the lock (e.g. when acquisition itself failed) —
# confirm that is intended before relying on the lock for mutual exclusion.
cleanup() {
  # Quote the temp-file paths so rm cannot word-split or glob them
  if [[ -e "$local_list" ]]; then
    rm -f "$local_list"
  fi
  if [[ -e "$remote_list" ]]; then
    rm -f "$remote_list"
  fi
  if [[ -n "$remote" ]]; then
    # Only remove the lockdir if it is still present on the remote host
    if ssh $remote ls -d "$lockdir" > /dev/null 2>&1; then
      ssh $remote rm -rf "$lockdir"
    fi
  fi
}
# Print a fatal error, destroy any backup-marker snapshots we created on the
# local side, optionally send a notification email, and exit non-zero.
#   $1 - error message text (may contain \n escapes)
#   $2 - optional email address to notify (typically $mailto)
fatal_and_exit() {
  # Bug fix: was 'echo -e 2>&1 "$1"', which left the message on stdout;
  # diagnostics belong on stderr.
  echo -e "$1" >&2
  # Destroy the backup markers on the local filesystem if they exist
  # (use the configured $zfs binary consistently, as the destroy lines do)
  if [[ -n "$current_backup_marker" ]]; then
    $zfs list -t snapshot ${local_pool}/${current_backup_marker} > /dev/null 2>&1
    if [ $? == 0 ]; then
      $zfs destroy ${local_pool}/${current_backup_marker}
    fi
  fi
  if [[ -n "$previous_backup_marker" ]]; then
    $zfs list -t snapshot ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
    if [ $? == 0 ]; then
      $zfs destroy ${local_pool}/${previous_backup_marker}
    fi
  fi
  # send email notification
  if [[ -n "$2" ]]; then
    echo -e "$1" | $mailx -s "zfs replicate on $hostname failed" "$2"
  fi
  # exit
  exit 1
}
# Abort cleanly on interrupt; always release temp files and the remote lock
# when the script exits, on any path.
trap fatal_and_exit INT
trap cleanup EXIT

# Make sure we have valid arguments
if [[ -z "$remote" || -z "$remote_fs" ]]; then
  fatal_and_exit "Usage: $0 <hostname> <zfs filesystem>"
fi
# Make sure the local pool and local receiving filesystem exist, or print some errors
zpool list -H "$local_pool" >/dev/null 2>&1
if [ $? != 0 ]; then
  fatal_and_exit "-E- The local pool, '$local_pool' doesn't seem to exist." $mailto
fi
# If the container filesystem for the remote pool is missing, create it so the
# 'zfs receive -d' calls below have somewhere to land.
zfs list "$local_pool/$remote_pool" >/dev/null 2>&1
if [ $? != 0 ]; then
  echo >&2 "-I- The local filesystem for the remote pool, '$local_pool/$remote_pool' doesn't seem to exist."
  echo >&2 "    Creating the local filesystem to receive the remote pool into: $local_pool/$remote_pool"
  $zfs create $local_pool/$remote_pool
  if [ $? != 0 ]; then
    # Bug fix: this create runs locally, so say "local" in the error text
    fatal_and_exit "-E- local $zfs create command failed" $mailto
  fi
fi
# Obtain the zpool guid for the local pool; it makes the backup-marker
# snapshot names unique per destination pool.
local_pool_guid=$(zpool get guid $local_pool 2>&1 | grep $local_pool | awk '{ print $3 }')
# Bug fix: previously 'zpool get' was re-run a second time just to test its
# exit status, which never validated the extracted value (and raced with the
# first run). An empty guid means zpool get or the extraction failed.
if [[ -z "$local_pool_guid" ]]; then
  fatal_and_exit "-E- Unable to extract the guid for the local pool: $local_pool" $mailto
fi
# Turn on shell verbosity
set -x

# Create the remote lockdir before continuing with the replicate.
# Spinlock on creating the lock: mkdir is atomic on the remote side, so only
# one zfs admin tool can hold the lock at a time. Back off a random interval
# between attempts and give up after maxattempts tries.
maxsleeptime=60
maxattempts=500
attempts=0
while ! ssh $remote mkdir "$lockdir" >/dev/null 2>&1; do
  # Another zfs admin tool is running; wait a random amount of time and retry
  sleep $(( RANDOM % maxsleeptime ))
  (( attempts = attempts + 1 ))
  if [[ $attempts -gt $maxattempts ]]; then
    # We've exceeded our maximum retry count
    echo "-E- The zfs filesystem has been locked down. Skipping replicate operation."
    fail_msg=$(ssh $remote ls -ld $lockdir 2>&1)
    fatal_and_exit "zfs-replicate-all unable to obtain zfs admin lock:\n$fail_msg" $mailto
  fi
done
# Setup our backup marker names; the pool guid ties each marker to this
# particular destination pool.
current_backup_marker=${remote_fs}@current-backup-${local_pool_guid}
previous_backup_marker=${remote_fs}@previous-backup-${local_pool_guid}

# List the snapshots on the remote machine.
remote_list=$(mktemp /tmp/replicate.XXXXXX)
ssh $remote \
    $zfs list -H -t snapshot |
    grep ^${remote_fs}@ |
    awk '{print$1}' > $remote_list
# Bug fix: plain $? only reflected awk (the last pipeline stage), which
# always succeeds; check the ssh/zfs stage via PIPESTATUS instead. (Checking
# the grep stage would false-fail when the remote has no snapshots yet.)
if [ ${PIPESTATUS[0]} != 0 ]; then
  fatal_and_exit "-E- remote $zfs list command failed" $mailto
fi
# List the snapshots on the local machine.
# Don't list the current backup marker if it exists on the local side.
# If you do, it can mess up the common finding algorithm below.
# The awk strips the "<local_pool>/" prefix so the entries are directly
# comparable with the remote list.
local_list=$(mktemp /tmp/replicate.XXXXXX)
$zfs list -H -t snapshot |
    grep ^${local_pool}/${remote_fs}@ |
    grep -v ^${local_pool}/${current_backup_marker} |
    awk "{gsub(/^$local_pool./,\"\",\$1); print\$1}" > $local_list
# Bug fix: plain $? only reflected the final awk stage; check the zfs list
# stage via PIPESTATUS instead.
if [ ${PIPESTATUS[0]} != 0 ]; then
  fatal_and_exit "-E- local $zfs list command failed" $mailto
fi
# Destroy the current backup marker snapshot on the remote system if it
# exists (a leftover from an interrupted previous run), then create a fresh
# one that pins the point in time this replicate will send up to.
if grep -q ${current_backup_marker} $remote_list; then
  if ! ssh $remote $zfs destroy ${current_backup_marker}; then
    fatal_and_exit "-E- remote $zfs destroy command failed" $mailto
  fi
fi

# Create the current backup marker snapshot on the remote system
if ! ssh $remote $zfs snapshot ${current_backup_marker}; then
  fatal_and_exit "-E- remote $zfs snapshot command failed" $mailto
fi
# Check to see if the previous backup marker exists in the remote snapshot list.
# Check to see if the previous backup marker exists in the local snapshot list.
# If the previous backup markers exists, perform an incremental replicate. Else:
# 1) check to see if a common snapshot exists, and perform an incremental replicate.
# 2) if no common snapshot exists, destroy the local filesystem, and perform a full replicate.
grep -q ${previous_backup_marker} $remote_list
no_markers=$?
grep -q ${previous_backup_marker} $local_list
no_markers=$(($no_markers || $?))

if [ $no_markers == 0 ]; then
  # We found backup markers, incrementally send the new snaps

  # First, rollback the local pool to the previous backup marker in case the previous
  # backup was interrupted for some reason. If we don't do this, the zfs send -R command
  # below may complain about snaps already existing as it tries to resend from the
  # previous backup marker again from a previously interrupted replicate.
  $zfs rollback -r ${local_pool}/${previous_backup_marker}
  if [ $? != 0 ]; then
    # Bug fix: this rollback runs locally, so say "local" in the error text
    fatal_and_exit "-E- local incremental $zfs rollback command failed" $mailto
  fi
  # Now it should be safe to send the snaps
  if [[ $throttle_enable == 1 && -e $throttle ]]; then
    ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
      $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
  else
    ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
      $zfs receive -vF -d ${local_pool}/${remote_pool}
  fi
  # $? here is the zfs receive stage, which fails on a truncated/failed stream
  if [ $? != 0 ]; then
    fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
  fi
else
  # We didn't find any backup markers, next check to see if we have a common snapshot.

  # See what the most recent snapshot on the remote end is.
  # NOTE(review): 'latest' is not referenced below — candidate for removal.
  latest=$(tail -n 1 $remote_list)

  # Find the most recent snapshot common to both lists via diff context lines.
  # Since we're keying off the context of the diff, we need to ensure we will get context
  # by injecting a known difference in case no others exist in the lists.
  echo bogus.remote >> $remote_list
  echo bogus.local >> $local_list
  common=$(diff -u $remote_list $local_list | grep '^ ' | tail -n 1)

  if [[ -n "$common" ]]; then
    # We found a common snapshot, incrementally send the new snaps
    # (${common/*@/@} reduces "fs@snap" to just "@snap" for the -I argument)
    if [[ $throttle_enable == 1 && -e $throttle ]]; then
      ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
        $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
    else
      ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
        $zfs receive -vF -d ${local_pool}/${remote_pool}
    fi
    if [ $? != 0 ]; then
      fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
    fi
  else
    # We did not find any markers or a common snapshot
    # At this point, we'll have to send the entire filesystem
    # Destroy the local filesystem if it exists before receiving the full replicate
    # (consistency fix: use the configured $zfs binary like the rest of the script)
    $zfs list ${local_pool}/${remote_fs} > /dev/null 2>&1
    if [ $? == 0 ]; then
      if [[ $destroy_local_filesystem_on_full_replicate == 1 ]]; then
        $zfs destroy -r ${local_pool}/${remote_fs}
        if [ $? != 0 ]; then
          # Bug fix: this destroy runs locally, so say "local" in the error text
          fatal_and_exit "-E- local full $zfs destroy command failed" $mailto
        fi
      else
        echo "-W- We need to destroy a local filesystem before receiving a full stream."
        echo "    However, since the option is set to prevent this, skipping replicate operation."
        fatal_and_exit "unable to destroy local filesystem:\n$zfs destroy -r ${local_pool}/${remote_fs} not able to run" $mailto
      fi
    fi
    # Send the full filesystem
    if [[ $throttle_enable == 1 && -e $throttle ]]; then
      ssh $remote $zfs send -R ${current_backup_marker} |
        $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
    else
      ssh $remote $zfs send -R ${current_backup_marker} |
        $zfs receive -vF -d ${local_pool}/${remote_pool}
    fi
    if [ $? != 0 ]; then
      fatal_and_exit "-E- remote full $zfs send command failed" $mailto
    fi
  fi
fi
# destroy the previous backup markers now that we've replicated past them
# don't check the return codes here because these may not exist, and that is ok
$zfs destroy ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1

# Rename the current backup marker to be the previous backup marker, on both
# sides, so the next run can replicate incrementally from this point.
if ! $zfs rename ${local_pool}/${current_backup_marker} ${local_pool}/${previous_backup_marker}; then
  fatal_and_exit "-E- local $zfs rename command failed" $mailto
fi
if ! ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}; then
  fatal_and_exit "-E- remote $zfs rename command failed" $mailto
fi