-#/bin/bash
-set -x
-
+#!/bin/bash
# Usage: replicate <hostname> <zfs filesystem>
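+# Example (hypothetical host and filesystem): replicate backuphost tank/home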
remote=$1
remote_fs=$2
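+# The remote pool name is everything before the first '/' in the filesystem argument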
+remote_pool=${2%%/*}
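+# Directory used as a mutual-exclusion lock among zfs admin tools on the
+# remote host; mkdir creates it atomically below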
+remote_lockdir="/tmp/zfs-admin-lock"
-# change to match the name of the local backup pool
+# Set the name of the local pool used to store the backup of the remote filesystem
local_pool=backups
+# Set the email address to send notifications to
+mailto=root@pippins.net
+
+# When this variable is set to 1, a local filesystem will be destroyed
+# before receiving a full stream into it from the remote source.
+destroy_local_filesystem_on_full_replicate=0
+
# The ssh connection doesn't find zfs without this.
zfs=/usr/sbin/zfs
+# Setup our cleanup and exit trap.
+# Only remove the remote lock if we were the ones who acquired it; otherwise
+# an early exit here could delete a lock held by another zfs admin tool.
+lock_acquired=0
+cleanup() {
+ if [ $lock_acquired == 1 ]; then
+  ssh $remote rm -rf "$remote_lockdir"
+ fi
+ exit
+}
+trap cleanup INT
+trap cleanup EXIT
+
+# Make sure we have valid arguments
+if [[ -z "$remote" ]] || [[ -z "$remote_fs" ]]; then
+ echo "Usage: $0 <hostname> <zfs filesystem>"
+ exit 1
+fi
+
+# Make sure the local pool and local receiving filesystem exist, or print some errors
+zpool list -H "$local_pool" >/dev/null 2>&1
+if [ $? != 0 ]; then
+ echo >&2 "-E- The local pool, '$local_pool' doesn't seem to exist."
+ exit 1
+fi
+$zfs list "$local_pool/$remote_pool" >/dev/null 2>&1
+if [ $? != 0 ]; then
+ echo >&2 "-I- The local filesystem for the remote pool, '$local_pool/$remote_pool' doesn't seem to exist."
+ echo >&2 " Creating the local filesystem to receive the remote pool into: $local_pool/$remote_pool"
+ $zfs create $local_pool/$remote_pool
+ if [ $? != 0 ]; then
+ echo "-E- remote $zfs create command failed"
+ exit 1
+ fi
+fi
+
+# Obtain the zpool guid for the local pool. An empty result means the pool
+# doesn't exist or the guid couldn't be parsed.
+local_pool_guid=`zpool get guid $local_pool 2>/dev/null | grep $local_pool | awk '{ print $3 }'`
+if [ -z "$local_pool_guid" ]; then
+ echo >&2 "-E- Unable to extract the guid for the local pool: $local_pool"
+ exit 1
+fi
+
+# Turn on shell verbosity
+set -x
+
+# Create the remote lockdir before continuing with the replicate.
+# Spinlock on creating the lock: retry with a random backoff until mkdir succeeds.
+maxsleeptime=60
+maxattempts=100
+attempts=0
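+# Try up to $maxattempts times, sleeping up to $maxsleeptime seconds between attempts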
+while true; do
+ ssh $remote mkdir "$remote_lockdir" >/dev/null 2>&1
+ if [ $? != 0 ]; then
+ # Another zfs admin tool is running.
+ # Wait a random amount of time and try again
+ ransleep=$(($RANDOM % $maxsleeptime))
+ sleep $ransleep
+ ((attempts=attempts+1))
+ else
+ # No other zfs admin tool is running; we hold the lock now
+ lock_acquired=1
+ break
+ fi
+ if [[ $attempts -gt $maxattempts ]]; then
+ # We've exceeded our maximum number of attempts
+ echo "-W- Unable to obtain the zfs admin lock after $maxattempts attempts. Skipping replicate operation."
+ ssh $remote ls -ld $remote_lockdir | /usr/bin/mailx -s "zfs-replicate-all unable to obtain zfs admin lock" $mailto
+ exit 1
+ fi
+done
+
+# Setup our backup marker names
+current_backup_marker=${remote_fs}@current-backup-${local_pool_guid}
+previous_backup_marker=${remote_fs}@previous-backup-${local_pool_guid}
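+# The current marker captures the point-in-time this run will send; after a
+# successful replicate it is renamed to the previous marker (see the end of
+# this script), which becomes the incremental base for the next run.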
+
# List the snapshots on the remote machine.
remote_list=$(mktemp /tmp/replicate.XXXXXX)
ssh $remote \
$zfs list -H -t snapshot |
grep ^${remote_fs}@ |
awk '{print$1}' > $remote_list
+# $? here would only reflect the awk above; PIPESTATUS[0] holds the status of the ssh'd zfs list
+if [ ${PIPESTATUS[0]} != 0 ]; then
+ echo "-E- remote $zfs list command failed"
+ exit 1
+fi
# List the snapshots on the local machine.
+# Don't list the current backup marker if it exists on the local side.
+# If you do, it can mess up the common finding algorithm below.
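+# The awk below strips the leading "$local_pool/" so the local snapshot names
+# are directly comparable with the remote list.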
local_list=$(mktemp /tmp/replicate.XXXXXX)
$zfs list -H -t snapshot |
grep ^${local_pool}/${remote_fs}@ |
- awk '{gsub(/^${local_pool}./,"",$1); print$1}' > $local_list
+ grep -v ^${local_pool}/${current_backup_marker} |
+ awk "{gsub(/^$local_pool./,\"\",\$1); print\$1}" > $local_list
+if [ ${PIPESTATUS[0]} != 0 ]; then
+ echo "-E- local $zfs list command failed"
+ exit 1
+fi
+
+# Destroy the current backup marker snapshot on the remote system if it exists
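+# (it would only be present if an earlier replicate was interrupted before the
+# marker rename at the end of this script)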
+grep -q ${current_backup_marker} $remote_list
+if [ $? == 0 ]; then
+ ssh $remote $zfs destroy ${current_backup_marker}
+ if [ $? != 0 ]; then
+ echo "-E- remote $zfs destroy command failed"
+ exit 1
+ fi
+fi
+
+# Create the current backup marker snapshot on the remote system
+ssh $remote $zfs snapshot ${current_backup_marker}
+if [ $? != 0 ]; then
+ echo "-E- remote $zfs snapshot command failed"
+ exit 1
+fi
-# See what the most recent snapshot on the remote end is.
-latest=$(tail -n 1 $remote_list)
+# Check to see if the previous backup marker exists in both the remote and
+# local snapshot lists. If it does, perform an incremental replicate. Else:
+# 1) check to see if a common snapshot exists, and perform an incremental replicate.
+# 2) if no common snapshot exists, destroy the local filesystem, and perform a full replicate.
+grep -q ${previous_backup_marker} $remote_list
+no_markers=$?
+grep -q ${previous_backup_marker} $local_list
+no_markers=$(($no_markers || $?))
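+# no_markers is 0 only if the previous backup marker was found in both lists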
-# I did this to make sure that diff would always display the most recent common
-echo bogus.remote >> $remote_list
-echo bogus.local >> $local_list
-common=$(diff -u $remote_list $local_list | grep '^ ' | tail -n 1)
+if [ $no_markers == 0 ]; then
+ # We found backup markers, incrementally send the new snaps
-if [ -n "$common" ]; then
- # We found a common snapshot
- ssh $remote $zfs send -R -I${common/*@/@} $latest |
- $zfs receive -vF -d ${local_pool}/${remote_fs%/*}
+ # First, rollback the local filesystem to the previous backup marker in case
+ # the previous backup was interrupted for some reason. If we don't do this,
+ # the zfs send -R command below may complain about snaps already existing as
+ # it tries to resend them from a previously interrupted replicate.
+ $zfs rollback -r ${local_pool}/${previous_backup_marker}
+ if [ $? != 0 ]; then
+ echo "-E- local incremental $zfs rollback command failed"
+ exit 1
+ fi
+ # Now it should be safe to send the snaps
+ ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
+ $zfs receive -vF -d ${local_pool}/${remote_pool}
+ # $? alone would only reflect the local zfs receive; check both ends of the pipeline
+ if [ ${PIPESTATUS[0]} != 0 -o ${PIPESTATUS[1]} != 0 ]; then
+ echo "-E- incremental $zfs send/receive pipeline failed"
+ exit 1
+ fi
else
- # We did not find a common snapshot, so send the entire filesystem
- ssh $remote $zfs send -R $latest |
- $zfs receive -vF -d ${local_pool}/${remote_fs%/*}
-endif
+ # We didn't find any backup markers, next check to see if we have a common snapshot.
+ # See what the most recent snapshot on the remote end is.
+ latest=$(tail -n 1 $remote_list)
+
+ # Inject a known difference at the end of each list so that the diff below
+ # always produces context; we key off the context lines of the diff to find
+ # the most recent common snapshot.
+ echo bogus.remote >> $remote_list
+ echo bogus.local >> $local_list
+ common=$(diff -u $remote_list $local_list | grep '^ ' | tail -n 1)
+
+ if [[ -n "$common" ]]; then
+ # We found a common snapshot, incrementally send the new snaps
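+ # (${common/*@/@} strips everything through the '@', leaving the @snapshot suffix that -I expects)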
+ ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
+ $zfs receive -vF -d ${local_pool}/${remote_pool}
+ if [ ${PIPESTATUS[0]} != 0 -o ${PIPESTATUS[1]} != 0 ]; then
+ echo "-E- incremental $zfs send/receive pipeline failed"
+ exit 1
+ fi
+ else
+ # We did not find any markers or a common snapshot
+ # At this point, we'll have to send the entire filesystem
+ # Destroy the local filesystem if it exists before receiving the full replicate
+ $zfs list ${local_pool}/${remote_fs} > /dev/null 2>&1
+ if [ $? == 0 ]; then
+ if [[ $destroy_local_filesystem_on_full_replicate == 1 ]]; then
+ $zfs destroy -r ${local_pool}/${remote_fs}
+ if [ $? != 0 ]; then
+ echo "-E- remote full $zfs destroy command failed"
+ exit 1
+ fi
+ else
+ echo "-W- We need to destroy a local filesystem before receiving a full stream."
+ echo " However, since the option is set to prevent this, skipping replicate operation."
+ echo "$zfs destroy -r ${local_pool}/${remote_fs} not able to run" | /usr/bin/mailx -s "zfs-replicate-all unable to destroy local filesystem" $mailto
+ exit 1
+ fi
+ fi
+ # Send the full filesystem
+ ssh $remote $zfs send -R ${current_backup_marker} |
+ $zfs receive -vF -d ${local_pool}/${remote_pool}
+ if [ ${PIPESTATUS[0]} != 0 -o ${PIPESTATUS[1]} != 0 ]; then
+ echo "-E- full $zfs send/receive pipeline failed"
+ exit 1
+ fi
+ fi
+fi
+
+# Destroy the previous backup markers now that we've replicated past them.
+# Don't check the return codes here because these markers may not exist, and that is ok.
+$zfs destroy ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
+ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1
+
+# Rename the current backup marker to be the previous backup marker
+$zfs rename ${local_pool}/${current_backup_marker} ${local_pool}/${previous_backup_marker}
+if [ $? != 0 ]; then
+ echo "-E- local $zfs rename command failed"
+ exit 1
+fi
+ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}
+if [ $? != 0 ]; then
+ echo "-E- remote $zfs rename command failed"
+ exit 1
+fi
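+# On the next run, these renamed markers will appear in both snapshot lists,
+# steering the script down the incremental replicate path above.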
+
+# Remove tmp files
rm -f $local_list $remote_list
+