-#/bin/bash
+#!/bin/bash
+# Author: Carl Baldwin & Alan Pippin
+# Description: This script replicates a remote zfs filesystem to a local zfs pool.
+# This script will keep all snapshots in sync, removing the ones
+# that have been deleted since the last replicate was performed.
+# This script will only send the new, or missing, snapshots since
+# the last replicate was performed.
# Usage: replicate <hostname> <zfs filesystem>
+
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+# Quote the sourced paths: ${0%/*} may contain spaces, which would word-split unquoted
+[ -e "${config}.dist" ] && . "${config}.dist"
+[ -e "${config}" ] && . "${config}"
+
+# command line arg parsing
remote=$1
remote_fs=$2
remote_pool=${2%%/*}
-remote_lockdir="/tmp/zfs-admin-lock"
-
-# Set the name of the local pool used to store the backup of the remote
-local_pool=backups
+# used in the failure-email subject; $(...) preferred over legacy backticks
+hostname=$(hostname)
-# Set the email address to send notification to
-mailto=alan@pippins.net
-
-# The ssh connection doesn't find zfs without this.
-zfs=/usr/sbin/zfs
+# Setup our cleanup and exit trap
+# Removes the temporary snapshot-list files and releases the remote zfs admin lock.
+cleanup() {
+  # Remove the temporary snapshot-list files (quoted so unusual tmp paths survive)
+  if [[ -e "$local_list" ]]; then
+    rm -f "$local_list"
+  fi
+  if [[ -e "$remote_list" ]]; then
+    rm -f "$remote_list"
+  fi
+  # Release the remote zfs admin lockdir if it exists
+  if [[ -n "$remote" ]]; then
+    if ssh $remote ls -d "$lockdir" > /dev/null 2>&1; then
+      ssh $remote rm -rf "$lockdir"
+    fi
+  fi
+}
+# Print an error, destroy any local backup-marker snapshots, optionally send a
+# failure email, then exit non-zero.
+#   $1 - error message
+#   $2 - optional email recipient; mail is sent only when non-empty
+fatal_and_exit() {
+  # Emit the message on stderr. (The original "echo -e 2>&1" redirected stderr
+  # into stdout, so the error actually went to stdout; ">&2" is the intent.)
+  echo -e "$1" >&2
+  # Destroy the backup markers on the local filesystem if they exist
+  if [[ -n "$current_backup_marker" ]]; then
+    # use $zfs for consistency with the destroy below and the rest of the script
+    $zfs list -t snapshot ${local_pool}/${current_backup_marker} > /dev/null 2>&1
+    if [ $? == 0 ]; then
+      $zfs destroy ${local_pool}/${current_backup_marker}
+    fi
+  fi
+  if [[ -n "$previous_backup_marker" ]]; then
+    $zfs list -t snapshot ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
+    if [ $? == 0 ]; then
+      $zfs destroy ${local_pool}/${previous_backup_marker}
+    fi
+  fi
+  # send email notification
+  if [[ -n "$2" ]]; then
+    echo -e "$1" | $mailx -s "zfs replicate on $hostname failed" "$2"
+  fi
+  # exit
+  exit 1
+}
+trap fatal_and_exit INT
+trap cleanup EXIT
# Make sure we have valid arguments
if [[ -z "$remote" ]] || [[ -z "$remote_fs" ]]; then
-  echo "Usage: $0 <hostname> <zfs filesystem>"
-  exit 1
+  fatal_and_exit "Usage: $0 <hostname> <zfs filesystem>"
fi
# Make sure the local pool and local receiving filesystem exist, or print some errors
+# NOTE(review): bare "zpool"/"zfs" here vs $zfs elsewhere relies on the binary being in PATH — confirm
zpool list -H "$local_pool" >/dev/null 2>&1
if [ $? != 0 ]; then
-  echo >&2 "-E- The local pool, '$local_pool' doesn't seem to exist."
-  exit 1
+  fatal_and_exit "-E- The local pool, '$local_pool' doesn't seem to exist." $mailto
fi
+# Create the top-level receiving filesystem (<local_pool>/<remote_pool>) if it is missing
zfs list "$local_pool/$remote_pool" >/dev/null 2>&1
if [ $? != 0 ]; then
  echo >&2 "  Creating the local filesystem to receive the remote pool into: $local_pool/$remote_pool"
  $zfs create $local_pool/$remote_pool
  if [ $? != 0 ]; then
-    echo "-E- remote $zfs create command failed"
-    exit 1
+    fatal_and_exit "-E- remote $zfs create command failed" $mailto
  fi
fi
+# The local pool's GUID is embedded in the backup-marker snapshot names below
local_pool_guid=`zpool get guid $local_pool 2>&1 | grep $local_pool | awk '{ print $3 }'`
zpool get guid $local_pool > /dev/null 2>&1
if [ $? != 0 ]; then
-  echo >&2 "-E- Unable to extract the guid for the local pool: $local_pool"
-  exit 1
+  fatal_and_exit "-E- Unable to extract the guid for the local pool: $local_pool" $mailto
fi
# Turn on shell verbosity
# Create the remote lockdir before continuing with the replicate
# Spinlock on creating the lock
maxsleeptime=60
-maxattempts=100
+maxattempts=500
+# NOTE(review): attempt cap raised from 100 to 500 — rationale not stated in this change; confirm
attempts=0
while true; do
-  ssh $remote mkdir "$remote_lockdir" >/dev/null 2>&1
+  ssh $remote mkdir "$lockdir" >/dev/null 2>&1
  if [ $? != 0 ]; then
    # Another zfs admin tool is running.
    # Wait a random amount of time and try again
  fi
  if [[ $attempts -gt $maxattempts ]]; then
    # We've exceeded our maximum while loop count
-    echo "-W- The zfs filesystem has been locked down. Skipping replicate operation."
-    ssh $remote ls -ld $remote_lockdir | /usr/bin/mailx -s "zfs-replicate-all unable to obtain zfs admin lock" $mailto
-    exit 1
+    echo "-E- The zfs filesystem has been locked down. Skipping replicate operation."
+    fail_msg=`ssh $remote ls -ld $lockdir 2>&1`
+    fatal_and_exit "zfs-replicate-all unable to obtain zfs admin lock:\n$fail_msg" $mailto
  fi
done
+# The remote lockdir is now removed by the cleanup() EXIT trap declared near the top of the script
-# Declare a cleanup() method to remove the remote lockdir
-cleanup() { ssh $remote rm -rf "$remote_lockdir"; }
-trap cleanup EXIT
-
# Setup our backup marker names
+# Marker names embed the local pool's GUID (presumably so several backup pools can
+# replicate the same remote filesystem independently — confirm)
current_backup_marker=${remote_fs}@current-backup-${local_pool_guid}
previous_backup_marker=${remote_fs}@previous-backup-${local_pool_guid}
grep ^${remote_fs}@ |
awk '{print$1}' > $remote_list
if [ $? != 0 ]; then
-  echo "-E- remote $zfs list command failed"
-  exit 1
+  fatal_and_exit "-E- remote $zfs list command failed" $mailto
fi
# List the snapshots on the local machine.
+# Don't list the current backup marker if it exists on the local side.
+# If you do, it can mess up the common finding algorithm below.
local_list=$(mktemp /tmp/replicate.XXXXXX)
$zfs list -H -t snapshot |
  grep ^${local_pool}/${remote_fs}@ |
+  grep -v ^${local_pool}/${current_backup_marker} |
  awk "{gsub(/^$local_pool./,\"\",\$1); print\$1}" > $local_list
if [ $? != 0 ]; then
-  echo "-E- local $zfs list command failed"
-  exit 1
+  fatal_and_exit "-E- local $zfs list command failed" $mailto
fi
# Destroy the current backup marker snapshot on the remote system if it exists
if [ $? == 0 ]; then
  ssh $remote $zfs destroy ${current_backup_marker}
  if [ $? != 0 ]; then
-    echo "-E- remote $zfs destroy command failed"
-    exit 1
+    fatal_and_exit "-E- remote $zfs destroy command failed" $mailto
  fi
fi
# Create the current backup marker snapshot on the remote system
ssh $remote $zfs snapshot ${current_backup_marker}
if [ $? != 0 ]; then
-  echo "-E- remote $zfs snapshot command failed"
-  exit 1
+  fatal_and_exit "-E- remote $zfs snapshot command failed" $mailto
fi
# Check to see if the previous backup marker exists in the remote snapshot list.
# previous backup marker again from a previously interrupted replicate.
$zfs rollback -r ${local_pool}/${previous_backup_marker}
if [ $? != 0 ]; then
-  echo "-E- remote incremental $zfs rollback command failed"
-  exit 1
+  fatal_and_exit "-E- remote incremental $zfs rollback command failed" $mailto
fi
# Now it should be safe to send the snaps
-  ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
-  $zfs receive -vF -d ${local_pool}/${remote_pool}
+  # NOTE(review): $throttle_enable/$throttle/$throttle_opt are expected from zfs-scripts.conf — confirm they are defined there
+  if [[ $throttle_enable == 1 && -e $throttle ]]; then
+    ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
+    $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
+  else
+    ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
+    $zfs receive -vF -d ${local_pool}/${remote_pool}
+  fi
if [ $? != 0 ]; then
-  echo "-E- remote incremental $zfs send command failed"
-  exit 1
+  fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
fi
else
# We didn't find any backup markers, next check to see if we have a common snapshot.
if [[ -n "$common" ]]; then
# We found a common snapshot, incrementally send the new snaps
-  ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
-  $zfs receive -vF -d ${local_pool}/${remote_pool}
+  if [[ $throttle_enable == 1 && -e $throttle ]]; then
+    ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
+    $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
+  else
+    ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
+    $zfs receive -vF -d ${local_pool}/${remote_pool}
+  fi
if [ $? != 0 ]; then
-  echo "-E- remote incremental $zfs send command failed"
-  exit 1
+  fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
fi
else
# We did not find any markers or a common snapshot
# Destroy the local filesystem if it exists before receving the full replicate
zfs list ${local_pool}/${remote_fs} > /dev/null 2>&1
if [ $? == 0 ]; then
-  zfs destroy -r ${local_pool}/${remote_fs}
-  if [ $? != 0 ]; then
-    echo "-E- remote full $zfs destroy command failed"
-    exit 1
+  # Guard the destructive local destroy behind an explicit config option
+  if [[ $destroy_local_filesystem_on_full_replicate == 1 ]]; then
+    $zfs destroy -r ${local_pool}/${remote_fs}
+    if [ $? != 0 ]; then
+      fatal_and_exit "-E- remote full $zfs destroy command failed" $mailto
+    fi
+  else
+    echo "-W- We need to destroy a local filesystem before receiving a full stream."
+    echo "    However, since the option is set to prevent this, skipping replicate operation."
+    fatal_and_exit "unable to destroy local filesystem:\n$zfs destroy -r ${local_pool}/${remote_fs} not able to run" $mailto
  fi
fi
# Send the full filesystem
-  ssh $remote $zfs send -R ${current_backup_marker} |
-  $zfs receive -vF -d ${local_pool}/${remote_pool}
+  if [[ $throttle_enable == 1 && -e $throttle ]]; then
+    ssh $remote $zfs send -R ${current_backup_marker} |
+    $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
+  else
+    ssh $remote $zfs send -R ${current_backup_marker} |
+    $zfs receive -vF -d ${local_pool}/${remote_pool}
+  fi
if [ $? != 0 ]; then
-  echo "-E- remote full $zfs send command failed"
-  exit 1
+  fatal_and_exit "-E- remote full $zfs send command failed" $mailto
fi
fi
fi
# Rename the current backup marker to be the previous backup marker
$zfs rename ${local_pool}/${current_backup_marker} ${local_pool}/${previous_backup_marker}
if [ $? != 0 ]; then
-  echo "-E- local $zfs rename command failed"
-  exit 1
+  fatal_and_exit "-E- local $zfs rename command failed" $mailto
fi
ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}
if [ $? != 0 ]; then
-  echo "-E- remote $zfs rename command failed"
-  exit 1
+  fatal_and_exit "-E- remote $zfs rename command failed" $mailto
fi
+# Tmp snapshot-list files (and the remote lockdir) are now removed by the cleanup() EXIT trap
-
-# Remove tmp files
-rm -f $local_list $remote_list