diff --git a/zfs-replicate b/zfs-replicate
index 413d078..174d6dd 100755
--- a/zfs-replicate
+++ b/zfs-replicate
@@ -1,26 +1,24 @@
-#/bin/bash
+#!/bin/bash
+# Author: Carl Baldwin & Alan Pippin
+# Description: This script replicates a remote zfs filesystem to a local zfs pool.
+#              This script will keep all snapshots in sync, removing the ones
+#              that have been deleted since the last replicate was performed.
+#              This script will only send the new, or missing, snapshots since
+#              the last replicate was performed.
 
 # Usage: replicate <hostname> <zfs filesystem>
+
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
+
+# command line arg parsing
 remote=$1
 remote_fs=$2
 remote_pool=${2%%/*}
-remote_lockdir="/tmp/zfs-admin-lock"
 hostname=`hostname`
 
-# Set the name of the local pool used to store the backup of the remote
-local_pool=backups
-
-# Set the email address to send notification to
-mailto=root@pippins.net
-mailx=/usr/bin/mailx
-
-# When this variable is set, local filesystems will be destroyed
-# before receiving a full streams into them from the remote source.
-destroy_local_filesystem_on_full_replicate=0
-
-# The ssh connection doesn't find zfs without this.
-zfs=/usr/sbin/zfs
-
 # Setup our cleanup and exit trap
 cleanup() {
   if [[ -e "$local_list" ]]; then
@@ -29,18 +27,31 @@ cleanup() {
   if [[ -e "$remote_list" ]]; then
     rm -f $remote_list
   fi
-  if [[ -n "$remote" ]]; then
-    ssh $remote ls -d "$remote_lockdir" > /dev/null 2>&1
-    if [[ $? == 0 ]]; then
-      ssh $remote rm -rf "$remote_lockdir"
-    fi
+  $ssh $remote ls -d "$lockdir" > /dev/null 2>&1
+  if [[ $? == 0 ]]; then
+    $ssh $remote rm -rf "$lockdir"
   fi
 }
 fatal_and_exit() {
   echo -e 2>&1 "$1"
+  # Destroy the backup markers on the local filesystem if they exist
+  if [[ -n "$current_backup_marker" ]]; then
+    zfs list -t snapshot ${backup_pool}/${current_backup_marker} > /dev/null 2>&1
+    if [ $? == 0 ]; then
+      $zfs destroy ${backup_pool}/${current_backup_marker}
+    fi
+  fi
+  if [[ -n "$previous_backup_marker" ]]; then
+    zfs list -t snapshot ${backup_pool}/${previous_backup_marker} > /dev/null 2>&1
+    if [ $? == 0 ]; then
+      $zfs destroy ${backup_pool}/${previous_backup_marker}
+    fi
+  fi
+  # send email notification
   if [[ -n "$2" ]]; then
     echo -e "$1" | $mailx -s "zfs replicate on $hostname failed" "$2"
   fi
+  # exit
   exit 1
 }
 trap fatal_and_exit INT
@@ -51,26 +62,32 @@ if [[ -z "$remote" ]] || [[ -z "$remote_fs" ]]; then
   fatal_and_exit "Usage: $0 <hostname> <zfs filesystem>"
 fi
 
-# Make sure the local pool and local receiving filesystem exist, or print some errors
-zpool list -H "$local_pool" >/dev/null 2>&1
+# check for localhost
+if [[ $remote = "localhost" ]]; then
+  remote=""
+  ssh=""
+fi
+
+# Make sure the local backup pool and local receiving filesystem exist, or print some errors
+zpool list -H "$backup_pool" >/dev/null 2>&1
 if [ $? != 0 ]; then
-  fatal_and_exit "-E- The local pool, '$local_pool' doesn't seem to exist." $mailto
+  fatal_and_exit "-E- The local backup pool, '$backup_pool' doesn't seem to exist." $mailto
 fi
-zfs list "$local_pool/$remote_pool" >/dev/null 2>&1
+zfs list "$backup_pool/$remote_pool" >/dev/null 2>&1
 if [ $? != 0 ]; then
-  echo >&2 "-I- The local filesystem for the remote pool, '$local_pool/$remote_pool' doesn't seem to exist."
-  echo >&2 "    Creating the local filesystem to receive the remote pool into: $local_pool/$remote_pool"
-  $zfs create $local_pool/$remote_pool
+  echo >&2 "-I- The local filesystem for the remote pool, '$backup_pool/$remote_pool' doesn't seem to exist."
+  echo >&2 "    Creating the local filesystem to receive the remote pool into: $backup_pool/$remote_pool"
+  $zfs create $backup_pool/$remote_pool
   if [ $? != 0 ]; then
     fatal_and_exit "-E- remote $zfs create command failed" $mailto
   fi
 fi
 
-# Obtain the zpool guid for the local pool
-local_pool_guid=`zpool get guid $local_pool 2>&1 | grep $local_pool | awk '{ print $3 }'`
-zpool get guid $local_pool > /dev/null 2>&1
+# Obtain the zpool guid for the local backup pool
+backup_pool_guid=`zpool get guid $backup_pool 2>&1 | grep $backup_pool | awk '{ print $3 }'`
+zpool get guid $backup_pool > /dev/null 2>&1
 if [ $? != 0 ]; then
-  fatal_and_exit "-E- Unable to extract the guid for the local pool: $local_pool" $mailto
+  fatal_and_exit "-E- Unable to extract the guid for the local backup pool: $backup_pool" $mailto
 fi
 
 # Turn on shell verbosity
@@ -79,10 +96,10 @@ set -x
 # Create the remote lockdir before continuing with the replicate
 # Spinlock on creating the lock
 maxsleeptime=60
-maxattempts=100
+maxattempts=500
 attempts=0
 while true; do
-  ssh $remote mkdir "$remote_lockdir" >/dev/null 2>&1
+  $ssh $remote mkdir "$lockdir" >/dev/null 2>&1
   if [ $? != 0 ]; then
     # Another zfs admin tool is running.
     # Wait a random amount of time and try again
@@ -96,18 +113,18 @@ while true; do
   if [[ $attempts -gt $maxattempts ]]; then
     # We've exceeded our maximum while loop count
     echo "-E- The zfs filesystem has been locked down. Skipping replicate operation."
-    fail_msg=`ssh $remote ls -ld $remote_lockdir 2>&1`
+    fail_msg=`$ssh $remote ls -ld $lockdir 2>&1`
     fatal_and_exit "zfs-replicate-all unable to obtain zfs admin lock:\n$fail_msg" $mailto
   fi
 done
 
 # Setup our backup marker names
-current_backup_marker=${remote_fs}@current-backup-${local_pool_guid}
-previous_backup_marker=${remote_fs}@previous-backup-${local_pool_guid}
+current_backup_marker=${remote_fs}@current-backup-${backup_pool_guid}
+previous_backup_marker=${remote_fs}@previous-backup-${backup_pool_guid}
 
 # List the snapshots on the remote machine.
 remote_list=$(mktemp /tmp/replicate.XXXXXX)
-ssh $remote \
+$ssh $remote \
     $zfs list -H -t snapshot |
     grep ^${remote_fs}@ |
     awk '{print$1}' > $remote_list
@@ -120,9 +137,9 @@ fi
 # If you do, it can mess up the common finding algorithm below.
 local_list=$(mktemp /tmp/replicate.XXXXXX)
 $zfs list -H -t snapshot |
-    grep ^${local_pool}/${remote_fs}@ |
-    grep -v ^${local_pool}/${current_backup_marker} |
-    awk "{gsub(/^$local_pool./,\"\",\$1); print\$1}" > $local_list
+    grep ^${backup_pool}/${remote_fs}@ |
+    grep -v ^${backup_pool}/${current_backup_marker} |
+    awk "{gsub(/^$backup_pool./,\"\",\$1); print\$1}" > $local_list
 if [ $? != 0 ]; then
   fatal_and_exit "-E- local $zfs list command failed" $mailto
 fi
@@ -130,14 +147,14 @@ fi
 # Destroy the current backup marker snapshot on the remote system if it exists
 grep -q ${current_backup_marker} $remote_list
 if [ $? == 0 ]; then
-  ssh $remote $zfs destroy ${current_backup_marker}
+  $ssh $remote $zfs destroy ${current_backup_marker}
   if [ $? != 0 ]; then
     fatal_and_exit "-E- remote $zfs destroy command failed" $mailto
   fi
 fi
 
 # Create the current backup marker snapshot on the remote system
-ssh $remote $zfs snapshot ${current_backup_marker}
+$ssh $remote $zfs snapshot ${current_backup_marker}
 if [ $? != 0 ]; then
   fatal_and_exit "-E- remote $zfs snapshot command failed" $mailto
 fi
@@ -155,17 +172,22 @@ no_markers=$(($no_markers || $?))
 
 if [ $no_markers == 0 ]; then
   # We found backup markers, incrementally send the new snaps
-  # First, rollback the local pool to the previous backup marker in case the previous
+  # First, rollback the local backup pool to the previous backup marker in case the previous
   # backup was interrupted for some reason. If we don't do this, the zfs send -R command
   # below may complain about snaps already existing as it tries to resend from the
   # previous backup marker again from a previously interrupted replicate.
-  $zfs rollback -r ${local_pool}/${previous_backup_marker}
+  $zfs rollback -r ${backup_pool}/${previous_backup_marker}
   if [ $? != 0 ]; then
     fatal_and_exit "-E- remote incremental $zfs rollback command failed" $mailto
   fi
   # Now it should be safe to send the snaps
-  ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
-      $zfs receive -vF -d ${local_pool}/${remote_pool}
+  if [[ $throttle_enable == 1 && -e $throttle ]]; then
+    $ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
+        $throttle $throttle_opt | $zfs receive -vF -d ${backup_pool}/${remote_pool}
+  else
+    $ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
+        $zfs receive -vF -d ${backup_pool}/${remote_pool}
+  fi
   if [ $? != 0 ]; then
     fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
   fi
@@ -184,8 +206,13 @@ else
 
   if [[ -n "$common" ]]; then
     # We found a common snapshot, incrementally send the new snaps
-    ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
-        $zfs receive -vF -d ${local_pool}/${remote_pool}
+    if [[ $throttle_enable == 1 && -e $throttle ]]; then
+      $ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
+          $throttle $throttle_opt | $zfs receive -vF -d ${backup_pool}/${remote_pool}
+    else
+      $ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
+          $zfs receive -vF -d ${backup_pool}/${remote_pool}
+    fi
     if [ $? != 0 ]; then
       fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
     fi
@@ -193,22 +220,27 @@ else
     # We did not find any markers or a common snapshot
     # At this point, we'll have to send the entire filesystem
     # Destroy the local filesystem if it exists before receving the full replicate
-    zfs list ${local_pool}/${remote_fs} > /dev/null 2>&1
+    zfs list ${backup_pool}/${remote_fs} > /dev/null 2>&1
     if [ $? == 0 ]; then
       if [[ $destroy_local_filesystem_on_full_replicate == 1 ]]; then
-        $zfs destroy -r ${local_pool}/${remote_fs}
+        $zfs destroy -r ${backup_pool}/${remote_fs}
         if [ $? != 0 ]; then
          fatal_and_exit "-E- remote full $zfs destroy command failed" $mailto
        fi
      else
        echo "-W- We need to destroy a local filesystem before receiving a full stream."
        echo "    However, since the option is set to prevent this, skipping replicate operation."
-      fatal_and_exit "unable to destroy local filesystem:\n$zfs destroy -r ${local_pool}/${remote_fs} not able to run" $mailto
+      fatal_and_exit "unable to destroy local filesystem:\n$zfs destroy -r ${backup_pool}/${remote_fs} not able to run" $mailto
      fi
    fi
    # Send the full filesystem
-   ssh $remote $zfs send -R ${current_backup_marker} |
-       $zfs receive -vF -d ${local_pool}/${remote_pool}
+   if [[ $throttle_enable == 1 && -e $throttle ]]; then
+     $ssh $remote $zfs send -R ${current_backup_marker} |
+         $throttle $throttle_opt | $zfs receive -vF -d ${backup_pool}/${remote_pool}
+   else
+     $ssh $remote $zfs send -R ${current_backup_marker} |
+         $zfs receive -vF -d ${backup_pool}/${remote_pool}
+   fi
    if [ $? != 0 ]; then
      fatal_and_exit "-E- remote full $zfs send command failed" $mailto
    fi
@@ -217,16 +249,15 @@ fi
 
 # destroy the previous backup markers now that we've replicated past them
 # don't check the return codes here because these may not exist, and that is ok
-$zfs destroy ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
-ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1
+$zfs destroy ${backup_pool}/${previous_backup_marker} > /dev/null 2>&1
+$ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1
 
 # Rename the current backup marker to be the previous backup marker
-$zfs rename ${local_pool}/${current_backup_marker} ${local_pool}/${previous_backup_marker}
+$zfs rename ${backup_pool}/${current_backup_marker} ${backup_pool}/${previous_backup_marker}
 if [ $? != 0 ]; then
   fatal_and_exit "-E- local $zfs rename command failed" $mailto
 fi
-ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}
+$ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}
 if [ $? != 0 ]; then
   fatal_and_exit "-E- remote $zfs rename command failed" $mailto
 fi
-
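
Note on configuration: this revision stops hardcoding its settings and instead sources
zfs-scripts.conf (or the shipped zfs-scripts.conf.dist) from the script's own directory.
That file is not part of this diff, so the sketch below is only a guess at a minimal
configuration, inferred from the variables the script now references. The ssh and
throttle entries in particular are assumptions; the other values simply mirror the
defaults that were removed from the old version of the script.

# zfs-scripts.conf -- hypothetical example; not taken from the repository
backup_pool=backups                 # local pool that receives the replicated filesystems
mailto=root@pippins.net             # where failure notifications are sent
mailx=/usr/bin/mailx                # mailer used by fatal_and_exit
zfs=/usr/sbin/zfs                   # full path; the ssh connection doesn't find zfs without it
ssh=ssh                             # assumed; the script blanks this when replicating from localhost
lockdir="/tmp/zfs-admin-lock"       # lock directory shared by the zfs admin scripts
throttle_enable=0                   # set to 1 to pipe the zfs send stream through a throttling program
throttle=/usr/bin/throttle          # assumed path to that program
throttle_opt=""                     # its command-line options, if any
destroy_local_filesystem_on_full_replicate=0   # 1 = allow destroying the local fs before a full receive

# Hypothetical invocation, matching the usage line above (host and filesystem are examples):
#   zfs-replicate fileserver.example.com tank/home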