diff --git a/zfs-replicate b/zfs-replicate
index da3df12..57680fc 100755
--- a/zfs-replicate
+++ b/zfs-replicate
@@ -1,116 +1,214 @@
-#!/bin/bash
-
-# Author: Carl Baldwin & Alan Pippin
-# Description: This script replicates a given zfs filesystem to a given zfs pool.
-#              This script will keep all snapshots in sync, removing the ones
-#              that have been deleted since the last replicate was performed.
-#              This script will only send the new, or missing, snapshots since
-#              the last replicate was performed.
-
-# In test mode (test=1) commands are echoed, not executed
-test=0
-
-[ $test == 0 ] && exec >> /var/log/zfs/zfs-replicate.log 2>&1
-
-# Usage: zfs-backup [filesystem] [destination_pool]
-# This script has a limitation with children under a given filesystem.
-# You must initially backup the parent filesystems first using this script
-# before backing up any of the children filesystems.
-
-fs=$1
-fs=${fs%/}
-fsname=${1#*/}
-srcpool=${1%%/*}
-srcfs="${srcpool}/$fsname"
-srcfs=${srcfs%/}
-dstpool=$2
-dstfs="${dstpool}/$srcfs"
-dstfs=${dstfs%/}
-nodstsnaps=0
-common=""
-
-if [ $test == 1 ]; then
-  echo "fs: $fs"
-  echo "fsname: $fsname"
-  echo "srcpool: $srcpool"
-  echo "srcfs: $srcfs"
-  echo "dstpool: $dstpool"
-  echo "dstfs: $dstfs"
+#!/bin/bash
+
+# Usage: replicate <remote host> <remote zfs filesystem>
+remote=$1
+remote_fs=$2
+remote_pool=${2%%/*}
+remote_lockdir="/tmp/zfs-admin-lock"
+
+# Set the name of the local pool used to store the backup of the remote
+local_pool=backups
+
+# Set the email address to send notifications to
+mailto=alan@pippins.net
+
+# The ssh connection doesn't find zfs without this.
+zfs=/usr/sbin/zfs
+
+# Make sure we have valid arguments
+if [[ -z "$remote" ]] || [[ -z "$remote_fs" ]]; then
+  echo "Usage: $0 <remote host> <remote zfs filesystem>"
+  exit 1
 fi
 
-if ! zpool list -H "$srcpool" >/dev/null 2>&1; then
-  echo >&2 "-E- The source pool, '$srcpool' doesn't seem to exist."
+# Make sure the local pool and local receiving filesystem exist; print errors otherwise
+zpool list -H "$local_pool" >/dev/null 2>&1
+if [ $? != 0 ]; then
+  echo >&2 "-E- The local pool, '$local_pool' doesn't seem to exist."
   exit 1
 fi
+zfs list "$local_pool/$remote_pool" >/dev/null 2>&1
+if [ $? != 0 ]; then
+  echo >&2 "-I- The local filesystem for the remote pool, '$local_pool/$remote_pool' doesn't seem to exist."
+  echo >&2 "    Creating the local filesystem to receive the remote pool into: $local_pool/$remote_pool"
+  $zfs create $local_pool/$remote_pool
+  if [ $? != 0 ]; then
+    echo "-E- local $zfs create command failed"
+    exit 1
+  fi
+fi
 
-if ! zpool list -H "$dstpool" >/dev/null 2>&1; then
-  echo >&2 "-E- The destination pool, '$dstpool' doesn't seem to exist."
+# Obtain the zpool guid for the local pool
+local_pool_guid=`zpool get guid $local_pool 2>&1 | grep $local_pool | awk '{ print $3 }'`
+zpool get guid $local_pool > /dev/null 2>&1
+if [ $? != 0 ]; then
+  echo >&2 "-E- Unable to extract the guid for the local pool: $local_pool"
  exit 1
 fi
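+# For reference (illustrative pool name and guid): without -H, `zpool get guid`
+# prints a header plus one NAME/PROPERTY/VALUE/SOURCE row, e.g.
+#   NAME     PROPERTY  VALUE                 SOURCE
+#   backups  guid      13841952838512810562  default
+# so the grep drops the header and awk keeps the third (VALUE) column.
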
-if ! zfs list -rH -t snapshot "$dstfs" 2>&1 | grep "$dstfs@" > /dev/null 2>&1; then
-  echo >&2 "-W- No snapshots detected on the destination drive for this filesystem: $dstfs"
-  if zfs list -t filesystem | grep "$dstfs"; then
-    echo >&2 "-I- Found zfs filesystem $dstfs on the destination pool $dstpool without any snapshots"
-    echo >&2 "-I- Removing the zfs filesystem: $dstfs"
-    zfs destroy "$dstfs"
+# Turn on shell verbosity
+set -x
+
+# Create the remote lockdir before continuing with the replicate
+# Spinlock on creating the lock
+maxsleeptime=60
+maxattempts=100
+attempts=0
+while true; do
+  ssh $remote mkdir "$remote_lockdir" >/dev/null 2>&1
+  if [ $? != 0 ]; then
+    # Another zfs admin tool is running.
+    # Wait a random amount of time and try again
+    ransleep=$(($RANDOM % $maxsleeptime))
+    sleep $ransleep
+    ((attempts=attempts+1))
+  else
+    # No other zfs admin tool is running; we can proceed now.
+    break
   fi
+  if [[ $attempts -gt $maxattempts ]]; then
+    # We've exceeded our maximum while loop count
+    echo "-W- The zfs filesystem has been locked down. Skipping replicate operation."
+    ssh $remote ls -ld $remote_lockdir | /usr/bin/mailx -s "zfs-replicate-all unable to obtain zfs admin lock" $mailto
+    exit 1
+  fi
-  nodstsnaps=1
+done
+
+# Declare a cleanup() function to remove the remote lockdir
+cleanup() { ssh $remote rm -rf "$remote_lockdir"; }
+trap cleanup EXIT
+
+# Set up our backup marker names
+current_backup_marker=${remote_fs}@current-backup-${local_pool_guid}
+previous_backup_marker=${remote_fs}@previous-backup-${local_pool_guid}
+
+# List the snapshots on the remote machine.
+remote_list=$(mktemp /tmp/replicate.XXXXXX)
+ssh $remote \
+    $zfs list -H -t snapshot |
+    grep ^${remote_fs}@ |
+    awk '{print$1}' > $remote_list
+if [ $? != 0 ]; then
+  echo "-E- remote $zfs list command failed"
+  exit 1
+fi
+
+# List the snapshots on the local machine.
+local_list=$(mktemp /tmp/replicate.XXXXXX)
+$zfs list -H -t snapshot |
+    grep ^${local_pool}/${remote_fs}@ |
+    awk "{gsub(/^$local_pool./,\"\",\$1); print\$1}" > $local_list
+if [ $? != 0 ]; then
+  echo "-E- local $zfs list command failed"
+  exit 1
 fi
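+# For comparison purposes, the awk above strips the leading local pool name, so
+# a local snapshot such as backups/tank/home@20100310 (illustrative names) shows
+# up in $local_list as tank/home@20100310, matching the remote list's naming.
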
-if [ $nodstsnaps == 0 ]; then
-  zfs list -rH -t snapshot $srcfs | grep "$srcfs@" | awk '{print $1}' > /tmp/source-list
-  zfs list -rH -t snapshot $dstfs | grep "$dstfs@" | sed "s,$dstpool/,," | awk '{print $1}' > /tmp/destination-list
-  diff -u /tmp/source-list /tmp/destination-list | grep -v '^+++' | awk '/^\+/ {print}' | sed "s,^\+,$dstpool/," > /tmp/obsolete-snapshots
-  rm -f /tmp/source-list /tmp/destination-list
-
-  echo >&2 "Removing obsolete backups from the destination pool"
-  for snapshot in $(cat /tmp/obsolete-snapshots); do
-    echo >&2 "Removing '$snapshot' from destination."
-    [ $test == 0 ] && zfs destroy "$snapshot"
-  done
-
-  echo >&2 "Rolling back to the most recent snapshot on the destination."
-  [ $test == 0 ] && zfs rollback $(zfs list -rH -t snapshot $dstfs | grep "$dstfs@" | awk '{snap=$1} END {print snap}')
-
-  echo >&2 "Calculating the most recent common snapshot between the two filesystems."
-  if zfs list -H "$dstfs" > /dev/null 2>&1; then
-    for snap in $(zfs list -rH -t snapshot "$dstfs" | grep "$dstfs@" |
-                  sed 's,.*@,,' | awk '{print$1}'); do
-      if zfs list -rH -t snapshot "$fs" | grep "$fs@" | sed 's,.*@,,' | awk '{print$1}' | grep "^${snap}$" >/dev/null 2>&1; then
-        common=$snap
-      fi
-    done
+# Destroy the current backup marker snapshot on the remote system if it exists
+grep -q ${current_backup_marker} $remote_list
+if [ $? == 0 ]; then
+  ssh $remote $zfs destroy ${current_backup_marker}
+  if [ $? != 0 ]; then
+    echo "-E- remote $zfs destroy command failed"
+    exit 1
   fi
 fi
 
-base=$common
-foundcommon=false
-if [ -z "$common" ]; then
-  foundcommon=true
+# Create the current backup marker snapshot on the remote system
+ssh $remote $zfs snapshot ${current_backup_marker}
+if [ $? != 0 ]; then
+  echo "-E- remote $zfs snapshot command failed"
+  exit 1
 fi
 
-for snap in $(zfs list -rH -t snapshot "$fs" | grep "$fs@" |
-              sed 's,.*@,,' | awk '{print$1}'); do
-  if [ "$snap" = "$common" ]; then
-    foundcommon=true
-    continue
+# Check to see if the previous backup marker exists in both the remote and the
+# local snapshot lists. If it does, perform an incremental replicate. Otherwise:
+# 1) check to see if a common snapshot exists, and perform an incremental replicate.
+# 2) if no common snapshot exists, destroy the local filesystem, and perform a full replicate.
+grep -q ${previous_backup_marker} $remote_list
+no_markers=$?
+grep -q ${previous_backup_marker} $local_list
+no_markers=$(($no_markers || $?))
+
+if [ $no_markers == 0 ]; then
+  # We found backup markers; incrementally send the new snaps
+
+  # First, roll back the local pool to the previous backup marker in case the previous
+  # backup was interrupted for some reason. If we don't do this, the zfs send -R command
+  # below may complain about snaps already existing as it tries to resend from the
+  # previous backup marker again from a previously interrupted replicate.
+  $zfs rollback -r ${local_pool}/${previous_backup_marker}
+  if [ $? != 0 ]; then
+    echo "-E- local incremental $zfs rollback command failed"
+    exit 1
   fi
-
-  if $foundcommon; then
-    if [ -z "$base" ]; then
-      echo >&2 "Sending '$fs@$snap'"
-      [ $test == 0 ] && zfs set readonly=on "$dstpool"
-      [ $test == 0 ] && zfs set atime=off "$dstpool"
-      [ $test == 0 ] && zfs set sharenfs=off "$dstpool"
-      [ $test == 0 ] && zfs set mountpoint=legacy "$dstpool"
-      [ $test == 0 ] && zfs send "$fs@$snap" | zfs recv -v "$dstfs"
-    else
-      echo >&2 "Sending '$fs@$base' -> '$fs@$snap'"
-      [ $test == 0 ] && zfs send -i "$fs@$base" "$fs@$snap" | zfs recv -v "$dstfs"
+  # Now it should be safe to send the snaps
+  ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
+      $zfs receive -vF -d ${local_pool}/${remote_pool}
+  if [ $? != 0 ]; then
+    echo "-E- remote incremental $zfs send command failed"
+    exit 1
+  fi
+else
+  # We didn't find any backup markers; next, check to see if we have a common snapshot.
+
+  # See what the most recent snapshot on the remote end is.
+  latest=$(tail -n 1 $remote_list)
+
+  # I did this to make sure that diff would always display the most recent common snapshot.
+  # Since we're keying off the context lines of the diff, we need to ensure we get context
+  # by injecting a known difference in case no others exist in the lists.
+  echo bogus.remote >> $remote_list
+  echo bogus.local  >> $local_list
+  common=$(diff -u $remote_list $local_list | grep '^ ' | tail -n 1)
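+  # For example (hypothetical snapshot names): if the remote list holds
+  # tank/home@snap1, @snap2, and @snap3 while the local list holds only @snap1
+  # and @snap2, the unified diff emits tank/home@snap1 and tank/home@snap2 as
+  # context lines ('^ '), and tail -n 1 picks tank/home@snap2, the most recent
+  # snapshot common to both sides.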
+
+  if [[ -n "$common" ]]; then
+    # We found a common snapshot; incrementally send the new snaps
+    ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
+        $zfs receive -vF -d ${local_pool}/${remote_pool}
+    if [ $? != 0 ]; then
+      echo "-E- remote incremental $zfs send command failed"
+      exit 1
+    fi
+  else
+    # We did not find any markers or a common snapshot.
+    # At this point, we'll have to send the entire filesystem.
+    # Destroy the local filesystem if it exists before receiving the full replicate.
+    $zfs list ${local_pool}/${remote_fs} > /dev/null 2>&1
+    if [ $? == 0 ]; then
+      $zfs destroy -r ${local_pool}/${remote_fs}
+      if [ $? != 0 ]; then
+        echo "-E- local full $zfs destroy command failed"
+        exit 1
+      fi
+    fi
+    # Send the full filesystem
+    ssh $remote $zfs send -R ${current_backup_marker} |
+        $zfs receive -vF -d ${local_pool}/${remote_pool}
+    if [ $? != 0 ]; then
+      echo "-E- remote full $zfs send command failed"
+      exit 1
     fi
-    base=$snap
   fi
-done
+fi
+
+# Destroy the previous backup markers now that we've replicated past them.
+# Don't check the return codes here because these may not exist, and that is ok.
+$zfs destroy ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
+ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1
+
+# Rename the current backup marker to be the previous backup marker
+$zfs rename ${local_pool}/${current_backup_marker} ${local_pool}/${previous_backup_marker}
+if [ $? != 0 ]; then
+  echo "-E- local $zfs rename command failed"
+  exit 1
+fi
+ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}
+if [ $? != 0 ]; then
+  echo "-E- remote $zfs rename command failed"
+  exit 1
+fi
+
+# Remove tmp files
+rm -f $local_list $remote_list
 
-true
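
Note: after this change the script takes a remote host and a remote zfs
filesystem instead of a local filesystem/destination pool pair. A minimal
invocation (the host and filesystem names here are illustrative) would be:

    ./zfs-replicate myhost tank/home

The mailx subject above references zfs-replicate-all, which suggests this
script is normally driven per filesystem by that wrapper (e.g. from cron).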