#!/bin/bash
# Author: Carl Baldwin & Alan Pippin
# Description: This script replicates a remote zfs filesystem to a local zfs pool.
#              It keeps all snapshots in sync, removing the ones that have been
#              deleted since the last replicate was performed, and only sends
#              the new or missing snapshots since the last replicate.
# Usage: replicate <hostname> <zfs filesystem>
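# Example invocation (hostname and filesystem name are illustrative):
#   replicate myhost tank/home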

# source our configuration
config="${0%/*}/zfs-scripts.conf"
[ -e "${config}.dist" ] && . "${config}.dist"
[ -e "${config}" ] && . "${config}"
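# The config is expected to define the helper variables used throughout this
# script. A minimal sketch (these values are illustrative assumptions, not
# shipped defaults):
#   zfs="zfs"                              # path to the zfs binary
#   ssh="ssh"                              # transport command; empty for localhost
#   backup_pool="backups"                  # local pool that receives the streams
#   lockdir="/tmp/zfs-admin-lock"          # lock directory shared by the zfs admin tools
#   mailx="mailx"; mailto="root@localhost" # failure notification
#   throttle="/usr/bin/throttle"; throttle_opt=""; throttle_enable=0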

# command line arg parsing
remote=$1
remote_fs=$2
remote_pool=${2%%/*}

# Setup our cleanup and exit trap
cleanup() {
  if [[ -e "$local_list" ]]; then
    rm -f "$local_list"
  fi
  if [[ -e "$remote_list" ]]; then
    rm -f "$remote_list"
  fi
  # Remove the remote lock directory if it is still present
  $ssh $remote ls -d "$lockdir" > /dev/null 2>&1
  if [[ $? == 0 ]]; then
    $ssh $remote rm -rf "$lockdir"
  fi
}
fatal_and_exit() {
  echo -e >&2 "$1"
  # Destroy the current backup markers from the local backup_pool and remote_pool if they exist
  if [[ -n "$current_backup_marker" ]]; then
    # Local backup pool current backup marker
    $zfs list -t snapshot ${backup_pool}/${current_backup_marker} > /dev/null 2>&1
    if [[ $? == 0 ]]; then
      $zfs destroy ${backup_pool}/${current_backup_marker}
    fi
    # Remote pool current backup marker
    $ssh $remote $zfs list -t snapshot ${current_backup_marker} > /dev/null 2>&1
    if [[ $? == 0 ]]; then
      $ssh $remote $zfs destroy ${current_backup_marker}
    fi
  fi
  # send email notification
  if [[ -n "$2" ]]; then
    echo -e "$1" | $mailx -s "zfs replicate on $hostname failed" "$2"
  fi
  exit 1
}
trap fatal_and_exit INT
trap cleanup EXIT

# Declare a function to handle the replicate operation
replicate() {
  zfs_send="$1"
  zfs_recv="$zfs receive -vF -d ${backup_pool}/${remote_pool}"
  glue="$throttle $throttle_opt"
  if [[ $throttle_enable == 1 && -e $throttle ]]; then
    # handle using the glue in the local and remote host cases properly
    if [[ -z "$ssh" ]]; then
      # local host glue case
      $zfs_send | $glue | $zfs_recv
    else
      # remote host glue case
      $ssh $remote "$zfs_send | $glue" | $zfs_recv
    fi
  else
    # no glue case - works for both the local and remote host cases
    $ssh $remote $zfs_send | $zfs_recv
  fi
  # The return code of the zfs_send | zfs_recv operation will be returned to the caller
}
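# Note: the status of a bash pipeline is that of its last command, so the
# callers below actually see the zfs receive status; a send-side failure is
# only caught indirectly. Enabling pipefail (a suggestion, not part of the
# original script) would surface send failures directly:
#   set -o pipefail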

# Make sure we have valid arguments
if [[ -z "$remote" ]] || [[ -z "$remote_fs" ]]; then
  fatal_and_exit "Usage: $0 <hostname> <zfs filesystem>"
fi

# Replicating from localhost needs no ssh transport
if [[ $remote = "localhost" ]]; then
  remote=""
  ssh=""
fi

# Make sure the local backup pool and local receiving filesystem exist, or print some errors
zpool list -H "$backup_pool" >/dev/null 2>&1
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- The local backup pool on $hostname, '$backup_pool', doesn't seem to exist." $mailto
fi
$zfs list "$backup_pool/$remote_pool" >/dev/null 2>&1
if [[ $? != 0 ]]; then
  echo >&2 "-I- The local filesystem for the remote pool, '$backup_pool/$remote_pool', doesn't seem to exist."
  echo >&2 "    Creating the local filesystem to receive the remote pool into: $backup_pool/$remote_pool"
  $zfs create $backup_pool/$remote_pool
  if [[ $? != 0 ]]; then
    fatal_and_exit "-E- local $zfs create command failed on $hostname" $mailto
  fi
fi

# Obtain the zpool guid for the local backup pool
backup_pool_guid=`zpool get guid $backup_pool 2>&1 | grep $backup_pool | awk '{ print $3 }'`
zpool get guid $backup_pool > /dev/null 2>&1
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- Unable to extract the guid for the local backup pool on $hostname: $backup_pool" $mailto
fi
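# "zpool get guid" prints a header row plus one line per pool, e.g.
# (the guid value here is illustrative):
#   NAME     PROPERTY  VALUE                SOURCE
#   backups  guid      9518999570069098999  -
# so grepping for the pool name and taking the third field yields the guid.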

# Turn on shell verbosity
set -x

# Create the remote lockdir before continuing with the replicate
# Spinlock on creating the lock
maxsleeptime=${maxsleeptime:-60}  # longest random wait between attempts (seconds), if the config didn't set one
maxattempts=${maxattempts:-500}   # give up after this many failed attempts, if the config didn't set one
attempts=0
while true; do
  $ssh $remote mkdir "$lockdir" >/dev/null 2>&1
  if [[ $? != 0 ]]; then
    # Another zfs admin tool is running.
    # Wait a random amount of time and try again
    ransleep=$(($RANDOM % $maxsleeptime))
    sleep $ransleep
    ((attempts=attempts+1))
  else
    # No other zfs admin tool is running; we can proceed now.
    break
  fi
  if [[ $attempts -gt $maxattempts ]]; then
    # We've exceeded our maximum while loop count
    echo "-E- The zfs filesystem has been locked down. Skipping replicate operation."
    fail_msg=`$ssh $remote ls -ld $lockdir 2>&1`
    fatal_and_exit "zfs-replicate-all on $hostname unable to obtain zfs admin lock:\n$fail_msg" $mailto
  fi
done
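# (The spinlock above works because mkdir is atomic: exactly one concurrent
#  caller can create the directory, so it doubles as a cross-host mutex.)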

# Setup our backup marker names
current_backup_marker=${remote_fs}@current-backup-${backup_pool_guid}
previous_backup_marker=${remote_fs}@previous-backup-${backup_pool_guid}
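# With the illustrative values above, replicating tank/home would use the
# snapshot names tank/home@current-backup-9518999570069098999 and
# tank/home@previous-backup-9518999570069098999. Keying the markers on the
# backup pool's guid presumably lets several backup pools replicate the same
# source filesystem without clobbering each other's markers.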

# List the snapshots on the remote machine.
remote_list=$(mktemp /tmp/replicate.XXXXXX)
$ssh $remote \
    $zfs list -H -t snapshot |
    grep ^${remote_fs}@ |
    awk '{print $1}' > $remote_list
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- remote $zfs list on $hostname command failed" $mailto
fi

# List the snapshots on the local machine.
# Don't list the current backup marker if it exists on the local side.
# If you do, it can mess up the common-snapshot-finding algorithm below.
local_list=$(mktemp /tmp/replicate.XXXXXX)
$zfs list -H -t snapshot |
    grep ^${backup_pool}/${remote_fs}@ |
    grep -v ^${backup_pool}/${current_backup_marker} |
    awk "{gsub(/^$backup_pool./,\"\",\$1); print \$1}" > $local_list  # strip the leading "$backup_pool/" so names match the remote list
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- local $zfs list on $hostname command failed" $mailto
fi

# Destroy the current backup marker snapshot on the remote system if it exists
grep -q ${current_backup_marker} $remote_list
if [[ $? == 0 ]]; then
  $ssh $remote $zfs destroy ${current_backup_marker}
  if [[ $? != 0 ]]; then
    fatal_and_exit "-E- remote $zfs destroy on $hostname command failed" $mailto
  fi
fi

# Create the current backup marker snapshot on the remote system
$ssh $remote $zfs snapshot ${current_backup_marker}
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- remote $zfs snapshot on $hostname command failed" $mailto
fi

# Check whether the previous backup marker exists in both the remote and the
# local snapshot lists. If it does, perform an incremental replicate. Otherwise:
# 1) check to see if a common snapshot exists, and perform an incremental replicate;
# 2) if no common snapshot exists, destroy the local filesystem and perform a full replicate.
grep -q ${previous_backup_marker} $remote_list
no_markers=$?
grep -q ${previous_backup_marker} $local_list
no_markers=$(($no_markers || $?))
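# grep -q returns 0 on a match, so no_markers is 0 only when the previous
# backup marker was found on both sides.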

if [[ $no_markers == 0 ]]; then
  # We found backup markers, incrementally send the new snaps

  # First, rollback the local backup pool to the previous backup marker in case the previous
  # backup was interrupted for some reason. If we don't do this, the zfs send -R command
  # below may complain about snaps already existing as it tries to resend from the
  # previous backup marker again after a previously interrupted replicate.
  $zfs rollback -r ${backup_pool}/${previous_backup_marker}
  if [[ $? != 0 ]]; then
    fatal_and_exit "-E- local incremental $zfs rollback command failed on $hostname" $mailto
  fi
  # Now it should be safe to send the snaps
  replicate "$zfs send -R -I${previous_backup_marker} ${current_backup_marker}"
  if [[ $? != 0 ]]; then
    fatal_and_exit "-E- remote incremental $zfs send command failed on $hostname" $mailto
  fi
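  # "zfs send -R -I<from> <to>" streams every snapshot between the two
  # markers. Illustration: if the previous marker sits at @a and the current
  # marker at @c, with @b in between, the stream carries @b and @c in one pass.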
else
  # We didn't find any backup markers, next check to see if we have a common snapshot.

  # See what the most recent snapshot on the remote end is.
  latest=$(tail -n 1 $remote_list)

  # Since we're keying off the context lines of the diff, we inject a known
  # difference at the end of each list; this guarantees diff produces context
  # even when the lists are otherwise identical, so it always displays the
  # most recent common snapshot.
  echo bogus.remote >> $remote_list
  echo bogus.local >> $local_list
  common=$(diff -u $remote_list $local_list | grep '^ ' | tail -n 1)
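  # Worked example (illustrative names): if the remote list ends @snap1 @snap2
  # @snap3 and the local list ends @snap1 @snap2, the unified diff prints
  # @snap1 and @snap2 as context lines (prefixed with a space) and the rest as
  # changes, so the last '^ ' line is the most recent common snapshot, @snap2.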

  if [[ -n "$common" ]]; then
    # We found a common snapshot, incrementally send the new snaps.
    # ${common/*@/@} reduces "filesystem@snap" to "@snap" for the -I argument.
    replicate "$zfs send -R -I${common/*@/@} ${current_backup_marker}"
    if [[ $? != 0 ]]; then
      fatal_and_exit "-E- remote incremental $zfs send command failed on $hostname" $mailto
    fi
  else
    # We did not find any markers or a common snapshot.
    # At this point, we'll have to send the entire filesystem.
    # Destroy the local filesystem if it exists before receiving the full replicate.
    $zfs list ${backup_pool}/${remote_fs} > /dev/null 2>&1
    if [[ $? == 0 ]]; then
      if [[ $destroy_local_filesystem_on_full_replicate == 1 ]]; then
        $zfs destroy -r ${backup_pool}/${remote_fs}
        if [[ $? != 0 ]]; then
          fatal_and_exit "-E- local full $zfs destroy command failed on $hostname" $mailto
        fi
      else
        echo "-W- We need to destroy a local filesystem before receiving a full stream."
        echo "    However, since the option is set to prevent this, skipping replicate operation."
        fatal_and_exit "unable to destroy local filesystem:\n$zfs destroy -r ${backup_pool}/${remote_fs} not able to run on $hostname" $mailto
      fi
    fi

    # Send the full filesystem
    replicate "$zfs send -R ${current_backup_marker}"
    if [[ $? != 0 ]]; then
      fatal_and_exit "-E- remote full $zfs send command failed on $hostname" $mailto
    fi
  fi
fi

# destroy the previous backup markers now that we've replicated past them
# don't check the return codes here because these may not exist, and that is ok
$zfs destroy ${backup_pool}/${previous_backup_marker} > /dev/null 2>&1
$ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1

# Rename the current backup marker to be the previous backup marker
$zfs rename ${backup_pool}/${current_backup_marker} ${backup_pool}/${previous_backup_marker}
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- local $zfs rename command failed on $hostname" $mailto
fi
$ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- remote $zfs rename command failed on $hostname" $mailto
fi
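
# Both pools now carry only the previous backup marker, which seeds the
# incremental send on the next run of this script.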