#!/bin/bash
# Usage: replicate <hostname> <zfs filesystem>
remote=$1
remote_fs=$2
remote_pool=${remote_fs%%/*}    # top-level pool component of the remote filesystem
remote_lockdir="/tmp/zfs-admin-lock"

# Set the name of the local pool used to store the backup of the remote
local_pool=backups              # placeholder; set to the name of your local backup pool

# Set the email address to send notification to
mailto=alan@pippins.net

# The ssh connection doesn't find zfs without this.
zfs=/usr/sbin/zfs               # adjust if zfs lives elsewhere on the remote host

# Make sure we have valid arguments
if [[ -z "$remote" ]] || [[ -z "$remote_fs" ]]; then
  echo "Usage: $0 <hostname> <zfs filesystem>"
  exit 1
fi
# Make sure the local pool and local receiving filesystem exist, or print some errors
zpool list -H "$local_pool" >/dev/null 2>&1
if [ $? != 0 ]; then
  echo >&2 "-E- The local pool, '$local_pool', doesn't seem to exist."
  exit 1
fi
zfs list "$local_pool/$remote_pool" >/dev/null 2>&1
if [ $? != 0 ]; then
  echo >&2 "-I- The local filesystem for the remote pool, '$local_pool/$remote_pool', doesn't seem to exist."
  echo >&2 "    Creating the local filesystem to receive the remote pool into: $local_pool/$remote_pool"
  $zfs create $local_pool/$remote_pool
  if [ $? != 0 ]; then
    echo "-E- local $zfs create command failed"
    exit 1
  fi
fi
# Obtain the zpool guid for the local pool
local_pool_guid=$(zpool get guid $local_pool 2>&1 | grep $local_pool | awk '{ print $3 }')
zpool get guid $local_pool > /dev/null 2>&1
if [ $? != 0 ]; then
  echo >&2 "-E- Unable to extract the guid for the local pool: $local_pool"
  exit 1
fi

# Turn on shell verbosity
set -x
# Create the remote lockdir before continuing with the replicate
# Spinlock on creating the lock
maxsleeptime=60                 # placeholder: max seconds to sleep between lock attempts
maxattempts=100                 # placeholder: give up after this many attempts
attempts=0
while true; do
  ssh $remote mkdir "$remote_lockdir" >/dev/null 2>&1
  if [ $? != 0 ]; then
    # Another zfs admin tool is running.
    # Wait a random amount of time and try again
    ransleep=$(($RANDOM % $maxsleeptime))
    sleep $ransleep
    ((attempts=attempts+1))
  else
    # No other zfs admin tool is running; we can proceed now.
    break
  fi
  if [[ $attempts -gt $maxattempts ]]; then
    # We've exceeded our maximum while loop count
    echo "-W- The zfs filesystem has been locked down. Skipping replicate operation."
    ssh $remote ls -ld $remote_lockdir | /usr/bin/mailx -s "zfs-replicate-all unable to obtain zfs admin lock" $mailto
    exit 1
  fi
done
# Declare a cleanup() method to remove the remote lockdir, and run it on any exit
cleanup() { ssh $remote rm -rf "$remote_lockdir"; }
trap cleanup INT TERM EXIT
# Setup our backup marker names
current_backup_marker=${remote_fs}@current-backup-${local_pool_guid}
previous_backup_marker=${remote_fs}@previous-backup-${local_pool_guid}
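# The markers bracket each replication run: "current" is snapshotted at the start of a run
# and renamed to "previous" once the run completes, so the next run knows where to resume.
# The local pool guid in the name keeps markers distinct when replicating to multiple backup pools.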
# List the snapshots on the remote machine.
remote_list=$(mktemp /tmp/replicate.XXXXXX)
ssh $remote \
    $zfs list -H -t snapshot |
    grep ^${remote_fs}@ |
    awk '{print$1}' > $remote_list
if [ $? != 0 ]; then
  echo "-E- remote $zfs list command failed"
  exit 1
fi
# List the snapshots on the local machine.
# The gsub() strips the leading "<local_pool>/" so these names compare directly with the remote list.
local_list=$(mktemp /tmp/replicate.XXXXXX)
$zfs list -H -t snapshot |
    grep ^${local_pool}/${remote_fs}@ |
    awk "{gsub(/^$local_pool./,\"\",\$1); print\$1}" > $local_list
if [ $? != 0 ]; then
  echo "-E- local $zfs list command failed"
  exit 1
fi
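# Refresh the current backup marker: remove any marker left over from an earlier run,
# then snapshot the filesystem as it stands right now so this run has a fixed endpoint to send to.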
# Destroy the current backup marker snapshot on the remote system if it exists
grep -q ${current_backup_marker} $remote_list
if [ $? == 0 ]; then
  ssh $remote $zfs destroy ${current_backup_marker}
  if [ $? != 0 ]; then
    echo "-E- remote $zfs destroy command failed"
    exit 1
  fi
fi
# Create the current backup marker snapshot on the remote system
ssh $remote $zfs snapshot ${current_backup_marker}
if [ $? != 0 ]; then
  echo "-E- remote $zfs snapshot command failed"
  exit 1
fi
# Check to see if the previous backup marker exists in the remote snapshot list.
# Check to see if the previous backup marker exists in the local snapshot list.
# If the previous backup marker exists on both sides, perform an incremental replicate. Else:
# 1) check to see if a common snapshot exists, and perform an incremental replicate.
# 2) if no common snapshot exists, destroy the local filesystem, and perform a full replicate.
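# no_markers ends up 0 only when the previous backup marker was found in both lists;
# if either grep fails, the arithmetic OR below leaves it nonzero.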
grep -q ${previous_backup_marker} $remote_list
no_markers=$?
grep -q ${previous_backup_marker} $local_list
no_markers=$(($no_markers || $?))

if [ $no_markers == 0 ]; then
  # We found backup markers, incrementally send the new snaps

  # First, rollback the local pool to the previous backup marker in case the previous
  # backup was interrupted for some reason. If we don't do this, the zfs send -R command
  # below may complain about snaps already existing as it tries to resend from the
  # previous backup marker again from a previously interrupted replicate.
  $zfs rollback -r ${local_pool}/${previous_backup_marker}
  if [ $? != 0 ]; then
    echo "-E- local incremental $zfs rollback command failed"
    exit 1
  fi
  # Now it should be safe to send the snaps
  ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
      $zfs receive -vF -d ${local_pool}/${remote_pool}
  if [ $? != 0 ]; then
    echo "-E- remote incremental $zfs send command failed"
    exit 1
  fi
else
  # We didn't find any backup markers, next check to see if we have a common snapshot.

  # See what the most recent snapshot on the remote end is.
  latest=$(tail -n 1 $remote_list)

  # Inject a known difference at the end of each list so that diff always produces context lines;
  # since we key off the diff's context, this guarantees the most recent common snapshot shows up
  # even when the two lists are otherwise identical.
  echo bogus.remote >> $remote_list
  echo bogus.local >> $local_list
  common=$(diff -u $remote_list $local_list | grep '^ ' | tail -n 1)

  if [[ -n "$common" ]]; then
    # We found a common snapshot, incrementally send the new snaps.
    # ${common/*@/@} strips the dataset name, leaving just the @snapname for the -I argument.
    ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
        $zfs receive -vF -d ${local_pool}/${remote_pool}
    if [ $? != 0 ]; then
      echo "-E- remote incremental $zfs send command failed"
      exit 1
    fi
  else
    # We did not find any markers or a common snapshot.
    # At this point, we'll have to send the entire filesystem.
    # Destroy the local filesystem if it exists before receiving the full replicate.
    zfs list ${local_pool}/${remote_fs} > /dev/null 2>&1
    if [ $? == 0 ]; then
      zfs destroy -r ${local_pool}/${remote_fs}
      if [ $? != 0 ]; then
        echo "-E- local full $zfs destroy command failed"
        exit 1
      fi
    fi
    # Send the full filesystem
    ssh $remote $zfs send -R ${current_backup_marker} |
        $zfs receive -vF -d ${local_pool}/${remote_pool}
    if [ $? != 0 ]; then
      echo "-E- remote full $zfs send command failed"
      exit 1
    fi
  fi
fi
# destroy the previous backup markers now that we've replicated past them
# don't check the return codes here because these may not exist, and that is ok
$zfs destroy ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1

# Rename the current backup marker to be the previous backup marker
$zfs rename ${local_pool}/${current_backup_marker} ${local_pool}/${previous_backup_marker}
if [ $? != 0 ]; then
  echo "-E- local $zfs rename command failed"
  exit 1
fi
ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}
if [ $? != 0 ]; then
  echo "-E- remote $zfs rename command failed"
  exit 1
fi

# Lastly, remove the temp files
rm -f $local_list $remote_list