#!/bin/bash
# Author: Carl Baldwin & Alan Pippin
# Description: This script replicates a remote zfs filesystem to a local zfs pool.
#              This script will keep all snapshots in sync, removing the ones
#              that have been deleted since the last replicate was performed.
#              This script will only send the new, or missing, snapshots since
#              the last replicate was performed.
# Usage: replicate <hostname> <zfs filesystem>
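# Example (hypothetical host and filesystem names):
#   replicate backupserver tank/home
# pulls tank/home and its snapshots from backupserver into
# <local_pool>/tank/home on this machine.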
11 # source our configuration
12 config="${0%/*}/zfs-scripts.conf"
13 [ -e "${config}.dist" ] && . ${config}.dist
14 [ -e "${config}" ] && . ${config}
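# The sourced config is expected to provide the variables used below, e.g.
# local_pool, lockdir, mailto, zfs, mailx, throttle, throttle_enable,
# throttle_opt, and destroy_local_filesystem_on_full_replicate.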
# command line arg parsing
remote=$1
remote_fs=$2
remote_pool=${2%%/*}
# Set up our cleanup and exit trap
cleanup() {
  if [[ -e "$local_list" ]]; then
    rm -f $local_list
  fi
  if [[ -e "$remote_list" ]]; then
    rm -f $remote_list
  fi
  if [[ -n "$remote" ]]; then
    ssh $remote ls -d "$lockdir" > /dev/null 2>&1
    if [[ $? == 0 ]]; then
      ssh $remote rm -rf "$lockdir"
    fi
  fi
}
fatal_and_exit() {
  echo -e "$1"
  # Destroy the backup markers on the local filesystem if they exist
  if [[ -n "$current_backup_marker" ]]; then
    zfs list -t snapshot ${local_pool}/${current_backup_marker} > /dev/null 2>&1 &&
      $zfs destroy ${local_pool}/${current_backup_marker}
  fi
  if [[ -n "$previous_backup_marker" ]]; then
    zfs list -t snapshot ${local_pool}/${previous_backup_marker} > /dev/null 2>&1 &&
      $zfs destroy ${local_pool}/${previous_backup_marker}
  fi
  # send email notification
  if [[ -n "$2" ]]; then
    echo -e "$1" | $mailx -s "zfs replicate on $hostname failed" "$2"
  fi
  exit 1
}
trap fatal_and_exit INT
trap cleanup EXIT
# Make sure we have valid arguments
if [[ -z "$remote" ]] || [[ -z "$remote_fs" ]]; then
  fatal_and_exit "Usage: $0 <hostname> <zfs filesystem>"
fi
# Make sure the local pool and local receiving filesystem exist, or print some errors
zpool list -H "$local_pool" >/dev/null 2>&1
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- The local pool, '$local_pool', doesn't seem to exist." $mailto
fi
zfs list "$local_pool/$remote_pool" >/dev/null 2>&1
if [[ $? != 0 ]]; then
  echo >&2 "-I- The local filesystem for the remote pool, '$local_pool/$remote_pool', doesn't seem to exist."
  echo >&2 "    Creating the local filesystem to receive the remote pool into: $local_pool/$remote_pool"
  $zfs create $local_pool/$remote_pool
  if [[ $? != 0 ]]; then
    fatal_and_exit "-E- local $zfs create command failed" $mailto
  fi
fi
# Obtain the zpool guid for the local pool
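# 'zpool get guid <pool>' prints a NAME/PROPERTY/VALUE/SOURCE table, e.g.
# (hypothetical guid):
#   NAME  PROPERTY  VALUE                SOURCE
#   tank  guid      9867813265091846163  default
# The awk '{ print $3 }' below selects the VALUE column.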
local_pool_guid=$(zpool get guid $local_pool 2>&1 | grep $local_pool | awk '{ print $3 }')
zpool get guid $local_pool > /dev/null 2>&1
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- Unable to extract the guid for the local pool: $local_pool" $mailto
fi
# Turn on shell verbosity
set -x
# Create the remote lockdir before continuing with the replicate
# Spinlock on creating the lock
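# mkdir is atomic: when several admin scripts race to create the same
# lockdir, exactly one succeeds, so the remote lockdir serves as a mutex.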
maxsleeptime=60
maxattempts=500
attempts=0
while true; do
  ssh $remote mkdir "$lockdir" >/dev/null 2>&1
  if [[ $? != 0 ]]; then
    # Another zfs admin tool is running.
    # Wait a random amount of time and try again.
    ransleep=$(($RANDOM % $maxsleeptime))
    sleep $ransleep
    ((attempts=attempts+1))
  else
    # No other zfs admin tool is running; we can proceed now.
    break
  fi
  if [[ $attempts -gt $maxattempts ]]; then
    # We've exceeded our maximum retry count
    echo "-E- The zfs filesystem has been locked down. Skipping replicate operation."
    fail_msg=$(ssh $remote ls -ld $lockdir 2>&1)
    fatal_and_exit "zfs-replicate-all unable to obtain zfs admin lock:\n$fail_msg" $mailto
  fi
done
# Set up our backup marker names
current_backup_marker=${remote_fs}@current-backup-${local_pool_guid}
previous_backup_marker=${remote_fs}@previous-backup-${local_pool_guid}
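# e.g. (hypothetical): with remote_fs=tank/home and a local pool guid of
# 9867813265091846163, the markers are tank/home@current-backup-9867813265091846163
# and tank/home@previous-backup-9867813265091846163. Embedding the local pool
# guid keeps markers from different backup destinations from colliding.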
# List the snapshots on the remote machine.
remote_list=$(mktemp /tmp/replicate.XXXXXX)
ssh $remote \
    $zfs list -H -t snapshot |
    grep ^${remote_fs}@ |
    awk '{print $1}' > $remote_list
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- remote $zfs list command failed" $mailto
fi
# List the snapshots on the local machine.
# Don't list the current backup marker if it exists on the local side;
# if you do, it can mess up the common-snapshot-finding algorithm below.
local_list=$(mktemp /tmp/replicate.XXXXXX)
$zfs list -H -t snapshot |
    grep ^${local_pool}/${remote_fs}@ |
    grep -v ^${local_pool}/${current_backup_marker} |
    awk "{gsub(/^$local_pool./,\"\",\$1); print \$1}" > $local_list
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- local $zfs list command failed" $mailto
fi
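# Note: the gsub above strips the leading "<local_pool>/" so local snapshot
# names line up with the remote list, e.g. (hypothetical)
# backup/tank/home@snap1 -> tank/home@snap1.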
# Destroy the current backup marker snapshot on the remote system if it exists
grep -q ${current_backup_marker} $remote_list
if [[ $? == 0 ]]; then
  ssh $remote $zfs destroy ${current_backup_marker}
  if [[ $? != 0 ]]; then
    fatal_and_exit "-E- remote $zfs destroy command failed" $mailto
  fi
fi
# Create the current backup marker snapshot on the remote system
ssh $remote $zfs snapshot ${current_backup_marker}
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- remote $zfs snapshot command failed" $mailto
fi
# Check to see if the previous backup marker exists in both the remote and
# the local snapshot lists. If it does, perform an incremental replicate. Otherwise:
# 1) check to see if a common snapshot exists, and perform an incremental
#    replicate from it;
# 2) if no common snapshot exists, destroy the local filesystem and perform
#    a full replicate.
grep -q ${previous_backup_marker} $remote_list
no_markers=$?
grep -q ${previous_backup_marker} $local_list
no_markers=$(($no_markers || $?))
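# no_markers is 0 only when both greps above succeeded, i.e. the previous
# backup marker exists on both sides; arithmetic '||' over the two exit
# statuses is 0 only if both are 0.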
if [[ $no_markers == 0 ]]; then
  # We found backup markers; incrementally send the new snaps

  # First, roll back the local pool to the previous backup marker in case the
  # previous backup was interrupted for some reason. If we don't do this, the
  # zfs send -R command below may complain about snaps already existing as it
  # resends from the previous backup marker after an interrupted replicate.
  $zfs rollback -r ${local_pool}/${previous_backup_marker}
  if [[ $? != 0 ]]; then
    fatal_and_exit "-E- local incremental $zfs rollback command failed" $mailto
  fi
  # Now it should be safe to send the snaps
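  # 'zfs send -R -I <from> <to>' streams every snapshot between the two
  # markers; receiving with -F also prunes local snaps that were deleted on
  # the remote side, which is what keeps the snapshot lists in sync.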
  if [[ $throttle_enable == 1 && -e $throttle ]]; then
    ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
      $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
  else
    ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
      $zfs receive -vF -d ${local_pool}/${remote_pool}
  fi
  if [[ $? != 0 ]]; then
    fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
  fi
else
  # We didn't find any backup markers; next, check to see if we have a common snapshot.

  # See what the most recent snapshot on the remote end is.
  latest=$(tail -n 1 $remote_list)
  # Inject a known difference at the end of each list. We key off the context
  # lines of the unified diff below, so we must guarantee at least one
  # difference exists; otherwise diff would print nothing and the most recent
  # common snapshot would never appear as context.
  echo bogus.remote >> $remote_list
  echo bogus.local >> $local_list
  common=$(diff -u $remote_list $local_list | grep '^ ' | tail -n 1)
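  # In a unified diff, lines common to both files are prefixed with a single
  # space, so grep '^ ' | tail -n 1 grabs the most recent snapshot both lists
  # share, e.g. (hypothetical): remote ends at @snap3, local ends at @snap2
  # -> common resolves to "tank/home@snap2".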
  if [[ -n "$common" ]]; then
    # We found a common snapshot; incrementally send the new snaps
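    # ${common/*@/@} strips everything before the '@' (e.g., hypothetically,
    # "tank/home@snap2" -> "@snap2"), the short form zfs send -I accepts for
    # the incremental source snapshot.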
    if [[ $throttle_enable == 1 && -e $throttle ]]; then
      ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
        $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
    else
      ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
        $zfs receive -vF -d ${local_pool}/${remote_pool}
    fi
    if [[ $? != 0 ]]; then
      fatal_and_exit "-E- remote incremental $zfs send command failed" $mailto
    fi
  else
    # We did not find any markers or a common snapshot.
    # At this point, we'll have to send the entire filesystem.
    # Destroy the local filesystem if it exists before receiving the full replicate.
    zfs list ${local_pool}/${remote_fs} > /dev/null 2>&1
    if [[ $? == 0 ]]; then
      if [[ $destroy_local_filesystem_on_full_replicate == 1 ]]; then
        $zfs destroy -r ${local_pool}/${remote_fs}
        if [[ $? != 0 ]]; then
          fatal_and_exit "-E- local full $zfs destroy command failed" $mailto
        fi
      else
        echo "-W- We need to destroy a local filesystem before receiving a full stream."
        echo "    However, since the option is set to prevent this, skipping replicate operation."
        fatal_and_exit "unable to destroy local filesystem:\n$zfs destroy -r ${local_pool}/${remote_fs} not able to run" $mailto
      fi
    fi
    # Send the full filesystem
    if [[ $throttle_enable == 1 && -e $throttle ]]; then
      ssh $remote $zfs send -R ${current_backup_marker} |
        $throttle $throttle_opt | $zfs receive -vF -d ${local_pool}/${remote_pool}
    else
      ssh $remote $zfs send -R ${current_backup_marker} |
        $zfs receive -vF -d ${local_pool}/${remote_pool}
    fi
    if [[ $? != 0 ]]; then
      fatal_and_exit "-E- remote full $zfs send command failed" $mailto
    fi
  fi
fi
# Destroy the previous backup markers now that we've replicated past them.
# Don't check the return codes here because these may not exist, and that is ok.
$zfs destroy ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1
# Rename the current backup marker to be the previous backup marker
$zfs rename ${local_pool}/${current_backup_marker} ${local_pool}/${previous_backup_marker}
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- local $zfs rename command failed" $mailto
fi
ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}
if [[ $? != 0 ]]; then
  fatal_and_exit "-E- remote $zfs rename command failed" $mailto
fi
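# At this point the previous-backup marker on both machines names the snapshot
# that was just replicated, so the next run can send an incremental stream
# starting from it.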