#!/bin/bash
# Usage: replicate <hostname> <zfs filesystem>
remote=$1
remote_fs=$2
remote_pool=${remote_fs%%/*}   # the pool portion of the remote filesystem name
remote_lockdir="/tmp/zfs-admin-lock"
# Set the name of the local pool used to store the backup of the remote
local_pool=backups   # placeholder value; set this to your local pool's name
# Set the email address to send notifications to
mailto=alan@pippins.net
# When this variable is set, local filesystems will be destroyed
# before receiving a full stream into them from the remote source.
destroy_local_filesystem_on_full_replicate=0
# The ssh connection doesn't find zfs without this.
zfs=/usr/sbin/zfs   # assumed path; adjust to where the zfs binary lives on both hosts
# Make sure we have valid arguments
if [[ -z "$remote" ]] || [[ -z "$remote_fs" ]]; then
  echo "Usage: $0 <hostname> <zfs filesystem>"
  exit 1
fi
# Make sure the local pool and local receiving filesystem exist, or print some errors
zpool list -H "$local_pool" >/dev/null 2>&1
if [ $? != 0 ]; then
  echo >&2 "-E- The local pool, '$local_pool', doesn't seem to exist."
  exit 1
fi
zfs list "$local_pool/$remote_pool" >/dev/null 2>&1
if [ $? != 0 ]; then
  echo >&2 "-I- The local filesystem for the remote pool, '$local_pool/$remote_pool', doesn't seem to exist."
  echo >&2 "    Creating the local filesystem to receive the remote pool into: $local_pool/$remote_pool"
  $zfs create "$local_pool/$remote_pool"
  if [ $? != 0 ]; then
    echo "-E- local $zfs create command failed"
    exit 1
  fi
fi
# Obtain the zpool guid for the local pool
local_pool_guid=$(zpool get guid $local_pool 2>&1 | grep $local_pool | awk '{ print $3 }')
zpool get guid $local_pool > /dev/null 2>&1
if [ $? != 0 ]; then
  echo >&2 "-E- Unable to extract the guid for the local pool: $local_pool"
  exit 1
fi
# Turn on shell verbosity
set -x
# Create the remote lockdir before continuing with the replicate
# Spinlock on creating the lock
maxsleeptime=60    # assumed bound on the random backoff, in seconds
maxattempts=500    # assumed cap on lock acquisition retries
attempts=0
while true; do
  ssh $remote mkdir "$remote_lockdir" >/dev/null 2>&1
  if [ $? != 0 ]; then
    # Another zfs admin tool is running.
    # Wait a random amount of time and try again
    ransleep=$(($RANDOM % $maxsleeptime))
    sleep $ransleep
    ((attempts=attempts+1))
  else
    # No other zfs admin tool is running; we can proceed now.
    break
  fi
  if [[ $attempts -gt $maxattempts ]]; then
    # We've exceeded our maximum retry count
    echo "-W- The zfs filesystem has been locked down. Skipping replicate operation."
    ssh $remote ls -ld $remote_lockdir | /usr/bin/mailx -s "zfs-replicate-all unable to obtain zfs admin lock" $mailto
    exit 1
  fi
done
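# Note: mkdir is atomic on the remote host, so creating the lock directory
# either succeeds completely or fails because another tool already holds it;
# that property is what makes this simple directory a safe cross-tool mutex.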
# Declare a cleanup() method to remove the remote lockdir,
# and run it on any exit so the lock is never left behind
cleanup() { ssh $remote rm -rf "$remote_lockdir"; }
trap cleanup EXIT
# Setup our backup marker names
current_backup_marker=${remote_fs}@current-backup-${local_pool_guid}
previous_backup_marker=${remote_fs}@previous-backup-${local_pool_guid}
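# For illustration (hypothetical names): with remote_fs=tank/home and a local
# pool guid of 1234567890, the markers are tank/home@current-backup-1234567890
# (the tip of this run) and tank/home@previous-backup-1234567890 (the base of
# the next incremental). Embedding the local pool's guid lets several backup
# hosts replicate the same remote filesystem without colliding on marker names.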
# List the snapshots on the remote machine.
remote_list=$(mktemp /tmp/replicate.XXXXXX)
ssh $remote \
    $zfs list -H -t snapshot |
    grep "^${remote_fs}@" |
    awk '{print $1}' > $remote_list
if [ $? != 0 ]; then
  echo "-E- remote $zfs list command failed"
  exit 1
fi
# List the snapshots on the local machine.
# Don't list the current backup marker if it exists on the local side;
# if you do, it can mess up the common-snapshot-finding algorithm below.
local_list=$(mktemp /tmp/replicate.XXXXXX)
$zfs list -H -t snapshot |
    grep "^${local_pool}/${remote_fs}@" |
    grep -v "^${local_pool}/${current_backup_marker}" |
    awk "{gsub(/^$local_pool./,\"\",\$1); print \$1}" > $local_list
if [ $? != 0 ]; then
  echo "-E- local $zfs list command failed"
  exit 1
fi
# Destroy the current backup marker snapshot on the remote system if it exists
grep -q ${current_backup_marker} $remote_list
if [ $? == 0 ]; then
  ssh $remote $zfs destroy ${current_backup_marker}
  if [ $? != 0 ]; then
    echo "-E- remote $zfs destroy command failed"
    exit 1
  fi
fi
# Create the current backup marker snapshot on the remote system
ssh $remote $zfs snapshot ${current_backup_marker}
if [ $? != 0 ]; then
  echo "-E- remote $zfs snapshot command failed"
  exit 1
fi
# Check to see if the previous backup marker exists in the remote snapshot list
# and in the local snapshot list.
# If the previous backup markers exist on both sides, perform an incremental replicate. Else:
# 1) check to see if a common snapshot exists, and perform an incremental replicate.
# 2) if no common snapshot exists, destroy the local filesystem, and perform a full replicate.
grep -q ${previous_backup_marker} $remote_list
no_markers=$?
grep -q ${previous_backup_marker} $local_list
no_markers=$(($no_markers || $?))
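# The arithmetic OR combines the two grep exit codes: ((0 || 0)) is 0, while
# ((0 || 1)) and ((1 || 0)) are 1, so no_markers stays 0 only when the
# previous backup marker was found in both lists.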
if [ $no_markers == 0 ]; then
  # We found backup markers; incrementally send the new snaps

  # First, rollback the local pool to the previous backup marker in case the previous
  # backup was interrupted for some reason. If we don't do this, the zfs send -R command
  # below may complain about snaps already existing as it tries to resend from the
  # previous backup marker again after a previously interrupted replicate.
  $zfs rollback -r ${local_pool}/${previous_backup_marker}
  if [ $? != 0 ]; then
    echo "-E- local incremental $zfs rollback command failed"
    exit 1
  fi
  # Now it should be safe to send the snaps: -R replicates the filesystem and
  # its descendants, -I sends every snapshot between the two markers, and
  # receive -d names the received datasets after the sent path minus its pool
  ssh $remote $zfs send -R -I${previous_backup_marker} ${current_backup_marker} |
      $zfs receive -vF -d ${local_pool}/${remote_pool}
  if [ $? != 0 ]; then
    echo "-E- remote incremental $zfs send command failed"
    exit 1
  fi
else
  # We didn't find any backup markers; next, check to see if we have a common snapshot.

  # See what the most recent snapshot on the remote end is.
  latest=$(tail -n 1 $remote_list)

  # Inject a known difference at the end of each list so that diff always has
  # context lines to display; since we key off the context of the diff to find
  # the most recent common snapshot, we need to guarantee some context exists
  # even when the lists are otherwise identical.
  echo bogus.remote >> $remote_list
  echo bogus.local >> $local_list
  common=$(diff -u $remote_list $local_list | grep '^ ' | tail -n 1)
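  # Worked example (hypothetical snapshot names): if the remote list ends with
  # tank/home@snap1, tank/home@snap2, bogus.remote and the local list ends with
  # tank/home@snap1, tank/home@snap2, bogus.local, the unified diff shows
  # tank/home@snap1 and tank/home@snap2 as context lines (leading space), the
  # bogus entries as -/+ lines, and tail -n 1 picks tank/home@snap2 as the
  # most recent common snapshot.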
  if [[ -n "$common" ]]; then
    # We found a common snapshot; incrementally send the new snaps
    ssh $remote $zfs send -R -I${common/*@/@} ${current_backup_marker} |
        $zfs receive -vF -d ${local_pool}/${remote_pool}
    if [ $? != 0 ]; then
      echo "-E- remote incremental $zfs send command failed"
      exit 1
    fi
  else
    # We did not find any markers or a common snapshot.
    # At this point, we'll have to send the entire filesystem.
    # Destroy the local filesystem if it exists before receiving the full replicate
    zfs list ${local_pool}/${remote_fs} > /dev/null 2>&1
    if [ $? == 0 ]; then
      if [[ $destroy_local_filesystem_on_full_replicate == 1 ]]; then
        $zfs destroy -r ${local_pool}/${remote_fs}
        if [ $? != 0 ]; then
          echo "-E- local full $zfs destroy command failed"
          exit 1
        fi
      else
        echo "-W- We need to destroy a local filesystem before receiving a full stream."
        echo "    However, since the option is set to prevent this, skipping the replicate operation."
        echo "$zfs destroy -r ${local_pool}/${remote_fs} not able to run" | /usr/bin/mailx -s "zfs-replicate-all unable to destroy local filesystem" $mailto
        exit 1
      fi
    fi
    # Send the full filesystem
    ssh $remote $zfs send -R ${current_backup_marker} |
        $zfs receive -vF -d ${local_pool}/${remote_pool}
    if [ $? != 0 ]; then
      echo "-E- remote full $zfs send command failed"
      exit 1
    fi
  fi
fi
# Destroy the previous backup markers now that we've replicated past them.
# Don't check the return codes here because these may not exist, and that is ok.
$zfs destroy ${local_pool}/${previous_backup_marker} > /dev/null 2>&1
ssh $remote $zfs destroy ${previous_backup_marker} > /dev/null 2>&1
# Rename the current backup marker to be the previous backup marker, making it
# the base snapshot for the next incremental replicate
$zfs rename ${local_pool}/${current_backup_marker} ${local_pool}/${previous_backup_marker}
if [ $? != 0 ]; then
  echo "-E- local $zfs rename command failed"
  exit 1
fi
ssh $remote $zfs rename ${current_backup_marker} ${previous_backup_marker}
if [ $? != 0 ]; then
  echo "-E- remote $zfs rename command failed"
  exit 1
fi
# Remove the temporary snapshot lists
rm -f $local_list $remote_list
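# Example invocation (hypothetical host and filesystem names): assuming
# local_pool=backups, running
#   ./replicate myserver tank/home
# replicates the remote tank/home filesystem into backups/tank/home.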