#!/bin/bash
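+# create a symlink in $LINKTO for every script tracked by git,
+# skipping .gitignore and the wrapper scripts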
-git ls-files | grep -v .gitignore | grep -v wrapper | xargs -I % ln -s zfs/% /etc/bin/% 2>/dev/null
+LINKTO=/etc/bin
+
+git ls-files | grep -v .gitignore | grep -v wrapper | xargs -I % ln -s zfs/% $LINKTO/% 2>/dev/null
# It also employs an intelligent algorithm to roll off,
# or destroy, old snapshots.
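+#
+# A rough sketch of the roll-off idea (illustrative only; the script also
+# honors $maxagedays) is to keep just the newest $numsnapshots snapshots:
+#   zfs list -H -t snapshot -o name -s creation -d 1 "$filesystem" |
+#     head -n -"$numsnapshots" | xargs -r -n 1 zfs destroy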
-PATH=/usr/sbin:/sbin:$PATH
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
-if [ -z "$SNAP_UNDER_TEST" ]; then
- exec >> /var/log/zfs/zfs-autosnap.log 2>&1
+if [[ -z "$SNAP_UNDER_TEST" ]]; then
+ exec >> $logdir/zfs-autosnap.log 2>&1
fi
# This script makes the following assumptions/requirements:
mountpoint=${2-/$1}
numsnapshots=${3-12}
maxagedays=${4-0}
-lockdir="/tmp/zfs-admin-lock"
pool=`echo "$filesystem" | awk -F '/' '{ print $1 }'`
if [ -z "$filesystem" ] || [ -z "$mountpoint" ] || [ -z "$numsnapshots" ] || [ -z "$maxagedays" ]; then
# Description: This script is a wrapper script that calls zfs-autosnap
# for each filesystem provided below.
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
+
# Setup some default values
-autosnap="/usr/local/etc/bin/zfs-autosnap"
-logfile="/var/log/zfs/zfs-autosnap.log"
-lockdir="/tmp/zfs-autosnap-all"
+logfile="$logdir/zfs-autosnap.log"
numsnapshots=20
maxagedays=365
date=`date`
+mylockdir="/tmp/zfs-autosnap-all"
# Make sure we aren't already running
-if ! mkdir "$lockdir" >/dev/null 2>&1; then
+if ! mkdir "$mylockdir" >/dev/null 2>&1; then
echo "$date Another $0 process is already running" >> $logfile
exit 1
fi
date >> $logfile
# Special filesystems
-$autosnap storage /storage $numsnapshots 15
-$autosnap tank/usr/videos /usr/videos $numsnapshots 15
+zfs-autosnap storage /storage $numsnapshots 15
+zfs-autosnap tank/usr/videos /usr/videos $numsnapshots 15
# Normal filesystems
-$autosnap tank / $numsnapshots $maxagedays
-$autosnap tank/backup /backup $numsnapshots $maxagedays
-$autosnap tank/usr /usr $numsnapshots $maxagedays
-$autosnap tank/usr/home /usr/home $numsnapshots $maxagedays
-$autosnap tank/usr/local /usr/local $numsnapshots $maxagedays
-$autosnap tank/usr/local/etc /usr/local/etc $numsnapshots $maxagedays
+zfs-autosnap tank / $numsnapshots $maxagedays
+zfs-autosnap tank/home /home $numsnapshots $maxagedays
# Daily filesystems (only perform these at midnight)
if [ `date +"%H:%M"` == "00:00" ]; then
# Weekly filesystems (only perform these on Sunday at midnight)
if [ `date +"%H:%M:%u"` == "00:00:7" ]; then
echo "Performing Weekly snapshots" >> $logfile
- $autosnap tank/var /var $numsnapshots 60
- $autosnap tank/usr/local/var /usr/local/var $numsnapshots 60
fi
-rm -rf "$lockdir"
+rm -rf "$mylockdir"
#!/bin/bash
+# Author: Carl Baldwin & Alan Pippin
+# Description: This script replicates a remote zfs filesystem to a local zfs pool.
+# This script will keep all snapshots in sync, removing the ones
+# that have been deleted since the last replicate was performed.
+# This script will only send the new, or missing, snapshots since
+# the last replicate was performed.
# Usage: replicate <hostname> <zfs filesystem>
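+#
+# The sync roughly boils down to an incremental zfs send/receive between the
+# newest snapshot both sides already share and the newest remote snapshot.
+# A rough sketch with hypothetical snapshot names (not the exact commands
+# used by this script):
+#   ssh $remote $zfs send -R -I tank/home@common tank/home@newest | $zfs receive -vF $local_pool/tank/home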
+
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
+
+# command line arg parsing
remote=$1
remote_fs=$2
remote_pool=${2%%/*}
-remote_lockdir="/tmp/zfs-admin-lock"
hostname=`hostname`
-# Set the name of the local pool used to store the backup of the remote
-local_pool=backups
-
-# Set the email address to send notification to
-mailto=root@pippins.net
-mailx=/usr/bin/mailx
-
-# When this variable is set, local filesystems will be destroyed
-# before receiving a full streams into them from the remote source.
-destroy_local_filesystem_on_full_replicate=0
-
-# The ssh connection doesn't find zfs without this.
-zfs=/usr/sbin/zfs
-
# Setup our cleanup and exit trap
cleanup() {
if [[ -e "$local_list" ]]; then
    rm -f $local_list
fi
if [[ -e "$remote_list" ]]; then
    rm -f $remote_list
fi
if [[ -n "$remote" ]]; then
- ssh $remote ls -d "$remote_lockdir" > /dev/null 2>&1
+ ssh $remote ls -d "$lockdir" > /dev/null 2>&1
if [[ $? == 0 ]]; then
- ssh $remote rm -rf "$remote_lockdir"
+ ssh $remote rm -rf "$lockdir"
fi
fi
}
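+# (illustrative) the cleanup handler is meant to run even on an abnormal exit;
+# with bash that is typically wired up as:
+#   trap cleanup EXIT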
maxattempts=100
attempts=0
while true; do
- ssh $remote mkdir "$remote_lockdir" >/dev/null 2>&1
+ ssh $remote mkdir "$lockdir" >/dev/null 2>&1
if [ $? != 0 ]; then
# Another zfs admin tool is running.
# Wait a random amount of time and try again
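+# (illustrative) e.g. back off for a random number of seconds before retrying:
+#   sleep $((RANDOM % 60))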
if [[ $attempts -gt $maxattempts ]]; then
# We've exceeded our maximum while loop count
echo "-E- The zfs filesystem has been locked down. Skipping replicate operation."
- fail_msg=`ssh $remote ls -ld $remote_lockdir 2>&1`
+ fail_msg=`ssh $remote ls -ld $lockdir 2>&1`
fatal_and_exit "zfs-replicate-all unable to obtain zfs admin lock:\n$fail_msg" $mailto
fi
done
# Description: This script calls zfs-replicate for each filesystem needing
# to be backed up, or replicated, to another ZFS pool.
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
+
# Setup some default values
-zfsreplicate="/etc/bin/zfs-replicate"
-logdir="/var/log/zfs"
-logfile_parser="/etc/bin/zfs-log-parser"
logfile="$logdir/zfs-replicate.log"
mylogfile="$logdir/zfs-replicate-all.log"
-remote="tank.pippins.net"
-local_pool=backups
-mailto=root@pippins.net
date=`date`
starttime=`date +%s`
-zfs=/usr/sbin/zfs
-
-# Specify the list of filesystems to replicate
-filesystems_to_replicate='
-naspool/www
-naspool/git
-'
-
-# Specify the maximum run time in minutes that this script can run (0=no limit)
-maxruntime=240
# This function checks to see if our runtime has exceeded our stoptime
timeexceeded() {
# This function executes the replicate command and checks the stoptime
replicate() {
- $zfsreplicate $* >> $logfile 2>&1
+ zfs-replicate $* >> $logfile 2>&1
timeexceeded
if [ $? == 1 ]; then
cleanup_and_exit
echo `date` ZFS replicate complete | tee -a $mylogfile
# Parse the log file and extract our backup stats
-$logfile_parser "$logfile" "$date" >> $logfile
-$logfile_parser "$logfile" "$date" | tee -a $mylogfile
+zfs-log-parser "$logfile" "$date" >> $logfile
+zfs-log-parser "$logfile" "$date" | tee -a $mylogfile
--- /dev/null
+###########################################################################
+# local configuration file for ZFS scripts
+###########################################################################
+
+# set up your PATH here so all the binaries the ZFS scripts call can be found
+PATH=/usr/sbin:/sbin:/etc/bin:.:$PATH
+
+# specify the path to the zfs binary
+zfs=/usr/sbin/zfs
+
+# specify the path to your command line mailer
+mailx=/usr/bin/mailx
+
+# specify the path to the logdir the ZFS scripts should dump their logs to
+logdir="/var/log/zfs"
+
+# specify the name of the lockdir used when performing ZFS admin operations
+lockdir="/tmp/zfs-admin-lock"
+
+# specify the user to send email reports to
+mailto="root@pippins.net"
+
+# specify the name of the remote server to pull snapshots from for local backup
+remote="tank.pippins.net"
+
+# specify the name of the local pool to store remotely pulled (backup) snapshots to
+local_pool="backups"
+
+# when this variable is set, local filesystems will be destroyed
+# before receiving full streams into them from the remote source.
+destroy_local_filesystem_on_full_replicate=0
+
+# set this to 1 to run the snapshot script in "test" mode (no snapshots are actually taken)
+SNAP_UNDER_TEST=
+
+# Specify the maximum run time in minutes that the replicate script can run for (0=no limit)
+maxruntime=0
+
+# Specify the list of filesystems to replicate from the remote to the local_pool (1 per line)
+filesystems_to_replicate='
+naspool/www
+naspool/git
+'
+
--- /dev/null
+###########################################################################
+# This is a shell config script included by the other ZFS scripts.
+# You must copy this file and modify these values to match your system.
+# Save the resulting file as "zfs-scripts.conf" in this directory.
+# This will prevent your settings from being overwritten by future updates.
+###########################################################################
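+#
+# For example, a first-time setup from this directory might look like:
+#   cp zfs-scripts.conf.dist zfs-scripts.conf
+#   vi zfs-scripts.conf    # then edit the values below to match your system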
+
+# set up your PATH here so all the binaries the ZFS scripts call can be found
+PATH=/usr/sbin:/sbin:/etc/bin:.:$PATH
+
+# specify the path to the zfs binary
+zfs=/usr/sbin/zfs
+
+# specify the path to your command line mailer
+mailx=/usr/bin/mailx
+
+# specify the path to the logdir the ZFS scripts should dump their logs to
+logdir="/var/log/zfs"
+
+# specify the name of the lockdir used when performing ZFS admin operations
+lockdir="/tmp/zfs-admin-lock"
+
+# specify the user to send email reports to
+mailto=
+
+# specify the name of the remote server to pull snapshots from for local backup
+remote=
+
+# specify the name of the local pool to store remotely pulled (backup) snapshots to
+local_pool=
+
+# when this variable is set, local filesystems will be destroyed
+# before receiving full streams into them from the remote source.
+destroy_local_filesystem_on_full_replicate=0
+
+# set this to 1 to run the snapshot script in "test" mode (no snapshots are actually taken)
+SNAP_UNDER_TEST=
+
+# Specify the maximum run time in minutes that the replicate script can run for (0=no limit)
+maxruntime=0
+
+# Specify the list of filesystems to replicate from the remote to the local_pool
+# (enter 1 filesystem per line)
+filesystems_to_replicate='
+'
+
# running at any given time. This serializes the zfs
# scrub process for any pool.
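+# (for reference) an in-progress scrub on a pool shows up in zpool status, e.g.:
+#   zpool status <pool> | grep "scrub in progress"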
-exec >> /var/log/zfs/zfs-scrub.log 2>&1
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
-PATH=/usr/sbin:/sbin:/etc/bin:$PATH
+exec >> $logdir/zfs-scrub.log 2>&1
pools="$*"
maxsleeptime=360
-mailto=root
if [ -z "$pools" ]; then
echo "-E- Usage: $0 <pools>"
zpool status $i | grep scrub: | grep "with 0 errors" > /dev/null 2>&1
if [ $? != 0 ]; then
# The scrub found errors
- zpool status $i | /usr/bin/mailx -s "zpool scrub $i found errors" $mailto
+ zpool status $i | $mailx -s "zpool scrub $i found errors" $mailto
fi
done
#!/bin/bash
-PATH=/usr/sbin:/sbin:/etc/bin:$PATH
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
-snapshot_totals="zfs-snapshot-totals"
-logfile="/var/log/zfs/zfs-snapshot-totals.log"
+logfile="$logdir/zfs-snapshot-totals.log"
date >> $logfile
-$snapshot_totals >> $logfile
+zfs-snapshot-totals >> $logfile
echo >> $logfile