From: Alan J. Pippin
Date: Sat, 17 Jan 2009 04:10:44 +0000 (-0700)
Subject: Added configuration scripts to hold customized options
X-Git-Url: http://git.pippins.net/embedvideo/.git/%22%22.%24thumbnail.%22/%24link?a=commitdiff_plain;h=471f2873d53eebe62223121be58bc3f795511a12;p=zfs-ubuntu%2F.git

Added configuration scripts to hold customized options
---

diff --git a/update_links b/update_links
index 8370aca..9a64660 100755
--- a/update_links
+++ b/update_links
@@ -1,4 +1,6 @@
 #!/bin/bash
 
-git ls-files | grep -v .gitignore | grep -v wrapper | xargs -I % ln -s zfs/% /etc/bin/% 2>/dev/null
+LINKTO=/etc/bin
+
+git ls-files | grep -v .gitignore | grep -v wrapper | xargs -I % ln -s zfs/% $LINKTO/% 2>/dev/null
diff --git a/zfs-autosnap b/zfs-autosnap
index da90906..da650f5 100755
--- a/zfs-autosnap
+++ b/zfs-autosnap
@@ -5,10 +5,13 @@
 # It also employs an intelligent algorithm to roll off,
 # or destroy, old snapshots.
 
-PATH=/usr/sbin:/sbin:$PATH
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
 
-if [ -z "$SNAP_UNDER_TEST" ]; then
-  exec >> /var/log/zfs/zfs-autosnap.log 2>&1
+if [[ -z "$SNAP_UNDER_TEST" ]]; then
+  exec >> $logdir/zfs-autosnap.log 2>&1
 fi
 
 # This script makes the following assumptions/requirements:
@@ -50,7 +53,6 @@ filesystem=$1
 mountpoint=${2-/$1}
 numsnapshots=${3-12}
 maxagedays=${4-0}
-lockdir="/tmp/zfs-admin-lock"
 pool=`echo "$filesystem" | awk -F '/' '{ print $1 }'`
 
 if [ -z "$filesystem" ] || [ -z "$mountpoint" ] || [ -z "$numsnapshots" ] || [ -z "$maxagedays" ]; then
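Every script touched by this commit gains the same preamble: defaults are read from zfs-scripts.conf.dist, then an optional local zfs-scripts.conf overrides them. A minimal sketch of that layering in isolation -- the file names and the $logdir variable come from this commit, the echo line is illustrative only:

    #!/bin/bash
    # Locate the config files next to the running script.
    config="${0%/*}/zfs-scripts.conf"

    # Load the shipped defaults first, then let the local copy override them.
    [ -e "${config}.dist" ] && . "${config}.dist"
    [ -e "${config}" ] && . "${config}"

    # Any variable set in the config (e.g. logdir) is now visible here.
    echo "logs will be written under: $logdir"

Because the local file is sourced last, any variable it sets simply shadows the default from the .dist file.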
mkdir "$mylockdir" >/dev/null 2>&1; then echo "$date Another $0 process is already running" >> $logfile exit 1 fi @@ -22,16 +26,12 @@ fi date >> $logfile # Special filesystems -$autosnap storage /storage $numsnapshots 15 -$autosnap tank/usr/videos /usr/videos $numsnapshots 15 +zfs-autosnap storage /storage $numsnapshots 15 +zfs-autosnap tank/usr/videos /usr/videos $numsnapshots 15 # Normal filesystems -$autosnap tank / $numsnapshots $maxagedays -$autosnap tank/backup /backup $numsnapshots $maxagedays -$autosnap tank/usr /usr $numsnapshots $maxagedays -$autosnap tank/usr/home /usr/home $numsnapshots $maxagedays -$autosnap tank/usr/local /usr/local $numsnapshots $maxagedays -$autosnap tank/usr/local/etc /usr/local/etc $numsnapshots $maxagedays +zfs-autosnap tank / $numsnapshots $maxagedays +zfs-autosnap tank/home /home $numsnapshots $maxagedays # Daily filesystems (only perform these at midnight) if [ `date +"%H:%M"` == "00:00" ]; then @@ -41,8 +41,6 @@ fi # Weekly filesystems (only perform these on Sunday at midnight) if [ `date +"%H:%M:%u"` == "00:00:7" ]; then echo "Performing Weekly snapshots" >> $logfile - $autosnap tank/var /var $numsnapshots 60 - $autosnap tank/usr/local/var /usr/local/var $numsnapshots 60 fi -rm -rf "$lockdir" +rm -rf "$mylockdir" diff --git a/zfs-replicate b/zfs-replicate index 413d078..a082ce3 100755 --- a/zfs-replicate +++ b/zfs-replicate @@ -1,26 +1,24 @@ #/bin/bash +# Author: Carl Baldwin & Alan Pippin +# Description: This script replicates a remote zfs filesystem to a local zfs pool. +# This script will keep all snapshots in sync, removing the ones +# that have been deleted since the last replicate was performed. +# This script will only send the new, or missing, snapshots since +# the last replicate was performed. # Usage: replicate + +# source our configuration +config="${0%/*}/zfs-scripts.conf" +[ -e "${config}.dist" ] && . ${config}.dist +[ -e "${config}" ] && . ${config} + +# command line arg parsing remote=$1 remote_fs=$2 remote_pool=${2%%/*} -remote_lockdir="/tmp/zfs-admin-lock" hostname=`hostname` -# Set the name of the local pool used to store the backup of the remote -local_pool=backups - -# Set the email address to send notification to -mailto=root@pippins.net -mailx=/usr/bin/mailx - -# When this variable is set, local filesystems will be destroyed -# before receiving a full streams into them from the remote source. -destroy_local_filesystem_on_full_replicate=0 - -# The ssh connection doesn't find zfs without this. -zfs=/usr/sbin/zfs - # Setup our cleanup and exit trap cleanup() { if [[ -e "$local_list" ]]; then @@ -30,9 +28,9 @@ cleanup() { rm -f $remote_list fi if [[ -n "$remote" ]]; then - ssh $remote ls -d "$remote_lockdir" > /dev/null 2>&1 + ssh $remote ls -d "$lockdir" > /dev/null 2>&1 if [[ $? == 0 ]]; then - ssh $remote rm -rf "$remote_lockdir" + ssh $remote rm -rf "$lockdir" fi fi } @@ -82,7 +80,7 @@ maxsleeptime=60 maxattempts=100 attempts=0 while true; do - ssh $remote mkdir "$remote_lockdir" >/dev/null 2>&1 + ssh $remote mkdir "$lockdir" >/dev/null 2>&1 if [ $? != 0 ]; then # Another zfs admin tool is running. # Wait a random amount of time and try again @@ -96,7 +94,7 @@ while true; do if [[ $attempts -gt $maxattempts ]]; then # We've exceeded our maximum while loop count echo "-E- The zfs filesystem has been locked down. Skipping replicate operation." 
diff --git a/zfs-replicate b/zfs-replicate
index 413d078..a082ce3 100755
--- a/zfs-replicate
+++ b/zfs-replicate
@@ -1,26 +1,24 @@
 #/bin/bash
 
+# Author: Carl Baldwin & Alan Pippin
+# Description: This script replicates a remote zfs filesystem to a local zfs pool.
+#              This script will keep all snapshots in sync, removing the ones
+#              that have been deleted since the last replicate was performed.
+#              This script will only send the new, or missing, snapshots since
+#              the last replicate was performed.
 # Usage: replicate
+
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
+
+# command line arg parsing
 remote=$1
 remote_fs=$2
 remote_pool=${2%%/*}
-remote_lockdir="/tmp/zfs-admin-lock"
 hostname=`hostname`
 
-# Set the name of the local pool used to store the backup of the remote
-local_pool=backups
-
-# Set the email address to send notification to
-mailto=root@pippins.net
-mailx=/usr/bin/mailx
-
-# When this variable is set, local filesystems will be destroyed
-# before receiving a full streams into them from the remote source.
-destroy_local_filesystem_on_full_replicate=0
-
-# The ssh connection doesn't find zfs without this.
-zfs=/usr/sbin/zfs
-
 # Setup our cleanup and exit trap
 cleanup() {
   if [[ -e "$local_list" ]]; then
@@ -30,9 +28,9 @@ cleanup() {
     rm -f $remote_list
   fi
   if [[ -n "$remote" ]]; then
-    ssh $remote ls -d "$remote_lockdir" > /dev/null 2>&1
+    ssh $remote ls -d "$lockdir" > /dev/null 2>&1
     if [[ $? == 0 ]]; then
-      ssh $remote rm -rf "$remote_lockdir"
+      ssh $remote rm -rf "$lockdir"
     fi
   fi
 }
@@ -82,7 +80,7 @@ maxsleeptime=60
 maxattempts=100
 attempts=0
 while true; do
-  ssh $remote mkdir "$remote_lockdir" >/dev/null 2>&1
+  ssh $remote mkdir "$lockdir" >/dev/null 2>&1
   if [ $? != 0 ]; then
     # Another zfs admin tool is running.
     # Wait a random amount of time and try again
@@ -96,7 +94,7 @@ while true; do
   if [[ $attempts -gt $maxattempts ]]; then
     # We've exceeded our maximum while loop count
     echo "-E- The zfs filesystem has been locked down. Skipping replicate operation."
-    fail_msg=`ssh $remote ls -ld $remote_lockdir 2>&1`
+    fail_msg=`ssh $remote ls -ld $lockdir 2>&1`
     fatal_and_exit "zfs-replicate-all unable to obtain zfs admin lock:\n$fail_msg" $mailto
   fi
 done
diff --git a/zfs-replicate-wrapper b/zfs-replicate-wrapper
index a43e8fb..c6c3369 100755
--- a/zfs-replicate-wrapper
+++ b/zfs-replicate-wrapper
@@ -4,27 +4,16 @@
 # Description: This script calls zfs-replicate for each filesystem needing
 #              to be backed up, or replicated, to another ZFS pool.
 
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
+
 # Setup some default values
-zfsreplicate="/etc/bin/zfs-replicate"
-logdir="/var/log/zfs"
-logfile_parser="/etc/bin/zfs-log-parser"
 logfile="$logdir/zfs-replicate.log"
 mylogfile="$logdir/zfs-replicate-all.log"
-remote="tank.pippins.net"
-local_pool=backups
-mailto=root@pippins.net
 date=`date`
 starttime=`date +%s`
-zfs=/usr/sbin/zfs
-
-# Specify the list of filesystems to replicate
-filesystems_to_replicate='
-naspool/www
-naspool/git
-'
-
-# Specify the maximum run time in minutes that this script can run (0=no limit)
-maxruntime=240
 
 # This function checks to see if our runtime has exceeded our stoptime
 timeexceeded() {
@@ -52,7 +41,7 @@ trap cleanup_and_exit INT
 
 # This function executes the replicate command and checks the stoptime
 replicate() {
-  $zfsreplicate $* >> $logfile 2>&1
+  zfs-replicate $* >> $logfile 2>&1
   timeexceeded
   if [ $? == 1 ]; then
     cleanup_and_exit
@@ -113,6 +102,6 @@
 echo `date` ZFS replicate complete >> $logfile
 echo `date` ZFS replicate complete | tee -a $mylogfile
 
 # Parse the log file and extract our backup stats
-$logfile_parser "$logfile" "$date" >> $logfile
-$logfile_parser "$logfile" "$date" | tee -a $mylogfile
+zfs-log-parser "$logfile" "$date" >> $logfile
+zfs-log-parser "$logfile" "$date" | tee -a $mylogfile
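zfs-replicate takes the same mkdir-style lock, but on the remote host, retrying until it either wins the lock or gives up after maxattempts tries. A condensed sketch of that loop follows; it assumes the fatal_and_exit helper the script already calls, and the random-sleep expression is an approximation since the actual line is not shown in the hunk above:

    maxsleeptime=60    # longest pause between attempts, in seconds
    maxattempts=100
    attempts=0

    while true; do
        # Try to grab the admin lock on the remote side; mkdir is atomic there too.
        if ssh $remote mkdir "$lockdir" >/dev/null 2>&1; then
            break
        fi
        # Another zfs admin tool is running; back off for a random interval.
        sleep $((RANDOM % maxsleeptime))
        attempts=$((attempts + 1))
        if [ $attempts -gt $maxattempts ]; then
            fatal_and_exit "unable to obtain zfs admin lock on $remote" $mailto
        fi
    done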
diff --git a/zfs-scripts.conf b/zfs-scripts.conf
new file mode 100644
index 0000000..0d289ad
--- /dev/null
+++ b/zfs-scripts.conf
@@ -0,0 +1,44 @@
+###########################################################################
+# local configuration file for ZFS scripts
+###########################################################################
+
+# setup your path here to find all the binaries the ZFS scripts call
+PATH=/usr/sbin:/sbin:/etc/bin:.:$PATH
+
+# specify the path to the zfs binary
+zfs=/usr/sbin/zfs
+
+# specify the path to your command line mailer
+mailx=/usr/bin/mailx
+
+# specify the path to the logdir the ZFS scripts should dump their logs to
+logdir="/var/log/zfs"
+
+# specify the name of the lockdir used when performing ZFS admin operations
+lockdir="/tmp/zfs-admin-lock"
+
+# specify the user to send email reports to
+mailto="root@pippins.net"
+
+# specify the name of the remote server to pull snapshots from to backup locally
+remote="tank.pippins.net"
+
+# specify the name of the local pool to store remotely pulled (backup) snapshots to
+local_pool="backups"
+
+# when this variable is set, local filesystems will be destroyed
+# before receiving full streams into them from the remote source.
+destroy_local_filesystem_on_full_replicate=0
+
+# set this to 1 if you want the snapshot script to run in "test" mode (not really take any snaps)
+SNAP_UNDER_TEST=
+
+# Specify the maximum run time in minutes that the replicate script can run for (0=no limit)
+maxruntime=0
+
+# Specify the list of filesystems to replicate from the remote to the local_pool (1 per line)
+filesystems_to_replicate='
+naspool/www
+naspool/git
+'
+
diff --git a/zfs-scripts.conf.dist b/zfs-scripts.conf.dist
new file mode 100644
index 0000000..42f387d
--- /dev/null
+++ b/zfs-scripts.conf.dist
@@ -0,0 +1,46 @@
+###########################################################################
+# This is a shell config script included by the other ZFS scripts.
+# You must copy this file and modify these values to match your system.
+# Save the resulting file as "zfs-scripts.conf" in this directory.
+# This will prevent your settings from being overwritten by future updates.
+###########################################################################
+
+# setup your path here to find all the binaries the ZFS scripts call
+PATH=/usr/sbin:/sbin:/etc/bin:.:$PATH
+
+# specify the path to the zfs binary
+zfs=/usr/sbin/zfs
+
+# specify the path to your command line mailer
+mailx=/usr/bin/mailx
+
+# specify the path to the logdir the ZFS scripts should dump their logs to
+logdir="/var/log/zfs"
+
+# specify the name of the lockdir used when performing ZFS admin operations
+lockdir="/tmp/zfs-admin-lock"
+
+# specify the user to send email reports to
+mailto=
+
+# specify the name of the remote server to pull snapshots from to backup locally
+remote=
+
+# specify the name of the local pool to store remotely pulled (backup) snapshots to
+local_pool=
+
+# when this variable is set, local filesystems will be destroyed
+# before receiving full streams into them from the remote source.
+destroy_local_filesystem_on_full_replicate=0
+
+# set this to 1 if you want the snapshot script to run in "test" mode (not really take any snaps)
+SNAP_UNDER_TEST=
+
+# Specify the maximum run time in minutes that the replicate script can run for (0=no limit)
+maxruntime=0
+
+# Specify the list of filesystems to replicate from the remote to the local_pool (1 per line)
+# (enter 1 filesystem per line)
+filesystems_to_replicate='
+'
+
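As the header of zfs-scripts.conf.dist says, the .dist file is a template: copy it next to the scripts and edit the copy, so future updates to the .dist file don't clobber local settings. A typical first-time setup might look like this (the checkout path is an example only):

    cd /etc/bin/zfs                          # example: wherever these scripts live
    cp zfs-scripts.conf.dist zfs-scripts.conf
    $EDITOR zfs-scripts.conf                 # set mailto, remote, local_pool, etc. for this host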
diff --git a/zfs-scrub b/zfs-scrub
index dffe080..939c798 100755
--- a/zfs-scrub
+++ b/zfs-scrub
@@ -6,13 +6,15 @@
 #              running at any given time. This serializes the zfs
 #              scrub process for any pool.
 
-exec >> /var/log/zfs/zfs-scrub.log 2>&1
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
 
-PATH=/usr/sbin:/sbin:/etc/bin:$PATH
+exec >> $logdir/zfs-scrub.log 2>&1
 
 pools="$*"
 maxsleeptime=360
-mailto=root
 
 if [ -z "$pools" ]; then
   echo "-E- Usage: $0 "
@@ -61,7 +63,7 @@ do
   zpool status $i | grep scrub: | grep "with 0 errors" > /dev/null 2>&1
   if [ $? != 0 ]; then
     # The scrub found errors
-    zpool status $i | /usr/bin/mailx -s "zpool scrub $i found errors" $mailto
+    zpool status $i | $mailx -s "zpool scrub $i found errors" $mailto
   fi
 done
diff --git a/zfs-snapshot-totals-cron b/zfs-snapshot-totals-cron
index 49779b8..d9c85ef 100755
--- a/zfs-snapshot-totals-cron
+++ b/zfs-snapshot-totals-cron
@@ -1,11 +1,13 @@
 #!/bin/bash
 
-PATH=/usr/sbin:/sbin:/etc/bin:$PATH
+# source our configuration
+config="${0%/*}/zfs-scripts.conf"
+[ -e "${config}.dist" ] && . ${config}.dist
+[ -e "${config}" ] && . ${config}
 
-snapshot_totals="zfs-snapshot-totals"
-logfile="/var/log/zfs/zfs-snapshot-totals.log"
+logfile="$logdir/zfs-snapshot-totals.log"
 
 date >> $logfile
-$snapshot_totals >> $logfile
+zfs-snapshot-totals >> $logfile
 echo >> $logfile
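The wrapper and -cron scripts are meant to be driven from cron (zfs-autosnap-wrapper, for instance, only takes its daily and weekly snapshots when it happens to run at midnight). The commit itself does not ship a crontab, so the schedule below is purely illustrative and the pool names are examples taken from the scripts:

    # Hypothetical crontab entries -- adjust paths, times, and pools to taste.
    0 * * * *   /etc/bin/zfs-autosnap-wrapper
    30 4 * * *  /etc/bin/zfs-replicate-wrapper
    0 2 1 * *   /etc/bin/zfs-scrub tank backups
    55 23 * * * /etc/bin/zfs-snapshot-totals-cron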