summaryrefslogtreecommitdiff
path: root/archive
diff options
context:
space:
mode:
authorSean Whitton <spwhitton@spwhitton.name>2018-04-07 10:24:51 -0700
committerSean Whitton <spwhitton@spwhitton.name>2018-04-07 10:24:58 -0700
commit6cbc721e9a8152d85fcb7c510a1907570896ac90 (patch)
treedc48bbcecd65bd5e25d551e036d1897ecd672de5 /archive
parent1f9c1e2a69de6c7456d425b8cbb0293f66efab95 (diff)
downloaddotfiles-6cbc721e9a8152d85fcb7c510a1907570896ac90.tar.gz
single dotfiles archive
Diffstat (limited to 'archive')
-rwxr-xr-xarchive/bin/apple-setup.sh103
-rwxr-xr-xarchive/bin/backuptom314
-rwxr-xr-xarchive/bin/bashmount761
-rwxr-xr-xarchive/bin/bitlbee_startup8
-rwxr-xr-xarchive/bin/build144
-rwxr-xr-xarchive/bin/build_rpi_sd_card.sh195
-rwxr-xr-xarchive/bin/cabal-install-exec16
-rwxr-xr-xarchive/bin/cabal-link-bins9
-rwxr-xr-xarchive/bin/caffeinate-zenity3
-rwxr-xr-xarchive/bin/capture-mail13
-rwxr-xr-xarchive/bin/clean-github-pr.py106
-rwxr-xr-xarchive/bin/coldbkup131
-rw-r--r--archive/bin/ctrlnocaps.ahk1
-rw-r--r--archive/bin/ctrlswapcaps-nonuk.ahk37
-rw-r--r--archive/bin/ctrlswapcaps.ahk30
-rwxr-xr-xarchive/bin/dasl-setup.bat7
-rwxr-xr-xarchive/bin/develacc10
-rwxr-xr-xarchive/bin/develacc-inner33
-rwxr-xr-xarchive/bin/develacc-push22
-rwxr-xr-xarchive/bin/develacc-push-all22
-rwxr-xr-xarchive/bin/dionysusbk51
-rwxr-xr-xarchive/bin/doc_post_receive_hook61
-rw-r--r--archive/bin/doccheckin.bat10
-rwxr-xr-xarchive/bin/e21
-rwxr-xr-xarchive/bin/ed5
-rwxr-xr-xarchive/bin/emacs-pkg-subtree54
l---------archive/bin/es1
-rwxr-xr-xarchive/bin/extract_url.pl967
-rwxr-xr-xarchive/bin/firejail-skype10
-rwxr-xr-xarchive/bin/fmdavsetup27
-rwxr-xr-xarchive/bin/fmr-sync-suspend14
-rw-r--r--archive/bin/gbampersat.ahk2
-rwxr-xr-xarchive/bin/git-wip328
-rwxr-xr-xarchive/bin/goodmorning34
-rwxr-xr-xarchive/bin/grbk39
-rwxr-xr-xarchive/bin/greypdf77
-rwxr-xr-xarchive/bin/hestia-checkup19
-rwxr-xr-xarchive/bin/hestia-startup16
-rwxr-xr-xarchive/bin/httphtmltitle.py15
-rwxr-xr-xarchive/bin/idlesshclear8
-rwxr-xr-xarchive/bin/imap-pass23
-rwxr-xr-xarchive/bin/imap-password46
-rwxr-xr-xarchive/bin/its-all-text-wrapper8
-rwxr-xr-xarchive/bin/jnest10
-rwxr-xr-xarchive/bin/kill-ssh-and-umount7
-rwxr-xr-xarchive/bin/laptopinput109
-rwxr-xr-xarchive/bin/latesteconomist9
l---------archive/bin/m1
-rwxr-xr-xarchive/bin/ma_org_publish64
-rwxr-xr-xarchive/bin/ma_reboot_check14
-rwxr-xr-xarchive/bin/mdns-do6
-rwxr-xr-xarchive/bin/mutt_bgrun118
-rwxr-xr-xarchive/bin/normalise-artemis42
-rwxr-xr-xarchive/bin/offlineimap-run4
-rwxr-xr-xarchive/bin/offlineimap.py47
-rwxr-xr-xarchive/bin/org-mairix-el-store-link3
-rwxr-xr-xarchive/bin/orgblosxom2ikiwiki.py190
-rwxr-xr-xarchive/bin/planetnewspipe5
-rwxr-xr-xarchive/bin/podcastsup12
-rwxr-xr-xarchive/bin/pomodoro83
-rwxr-xr-xarchive/bin/privoxy-blocklist.sh189
-rwxr-xr-xarchive/bin/rdate.py141
-rwxr-xr-xarchive/bin/rdate.py-dir13
-rwxr-xr-xarchive/bin/reading.py100
-rwxr-xr-xarchive/bin/readme-pull-request19
-rw-r--r--archive/bin/sariulahk.ahk130
-rwxr-xr-xarchive/bin/sdfweb-post-update21
-rwxr-xr-xarchive/bin/searchmail65
-rwxr-xr-xarchive/bin/sendmyip9
-rwxr-xr-xarchive/bin/smtptun76
-rwxr-xr-xarchive/bin/spwd20180
-rwxr-xr-xarchive/bin/spwd20-roll113
-rwxr-xr-xarchive/bin/sscan253
-rwxr-xr-xarchive/bin/ssleep82
-rwxr-xr-xarchive/bin/sync-docs27
-rwxr-xr-xarchive/bin/update-recoll-db6
-rwxr-xr-xarchive/bin/urxvtma6
-rwxr-xr-xarchive/bin/urxvttmux41
-rwxr-xr-xarchive/bin/urxvttmux-prompt9
-rwxr-xr-xarchive/bin/weekly-backups58
-rw-r--r--archive/bin/win32setup.bat23
-rwxr-xr-xarchive/bin/workstation-uninstallable43
-rwxr-xr-xarchive/bin/workstation-uninstallable-alt30
-rwxr-xr-xarchive/bin/xlaunch20
-rwxr-xr-xarchive/bin/yankfmailpw29
-rwxr-xr-xarchive/bin/yt3
-rw-r--r--archive/texmf/bibtex/bst/spwchicago/spwchicago.bst1662
-rw-r--r--archive/texmf/tex/latex/spwdnd/spwdnd.cls46
-rw-r--r--archive/texmf/tex/latex/spwdoc/spwdoc.cls126
-rw-r--r--archive/texmf/tex/latex/spwessay/spwessay.cls186
-rw-r--r--archive/texmf/tex/latex/spworg/spworg.sty50
-rw-r--r--archive/texmf/tex/latex/spwoutline/spwoutline.cls172
-rw-r--r--archive/texmf/tex/latex/spwpaper/spwpaper.cls198
-rw-r--r--archive/texmf/tex/latex/spwtitle/spwtitle.sty26
94 files changed, 8377 insertions, 0 deletions
diff --git a/archive/bin/apple-setup.sh b/archive/bin/apple-setup.sh
new file mode 100755
index 00000000..71101fc2
--- /dev/null
+++ b/archive/bin/apple-setup.sh
@@ -0,0 +1,103 @@
+#!/bin/sh
+
+USB=/Volumes/SPWHITTON
+
+key()
+{
+ local k="$1"
+ osascript -e "tell application \"System Events\" to keystroke \"$k\""
+}
+
+code()
+{
+ local k="$1"
+ osascript -e "tell application \"System Events\" to key code $k"
+}
+
+# activate my keyboard preferences
+
+osascript -e 'tell application "System Preferences" to activate'
+osascript -e 'tell application "System Preferences" to set current pane to pane "com.apple.preference.keyboard"'
+osascript -e 'tell application "System Events" to key code 98 using {control down}' # ctrl-f7 to activate tabbing between controls
+sleep 1
+code 48
+code 48
+code 48
+code 48
+code 49
+code 48
+code 48
+code 48
+code 49
+code 125
+code 125
+code 52
+code 48
+code 48
+code 49
+code 125
+code 52
+code 48
+code 49
+code 126
+code 52
+code 48
+code 48
+code 48
+code 49
+osascript -e 'tell application "System Events" to key code 98 using {control down}'
+osascript -e 'tell application "System Events" to keystroke "q" using {command down}'
+
+# setup
+
+if ! [ -d "$HOME/src/dotfiles/.git" ]; then
+ git clone https://git.spwhitton.name/dotfiles $HOME/src/dotfiles
+ # this is currently out of action because GNU stow installs but
+ # doesn't seem to actually do anything on Mac OS
+ # $HOME/src/dotfiles/bin/bstraph.sh
+ cp $HOME/src/dotfiles/{.zshrc,.shenv} $HOME # instead
+fi
+pkill firefox
+/Applications/Firefox.app/Contents/MacOS/firefox -private-window http://u.arizona.edu/~spwhitton/bookmarks.shtml >/dev/null 2>/dev/null &
+mkdir -p $HOME/.ssh
+cp $USB/lib/{id_putty,known_hosts,config} $HOME/.ssh
+chmod 600 $HOME/.ssh/id_putty
+
+# run the shell
+
+cd $HOME
+/bin/zsh
+
+# cleanup
+
+pkill firefox
+pkill ssh # cached connection!
+echo "Please wait, securely deleting files..."
+rm -rfP $HOME/.ssh/{id_putty,config} $HOME/Downloads/* $HOME/.Trash/* $HOME/.zshist $HOME/.bash_history
+
+# reset keyboard preferences
+
+osascript -e 'tell application "System Events" to key code 98 using {control down}'
+osascript -e 'tell application "System Preferences" to activate'
+osascript -e 'tell application "System Preferences" to set current pane to pane "com.apple.preference.keyboard"'
+sleep 1
+code 48
+code 48
+code 48
+code 48
+code 49
+code 48
+code 48
+code 48
+code 49
+code 48
+code 48
+code 48
+code 48
+code 49
+code 48
+code 48
+code 49
+osascript -e 'tell application "System Events" to key code 98 using {control down}'
+osascript -e 'tell application "System Events" to keystroke "q" using {command down}'
+
diff --git a/archive/bin/backuptom3 b/archive/bin/backuptom3
new file mode 100755
index 00000000..80a0ecc2
--- /dev/null
+++ b/archive/bin/backuptom3
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# backs up git repositories to m3 portable drive, excluding git annex
+
+if ! mount | grep -q "on /media/m3 type ext4"; then
+ echo "m3 drive not mounted"
+else
+ for dir in /media/m3/git/*; do
+ cd $dir
+ echo \> $(basename $dir):
+ git pull
+ done
+ sudo /root/local/bin/duply $(hostname -s)-seven backup
+fi
diff --git a/archive/bin/bashmount b/archive/bin/bashmount
new file mode 100755
index 00000000..d12840aa
--- /dev/null
+++ b/archive/bin/bashmount
@@ -0,0 +1,761 @@
+#!/bin/bash
+
+#=============================================================================#
+# FILE: bashmount #
+# VERSION: 3.2.0 #
+# DESCRIPTION: bashmount is a menu-driven bash script that can use different #
+# backends to easily mount, unmount or eject removable devices #
+# without dependencies on udisks or any GUI. An extensive #
+# configuration file allows many aspects of the script to be #
+# modified and custom commands to be run on devices. #
+# LICENSE: GPLv2 #
+# AUTHORS: Jamie Nguyen <j@jamielinux.com> #
+# Lukas B. #
+#=============================================================================#
+
+# Copyright (C) 2013-2014 Jamie Nguyen <j@jamielinux.com>
+# Copyright (C) 2014 Lukas B.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License v2 as published by the
+# Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+declare -r VERSION='3.2.0'
+
+if (( $# > 0 )); then
+ if [[ "${1}" = '-V' || "${1}" = '--version' ]]; then
+ cat << EOF
+bashmount ${VERSION}
+Copyright (C) 2013-2014 Jamie Nguyen <j@jamielinux.com>
+Copyright (C) 2014 Lukas B.
+License GPLv2: GNU GPL version 2 <http://www.gnu.org/licenses/gpl-2.0.html>.
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+
+Written by Jamie Nguyen and Lukas B.
+EOF
+ exit 0
+ else
+ printf '%s\n' 'bashmount: invalid option.'
+ exit 64
+ fi
+fi
+
+#-------------------------------------#
+# CONFIGURATION #
+#-------------------------------------#
+# {{{
+# Make sure that user defined options will not interfere with grep.
+unset GREP_OPTIONS
+
+# Set defaults.
+declare udisks='auto'
+declare default_mount_options='--options nosuid,noexec,noatime'
+declare -i show_internal=1
+declare -i show_removable=1
+declare -i show_optical=1
+declare -i show_commands=1
+declare -i show_full_device_names=0
+declare -i colourize=1
+declare -i custom4_show=0
+declare -i custom5_show=0
+declare -i custom6_show=0
+declare -i run_post_mount=0
+declare -i run_post_unmount=0
+declare -a blacklist=()
+
+mount_command() {
+ if (( udisks == 0 )); then
+ read -r -e -p 'Choose the mountpoint directory: ' dir
+ while [[ ! -d "${dir}" ]] || findmnt "${dir}" >/dev/null 2>&1; do
+ error 'No such directory, or mountpoint is already in use.'
+ read -r -e -p 'Choose the mountpoint directory: ' dir
+ done
+ mount ${mount_options} "${1}" "${dir}"
+ else
+ udisksctl mount ${mount_options} --block-device "${1}"
+ fi
+}
+
+unmount_command() {
+ if (( udisks == 0 )); then
+ umount "${1}"
+ else
+ udisksctl unmount --block-device "${1}"
+ fi
+}
+
+filemanager() {
+ cd "${1}" && "$SHELL"
+ exit 0
+}
+
+post_mount() {
+ error "No command specified in 'bashmount.conf'."
+ return 1
+}
+
+post_unmount() {
+ error "No command specified in 'bashmount.conf'."
+ return 1
+}
+
+# Load configuration file.
+declare CONFIGFILE=
+
+if [[ -z "${XDG_CONFIG_HOME}" ]]; then
+ CONFIGFILE="${HOME}/.config/bashmount/config"
+else
+ CONFIGFILE="${XDG_CONFIG_HOME}/bashmount/config"
+fi
+
+if [[ ! -f "${CONFIGFILE}" ]]; then
+ CONFIGFILE='/etc/bashmount.conf'
+fi
+
+if [[ -f "${CONFIGFILE}" ]]; then
+ if ! . "${CONFIGFILE}"; then
+ printf '%s\n' 'bashmount: Failed to source configuration file.'
+ exit 78
+ fi
+fi
+
+if [[ "${udisks}" == "auto" ]]; then
+ type -p udisksctl >/dev/null 2>&1 && udisks=1 || udisks=0
+elif (( udisks == 1 )); then
+ if ! type -p udisksctl >/dev/null 2>&1; then
+ printf '%s\n' "bashmount: 'udisksctl': command not found"
+ exit 69
+ fi
+fi
+
+if ! type -p lsblk >/dev/null 2>&1; then
+ printf '%s\n' "bashmount: 'lsblk': command not found"
+ exit 69
+fi
+
+declare mount_options="${default_mount_options}"
+# }}}
+
+#-------------------------------------#
+# GENERAL FUNCTIONS #
+#-------------------------------------#
+# {{{
+unset ALL_OFF BOLD BLUE GREEN RED
+if (( colourize )); then
+ if tput setaf 0 >/dev/null 2>&1; then
+ ALL_OFF="$(tput sgr0)"
+ BOLD="$(tput bold)"
+ BLUE="${BOLD}$(tput setaf 4)"
+ GREEN="${BOLD}$(tput setaf 2)"
+ RED="${BOLD}$(tput setaf 1)"
+ else
+ ALL_OFF='\e[1;0m'
+ BOLD='\e[1;1m'
+ BLUE="${BOLD}\e[1;34m"
+ GREEN="${BOLD}\e[1;32m"
+ RED="${BOLD}\e[1;31m"
+ fi
+ readonly ALL_OFF BOLD BLUE GREEN RED
+fi
+
+msg() {
+ printf '%s\n' "${GREEN}==>${ALL_OFF}${BOLD} ${@}${ALL_OFF}" >&2
+}
+
+error() {
+ printf '%s\n' "${RED}==>${ALL_OFF}${BOLD} ERROR: ${@}${ALL_OFF}" >&2
+}
+
+print_commands() {
+ print_separator_commands
+ printf '%s' "${BLUE}e${ALL_OFF}: eject ${BLUE}i${ALL_OFF}: info"
+ printf '%s' " ${BLUE}m${ALL_OFF}: mount ${BLUE}o${ALL_OFF}: open"
+ printf '%s\n\n' " ${BLUE}u${ALL_OFF}: unmount"
+ printf '%s' "${BLUE}a${ALL_OFF}: unmount all"
+ printf '%s' " ${BLUE}r${ALL_OFF}: refresh"
+ printf '%s\n\n' " ${BLUE}q${ALL_OFF}: quit ${BLUE}?${ALL_OFF}: help"
+}
+
+print_submenu_commands() {
+ print_separator_commands
+ printf '%s' "${BLUE}e${ALL_OFF}: eject ${BLUE}i${ALL_OFF}: info"
+ if info_mounted "${devname}"; then
+ printf '%s' " ${BLUE}u${ALL_OFF}: unmount"
+ else
+ printf '%s' " ${BLUE}m${ALL_OFF}: mount"
+ fi
+ printf '%s\n\n' " ${BLUE}o${ALL_OFF}: open"
+ printf '%s' "${BLUE}b${ALL_OFF}: back ${BLUE}r${ALL_OFF}: refresh"
+ printf '%s\n' " ${BLUE}q${ALL_OFF}: quit ${BLUE}?${ALL_OFF}: help"
+
+ printf '\n'
+ printf '%s' "${BLUE}1${ALL_OFF}: read-only"
+ printf '%s' " ${BLUE}2${ALL_OFF}: luksOpen"
+ printf '%s' " ${BLUE}3${ALL_OFF}: luksClose"
+ printf '\n'
+
+ if (( custom4_show )) || (( custom5_show )) || (( custom6_show )); then
+ printf '\n'
+ fi
+
+ if (( custom4_show )) && [[ -n "${custom4_desc}" ]]; then
+ printf '%s' "${BLUE}4${ALL_OFF}: ${custom4_desc}"
+ fi
+
+ if (( custom5_show )) && [[ -n "${custom5_desc}" ]]; then
+ printf '%s' " ${BLUE}5${ALL_OFF}: ${custom5_desc}"
+ fi
+
+ if (( custom6_show )) && [[ -n "${custom6_desc}" ]]; then
+ printf '%s' " ${BLUE}6${ALL_OFF}: ${custom6_desc}"
+ fi
+
+ if (( custom4_show )) || (( custom5_show )) || (( custom6_show )); then
+ printf '\n'
+ fi
+}
+
+enter_to_continue() {
+ printf '\n'
+ read -r -e -p "Press [${BLUE}enter${ALL_OFF}] to continue: " null
+}
+
+invalid_command() {
+ printf '\n'
+ error 'Invalid command. See the help menu.'
+ enter_to_continue
+}
+
+print_separator() {
+ printf '%s\n\n' '====================================================='
+}
+
+print_separator_commands() {
+ printf '%s\n\n' '===================== COMMANDS ======================'
+}
+
+print_separator_device() {
+ printf '%s\n\n' '==================== DEVICE MENU ===================='
+}
+
+print_separator_optical() {
+ printf '%s\n\n' '=================== OPTICAL MEDIA ==================='
+}
+
+print_separator_removable() {
+ printf '%s\n\n' '================== REMOVABLE MEDIA =================='
+}
+
+print_separator_internal() {
+ printf '%s\n\n' '================== INTERNAL MEDIA ==================='
+}
+
+print_help() {
+ clear
+ print_commands
+ print_separator
+ printf '%s' "${GREEN}==>${ALL_OFF} "
+ printf '%s' "${BOLD}To mount the first device, enter ${ALL_OFF}"
+ printf '%s' "${BLUE}1m${ALL_OFF}"
+ printf '%s\n\n' "${BOLD}.${ALL_OFF}"
+ printf '%s' "${GREEN}==>${ALL_OFF} "
+ printf '%s\n\n' "${BOLD}To open the mountpath directory of the first${ALL_OFF}"
+ printf '%s' "${BOLD} device (mounting if required), enter "
+ printf '%s' "${BLUE}1o${ALL_OFF}"
+ printf '%s\n\n' "${BOLD}.${ALL_OFF}"
+ printf '%s' "${GREEN}==>${ALL_OFF} "
+ printf '%s' "${BOLD}To view a device sub-menu, "
+ printf '%s\n\n' "just enter the number.${ALL_OFF}"
+ printf '%s' "${GREEN}==>${ALL_OFF} "
+ printf '%s' "${BLUE}a${ALL_OFF}"
+ printf '%s' "${BOLD}, "
+ printf '%s' "${BLUE}r${ALL_OFF}"
+ printf '%s' "${BOLD}, "
+ printf '%s' "${BLUE}q${ALL_OFF} "
+ printf '%s' "${BOLD}and "
+ printf '%s' "${BLUE}?${ALL_OFF} "
+ printf '%s\n\n' "${BOLD}do not require a number.${ALL_OFF}"
+ print_separator
+ enter_to_continue
+}
+
+print_help_sub() {
+ clear
+ print_submenu_commands
+ printf '\n'
+ print_separator
+ printf '%s' "${GREEN}==>${ALL_OFF} "
+ printf '%s\n\n' "${BOLD}To perform a command, enter a character.${ALL_OFF}"
+ printf '%s' "${GREEN}==>${ALL_OFF} "
+ printf '%s' "${BOLD}For example, to mount this device, enter ${ALL_OFF}"
+ printf '%s' "${BLUE}m${ALL_OFF}"
+ printf '%s\n\n' "${BOLD}.${ALL_OFF}"
+ print_separator
+ enter_to_continue
+}
+
+print_device_name() {
+ # The padding between device location and device label.
+ local -i padding=22
+ # For device names that are too long, this defines how many characters from
+ # the end of the string we will show.
+ local -i post_length=6
+
+ info_label="$(info_fslabel "${devname}")"
+ if [[ -z "${info_label}" ]]; then
+ if [[ "${1}" == 'optical' ]]; then
+ info_label="$(lsblk -dno MODEL "${devname}")"
+ else
+ info_label="$(info_partlabel "${devname}")"
+ fi
+ [[ -z "${info_label}" ]] && info_label='No label'
+ fi
+
+ listed[device_number]="${devname}"
+ (( device_number++ ))
+
+ printf '%s' " ${BLUE}${device_number})${ALL_OFF}"
+ devnameshort="${devname##*/}"
+
+ if (( !show_full_device_names )) && (( ${#devnameshort} > padding )); then
+ pre_length=$(( padding - post_length - 3 ))
+ devnamepre="${devnameshort:0:pre_length}"
+ devnamepost="${devnameshort:${#devnameshort}-post_length}"
+ devnameshort="${devnamepre}...${devnamepost}"
+ fi
+
+ printf '%s' " ${devnameshort}:"
+
+ # Add padding between device location and device label.
+ devname_length="${#devnameshort}"
+ for (( i=padding ; i>devname_length ; i-- )); do
+ printf '%s' " "
+ done
+
+ printf '%s' " ${info_label}"
+ if info_mounted "${devname}"; then
+ printf '%s' " ${GREEN}[mounted]${ALL_OFF}"
+ mounted[${#mounted[*]}]="${devname}"
+ fi
+ printf '\n'
+}
+
+# }}}
+
+#-------------------------------------#
+# INFORMATION RETRIEVAL #
+#-------------------------------------#
+# {{{
+# Returns 0 if the device is registered as removable device in the kernel,
+# otherwise it returns 1.
+info_removable() {
+ [[ "$(lsblk -drno RM "${1}")" == '1' ]]
+}
+
+# Prints the device type, for example partition or disk.
+info_type() {
+ lsblk -drno TYPE "${1}"
+}
+
+# Prints the filesystem label, if present.
+info_fslabel() {
+ lsblk -drno LABEL "${1}"
+}
+
+# Prints the partition label, if present.
+info_partlabel() {
+ lsblk -drno PARTLABEL "${1}"
+}
+
+# Prints the mountpath, if mounted.
+info_mountpath() {
+ findmnt -no TARGET "${1}"
+}
+
+# Returns 0 if the device is mounted, 1 otherwise.
+info_mounted() {
+ findmnt -no TARGET "${1}" >/dev/null 2>&1
+}
+
+# Prints the filesystem type.
+info_fstype() {
+ lsblk -drno FSTYPE "${1}"
+}
+
+# Prints the device size.
+info_size() {
+ lsblk -drno SIZE "${1}"
+}
+# }}}
+
+#-------------------------------------#
+# DEVICE MANIPULATION #
+#-------------------------------------#
+# {{{
+check_device() {
+ if [[ ! -b "${1}" ]]; then
+ printf '\n'
+ error "${1} is no longer available."
+ enter_to_continue
+ return 1
+ fi
+ return 0
+}
+
+action_eject() {
+ check_device "${1}" || return 1
+ info_mounted "${1}" && action_unmount "${1}"
+ if ! info_mounted "${1}"; then
+ printf '\n'
+ msg "Ejecting ${1} ..."
+ printf '\n'
+ eject "${1}"
+ # Give the device some time to eject. If we don't then sometimes the ejected
+ # device will still be present when returning to the main menu.
+ enter_to_continue
+ sleep 2
+ fi
+}
+
+action_info() {
+ check_device "${1}" || return 1
+ lsblk -o NAME,FSTYPE,MOUNTPOINT,SIZE "${1}" | less
+}
+
+action_mount() {
+ check_device "${1}" || return 1
+ printf '\n'
+ if info_mounted "${1}"; then
+ error "${1} is already mounted."
+ else
+ msg "Mounting ${1} ..."
+ if mount_command "${1}"; then
+ msg "${1} mounted successfully."
+ (( run_post_mount )) && post_mount "${1}"
+ else
+ printf '\n'
+ error "${1} could not be mounted."
+ fi
+ fi
+ enter_to_continue
+}
+
+action_open() {
+ if ! info_mounted "${1}"; then
+ printf '\n'
+ msg "Mounting ${1} ..."
+ if mount_command "${1}"; then
+ msg "${1} mounted successfully."
+ (( run_post_mount )) && post_mount "${1}"
+ else
+ printf '\n'
+ error "${1} could not be mounted."
+ enter_to_continue
+ return 1
+ fi
+ fi
+ printf '\n'
+ msg "Opening ${1} ..."
+ printf '\n'
+ filemanager "$(info_mountpath "${1}")"
+ enter_to_continue
+}
+
+action_unmount() {
+ printf '\n'
+ if info_mounted "${1}"; then
+ msg "Unmounting ${1} ..."
+ printf '\n'
+ if unmount_command "${1}"; then
+ msg "${1} unmounted successfully."
+ (( run_post_unmount )) && post_unmount "${1}"
+ else
+ printf '\n'
+ error "${1} could not be unmounted."
+ fi
+ else
+ error "${1} is not mounted."
+ fi
+ enter_to_continue
+}
+# }}}
+
+#-------------------------------------#
+# MENU FUNCTIONS #
+#-------------------------------------#
+# {{{
+list_devices() {
+ local -a all=() removable=() internal=() optical=()
+ # The array "all" contains the sorted list of devices returned by lsblk.
+ all=( $(lsblk -plno NAME) )
+ # The array "listed" contains all devices that are shown to the user.
+ listed=()
+ # The array "mounted" contains all devices that are listed and mounted.
+ mounted=()
+ # "device_number" is the total number of devices listed and equals ${#listed[*]}.
+ device_number=0
+
+ for devname in ${all[@]}; do
+ local info_type=
+ # Hide blacklisted devices.
+ for string in ${blacklist[@]}; do
+ lsblk -dPno NAME,TYPE,FSTYPE,LABEL,MOUNTPOINT,PARTLABEL "${devname}" \
+ | grep -E "${string}" >/dev/null 2>&1
+ (( $? )) || continue 2
+ done
+ info_type=$(info_type "${devname}")
+ # Sort devices into arrays removable, internal, and optical.
+ if [[ "${info_type}" == 'part' || "${info_type}" == 'crypt' ]]; then
+ if info_removable "${devname}"; then
+ removable[${#removable[*]}]="${devname}"
+ else
+ internal[${#internal[*]}]="${devname}"
+ fi
+ # Normally we don't want to see a 'disk', but if it has no partitions
+ # (eg, internal storage on some portable media devices) then it should
+ # be visible.
+ elif [[ "${info_type}" == 'disk' ]]; then
+ [[ "${all[@]}" =~ ${devname}1 ]] && continue
+ if info_removable "${devname}"; then
+ removable[${#removable[*]}]="${devname}"
+ else
+ internal[${#internal[*]}]="${devname}"
+ fi
+ elif [[ "${info_type}" == 'rom' ]]; then
+ optical[${#optical[*]}]="${devname}"
+ else
+ continue
+ fi
+ done
+ # Print output.
+ # List internal media.
+ if (( show_internal )) && (( ${#internal[*]} )); then
+ print_separator_internal
+ for devname in ${internal[@]}; do
+ print_device_name
+ done
+ printf '\n'
+ fi
+ # List removable media.
+ if (( show_removable )) && (( ${#removable[*]} )); then
+ print_separator_removable
+ for devname in ${removable[@]}; do
+ print_device_name
+ done
+ printf '\n'
+ fi
+ # List optical media.
+ if (( show_optical )) && (( ${#optical[*]} )); then
+ print_separator_optical
+ for devname in ${optical[@]}; do
+ print_device_name optical
+ done
+ printf '\n'
+ fi
+ (( device_number )) || printf '%s\n' 'No devices.'
+}
+
+submenu() {
+ check_device "${devname}" || return 1
+ local info_label= info_fstype= info_size=
+ info_label="$(info_fslabel "${devname}")"
+ if [[ -z "${info_label}" ]]; then
+ info_label="$(info_partlabel "${devname}")"
+ if [[ -z "${info_label}" ]]; then
+ info_label='-'
+ fi
+ fi
+ info_fstype="$(info_fstype "${devname}")"
+ info_size="$(info_size "${devname}")"
+ clear
+ print_separator_device
+ printf '%s\n' "device : ${devname}"
+ printf '%s\n' "label : ${info_label}"
+ printf '%s' 'mounted : '
+ if info_mounted "${devname}"; then
+ printf '%s\n' "${GREEN}yes${ALL_OFF}"
+ printf '%s\n' "mountpath : $(info_mountpath "${devname}")"
+ else
+ printf '%s\n' "${RED}no${ALL_OFF}"
+ fi
+ printf '%s\n' "fstype : ${info_fstype}"
+ printf '%s\n' "size : ${info_size}"
+ if (( show_commands )); then
+ printf '\n'
+ print_submenu_commands
+ fi
+ printf '\n'
+ print_separator
+ read -r -e -p 'Command: ' action
+ case "${action}" in
+ 'e') action_eject "${devname}";;
+ 'i') action_info "${devname}";;
+ 'm') action_mount "${devname}";;
+ 'o') action_open "${devname}";;
+ 'u') action_unmount "${devname}";;
+ 'b') return 1;;
+ 'r') return 0;;
+ 'q') exit;;
+ '?')
+ print_help_sub
+ return 0;;
+ '1')
+ printf '\n'
+ msg 'Mounting read-only ...'
+ printf '\n'
+ mount_options="${default_mount_options}"' --read-only'
+ mount_command "${devname}"
+ mount_options="${default_mount_options}"
+ enter_to_continue
+ return 0;;
+ '2')
+ printf '\n'
+ msg 'Opening luks volume ...'
+ printf '\n'
+ if (( udisks == 0 )); then
+ cryptsetup open --type luks -v "${devname}" "luks-${devname##*/}"
+ else
+ udisksctl unlock --block-device "${devname}"
+ fi
+ enter_to_continue
+ return 0;;
+ '3')
+ printf '\n'
+ msg 'Closing luks volume ...'
+ printf '\n'
+ if (( udisks == 0 )); then
+ cryptsetup close --type luks "${devname}"
+ else
+ udisksctl lock --block-device "${devname}"
+ fi
+ enter_to_continue
+ return 0;;
+ '4')
+ if (( custom4_show )); then
+ printf '\n'
+ msg "Running custom command ${custom4_desc} ..."
+ printf '\n'
+ custom4_command "${devname}"
+ enter_to_continue
+ else
+ invalid_command
+ fi
+ return 0;;
+ '5')
+ if (( custom5_show )); then
+ printf '\n'
+ msg "Running custom command ${custom5_desc} ..."
+ printf '\n'
+ custom5_command "${devname}"
+ enter_to_continue
+ else
+ invalid_command
+ fi
+ return 0;;
+ '6')
+ if (( custom6_show )); then
+ printf '\n'
+ msg "Running custom command ${custom6_desc} ..."
+ printf '\n'
+ custom6_command "${devname}"
+ enter_to_continue
+ else
+ invalid_command
+ fi
+ return 0;;
+ *) invalid_command
+ return 0;;
+ esac
+}
+
+select_action() {
+ local devname= letter=
+ local -i number=
+ print_separator
+ read -r -e -p 'Command: ' action
+ if [[ "${action}" =~ ^[1-9] ]]; then
+ if [[ "${action}" =~ ^[1-9][0-9]*$ ]]; then
+ number="$(( action - 1 ))"
+ if (( number >= device_number )); then
+ invalid_command
+ return 1
+ fi
+ devname=${listed[number]}
+ while :; do
+ submenu || break
+ done
+ elif [[ "${action}" =~ ^[1-9][0-9]*[eimou]$ ]]; then
+ number="$(( ${action%?} - 1 ))"
+ letter="${action: -1}"
+ if (( number >= device_number )); then
+ invalid_command
+ return 1
+ fi
+ devname="${listed[number]}"
+ case "${letter}" in
+ 'e') action_eject "${devname}";;
+ 'i') action_info "${devname}";;
+ 'm') action_mount "${devname}";;
+ 'o') action_open "${devname}";;
+ 'u') action_unmount "${devname}";;
+ *) return 1;;
+ esac
+ return 0
+ else
+ invalid_command
+ return 1
+ fi
+ else
+ case "${action}" in
+ 'a')
+ printf '\n'
+ if (( ! ${#mounted[*]} )); then
+ error 'No devices mounted.'
+ enter_to_continue
+ return 1
+ fi
+ read -r -e -p 'Unmount all devices [y/N]?: ' unmount
+ if [[ "${unmount}" != 'y' ]] && [[ "${unmount}" != 'Y' ]]; then
+ return 0
+ fi
+ clear
+ for devname in ${mounted[@]}; do
+ action_unmount "${devname}" || continue
+ done
+ enter_to_continue
+ return 1;;
+ 'r'|"")
+ return 0;;
+ 'q'|'b')
+ exit 0;;
+ '?')
+ print_help
+ return 0;;
+ *)
+ invalid_command
+ return 1;;
+ esac
+ fi
+}
+# }}}
+
+declare -i device_number=
+declare -a mounted=()
+declare -a listed=()
+
+while true; do
+ clear
+ list_devices
+ (( show_commands )) && print_commands
+ select_action
+done
diff --git a/archive/bin/bitlbee_startup b/archive/bin/bitlbee_startup
new file mode 100755
index 00000000..aaabbf7e
--- /dev/null
+++ b/archive/bin/bitlbee_startup
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+. $HOME/.shenv
+
+unset PERL5LIB PKG_CONFIG_PATH LD_LIBRARY_PATH C_INCLUDE_PATH MODULEBUILDRC MODULEPATH MODULESHOME PERL_MM_OPT
+export PERL5LIB PKG_CONFIG_PATH LD_LIBRARY_PATH C_INCLUDE_PATH MODULEBUILDRC MODULEPATH MODULESHOME PERL_MM_OPT
+
+junest /usr/sbin/bitlbee -n >/dev/null 2>/dev/null &
diff --git a/archive/bin/build b/archive/bin/build
new file mode 100755
index 00000000..40669696
--- /dev/null
+++ b/archive/bin/build
@@ -0,0 +1,144 @@
+#!/bin/sh
+
+
+
+# Rewrite of this script is a WIP because I think I can get what I
+# want by just running debuild instead of dgit/gbp when I want to
+# ignore uncommitted changes. dpkg-buildpackage should unapply the
+# patches if it applied them. The only problem is if I applied some
+# with quilt. However, the only time I actually *need* to do that is
+# when refreshing patches; otherwise I should use gbp-pq, and then
+# test with `debuild -b`. After refreshing patches I can delete the
+# .pc directory and it should all work.
+
+
+
+
+# Copyright (C) 2016 Sean Whitton
+
+# Released under the GPL version 3
+
+# Description:
+
+# The purpose of this script is to give the right magic invocations to
+# build a Debian package, abstracting from the differences between
+#
+# - patches-applied repos intended to be built with dgit
+# - patches-unapplied repos intended to be built with gbp
+# - plain source packages not kept in git
+#
+# We do not consider patches-unapplied dgit repos (unreleased dgit
+# functionality) and patches-applied gbp repos (not popular, and
+# depends on d/source/local-options, which is to be discouraged as it
+# prevents dgit adoption). The former might make this script
+# obsolete.
+#
+# The following options are accepted, provided they come before any
+# other options, which are passed on to whatever we invoked to build
+# the package.
+#
+# - --ignore -- bypass checks for a dirty git working tree
+# - -b -- do a binary-only build (no .dsc)
+# - -S -- do a source-only build (no .deb)
+# - --sbuild -- use sbuild
+#
+# I recommend the following shell aliases:
+#
+# alias buildi="build --ignore"
+# alias buildib="build --ignore -b"
+# alias sbuild="build --sbuild"
+# alias sbuildi="build --ignore --sbuild"
+
+# Process arguments. We use -nc because we want to run the clean
+# ourselves before letting gbp or dgit check for uncommitted changes
+while "$1" in "--ignore --binary -S --sbuild"; do
+ case "$1" in
+ --ignore)
+ ignore=yes
+ ;;
+ -b)
+ buildtype="-nc -b"
+ ;;
+ -S)
+ buildtype="-nc -S"
+ ;;
+ --sbuild)
+ sbuild=yes
+ ;;
+ esac
+ shift
+done
+
+# default to a full build (mainly for full Lintian output)
+if [ "$buildtype" = "" ]; then
+ buildtype="-nc -F"
+fi
+
+# We try to run a clean unless this is an --ignore build. Basically,
+# --ignore builds are for iteratively hacking on stuff, but once we
+# don't pass --ignore, we want all the safety checks to fire
+if [ ! "$ignore" = "yes" ]; then
+ fakeroot debian/rules clean
+fi
+
+if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
+ if git branch -a | grep -q "remotes/dgit"; then
+ # ---- build with dgit
+
+ if [ "$ignore" = "yes" ]; then
+ arg="--ignore-dirty"
+ else
+ arg=""
+ fi
+ if [ "$sbuild" = "yes" ]; then
+ arg="$arg sbuild"
+ elif [ "$"] # TODO continue processing args
+
+ # safety check before git clean if we're not ignoring
+ if [ ! "$ignore" = "yes" ]; then
+ if git status --porcelain 2>/dev/null | grep -q "^??"; then
+ echo >&2 "abort: untracked files present (to override, try \`buildi')"
+ exit 1
+ fi
+ fi
+ dgit $arg "$@"
+ lintian
+ else
+ # ---- (assume) build with gbp
+ (
+ # gbp must be invoked from the root of the repository
+ cd "$(git rev-parse --show-toplevel)"
+
+ if [ -d ".pc" ]; then
+ quilt pop -a
+ fi
+ # handle ignore
+ if [ "$ignore" = "yes" ]; then
+ arg="--git-ignore-new"
+ else
+ arg=""
+ fi
+ fakeroot debian/rules clean
+ # safety check before git clean if we're not ignoring
+ if [ ! "$ignore" = "yes" ]; then
+ if git status --porcelain 2>/dev/null | grep -q "^??"; then
+ echo >&2 "abort: untracked files present (to override, try \`buildi')"
+ exit 1
+ fi
+ fi
+ git clean -xfd --exclude="debian/patches/*" # newly created patches
+ gbp buildpackage -F -nc -us -uc --source-options=--unapply-patches $arg "$@"
+ )
+ fi
+else
+ # ---- build with debuild
+ debuild $buildtype -us -uc
+fi
+
+# do a post-clean if we did a pre-clean
+if [ ! "$ignore" = "yes" ]; then
+ fakeroot debian/rules clean
+fi
+
+# TODO should we be doing so many cleans? There is probably a better way.
+# TODO similarly, we are popping and reapplying all the patches over and over.
diff --git a/archive/bin/build_rpi_sd_card.sh b/archive/bin/build_rpi_sd_card.sh
new file mode 100755
index 00000000..a8a90bbc
--- /dev/null
+++ b/archive/bin/build_rpi_sd_card.sh
@@ -0,0 +1,195 @@
+#!/bin/bash
+
+# build your own Raspberry Pi SD card
+#
+# by Klaus M Pfeiffer, http://blog.kmp.or.at/ , 2012-06-24
+
+# 2012-06-24
+# just checking for how partitions are called on the system (thanks to Ricky Birtles and Luke Wilkinson)
+# using http.debian.net as debian mirror, see http://rgeissert.blogspot.co.at/2012/06/introducing-httpdebiannet-debians.html
+# tested successfully in debian squeeze and wheezy VirtualBox
+# added hint for lvm2
+# added debconf-set-selections for keyboard
+# corrected bug in writing to etc/modules
+# 2012-06-16
+# improved handling of local debian mirror
+# added hint for dosfstools (thanks to Mike)
+# added vchiq & snd_bcm2835 to /etc/modules (thanks to Tony Jones)
+# take the value fdisk suggests for the boot partition to start (thanks to Mike)
+# 2012-06-02
+# improved to directly generate an image file with the help of kpartx
+# added deb_local_mirror for generating images with correct sources.list
+# 2012-05-27
+# workaround for https://github.com/Hexxeh/rpi-update/issues/4 just touching /boot/start.elf before running rpi-update
+# 2012-05-20
+# back to wheezy, http://bugs.debian.org/672851 solved, http://packages.qa.debian.org/i/ifupdown/news/20120519T163909Z.html
+# 2012-05-19
+# stage3: remove eth* from /lib/udev/rules.d/75-persistent-net-generator.rules
+# initial
+
+# you need at least
+# apt-get install binfmt-support qemu qemu-user-static debootstrap kpartx lvm2 dosfstools
+
+deb_mirror="http://http.debian.net/debian"
+#deb_local_mirror="http://debian.kmp.or.at:3142/debian"
+
+bootsize="64M"
+deb_release="wheezy"
+
+device=$1
+buildenv="/root/rpi"
+rootfs="${buildenv}/rootfs"
+bootfs="${rootfs}/boot"
+
+mydate=`date +%Y%m%d`
+
+if [ "$deb_local_mirror" == "" ]; then
+ deb_local_mirror=$deb_mirror
+fi
+
+image=""
+
+
+if [ $EUID -ne 0 ]; then
+ echo "this tool must be run as root"
+ exit 1
+fi
+
+if ! [ -b $device ]; then
+ echo "$device is not a block device"
+ exit 1
+fi
+
+if [ "$device" == "" ]; then
+ echo "no block device given, just creating an image"
+ mkdir -p $buildenv
+ image="${buildenv}/rpi_basic_${deb_release}_${mydate}.img"
+ dd if=/dev/zero of=$image bs=1MB count=1000
+ device=`losetup -f --show $image`
+ echo "image $image created and mounted as $device"
+else
+ dd if=/dev/zero of=$device bs=512 count=1
+fi
+
+fdisk $device << EOF
+n
+p
+1
+
++$bootsize
+t
+c
+n
+p
+2
+
+
+w
+EOF
+
+
+if [ "$image" != "" ]; then
+ losetup -d $device
+ device=`kpartx -va $image | sed -E 's/.*(loop[0-9])p.*/\1/g' | head -1`
+ device="/dev/mapper/${device}"
+ bootp=${device}p1
+ rootp=${device}p2
+else
+ if ! [ -b ${device}1 ]; then
+ bootp=${device}p1
+ rootp=${device}p2
+ if ! [ -b ${bootp} ]; then
+ echo "uh, oh, something went wrong, can't find bootpartition neither as ${device}1 nor as ${device}p1, exiting."
+ exit 1
+ fi
+ else
+ bootp=${device}1
+ rootp=${device}2
+ fi
+fi
+
+mkfs.vfat $bootp
+mkfs.ext4 $rootp
+
+mkdir -p $rootfs
+
+mount $rootp $rootfs
+
+cd $rootfs
+
+debootstrap --foreign --arch armel $deb_release $rootfs $deb_local_mirror
+cp /usr/bin/qemu-arm-static usr/bin/
+LANG=C chroot $rootfs /debootstrap/debootstrap --second-stage
+
+mount $bootp $bootfs
+
+echo "deb $deb_local_mirror $deb_release main contrib non-free
+" > etc/apt/sources.list
+
+echo "dwc_otg.lpm_enable=0 console=ttyAMA0,115200 kgdboc=ttyAMA0,115200 console=tty1 root=/dev/mmcblk0p2 rootfstype=ext4 rootwait" > boot/cmdline.txt
+
+echo "proc /proc proc defaults 0 0
+/dev/mmcblk0p1 /boot vfat defaults 0 0
+" > etc/fstab
+
+echo "raspberrypi" > etc/hostname
+
+echo "auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet dhcp
+" > etc/network/interfaces
+
+echo "vchiq
+snd_bcm2835
+" >> etc/modules
+
+echo "console-common console-data/keymap/policy select Select keymap from full list
+console-common console-data/keymap/full select de-latin1-nodeadkeys
+" > debconf.set
+
+echo "#!/bin/bash
+debconf-set-selections /debconf.set
+rm -f /debconf.set
+apt-get update
+apt-get -y install git-core binutils ca-certificates
+wget http://goo.gl/1BOfJ -O /usr/bin/rpi-update
+chmod +x /usr/bin/rpi-update
+mkdir -p /lib/modules/3.1.9+
+touch /boot/start.elf
+rpi-update
+apt-get -y install locales console-common ntp openssh-server less vim
+echo \"root:raspberry\" | chpasswd
+sed -i -e 's/KERNEL\!=\"eth\*|/KERNEL\!=\"/' /lib/udev/rules.d/75-persistent-net-generator.rules
+rm -f /etc/udev/rules.d/70-persistent-net.rules
+rm -f third-stage
+" > third-stage
+chmod +x third-stage
+LANG=C chroot $rootfs /third-stage
+
+echo "deb $deb_mirror $deb_release main contrib non-free
+" > etc/apt/sources.list
+
+echo "#!/bin/bash
+aptitude update
+aptitude clean
+apt-get clean
+rm -f cleanup
+" > cleanup
+chmod +x cleanup
+LANG=C chroot $rootfs /cleanup
+
+cd
+
+umount $bootp
+umount $rootp
+
+if [ "$image" != "" ]; then
+ kpartx -d $image
+ echo "created image $image"
+fi
+
+
+echo "done."
+
diff --git a/archive/bin/cabal-install-exec b/archive/bin/cabal-install-exec
new file mode 100755
index 00000000..fa4e2716
--- /dev/null
+++ b/archive/bin/cabal-install-exec
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+cabal --no-require-sandbox update
+for var in "$@"; do
+ if [ -d "$HOME/local/src/${var}" ]; then
+ echo >&2 "$(basename $0): a version of $var is already installed; skipping"
+ else
+ mkdir -p $HOME/local/src/${var}
+ cd $HOME/local/src/${var}
+ touch .duplicity-ignore
+ cabal sandbox init
+ wget https://www.stackage.org/lts/cabal.config
+ cabal install $var --force-reinstalls
+ cabal-link-bins
+ fi
+done
diff --git a/archive/bin/cabal-link-bins b/archive/bin/cabal-link-bins
new file mode 100755
index 00000000..15a5af5a
--- /dev/null
+++ b/archive/bin/cabal-link-bins
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+if ! [ -d .cabal-sandbox/bin ]; then
+ echo >&2 "$(basename $0): looks like you don't have a cabal sandbox yet"
+ exit 1
+fi
+
+mkdir -p $HOME/local/bin
+ln -rfs -t $HOME/local/bin .cabal-sandbox/bin/*
diff --git a/archive/bin/caffeinate-zenity b/archive/bin/caffeinate-zenity
new file mode 100755
index 00000000..cbeec673
--- /dev/null
+++ b/archive/bin/caffeinate-zenity
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+WINDOWID= caffeinate zenity --info --text="Press okay to resume screensaver and/or autosuspend"
diff --git a/archive/bin/capture-mail b/archive/bin/capture-mail
new file mode 100755
index 00000000..a34b894e
--- /dev/null
+++ b/archive/bin/capture-mail
@@ -0,0 +1,13 @@
+#!/bin/zsh
+
+msg=$(cat /dev/stdin)
+
+id=$(echo $msg | grep -i "^Message-ID:" | sed "s/^Message-I[dD]: //")
+from=$(echo $msg | grep -m 1 -i "^From:" | sed "s/^From: //" | sed "s/<.*$//" | sed 's/"//g' | sed 's/ $//')
+subject=$(echo $msg | grep -m 1 -i "^Subject:" | sed "s/^Subject: //")
+
+save-org-buffers
+
+echo "* TODO E-mail \"$subject\" from $from" >>$HOME/doc/org/refile.org
+echo "# Message-Id: $id" >>$HOME/doc/org/refile.org
+#emacsclient -t -e '(spw/end-of-refile)'
diff --git a/archive/bin/clean-github-pr.py b/archive/bin/clean-github-pr.py
new file mode 100755
index 00000000..34834958
--- /dev/null
+++ b/archive/bin/clean-github-pr.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+
+# clean-github-pr --- Create tidy repositories for pull requests
+#
+# Copyright (C) 2016 Sean Whitton
+#
+# clean-github-pr is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# clean-github-pr is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with clean-github-pr. If not, see <http://www.gnu.org/licenses/>.
+
+import github
+
+import sys
+import time
+import tempfile
+import shutil
+import subprocess
+import os
+
+CREDS_FILE = os.getenv("HOME") + "/.cache/clean-github-pr-creds"
+
+def main():
+ # check arguments
+ if len(sys.argv) != 2:
+ print sys.argv[0] + ": usage: " + sys.argv[0] + " USER/REPO"
+ sys.exit(1)
+
+ # check creds file
+ try:
+ f = open(CREDS_FILE, 'r')
+ except IOError:
+ print sys.argv[0] + ": please put your github username and password, separated by a colon, in the file ~/.cache/clean-github-pr-creds"
+ sys.exit(1)
+
+ # just to be sure
+ os.chmod(CREDS_FILE, 0600)
+
+ # make the fork
+ creds = f.readline()
+ username = creds.split(":")[0]
+ pword = creds.split(":")[1].strip()
+ token = f.readline().strip()
+
+ if len(token) != 0:
+ g = github.Github(token)
+ else:
+ g = github.Github(username, pword)
+
+ u = g.get_user()
+
+ source = sys.argv[1]
+ if '/' in source:
+ fork = sys.argv[1].split("/")[1]
+ print "forking repo " + source
+ u.create_fork(g.get_repo(source))
+ else:
+ fork = sys.argv[1]
+
+ while True:
+ try:
+ r = u.get_repo(fork)
+ except github.UnknownObjectException:
+ print "still waiting"
+ time.sleep(5)
+ else:
+ break
+
+ # set up & push github branch
+ user_work_dir = os.getcwd()
+ work_area = tempfile.mkdtemp()
+ os.chdir(work_area)
+ subprocess.call(["git", "clone", "https://github.com/" + username + "/" + fork])
+ os.chdir(work_area + "/" + fork)
+ subprocess.call(["git", "checkout", "--orphan", "github"])
+ subprocess.call(["git", "rm", "-rf", "."])
+ with open("README.md", 'w') as f:
+ f.write("This repository is just a fork made in order to submit a pull request; please ignore.")
+ subprocess.call(["git", "add", "README.md"])
+ subprocess.call(["git", "commit", "-m", "fork for a pull request; please ignore"])
+ subprocess.call(["git", "push", "origin", "+github"])
+ os.chdir(user_work_dir)
+ shutil.rmtree(work_area)
+
+ # make sure the branch has been pushed
+ time.sleep(5)
+
+ # set clean repository settings
+ r.edit(fork,
+ has_wiki=False,
+ description="Fork for a pull request; please ignore",
+ homepage="",
+ has_issues=False,
+ has_downloads=False,
+ default_branch="github")
+
+if __name__ == "__main__":
+ main()
diff --git a/archive/bin/coldbkup b/archive/bin/coldbkup
new file mode 100755
index 00000000..a98b2936
--- /dev/null
+++ b/archive/bin/coldbkup
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+# backup to removable, offline media
+
+. $HOME/.shenv
+. $HOME/lib/tputfs.sh
+
+set -e
+
+# TODO syncing annexes should be more systematic. This first part of
+# the script is the worst. Detect if a git repo is an annex, and do
+# some sensible backup sync. So that, like with regular git repos,
+# coldbkup can also set up a new backup drive without me having to do
+# a lot of annex setting up (maybe a function: gitannexbk, or just
+# auto-detect (better)). We can init the annex and put it in the
+# archive or backup group so we get a sensible default preferred
+# content expression (and need to git remote add in $HOME)
+
+# determine removable media available and if it's m3, sync annex content
+# TODO do this by looking at size of removable media?
+if mount | grep -q "/media/${USER}/m3"; then
+ HDD=/media/${USER}/m3
+ status syncing annex content
+ # we sync both ma and m3 here so that ma knows everything that got
+ # put onto m3
+ cd $HOME/lib/annex
+ git annex sync --content origin m3
+ cd $HOME/lib/wikiannex
+ git annex sync --content origin athena m3
+ # TODO
+ # cd $HOME/lib/dionysus
+ # git annex sync --content m3
+ cd $HOME
+elif mount | grep -q "/media/${USER}/bkupsd"; then
+ HDD=/media/${USER}/bkupsd
+
+ status syncing wikiannex content
+ cd $HOME/lib/wikiannex
+ git annex sync --content origin athena bkupsd
+
+ status syncing dionysus annex content
+ cd $HOME/lib/dionysus
+ git annex sync --content # origin athena bkupsd
+ cd $HOME
+else
+ echo "coldbkup: removable media not mounted" >&2
+ exit 1
+fi
+DEST=${HDD}/git
+mkdir -p $DEST
+
+# function to backup a repo: first arg is ssh path to repo, second arg
+# is where to put it
+gitbk ()
+{
+ local long=$1
+ local short=$(basename $long)
+ local dest=$2
+ if [ -e "$dest/$short" ]; then
+ cd $dest/$short
+ git fetch origin "+refs/heads/*:refs/heads/*" --prune --tags
+ else
+ mkdir -p $dest
+ cd $dest
+ git clone --mirror $long $short
+ fi
+}
+
+# backup a repo from github
+githubbk ()
+{
+ status processing $1 from github
+ gitbk git@github.com:$1 $DEST/github
+}
+
+# backup a repo from alioth
+aliothbk ()
+{
+ status processing $1 from alioth
+ gitbk alioth:/git/$1 $DEST/alioth
+}
+
+# Stage 1 : Backup repos hosted on athena
+
+# TODO: don't use ls here (see http://mywiki.wooledge.org/ParsingLs)
+
+repos=$(ssh athena ls /home/git)
+for repo in $repos; do
+ status processing $repo from athena
+ # TODO look in git-remote-gcrypt to find how it detects a gcrypt
+ # repo; there must be a way
+ if [ "$repo" = "priv.git" -o "$repo" = "annex.git" -o "$repo" = "rt.git" ]; then
+ # might need a ssh:// and a /~/ in here to work with gcrypt
+ gitbk gcrypt::git@spwhitton.name:/home/git/$repo $DEST/athena
+ else
+ gitbk git@spwhitton.name:/home/git/$repo $DEST/athena
+ fi
+done
+
+# Stage 2 : Debian repos
+
+aliothbk pkg-emacsen/pkg/aggressive-indent-mode.git
+aliothbk pkg-emacsen/pkg/f-el.git
+aliothbk pkg-emacsen/pkg/emacs-async.git
+aliothbk pkg-emacsen/pkg/emacs-noflet.git
+aliothbk pkg-emacsen/pkg/perspective-el.git
+aliothbk pkg-emacsen/pkg/helm.git
+aliothbk pkg-emacsen/pkg/epl.git
+aliothbk pkg-emacsen/pkg/pkg-info-el.git
+aliothbk pkg-emacsen/pkg/flx.git
+aliothbk pkg-emacsen/pkg/projectile.git
+aliothbk pkg-emacsen/pkg/let-alist.git
+aliothbk pkg-emacsen/pkg/seq-el.git
+aliothbk pkg-emacsen/pkg/shut-up.git
+aliothbk pkg-emacsen/pkg/popup-el.git
+aliothbk pkg-emacsen/pkg/paredit-el.git
+aliothbk pkg-mozext/ublock-origin.git
+aliothbk pkg-mozext/y-u-no-validate.git
+aliothbk pkg-mozext/classic-theme-restorer.git
+aliothbk pkg-emacsen/pkg/flycheck.git
+aliothbk pkg-mozext/keysnail.git
+
+# Stage 3 : Starred repos on github
+
+# TODO Commented as downloading so many forks will be slow. Maybe run
+# it on athena instead?
+
+# (
+# cd $DEST/github
+# github-backup spwhitton
+# )
diff --git a/archive/bin/ctrlnocaps.ahk b/archive/bin/ctrlnocaps.ahk
new file mode 100644
index 00000000..0651fa49
--- /dev/null
+++ b/archive/bin/ctrlnocaps.ahk
@@ -0,0 +1 @@
+Capslock::Ctrl \ No newline at end of file
diff --git a/archive/bin/ctrlswapcaps-nonuk.ahk b/archive/bin/ctrlswapcaps-nonuk.ahk
new file mode 100644
index 00000000..b4927523
--- /dev/null
+++ b/archive/bin/ctrlswapcaps-nonuk.ahk
@@ -0,0 +1,37 @@
+; original source: http://lifehacker.com/5468862/create-a-shortcut-key-for-restoring-a-specific-window
+; but I've added TheExe parameter
+ToggleWinMinimize(TheWindowTitle, TheExe)
+{
+ SetTitleMatchMode,2
+ DetectHiddenWindows, Off
+ IfWinActive, %TheWindowTitle%
+ {
+ WinMinimize, %TheWindowTitle%
+ }
+ Else
+ {
+ IfWinExist, %TheWindowTitle%
+ {
+ WinGet, winid, ID, %TheWindowTitle%
+ DllCall("SwitchToThisWindow", "UInt", winid, "UInt", 1)
+ }
+ Else
+ {
+ Run, %TheExe%
+ }
+ }
+ Return
+}
+
+F11::Send !{F4}
+F12::ToggleWinMinimize("Mozilla Firefox", "Firefox")
+
+; for Emacs
+
+Capslock::Ctrl
+LCtrl::Capslock
+
+; some British keyboard layout conventions
+
+@::"
+"::@
diff --git a/archive/bin/ctrlswapcaps.ahk b/archive/bin/ctrlswapcaps.ahk
new file mode 100644
index 00000000..16fd4416
--- /dev/null
+++ b/archive/bin/ctrlswapcaps.ahk
@@ -0,0 +1,30 @@
+; original source: http://lifehacker.com/5468862/create-a-shortcut-key-for-restoring-a-specific-window
+; but I've added TheExe parameter
+ToggleWinMinimize(TheWindowTitle, TheExe)
+{
+ SetTitleMatchMode,2
+ DetectHiddenWindows, Off
+ IfWinActive, %TheWindowTitle%
+ {
+ WinMinimize, %TheWindowTitle%
+ }
+ Else
+ {
+ IfWinExist, %TheWindowTitle%
+ {
+ WinGet, winid, ID, %TheWindowTitle%
+ DllCall("SwitchToThisWindow", "UInt", winid, "UInt", 1)
+ }
+ Else
+ {
+ Run, %TheExe%
+ }
+ }
+ Return
+}
+
+F11::Send !{F4}
+F12::ToggleWinMinimize("Mozilla Firefox", "Firefox")
+
+Capslock::Ctrl
+LCtrl::Capslock
diff --git a/archive/bin/dasl-setup.bat b/archive/bin/dasl-setup.bat
new file mode 100755
index 00000000..afe02117
--- /dev/null
+++ b/archive/bin/dasl-setup.bat
@@ -0,0 +1,7 @@
+@echo off
+mkdir C:\%HOMEPATH%\SPW
+copy /y ..\lib\putty.exe.reg C:\%HOMEPATH%\SPW
+copy /y ..\lib\putty.exe-empty.reg C:\%HOMEPATH%\SPW
+copy /y ..\lib\spwhitton@putty.ppk C:\%HOMEPATH%\SPW
+copy /y ..\bin\ctrlswapcaps.exe C:\%HOMEPATH%\SPW
+explorer "C:\%HOMEPATH%\SPW"
diff --git a/archive/bin/develacc b/archive/bin/develacc
new file mode 100755
index 00000000..32e71367
--- /dev/null
+++ b/archive/bin/develacc
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+# TODO use `sudo machinectl shell spw@develacc $shell` instead because
+# it sets up the environment better afaict
+# shell=$(sudo enter-develacc "getent passwd spw | cut -d: -f7")
+# (^ enter-develacc probably still best for non-interactive usage)
+
+# perhaps just a shell alias
+
+sudo $HOME/bin/develacc-inner
diff --git a/archive/bin/develacc-inner b/archive/bin/develacc-inner
new file mode 100755
index 00000000..cb094e4b
--- /dev/null
+++ b/archive/bin/develacc-inner
@@ -0,0 +1,33 @@
+#!/usr/bin/perl
+
+# Config
+
+my $machine = "develacc";
+my $user = "spw";
+
+# Code, based on enter-foo script from Propellor's systemd-nspawn
+# support
+
+# default command: calling user's login shell
+push @ARGV, $ENV{'SHELL'} unless (@ARGV);
+
+# get args
+my $pid=`machinectl show $machine -p Leader | cut -d= -f2`;
+chomp $pid;
+my $home=`echo ~$user`;
+chomp $home;
+my $uid=`stat --printf="%u" $home`;
+chomp $uid;
+my $gid=`stat --printf="%g" $home`;
+chomp $gid;
+
+# nsenter time
+if (length $pid) {
+ foreach my $var (keys %ENV) {
+ delete $ENV{$var} unless $var eq 'PATH' || $var eq 'TERM';
+ }
+ exec('nsenter', '-S', $uid, '-G', $gid, "--wd=$home", '-p', '-u', '-n', '-i', '-m', '-t', $pid, @ARGV);
+} else {
+ die 'container not running';
+}
+exit(1);
diff --git a/archive/bin/develacc-push b/archive/bin/develacc-push
new file mode 100755
index 00000000..7c9b5f1f
--- /dev/null
+++ b/archive/bin/develacc-push
@@ -0,0 +1,22 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use lib "$ENV{HOME}/lib/perl5";
+
+use ScriptStatus;
+use Term::UI;
+
+my $term = Term::ReadLine->new('brand');
+
+@ARGV or die "tell me which repo to back up";
+my $repo = shift @ARGV;
+chdir "/home/spw/src/$repo" or die "repo does not exist";
+system "cat .git/config | grep \"url =\"";
+system "cat .git/config | grep \"insteadOf\"";
+system "cat .git/config | grep \"pushInsteadOf\"";
+exit unless $term->ask_yn(
+ prompt => "Intend to push to these URIs?",
+ default => 'n',
+ );
+system "git push --no-verify @ARGV";
diff --git a/archive/bin/develacc-push-all b/archive/bin/develacc-push-all
new file mode 100755
index 00000000..2d1dbd19
--- /dev/null
+++ b/archive/bin/develacc-push-all
@@ -0,0 +1,22 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use lib "$ENV{HOME}/lib/perl5";
+
+use ScriptStatus;
+use Term::UI;
+
+my $term = Term::ReadLine->new('brand');
+
+@ARGV or die "tell me which repo to back up";
+my $repo = shift @ARGV;
+chdir "/home/spw/src/$repo" or die "repo does not exist";
+system "cat .git/config | grep \"url =\"";
+system "cat .git/config | grep \"insteadOf\"";
+system "cat .git/config | grep \"pushInsteadOf\"";
+exit unless $term->ask_yn(
+ prompt => "Intend to push to these URIs?",
+ default => 'n',
+ );
+system "git push-all --no-verify";
diff --git a/archive/bin/dionysusbk b/archive/bin/dionysusbk
new file mode 100755
index 00000000..9c6ad0d6
--- /dev/null
+++ b/archive/bin/dionysusbk
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+DIONYSUS="/media/usb0"
+DEST="$HOME/lib/annex/old/androidbk"
+
+# check we can go ahead and set up the temporary directory
+
+if ! [ -d "$DIONYSUS/DCIM" ]; then
+ echo "$(basename $0): phone microsd doesn't look to be mounted"
+ exit 1
+fi
+
+cd $(TMPDIR=~/tmp mktemp -d)
+mkdir temp-tar
+
+# backup photos
+
+mkdir incoming-photos
+if rsync -qavc $DIONYSUS/DCIM/ incoming-photos; then
+ rm -r $DIONYSUS/DCIM/*
+fi
+
+mkdir incoming-img
+if rsync -qavc $DIONYSUS/Pictures/ incoming-img; then
+ rm -r $DIONYSUS/Pictures
+fi
+
+# backup Kakao
+
+if rsync -qavc $DIONYSUS/Chats/ temp-tar; then
+ rm -r $DIONYSUS/Chats
+fi
+
+# backup contacts CSV file
+
+if rsync -qavc $DIONYSUS/Backup.Contacts.*.csv temp-tar; then
+ rm $DIONYSUS/Backup.Contacts.*.csv
+fi
+
+# tar it all up
+cd temp-tar
+tar cf $DEST/$(date +dionysus_%F.tar) *
+cd ..
+
+# conclusion: output instructions for backup procedure
+
+echo "Done in $(pwd). Now see backup procedure notes in Emacs."
+
+# echo "Go there and open up the tar file and check everything is there."
+# echo "Then move the tar file into annex, check through and move photos,"
+# echo "into annex and nuke $(pwd)/temp-tar and Dropbox photos."
diff --git a/archive/bin/doc_post_receive_hook b/archive/bin/doc_post_receive_hook
new file mode 100755
index 00000000..fefe4734
--- /dev/null
+++ b/archive/bin/doc_post_receive_hook
@@ -0,0 +1,61 @@
+#!/bin/sh
+
+# TODO if one of the calls to `org-publish-project' coughs on some
+# syntax errors, it stops further files in that project being
+# published. This script could e-mail me a warning about that, so I
+# can try to publish interactively (not batch mode) to find the
+# problem (and an easy way to test that interactive publishing would
+# be good, in comproc.org)
+
+HOME=/home/swhitton
+
+. $HOME/.shenv
+
+# 1. prepare the union mount
+
+if ! [ -d "$HOME/local/src/org-publish/doc" ]; then
+ mkdir -p $HOME/local/src/org-publish
+ git clone /home/git/doc.git $HOME/local/src/org-publish/doc
+else
+ cd $HOME/local/src/org-publish/doc
+ git pull -f
+fi
+#if ! mount | grep "lib/fm" >/dev/null; then
+# mount $HOME/lib/fm
+#fi
+mkdir -p /var/www/spw/org /tmp/dionysus/Agenda "/tmp/dionysus/Org docs" "/tmp/dionysus/Philos notes"
+
+#TEMP=$(mktemp -d)
+if [ -e "/tmp/org-work" ]; then
+ # we cannot use our own `mktemp -d' because Org hardcodes paths to
+ # source files in its timestamps cache
+ echo "another instance of the Org publishing script is running or crashed"
+ exit
+else
+ mkdir /tmp/org-work
+ TEMP="/tmp/org-work"
+fi
+unionfs-fuse $HOME/local/src/org-publish=RW:$HOME=RW $TEMP
+
+# 2. change to the union mount and run Emacs
+
+HOME=$TEMP
+export HOME
+lisp=$(cat <<EOF
+(progn
+ (ignore-errors (org-batch-store-agenda-views))
+ (ignore-errors (org-publish-project "org-web"))
+ (ignore-errors (org-publish-project "org-web-static"))
+ (ignore-errors (org-publish-project "org-tmp"))
+ (ignore-errors (org-publish-project "philos")))
+EOF
+ )
+chronic emacs -batch \
+ -l $HOME/.emacs.d/init.el \
+ -l $HOME/.emacs.d/init-org.el \
+ -eval "$lisp"
+
+# 3. cleanup
+
+fusermount -u $TEMP
+rmdir $TEMP
diff --git a/archive/bin/doccheckin.bat b/archive/bin/doccheckin.bat
new file mode 100644
index 00000000..32815035
--- /dev/null
+++ b/archive/bin/doccheckin.bat
@@ -0,0 +1,10 @@
+@echo off
+REM Reimplementation of my ~/bin/doccheckin script with Git on Windows
+
+REM To run regularly, try `schtasks /Create /SC MINUTE /MO 15 /TN
+REM doccheckin /TR "C:\Users\swhitton\bin\doccheckin.bat"'. Then open up
+REM Scheduled Tasks and tick EM "run whether user is logged on or not" to
+REM avoid command prompt window EM flashing up.
+
+CD %HOMEPATH%\doc
+"C:\Program Files\Git\bin\sh.exe" -c "if ! git status | /bin/grep -q 'You have unmerged paths.'; then git add org/*.org playlists/*.m3u emacs-abbrevs emacs-bookmarks mutt-aliases mutt-groups || true; git commit -a -m \"auto commit on $(hostname)\" || true; fi"
diff --git a/archive/bin/e b/archive/bin/e
new file mode 100755
index 00000000..a1f3a67a
--- /dev/null
+++ b/archive/bin/e
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+# TODO: conditional on emacs being running, or do a split running zile
+# (if that exists: otherwise nano): then this works great on remote
+# machines too and can incorporate into .zshrc, maybe
+
+if pgrep -u $USER emacs; then
+ command="emacsclient -nw"
+elif which zile; then
+ command="zile"
+else
+ command="nano"
+fi
+
+progname=`basename $0`
+
+if [ "$progname" = "es" ]; then
+ tmux split-window "$command $@"
+else
+ $command $@
+fi
diff --git a/archive/bin/ed b/archive/bin/ed
new file mode 100755
index 00000000..8fab919f
--- /dev/null
+++ b/archive/bin/ed
@@ -0,0 +1,5 @@
+#!/bin/zsh
+
+# string=`echo "emacsclient -nw --eval '(dired \\\\\\"$PWD\\\\\\")'"`
+
+tmux split-window "dired $PWD"
diff --git a/archive/bin/emacs-pkg-subtree b/archive/bin/emacs-pkg-subtree
new file mode 100755
index 00000000..0465b6e5
--- /dev/null
+++ b/archive/bin/emacs-pkg-subtree
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# emacs-pkg-subtree --- manage Emacs packages as git subtrees in your dotfiles git repo
+
+# Author/maintainer : Sean Whitton <spwhitton@spwhitton.name>
+# Instructions for use : https://spwhitton.name/blog/entry/emacs-pkg-subtree/
+
+# Copyright (C) 2015 Sean Whitton. Released under the GNU GPL 3.
+
+DEST="$HOME/src/dotfiles/.emacs.d/pkg"
+
+set -e
+
+if [ "$3" = "" ]; then
+ echo "$(basename $0): usage: $(basename $0) add|pull git_clone_uri ref" >&2
+ exit 1
+fi
+
+cd $DEST
+
+op="$1"
+uri="$2"
+repo="$(basename $2)"
+pkg="${repo%%\.git}"
+ref="$3"
+top="$(git rev-parse --show-toplevel)"
+prefix="${DEST##$top/}/$pkg"
+
+cd $top
+clean="$(git status --porcelain)"
+if [ ! -z "$clean" ]; then
+ echo "commit first" >&2
+ exit 1
+fi
+
+if [ "$op" = "add" ]; then
+ if [ ! -e "$DEST/$pkg" ]; then
+ git subtree add --squash --prefix $prefix $uri $ref
+ echo "$uri $ref" >> $DEST/subtrees
+ git add $DEST/subtrees
+ git commit -m "updated Emacs packages record"
+ else
+ echo "you already have a subtree by that name" >&2
+ exit 1
+ fi
+elif [ "$op" = "pull" ]; then
+ git subtree pull --squash --prefix $prefix $uri $ref
+ sed -i -e "s|^${uri} .*$|${uri} ${ref}|" $DEST/subtrees
+ git add $DEST/subtrees
+ git commit -m "updated Emacs packages record"
+else
+ echo "$(basename $0): usage: $(basename $0) add|pull git_clone_uri ref" >&2
+ exit 1
+fi
diff --git a/archive/bin/es b/archive/bin/es
new file mode 120000
index 00000000..9cbe6ea5
--- /dev/null
+++ b/archive/bin/es
@@ -0,0 +1 @@
+e \ No newline at end of file
diff --git a/archive/bin/extract_url.pl b/archive/bin/extract_url.pl
new file mode 100755
index 00000000..3d32a081
--- /dev/null
+++ b/archive/bin/extract_url.pl
@@ -0,0 +1,967 @@
+#!/usr/bin/env perl
+
+# License: BSD-2-Clause (simplified)
+# URL: http://spdx.org/licenses/BSD-2-Clause
+#
+# Copyright (C) 2011-2013 Kyle Wheeler
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY KYLE WHEELER "AS IS" AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL KYLE WHEELER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use MIME::Parser;
+use HTML::Parser;
+use Getopt::Std;
+use Pod::Usage;
+use Env;
+use strict;
+use warnings;
+
+# Program identity, used by the version/usage output below.
+my $LICENSE = "BSD-2-Clause";
+my $NAME = "extract_url";
+my $version = "1.5.8";
+# Command-line flag state, filled in by the option parsing further down.
+my $txtonly = 0;
+my $list = '';
+my $help = '';
+
+# Getopt::Std help hook: print the POD SYNOPSIS/OPTIONS and exit 0.
+sub HELP_MESSAGE {
+	pod2usage(0);
+}
+# Getopt::Std version hook.
+sub VERSION_MESSAGE {
+	print "$NAME $version License:$LICENSE\n";
+}
+
+# Option parsing: prefer Getopt::Long (long options) when installed, and
+# fall back to Getopt::Std otherwise.
+my %options;
+eval "use Getopt::Long";
+if ($@) {
+	$Getopt::Std::STANDARD_HELP_VERSION = 1;
+	&getopts("hltV",\%options) or pod2usage(-exitval=>2,-verbose=>1);
+} else {
+	&GetOptions('Version' => sub { VERSION_MESSAGE(); exit; },
+		'help' => sub { pod2usage(-exitval=>0,-verbose=>1); },
+		'man' => sub { pod2usage(-exitval=>0, -verbose=>99); },
+		'text' => \$txtonly,
+		'list!' => \$list) or pod2usage(-exitval=>2,-verbose=>1);
+}
+my $fancymenu = 1;
+if ($options{'l'} || length $list) { $fancymenu = 0; }
+if ($options{'V'}) { &VERSION_MESSAGE(); exit; }
+if ($options{'h'}) { &HELP_MESSAGE(); }
+
+# create a hash of html tag names that may have links
+# (tag => { attribute-that-may-hold-a-URL => 1 }); getprefs() may prune this
+# set via the HTML_TAGS config line.
+my %link_attr = (
+	'a' => {'href'=>1},
+	'applet' => {'archive'=>1,'codebase'=>1,'code'=>1},
+	'area' => {'href'=>1},
+	'blockquote' => {'cite'=>1},
+	#'body'    => {'background'=>1},
+	'embed'   => {'pluginspage'=>1, 'src'=>1},
+	'form'    => {'action'=>1},
+	'frame'   => {'src'=>1, 'longdesc'=>1},
+	'iframe'  => {'src'=>1, 'longdesc'=1},
+	#'ilayer'  => {'background'=>1},
+	#'img'     => {'src'=>1},
+	'input'   => {'src'=>1, 'usemap'=>1},
+	'ins'     => {'cite'=>1},
+	'isindex' => {'action'=>1},
+	'head'    => {'profile'=>1},
+	#'layer'   => {'background'=>1, 'src'=>1},
+	'layer'   => {'src'=>1},
+	'link'    => {'href'=>1},
+	'object'  => {'classid'=>1, 'codebase'=>1, 'data'=>1, 'archive'=>1,
+		'usemap'=>1},
+	'q'       => {'cite'=>1},
+	'script'  => {'src'=>1, 'for'=>1},
+	#'table'   => {'background'=>1},
+	#'td'      => {'background'=>1},
+	#'th'      => {'background'=>1},
+	#'tr'      => {'background'=>1},
+	'xmp'     => {'href'=>1},
+);
+
+# find out the URLVIEW command
+my $urlviewcommand="";
+my $displaysanitized = 0; # means to display the pre-sanitized URL instead of the pretty one
+my $shortcut = 0; # means open it without checking if theres only 1 URL
+my $noreview = 0; # means don't display overly-long URLs to be checked before opening
+my $persist = 0; # means don't exit after viewing a URL (ignored if $shortcut == 0)
+my $ignore_empty = 0; # means to throw out URLs that don't have text in HTML
+my $default_view = "url"; # means what shows up in the list by default: urls or contexts
+my $alt_select_key = 'k';
+# Populate the config globals above from ~/.extract_urlview (preferred) or,
+# failing that, take just the COMMAND line from ~/.urlview; finally fall back
+# to $BROWSER and then the MacOS-style "open".
+sub getprefs
+{
+	if (open(PREFFILE,'<',$ENV{'HOME'}."/.extract_urlview")) {
+		while (<PREFFILE>) {
+			my $lineread = $_;
+			if ($lineread =~ /^ALTSELECT [A-Za-fh-z0-9,.<>?;:{}|!@#$%^&*()_=+-`~]$/) {
+				$lineread =~ /ALTSELECT (.)/; $alt_select_key = $1;
+			} elsif ($lineread =~ /^SHORTCUT$/) { $shortcut = 1;
+			} elsif ($lineread =~ /^NOREVIEW$/) { $noreview = 1;
+			} elsif ($lineread =~ /^PERSISTENT$/) { $persist = 1;
+			} elsif ($lineread =~ /^DISPLAY_SANITIZED$/) { $displaysanitized = 1;
+			} elsif ($lineread =~ /^IGNORE_EMPTY_TAGS$/) { $ignore_empty = 1;
+			} elsif ($lineread =~ /^COMMAND (.*)/) {
+				$lineread =~ /^COMMAND (.*)/;
+				$urlviewcommand=$1;
+				chomp $urlviewcommand;
+			} elsif ($lineread =~ /^DEFAULT_VIEW (.*)/) {
+				$lineread =~ /^DEFAULT_VIEW (.*)/;
+				if ($1 =~ /^context$/) {
+					$default_view = "context";
+				} else {
+					$default_view = "url";
+				}
+			} elsif ($lineread =~ /^HTML_TAGS (.*)/) {
+				$lineread =~ /^HTML_TAGS (.*)/;
+				my @tags = split(',', $1);
+				my %tags_hash;
+				foreach my $tag (@tags) {
+					$tags_hash{lc $tag} = 1;
+				}
+				# Prune %link_attr down to the requested tag subset.
+				foreach my $tag (keys %link_attr) {
+					delete $link_attr{$tag} if (! exists($tags_hash{$tag}));
+				}
+			}
+		}
+		close PREFFILE;
+	} elsif (open(URLVIEW,'<',$ENV{'HOME'}."/.urlview")) {
+		while (<URLVIEW>) {
+			if (/^COMMAND (.*)/) {
+				$urlviewcommand=$1;
+				chomp $urlviewcommand;
+				last;
+			}
+		}
+		close URLVIEW;
+	}
+	if ($urlviewcommand eq "") {
+		if (exists $ENV{BROWSER}) {
+			$urlviewcommand=$ENV{BROWSER};
+		} else {
+			$urlviewcommand = "open";
+		}
+	}
+}
+
+# Registry of discovered URLs: %link_hash maps URL -> first-seen ordinal,
+# %orig_text maps URL -> human-readable context snippet.
+my %link_hash;
+my %orig_text;
+my $newlink = 1;
+# Register a URL, preserving first-seen ordering and dropping duplicates.
+sub foundurl {
+	my ($uri) = @_;
+	#$uri =~ s/mailto:(.*)/$1/;
+	if (! $link_hash{$uri}) {
+		$link_hash{$uri} = $newlink++;
+	}
+}
+# Index bookkeeping for the plain-text scan: where the previous match ended,
+# the previous URL seen, and a reference to the text being scanned.
+my $foundurl_text_curindex = 0;
+my $foundurl_text_lastindex = 0;
+my $foundurl_text_prevurl = "";
+my $foundurl_text_text;
+
+# Callback for each URL found in plain text, relied upon to fire in document
+# order; recovers the text between consecutive URLs for context.
+sub foundurl_text {
+	my ($uri,$orig) = @_;
+	$uri = &renderuri($uri);
+	$foundurl_text_curindex = index($$foundurl_text_text, $orig, $foundurl_text_lastindex);
+	my $sincelast;
+	if ($foundurl_text_curindex >= 0) {
+		# this is the expected behavior
+		$sincelast = &tidytext(substr($$foundurl_text_text,$foundurl_text_lastindex,($foundurl_text_curindex-$foundurl_text_lastindex)));
+	} else {
+		# something odd is going on. What's happened is that our URL finder has
+		# found a URL that isn't in the text following the last URL it found.
+		# It *may* be doing things out of order... but that's really strange.
+		# We rely on it finding URLs in order of appearance in order to get
+		# context information. I'll try to recover but whatever happens, we
+		# can't get context information for this URL, and our context info for
+		# other URLs may be seriously messed up!
+		$foundurl_text_curindex = index($$foundurl_text_text, $orig);
+		if ($foundurl_text_curindex >= 0) {
+			# okay, we can recover... we'll just pretend that *everything* is
+			# the sincelast text
+			$sincelast = &tidytext(substr($$foundurl_text_text, 0, $foundurl_text_curindex));
+		} else {
+			# Very strange... I can't even find the URL! The best we can do is
+			# continue without *any* context... but there's *SERIOUS* weirdness
+			# going on, and expectations have been *majorly* violated. Let's
+			# just hope the URL is already closed (and so already has context
+			# information). I'm setting the curindex so that it'll be zero for
+			# the next URL (i.e. we can pretend that everything up to the next
+			# url is "sincelast")
+			$foundurl_text_curindex = 0 - length($orig);
+		}
+		$sincelast = "";
+	}
+	$sincelast =~ s/<$//;
+	$sincelast =~ s/^>//;
+	&foundurl($uri);
+	&process_sincelast($uri, $foundurl_text_prevurl, $sincelast);
+	$foundurl_text_lastindex = $foundurl_text_curindex + length($orig);
+	$foundurl_text_prevurl = $uri;
+}
sub unfindurl {
    # Forget a previously registered URL and its context (used to drop HTML
    # links with no visible text when IGNORE_EMPTY_TAGS is set).
    my ($uri) = @_;
    delete $link_hash{$uri};
    delete $orig_text{$uri};
}
sub renderuri {
    # Undo the two encodings commonly seen inside href attributes: HTML
    # entity-encoded ampersands and percent-escapes in the ASCII range
    # (first hex digit 0-7, i.e. %00-%7F only).
    my ($encoded) = @_;
    $encoded =~ s/&amp;/&/gs;
    $encoded =~ s/%([0-7][a-fA-F0-9])/chr(hex($1))/egs;
    return $encoded;
}
sub sanitizeuri {
    # Percent-encode (uppercase hex) every character outside the URI-safe
    # set so the result can be placed in a single-quoted shell word without
    # breaking out of it.
    my ($raw) = @_;
    $raw =~ s/([^a-zA-Z0-9_.!*()\@:=\?\/%~+-])/sprintf("%%%X",ord($1))/egs;
    return $raw;
}
+
# MIME::Parser decomposes the (possibly nested) email message.  Use direct
# method-call syntax: indirect-object "new MIME::Parser" is ambiguous to the
# Perl parser and discouraged.
my $parser = MIME::Parser->new;

# URLs whose context text has been fully collected.
my %closedurls;
+
sub process_sincelast
{
    # Attach context to URLs found in plain text.  $between is the text that
    # appeared between the previous URL ($prev) and the current one ($url):
    # its head finishes off $prev's context, its tail begins $url's.
    my ($url, $prev, $between) = @_;
    # Close out the previous URL with up to 30 chars of trailing context.
    if (length($prev) > 0 && !exists $closedurls{$prev}) {
        $orig_text{$prev} .= " " . substr($between, 0, 30);
        $closedurls{$prev} = 1;
    }
    # Start the current URL's context with up to 30 chars of leading text
    # (substr with -30 yields undef when $between is shorter than 30, which
    # falls through to the bare "=>URL<=" marker, as before).
    unless (exists $closedurls{$url}) {
        my $lead = substr $between, -30;
        $orig_text{$url} = length($lead) ? "$lead =>URL<=" : "=>URL<=";
    }
}
+
+# Scan a string (passed by reference) for URLs, preferring the much better
+# URI::Find::Schemeless when installed and falling back to the built-in
+# regex approximation otherwise.
+sub extract_url_from_text {
+	($foundurl_text_text) = @_;
+	# The idea here is to eliminate duplicate URLs - I want the
+	# %link_hash to be full of URLs. My regex (in the else statement)
+	# is decent, but imperfect. URI::Find is better.
+	my $fancyfind=1;
+	eval "use URI::Find::Schemeless";
+	$fancyfind=0 if ($@);
+	if ($fancyfind == 1) {
+		my $finder = URI::Find::Schemeless->new(\&foundurl_text);
+		$finder->find($foundurl_text_text);
+	} else {
+		$$foundurl_text_text =~ s{(((mms|ftp|http|https)://|news:)[][A-Za-z0-9_.~!*'();:@&=+,/?%#-]+[^](,.'">;[:space:]]|(mailto:)?[-a-zA-Z_0-9.+]+@[-a-zA-Z_0-9.]+)}{
+			&foundurl_text($1,$1);
+		}eg;
+	}
+}
+
+# Shared state for the HTML start/end/text handlers in find_urls_rec below.
+my $seenstart = 0;
+my $seenurl = "";
+my $beforetext = "";
+my $extendedskipped = "";
+my $last10words = "";
+my $words_since_link_end = "";
+
sub tidytext
{
    # Flatten whitespace and decode a small set of HTML entities, returning
    # the trimmed result.
    #
    # The original built each substitution as a string and ran it through
    # eval while iterating a hash: that is both a string-eval of constructed
    # code and -- because Perl hash order is randomized -- applied the rules
    # in a nondeterministic order, so inputs where rule order matters (e.g.
    # whitespace collapsing vs. newline conversion) produced varying output.
    # The rules are now applied directly, in the intended fixed order.
    my ($text) = @_;
    $text =~ s/[\n\r]/ /g;                # newlines become spaces first
    $text =~ s/&#[0-9]+;//gi;             # drop numeric character refs
    $text =~ s/&#x[0-9a-f]+;//gi;
    $text =~ s/&nbsp;/ /gi;
    $text =~ s/&copy;/(c)/gi;
    $text =~ s/&mdash;/---/gi;
    $text =~ s/&quot;/"/gi;
    $text =~ s/&apos;/'/gi;
    $text =~ s/&lt;/</gi;
    $text =~ s/&gt;/>/gi;
    # Accented-letter entities collapse to their base letter.
    $text =~ s/&([ACEINOUY])(?:grave|acute|circ|tilde|uml|ring|cedil);/$1/gi;
    $text =~ s/&amp;/&/gi;                # ampersand last, to avoid double-decoding
    $text =~ s/\s\s+/ /g;                 # collapse runs of whitespace
    $text =~ s/^\s+//;
    $text =~ s/\s+$//;
    return $text;
}
+
sub subwords
{
    # Return the shortest whole-word prefix of $string whose length reaches
    # $minlen (or the whole string if it never does); "" for empty input.
    my ($string, $minlen) = @_;
    my @parts = split /\s+/, $string;
    return "" unless @parts;
    my $result = shift @parts;
    while (length($result) < $minlen && @parts) {
        $result .= " " . shift @parts;
    }
    return $result;
}
+
sub sublastwords
{
    # Return the shortest whole-word suffix of $string whose length reaches
    # $minlen (or the whole string if it never does); "" for empty input.
    my ($string, $minlen) = @_;
    my @parts = split /\s+/, $string;
    return "" unless @parts;
    my $result = pop @parts;
    while (length($result) < $minlen && @parts) {
        $result = pop(@parts) . " $result";
    }
    return $result;
}
+
+# Walk a MIME entity tree depth-first and register every URL found, with
+# surrounding context where possible.  HTML parts get a real HTML parse via
+# closures sharing $skipped_text and the file-level handler state; text
+# parts in format=flowed are unfolded first so wrapped URLs are rejoined.
+sub find_urls_rec
+{
+	my($ent) = @_;
+	#print "type: " . $ent->mime_type . " ... parts: ".$ent->parts."\n";
+	if ($ent->parts >= 1 or $ent->mime_type eq "multipart/mixed") {
+		for (my $i=0;$i<$ent->parts;$i++) {
+			find_urls_rec($ent->parts($i));
+		}
+	} else {
+		#print "type: " . $ent->mime_type . "\n";
+		if ($ent->mime_type eq "message/rfc822") { &find_urls_rec($ent->parts()); }
+		elsif ($ent->mime_type eq "text/html" ) {
+			my $parser = HTML::Parser->new(api_version=>3);
+			my $skipped_text = "";
+			#$parser->unbroken_text(1);
+			# Start-tag handler: pull URLs out of the attributes listed in
+			# %link_attr and close off the previous link's context.
+			$parser->handler(start => sub {
+				my($tagname,$pos,$text) = @_;
+				if (my $link_attr = $link_attr{$tagname}) {
+					while (4 <= @$pos) {
+						my($k_offset, $k_len, $v_offset, $v_len) = splice(@$pos,-4);
+						my $attrname = lc(substr($text, $k_offset, $k_len));
+						next unless exists($link_attr->{$attrname});
+						next unless $v_offset; # 0 v_offset means no value
+
+						# This is REALLY hack-ish and fragile, but can
+						# sometimes be invaluable
+						&extract_url_from_text(\$skipped_text) if (length($skipped_text) > 0);
+
+						my $v = substr($text, $v_offset, $v_len);
+						$v =~ s/^([\'\"])(.*)\1$/$2/;
+						$v = &renderuri($v);
+						&foundurl($v);
+
+						$words_since_link_end .= " $skipped_text";
+						$last10words = &tidytext("$last10words $skipped_text");
+						$last10words = &sublastwords($last10words, 50);
+						$skipped_text = "";
+
+						$words_since_link_end = &tidytext($words_since_link_end);
+						if (length($seenurl) > 0 && ! exists($closedurls{$seenurl})) {
+							my $since_words = &subwords($words_since_link_end, 40);
+							if (length($since_words) > 0) {
+								my $space = " ";
+								$space = "" if ($since_words =~ /^[.,;!?)-]/);
+								$orig_text{$seenurl} .= "$space$since_words";
+							}
+							$closedurls{$seenurl} = 1;
+						}
+
+						$beforetext = &sublastwords($last10words, 30);
+						$seenstart = 1;
+						$seenurl = $v;
+					}
+				}
+			},
+			"tagname, tokenpos, text");
+			# End-tag handler: the text accumulated since the opening tag
+			# becomes the "=>...<=" marker for the current link's context.
+			$parser->handler(end => sub {
+				my ($text) = @_;
+				$last10words = &tidytext("$last10words $skipped_text");
+				$last10words = &sublastwords($last10words, 50);
+				if ($seenstart == 1) {
+					if (! exists($closedurls{$seenurl})) {
+						my $mtext = "=>$skipped_text<=";
+						if (length($beforetext)) {
+							my $space = " ";
+							$space = "" if ($beforetext =~ /[(-]$/);
+							$orig_text{$seenurl} = "$beforetext$space$mtext";
+						} else {
+							$orig_text{$seenurl} = "$mtext";
+						}
+					}
+					if (length($skipped_text) == 0 && $ignore_empty == 1 && ! exists($closedurls{$seenurl})) {
+						&unfindurl($seenurl);
+					}
+					$seenstart = 0;
+					$extendedskipped .= " $skipped_text";
+					$words_since_link_end = "";
+				} else {
+					$words_since_link_end .= " $skipped_text";
+				}
+				$skipped_text = "";
+			},"text");
+			# the "text" handler is used, rather than skipped_text because
+			# otherwise blocks of text at the beginning of a "lightly html-ified"
+			# document can be ignored.
+			$parser->handler(text => sub {
+				my ($text) = @_;
+				$skipped_text = &tidytext("$skipped_text $text");
+			}, "text");
+			$parser->parse($ent->bodyhandle->as_string);
+			$parser->eof;
+			if (length($words_since_link_end) > 0) {
+				# This is REALLY hack-ish and fragile, but can
+				# sometimes be invaluable
+				&extract_url_from_text(\$words_since_link_end);
+			}
+			if (length($skipped_text) > 0) {
+				&extract_url_from_text(\$skipped_text);
+			}
+		} elsif ($ent->mime_type =~ /text\/.*/) {
+			$ent->head->unfold;
+			my $ctype = $ent->head->get('Content-type');
+			if (defined($ctype) and $ctype =~ m/format=flowed/) {
+				# RFC 3676 flowed text: rejoin soft-wrapped lines (a line
+				# ending in a space continues onto the next line of the same
+				# quote depth), dropping the trailing space when delsp=yes.
+				my @lines = $ent->bodyhandle->as_lines;
+				chomp(@lines);
+				my $body = "";
+				my $delsp;
+				if ($ctype =~ /delsp=yes/) {
+					#print "delsp=yes!\n";
+					$delsp=1;
+				} else {
+					#print "delsp=no!\n";
+					$delsp=0;
+				}
+				for (my $i=0;$i<@lines;$i++) {
+					my $col = 0;
+					my $quotetext = "";
+					#print "=> " . $lines[$i] . "\n";
+					while (substr($lines[$i],$col,1) eq ">") {
+						$quotetext .= ">";
+						$col++;
+					}
+					if ($col > 0) { $body .= "$quotetext "; }
+					while ($lines[$i] =~ / $/ && $lines[$i] =~ /^$quotetext[^>]/ && $lines[$i+1] =~ /^$quotetext[^>]/) {
+						my $line;
+						if ($delsp) {
+							$line = substr($lines[$i],$col,length($lines[$i])-$col-1);
+						} else {
+							$line = substr($lines[$i],$col);
+						}
+						$line =~ s/^\s+//;
+						$body .= $line;
+						$i++;
+					}
+					if ($lines[$i] =~ /^$quotetext[^>]/) {
+						my $line = substr($lines[$i],$col);
+						$line =~ s/^\s+//;
+						$body .= $line."\n";
+					}
+				}
+				&extract_url_from_text(\$body);
+			} else {
+				&extract_url_from_text(\$ent->bodyhandle->as_string);
+			}
+		}
+	}
+}
+
sub urlwrap {
    # Hard-wrap a long URL-ish string at $maxlen columns, preferring to
    # break just after URL punctuation; every continuation line is prefixed
    # with $indent, joined by $newline.
    my ($indent, $string, $maxlen, $newline) = @_;
    my @punct = ("!", "*", "'", "(", ")", ";", ":", "@", "&", "=",
                 "+", ",", "/", "?", "%", "#", "[", "]", "-", "_");
    my $remaining = length $string;
    my $pos = 0;
    my $result = "";
    while ($remaining > $maxlen) {
        $result .= $indent if $pos > 0;
        my $segment = substr($string, $pos, $maxlen);
        # Break after the last punctuation character in this segment, or at
        # the hard limit if there is none.
        my $cut = -1;
        foreach my $p (@punct) {
            my $found = rindex($segment, $p);
            $cut = $found if $found > $cut;
        }
        $cut = ($cut == -1) ? $maxlen : $cut + 1;
        $result .= substr($string, $pos, $cut) . $newline;
        # After the first line, the indent eats into the usable width.
        $maxlen -= length($indent) if $pos == 0;
        $remaining -= $cut;
        $pos += $cut;
    }
    $result .= $indent if $pos > 0;
    return $result . substr($string, $pos);
}
+
sub isOutputScreen {
    # 1 when STDOUT is attached to a terminal, 0 when redirected (pipe/file).
    use POSIX;
    my $tty = POSIX::isatty( \*STDOUT);
    return ($tty eq "") ? 0 : 1;
}
+
&getprefs();
$parser->output_to_core(1);   # keep parsed MIME parts in memory, no temp files
my $filecontents;
if ($#ARGV == 0) {
    # Three-argument open: with two-argument open an attacker-chosen
    # filename such as "|cmd" or ">x" would be interpreted as a mode.
    open(INPUT, '<', $ARGV[0]) or die "Couldn't open input file $ARGV[0]: $!";
    $filecontents = join('',<INPUT>);
    close(INPUT);
} else {
    # Reading from a pipe; refuse to sit waiting on an interactive terminal.
    die "no input provided!\n" if POSIX::isatty( \*STDIN) ne "" ; # pipe
    $filecontents = join('',<STDIN>);
}
+
if (not $txtonly) {
    # Normal mode: parse as MIME and walk the entity tree for URLs,
    # falling back to a plain-text scan if the MIME walk found nothing.
    my $entity = $parser->parse_data($filecontents);
    &find_urls_rec($entity);
    if (scalar(keys %link_hash) == 0) {
        &extract_url_from_text(\$filecontents);
    }
} else {
    # -t/--text mode: scan the raw input directly.  (This previously
    # re-read the INPUT handle, which is already exhausted and closed --
    # and never opened at all when reading from STDIN -- so --text mode
    # always produced an empty URL list.)
    &extract_url_from_text(\$filecontents);
}
+
+# Decide between the fancy Curses menu and plain pass-through output: the
+# menu needs a terminal on STDOUT and a loadable Curses::UI.
+if (&isOutputScreen) {
+	eval "use Curses::UI";
+	$fancymenu = 0 if ($@);
+} else {
+	$fancymenu = 0;
+}
+
+# Interactive mode: full-screen Curses::UI selector.
+if ($fancymenu == 1) {
+	#use strict;
+
+	# This is the shortcut...
+	if ($shortcut == 1 && 1 == scalar keys %link_hash) {
+		my ($url) = each %link_hash;
+		$url = &sanitizeuri($url);
+		if ($urlviewcommand =~ m/%s/) {
+			$urlviewcommand =~ s/%s/'$url'/g;
+		} else {
+			$urlviewcommand .= " $url";
+		}
+		system $urlviewcommand;
+		exit 0;
+	}
+
+	# Curses support really REALLY wants to own STDIN
+	close(STDIN);
+	open(STDIN,"/dev/tty"); # looks like a hack, smells like a hack...
+
+	# NOTE(review): indirect-object "new Curses::UI" syntax below; the
+	# direct Curses::UI->new(...) form is less ambiguous.
+	my $cui = new Curses::UI(
+		-color_support => 1,
+		-clear_on_exit => 1
+	);
+	my $wrapwidth = $cui->width() - 2;
+	# Parallel label maps keyed by first-seen ordinal: one shows the URL
+	# itself, the other its context snippet; 's' toggles between them.
+	my %listhash_url;
+	my %listhash_context;
+	my @listvals;
+	# $link_hash{url} = ordering of the urls in the document as first-seen
+	foreach my $url (sort {$link_hash{$a} <=> $link_hash{$b} } keys(%link_hash)) {
+		push(@listvals,$link_hash{$url});
+		if ($displaysanitized) {
+			$listhash_url{$link_hash{$url}} = &sanitizeuri($url);
+		} else {
+			$listhash_url{$link_hash{$url}} = $url;
+		}
+		$listhash_context{$link_hash{$url}} = $orig_text{$url};
+	}
+
+	# Static menubar; the -value coderefs are the dialog helpers below.
+	my @menu = (
+		{ -label => 'Keys: q=quit m=menu s=switch-view c=context g=top G=bottom',
+			-submenu => [
+				{ -label => 'About              a', -value => \&about },
+				{ -label => 'Show Command       C', -value => \&show_command },
+				{ -label => 'Switch List View   s', -value => \&switch_list },
+				{ -label => 'Exit              ^q', -value => \&exit_dialog }
+			],
+		},
+	);
+	my $menu = $cui->add(
+		'menu','Menubar',
+		-menu => \@menu,
+	);
+	my $win1 = $cui->add(
+		'win1', 'Window',
+		-border => 1,
+		-y => 1,
+		-bfg => 'green',
+	);
+	# Dialog helpers; these close over $cui and the config globals.
+	sub about()
+	{
+		$cui->dialog(
+			-message => "$NAME $version License:$LICENSE"
+		);
+	}
+	sub show_command()
+	{
+		# This extra sprintf work is to ensure that the title
+		# is fully displayed even if $urlviewcommand is short
+		my $title = "The configured URL viewing command is:";
+		my $len = length($title);
+		my $cmd = sprintf("%-${len}s",$urlviewcommand);
+		$cui->dialog(
+			-title => "The configured URL viewing command is:",
+			-message => $cmd,
+		);
+	}
+	sub exit_dialog()
+	{
+		my $return = $cui->dialog(
+			-message => "Do you really want to quit?",
+			-buttons => ['yes', 'no'],
+		);
+		exit(0) if $return;
+	}
+
+	my $listbox_labels;
+	if ($default_view eq "url") {
+		$listbox_labels = \%listhash_url;
+	} else {
+		$listbox_labels = \%listhash_context;
+	}
+	my $listbox = $win1->add(
+		'mylistbox', 'Listbox',
+		-values => \@listvals,
+		-labels => $listbox_labels,
+	);
+	$cui->set_binding(sub {$menu->focus()}, "\cX");
+	$cui->set_binding(sub {$menu->focus()}, "m");
+	$cui->set_binding( sub{exit}, "q" );
+	$cui->set_binding( \&exit_dialog , "\cQ");
+	$cui->set_binding( sub{exit} , "\cc");
+	$cui->set_binding(\&switch_list, "s");
+	$cui->set_binding(\&about, "a");
+	$cui->set_binding(\&show_command, "C");
+	$listbox->set_binding( 'option-last', "G");
+	$listbox->set_binding( 'option-first', "g");
+	# Toggle the listbox between URL labels and context labels.
+	sub switch_list()
+	{
+		if ($listbox_labels == \%listhash_url) {
+			$listbox->labels(\%listhash_context);
+			$listbox_labels = \%listhash_context;
+		} elsif ($listbox_labels == \%listhash_context) {
+			$listbox->labels(\%listhash_url);
+			$listbox_labels = \%listhash_url;
+		}
+		$listbox->focus();
+	}
+	# Launch the viewer for the highlighted URL.  $stayopen flips the
+	# PERSISTENT exit semantics (see ALTSELECT in the POD).
+	sub madeselection_sub {
+		my ($stayopen) = @_;
+		my $rawurl = $listhash_url{$listbox->get_active_value()};
+		my $url = &sanitizeuri($rawurl);
+		my $command = $urlviewcommand;
+		if ($command =~ m/%s/) {
+			$command =~ s/%s/'$url'/g;
+		} else {
+			$command .= " $url";
+		}
+		my $return = 1;
+		if ($noreview != 1 && length($rawurl) > ($cui->width()-2)) {
+			$return = $cui->dialog(
+				-message => &urlwrap("    ",$rawurl,$cui->width()-8,"\n"),
+				-title => "Your Choice:",
+				-buttons => ['ok', 'cancel'],
+			);
+		}
+		if ($return) {
+			system $command;
+			if ($stayopen == 0) {
+				exit 0 if ($persist == 0);
+			} else {
+				exit 0 unless ($persist == 0);
+			}
+		}
+	}
+	sub madeselection { &madeselection_sub(0); }
+	sub altexit_madeselection { &madeselection_sub(1); }
+	$cui->set_binding( \&madeselection, " ");
+	$listbox->set_routine('option-select',\&madeselection);
+	$cui->set_binding( \&altexit_madeselection, $alt_select_key);
+	use Text::Wrap;
+	# Pop up the context snippet recorded for the highlighted URL.
+	sub contextual {
+		my $rawurl = $listhash_url{$listbox->get_active_value()};
+		$Text::Wrap::columns = $cui->width()-8;
+		if (exists($orig_text{$rawurl}) && length($orig_text{$rawurl}) > 1) {
+			$cui->dialog(
+				-message => wrap('','',$orig_text{$rawurl}),
+				-title => "Context:",
+				-buttons => ['ok'],
+			);
+		} else {
+			$cui->error(
+				-message => "Sorry, I don't have any context for this link",
+				-buttons => ['ok'],
+				-bfg => 'red',
+				-tfg => 'red',
+				-fg => 'red',
+			);
+		}
+	}
+	$cui->set_binding( \&contextual, "c");
+
+	$listbox->focus();
+	$cui->mainloop();
+} else {
+	# using this as a pass-thru to URLVIEW
+	foreach my $value (sort {$link_hash{$a} <=> $link_hash{$b} } keys %link_hash)
+	{
+		$value = &sanitizeuri($value);
+		print "$value\n";
+	}
+}
+
+__END__
+
+=pod
+
+=head1 NAME
+
+extract_url -- extract URLs from email messages
+
+=head1 SYNOPSIS
+
+extract_url [options] I<file>
+
+=head1 DESCRIPTION
+
+This is a Perl script that extracts URLs from correctly-encoded
+I<MIME> email messages. This can be used either as a pre-parser for
+I<urlview>, or to replace I<urlview> entirely.
+
+I<Urlview> is a great program, but has some deficiencies. In particular,
+it isn't particularly configurable, and cannot handle URLs that have
+been broken over several lines in I<format=flowed delsp=yes> email
+messages. Nor can it handle I<quoted-printable> email messages. Also,
+I<urlview> doesn't eliminate duplicate URLs. This Perl script handles
+all of that. It also sanitizes URLs so that they can't break out of the
+command shell.
+
+This is designed primarily for use with the I<mutt> emailer. The idea is
+that if you want to access a URL in an email, you pipe the email to a
+URL extractor (like this one) which then lets you select a URL to view
+in some third program (such as Firefox). An alternative design is to
+access URLs from within mutt's pager by defining macros and tagging the
+URLs in the display to indicate which macro to use. A script you can use
+to do that is I<tagurl.pl>.
+
+=head1 OPTIONS
+
+=over 4
+
+=item B<-h, --help>
+
+Display this help and exit.
+
+=item B<-m, --man>
+
+Display the full man page documentation.
+
+=item B<-l, --list>
+
+Prevent use of Ncurses, and simply output a list of extracted URLs.
+
+=item B<-t, --text>
+
+Prevent MIME handling; treat the input as plain text.
+
+=item B<-V, --version>
+
+Output version information and exit.
+
+=back
+
+=head1 DEPENDENCIES
+
+Mandatory dependencies are B<MIME::Parser> and B<HTML::Parser>. These
+usually come with Perl.
+
+Optional dependencies are B<URI::Find> (recognizes more exotic URL
+variations in plain text (without HTML tags)), B<Curses::UI> (allows it
+to fully replace I<urlview>), and B<Getopt::Long> (if present,
+B<extract_url.pl> recognizes long options --version and --list).
+
+=head1 EXAMPLES
+
+This Perl script expects a valid email to be either piped in via STDIN or in a
+file listed as the script's only argument. Its STDOUT can be a pipe into
+I<urlview> (it will detect this). Here's how you can use it:
+
+ cat message.txt | extract_url.pl
+ cat message.txt | extract_url.pl | urlview
+ extract_url.pl message.txt
+ extract_url.pl message.txt | urlview
+
+For use with B<mutt 1.4.x>, here's a macro you can use:
+
+ macro index,pager \cb "\
+ <enter-command> \
+ unset pipe_decode<enter>\
+ <pipe-message>extract_url.pl<enter>" \
+ "get URLs"
+
+For use with B<mutt 1.5.x>, here's a more complicated macro you can use:
+
+ macro index,pager \cb "\
+ <enter-command> set my_pdsave=\$pipe_decode<enter>\
+ <enter-command> unset pipe_decode<enter>\
+ <pipe-message>extract_url.pl<enter>\
+ <enter-command> set pipe_decode=\$my_pdsave<enter>" \
+ "get URLs"
+
+Here's a suggestion for how to handle I<encrypted email>:
+
+ macro index,pager ,b "\
+ <enter-command> set my_pdsave=\$pipe_decode<enter>\
+ <enter-command> unset pipe_decode<enter>\
+ <pipe-message>extract_url.pl<enter>\
+ <enter-command> set pipe_decode=\$my_pdsave<enter>" \
+ "get URLs"
+
+ macro index,pager ,B "\
+ <enter-command> set my_pdsave=\$pipe_decode<enter>\
+ <enter-command> set pipe_decode<enter>\
+ <pipe-message>extract_url.pl<enter>\
+ <enter-command> set pipe_decode=\$my_pdsave<enter>" \
+ "decrypt message, then get URLs"
+
+ message-hook . 'macro index,pager \cb ,b "URL viewer"'
+ message-hook ~G 'macro index,pager \cb ,B "URL viewer"'
+
+=head1 CONFIGURATION
+
+If you're using it with B<Curses::UI> (i.e. as a standalone URL
+selector), this Perl script will try and figure out what command to use
+based on the contents of your F<~/.urlview> file. However, it also has
+its own configuration file (F<~/.extract_urlview>) that will be used
+instead, if it exists. So far, there are eight kinds of lines you can
+have in this file:
+
+=over 8
+
+=item COMMAND ...
+
+This line specifies the command that will be used to view URLs. This
+command CAN contain a I<%s>, which will be replaced by the URL inside
+single-quotes. If it does not contain a I<%s>, the URL will simply be
+appended to the command. If this line is not present, the command is
+taken from the environment variable $BROWSER. If BROWSER is not set, the
+command is assumed to be "open", which is the correct command for MacOS X
+systems.
+
+=item SHORTCUT
+
+This line specifies that if an email contains only 1 URL, that URL will
+be opened without prompting. The default (without this line) is to
+always prompt.
+
+=item NOREVIEW
+
+Normally, if a URL is too long to display on screen in the menu, the
+user will be prompted with the full URL before opening it, just to make
+sure it's correct. This line turns that behavior off.
+
+=item PERSISTENT
+
+By default, when a URL has been selected and viewed from the menu,
+B<extract_url.pl> will exit. If you would like it to be ready to view
+another URL without re-parsing the email (i.e. much like standard
+I<urlview> behavior), add this line to the config file.
+
+=item IGNORE_EMPTY_TAGS
+
+By default, the script collects all the URLs it can find. Sometimes,
+though, HTML messages contain links that don't correspond to any text
+(and aren't normally rendered or accessible). This tells the script to
+ignore these links.
+
+=item HTML_TAGS ...
+
+This line specifies which HTML tags will be examined for URLs. By
+default, the script is very generous, looking in I<a>, I<applet>,
+I<area>, I<blockquote>, I<embed>, I<form>, I<frame>, I<iframe>,
+I<input>, I<ins>, I<isindex>, I<head>, I<layer>, I<link>, I<object>,
+I<q>, I<script>, and I<xmp> tags for links. If you would like it to
+examine just a subset of these (e.g. you only want a tags to be
+examined), merely list the subset you want. The list is expected to be a
+comma-separated list. If there are multiple of these lines in the config
+file, the script will look for the minimum set of specified tags.
+
+=item ALTSELECT ...
+
+This line specifies a key for an alternate url viewing behavior. By
+default, B<extract_url.pl> will quit after the URL viewer has been
+launched for the selected URL. This key will then make B<extract_url.pl>
+launch the URL viewer but will not quit. However, if I<PERSISTENT> is
+specified in the config file, the opposite is true: normal selection of
+a URL will launch the URL viewer and will not cause B<extract_url.pl> to
+exit, but this key will. This setting defaults to I<k>.
+
+=item DEFAULT_VIEW {url|context}
+
+This line specifies whether to show the list of URLs at first or to show
+the url contexts when the program is run. By default, B<extract_url.pl>
+shows a list of URLs.
+
+=back
+
+Here is an example config file:
+
+ SHORTCUT
+ COMMAND mozilla-firefox -remote "openURL(%s,new-window)"
+ HTML_TAGS a,iframe,link
+ ALTSELECT Q
+ DEFAULT_VIEW context
+
+=head1 STANDARDS
+
+None.
+
+=head1 AVAILABILITY
+
+http://www.memoryhole.net/~kyle/extract_url/
+
+=head1 SEE ALSO
+
+mutt(1)
+urlview(1)
+urlscan(1)
+
+=head1 CAVEATS
+
+All URLs have any potentially dangerous shell characters (namely a
+single quote and a dollar sign) removed (transformed into
+I<percent-encoding>) before they are used in a shell. This should
+eliminate the possibility of a bad URL breaking the shell.
+
+If using B<Curses::UI>, and a URL is too big for your terminal, when you
+select it, B<extract_url.pl> will (by default) ask you to review it in a
+way that you can see the whole thing.
+
+=head1 AUTHOR
+
+Program was written by Kyle Wheeler <kyle@memoryhole.net>
+
+Released under license BSD-2-Clause (simplified). For more information
+about the license, visit <http://spdx.org/licenses/BSD-2-Clause>.
+
+=cut
diff --git a/archive/bin/firejail-skype b/archive/bin/firejail-skype
new file mode 100755
index 00000000..5bf5f55d
--- /dev/null
+++ b/archive/bin/firejail-skype
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+# We can't just use propellor property Firejail.jailed ["skype"] because
+# 1. we can't specify --x11=xephyr in a profile file
+# 2. we can't launch openbox and then skype
+
+# So we have this script.
+
+# Start openbox inside a sandboxed Xephyr X server, then run skype in its
+# own jail pointed at that display.
+firejail --x11=xephyr --net=eth0 --private --private-tmp openbox
+# NOTE(review): assumes firemon --x11 reports exactly one "DISPLAY" line
+# (the Xephyr started above) -- confirm when multiple jails run X servers.
+DISPLAY=$(firemon --x11 | grep "DISPLAY" | sed 's/  DISPLAY //') firejail --net=eth0 --private --private-tmp /usr/bin/skype
diff --git a/archive/bin/fmdavsetup b/archive/bin/fmdavsetup
new file mode 100755
index 00000000..8cb771ec
--- /dev/null
+++ b/archive/bin/fmdavsetup
@@ -0,0 +1,27 @@
#!/bin/sh

. "$HOME/.shenv"

# Configure davfs2 and cadaver credentials for FastMail's WebDAV endpoint
# (myfiles.messagingengine.com), reusing the already-installed mbsync
# password file.

if ! [ -e "$HOME/local/auth/fmailsyncpass" ]; then
    echo >&2 "$0: put mbsync password file in place first"
    exit 1
fi

pass=$(cat "$HOME/local/auth/fmailsyncpass")

# davfs2 setup: append a secrets entry once.  grep reads the file directly
# (no useless cat); errors silenced in case it does not exist yet.

if ! grep "myfiles\.messagingengine\.com" "$HOME/.davfs2/secrets" >/dev/null 2>&1; then
    mkdir -p "$HOME/lib/fm" "$HOME/.davfs2"
    echo "https://myfiles.messagingengine.com/ \"spwhitton#fastmail.com\" \"${pass}\"" >> "$HOME/.davfs2/secrets"
    chmod 600 "$HOME/.davfs2/secrets"
fi

# cadaver setup: add a netrc entry once.

if ! grep "myfiles\.messagingengine\.com" "$HOME/.netrc" >/dev/null 2>&1; then
    echo >>"$HOME/.netrc" "machine myfiles.messagingengine.com"
    echo >>"$HOME/.netrc" "        login spwhitton@fastmail.com"
    echo >>"$HOME/.netrc" "        password '${pass}'"
fi
chmod 600 "$HOME/.netrc"
diff --git a/archive/bin/fmr-sync-suspend b/archive/bin/fmr-sync-suspend
new file mode 100755
index 00000000..ab8d67d7
--- /dev/null
+++ b/archive/bin/fmr-sync-suspend
@@ -0,0 +1,14 @@
#!/bin/bash
# ^ different shells have different options for read()

. "$HOME/.shenv"

# Perform `fmr sync`, and suspend the machine if it succeeded; otherwise
# hold the terminal open so the error output can be read.
#
# Test the command directly rather than inspecting $? in a separate
# statement, which silently breaks if any statement is inserted between.
if MR_FAST=true mr sync; then
    sensible-suspend
else
    read -rsp $'Press any key to continue...\n' -n1 key
fi
diff --git a/archive/bin/gbampersat.ahk b/archive/bin/gbampersat.ahk
new file mode 100644
index 00000000..ad995986
--- /dev/null
+++ b/archive/bin/gbampersat.ahk
@@ -0,0 +1,2 @@
+@::"
+"::@ \ No newline at end of file
diff --git a/archive/bin/git-wip b/archive/bin/git-wip
new file mode 100755
index 00000000..4a8d80bc
--- /dev/null
+++ b/archive/bin/git-wip
@@ -0,0 +1,328 @@
+#!/bin/sh
+#
+# Copyright Bart Trojanowski <bart@jukie.net>
+#
+# git-wip is a script that will manage Work In Progress (or WIP) branches.
+# WIP branches are mostly throw away but identify points of development
+# between commits. The intent is to tie this script into your editor so
+# that each time you save your file, the git-wip script captures that
+# state in git. git-wip also helps you return back to a previous state of
+# development.
+#
+# See also http://github.com/bartman/git-wip
+#
+# The code is licensed as GPL v2 or, at your option, any later version.
+# Please see http://www.gnu.org/licenses/gpl-2.0.txt
+#
+
+USAGE='[ info | save <message> [ --editor | --untracked ] | log [ --pretty ] | delete ] [ [--] <file>... ]'
+LONG_USAGE="Manage Work In Progress branches
+
+Commands:
+ git wip - create a new WIP commit
+ git wip save <message> - create a new WIP commit with custom message
+ git wip info [<branch>] - brief WIP info
+ git wip log [<branch>] - show changes on the WIP branch
+ git wip delete [<branch>] - delete a WIP branch
+
+Options for save:
+ -e --editor - be less verbose, assume called from an editor
+ -u --untracked - capture also untracked files
+ -i --ignored - capture also ignored files
+
+Options for log:
+ -p --pretty - show a pretty graph
+"
+
+SUBDIRECTORY_OK=Yes
+OPTIONS_SPEC=
+
+. git-sh-setup
+
+require_work_tree
+
+TMP="$GIT_DIR/.git-wip.$$"
+trap 'rm -f "$TMP"-*' 0
+
+WIP_INDEX="$TMP-INDEX"
+
+WIP_PREFIX=refs/wip/
+WIP_COMMAND=
+WIP_MESSAGE=WIP
+EDITOR_MODE=false
+
+dbg() {
+ if test -n "$WIP_DEBUG"
+ then
+ printf '# %s\n' "$*"
+ fi
+}
+
+# some errors are not worth reporting in --editor mode
+report_soft_error () {
+ $EDITOR_MODE && exit 0
+ die "$@"
+}
+
+cleanup () {
+	rm -f "$TMP"-*
+}
+
+get_work_branch () {
+ ref=$(git symbolic-ref -q HEAD) \
+ || report_soft_error "git-wip requires a branch"
+
+
+ branch=${ref#refs/heads/}
+ if [ $branch = $ref ] ; then
+ die "git-wip requires a local branch"
+ fi
+
+ echo $branch
+}
+
+get_wip_branch () {
+ return 0
+}
+
+check_files () {
+ local files=$@
+
+ for f in $files
+ do
+ [ -f "$f" -o -d "$f" ] || die "$f: No such file or directory."
+ done
+}
+
+build_new_tree () {
+ local untracked=$1 ; shift
+ local ignored=$1 ; shift
+ local files=$@
+
+ (
+ set -e
+ rm -f "$WIP_INDEX"
+ cp -p "$GIT_DIR/index" "$WIP_INDEX"
+ export GIT_INDEX_FILE="$WIP_INDEX"
+ git read-tree $wip_parent
+ if [ -n "$files" ]
+ then
+ git add $files
+ else
+ git add -u
+ fi
+ [ -n "$untracked" ] && git add .
+ [ -n "$ignored" ] && git add -f -A .
+ git write-tree
+ rm -f "$WIP_INDEX"
+ )
+}
+
+do_save () {
+ local msg="$1" ; shift
+ local add_untracked=
+ local add_ignored=
+
+ while test $# != 0
+ do
+ case "$1" in
+ -e|--editor)
+ EDITOR_MODE=true
+ ;;
+ -u|--untracked)
+ add_untracked=t
+ ;;
+ -i|--ignored)
+ add_ignored=t
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ [ -f "$1" ] && break
+ die "Unknown option '$1'."
+ ;;
+ esac
+ shift
+ done
+ local files=$@
+ local "add_untracked=$add_untracked"
+ local "add_ignored=$add_ignored"
+
+ if test ${#files} -gt 0
+ then
+ check_files $files
+ fi
+
+ dbg "msg=$msg"
+ dbg "files=$files"
+
+ local work_branch=$(get_work_branch)
+ local wip_branch="$WIP_PREFIX$work_branch"
+
+ dbg "work_branch=$work_branch"
+ dbg "wip_branch=$wip_branch"
+
+ # enable reflog
+ local wip_branch_file="$GIT_DIR/logs/$wip_branch"
+ dbg "wip_branch_file=$wip_branch_file"
+ mkdir -p "$(dirname "$wip_branch_file")"
+ : >>"$wip_branch_file"
+
+ if ! work_last=$(git rev-parse --verify $work_branch)
+ then
+ report_soft_error "'$work_branch' branch has no commits."
+ fi
+
+ dbg "work_last=$work_last"
+
+ if wip_last=$(git rev-parse --quiet --verify $wip_branch)
+ then
+ local base=$(git merge-base $wip_last $work_last) \
+		|| die "'$work_branch' and '$wip_branch' are unrelated."
+
+ if [ $base = $work_last ] ; then
+ wip_parent=$wip_last
+ else
+ wip_parent=$work_last
+ fi
+ else
+ wip_parent=$work_last
+ fi
+
+ dbg "wip_parent=$wip_parent"
+
+ new_tree=$( build_new_tree "$add_untracked" "$add_ignored" $files ) \
+ || die "Cannot save the current worktree state."
+
+ dbg "new_tree=$new_tree"
+
+ if git diff-tree --exit-code --quiet $new_tree $wip_parent ; then
+ report_soft_error "no changes"
+ fi
+
+ dbg "... has changes"
+
+ new_wip=$(printf '%s\n' "$msg" | git commit-tree $new_tree -p $wip_parent) \
+ || die "Cannot record working tree state"
+
+ dbg "new_wip=$new_wip"
+
+ msg1=$(printf '%s\n' "$msg" | sed -e 1q)
+ git update-ref -m "git-wip: $msg1" $wip_branch $new_wip $wip_last
+
+ dbg "SUCCESS"
+}
+
+do_info () {
+ local branch=$1
+
+ die "info not implemented"
+}
+
+do_log () {
+ local work_branch=$1
+	[ -z "$work_branch" ] && work_branch=$(get_work_branch)
+ local wip_branch="$WIP_PREFIX$work_branch"
+
+ local git_log="git log"
+ if [ "$1" = --pretty -o "$1" = -p ]
+ then
+ shift
+ git_log="git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr)%Creset' --abbrev-commit --date=relative"
+ fi
+
+ if ! work_last=$(git rev-parse --verify $work_branch)
+ then
+ die "'$work_branch' branch has no commits."
+ fi
+
+ dbg work_last=$work_last
+
+ if ! wip_last=$(git rev-parse --quiet --verify $wip_branch)
+ then
+		die "'$wip_branch' branch has no commits."
+ fi
+
+ dbg wip_last=$wip_last
+
+ local base=$(git merge-base $wip_last $work_last)
+
+ dbg base=$base
+
+ echo $git_log $@ $wip_last $work_last "^$base~1" | sh
+}
+
+do_delete () {
+ local branch=$1
+
+ die "delete not implemented"
+}
+
+do_help () {
+ local rc=$1
+
+ cat <<END
+Usage: git wip $USAGE
+
+$LONG_USAGE
+END
+ exit $rc
+}
+
+
+if test $# -eq 0
+then
+ dbg "no arguments"
+
+ do_save "WIP"
+ exit $?
+fi
+
+dbg "args: $@"
+
+case "$1" in
+save)
+ WIP_COMMAND=$1
+ shift
+ if [ -n "$1" ]
+ then
+ WIP_MESSAGE="$1"
+ shift
+ fi
+ ;;
+info|log|delete)
+ WIP_COMMAND=$1
+ shift
+ ;;
+help)
+ do_help 0
+ ;;
+--*)
+ ;;
+*)
+ [ -f "$1" ] || die "Unknown command '$1'."
+ ;;
+esac
+
+case $WIP_COMMAND in
+save)
+ do_save "$WIP_MESSAGE" $@
+ ;;
+info)
+ do_info $@
+ ;;
+log)
+ do_log $@
+ ;;
+delete)
+ do_delete $@
+ ;;
+*)
+ usage
+ exit 1
+ ;;
+esac
+
+# vim: set noet sw=8
diff --git a/archive/bin/goodmorning b/archive/bin/goodmorning
new file mode 100755
index 00000000..e61a96cf
--- /dev/null
+++ b/archive/bin/goodmorning
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+VOLUME=80
+ SONG="/home/spwhitton/lib/annex/music/Soundtracks/Cardcaptor Sakura/Cardcaptor Sakura OST/2-4-32 - Yokoku BGM (BGM for Preview).mp3"
+#SONG="/home/spwhitton/lib/annex/music/K-pop/Crayon Pop/The First Mini Album/02 - Saturday Night.mp3"
+
+# dow=$(date +%u)
+# # not at the weekend
+# if [ "$dow" = "6" -o "$dow" = "7" ]; then
+# exit 0
+# fi
+
+# systemd doesn't set $HOME for services
+HOME=/home/spwhitton
+export HOME
+
+. $HOME/.shenv
+
+# make sure it's not too loud nor muted. (`-c 0' ensures we're not
+# just interacting with PulseAudio)
+amixer sset Master ${VOLUME}%
+amixer sset Master unmute
+amixer -c 0 sset "Auto-Mute Mode" Disabled
+amixer -c 0 sset Master unmute
+amixer -c 0 sset Speaker 100%
+amixer -c 0 sset Speaker unmute
+amixer -c 0 sset PCM 100%
+# amixer -c 0 sset PCM unmute # invalid command
+
+# play the alarm (TODO: randomly choose from folder of symlinks into annex)
+mplayer "$SONG"
+
+# probably want auto-mute back on (TODO: turn on/off only if it's not already off)
+amixer -c 0 sset "Auto-Mute Mode" Enabled
diff --git a/archive/bin/grbk b/archive/bin/grbk
new file mode 100755
index 00000000..d7212c74
--- /dev/null
+++ b/archive/bin/grbk
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+# DIR=$(TMPDIR=~/tmp mktemp -d)
+# cd $DIR
+# dirname=$(date +annotations_%F_%H%M)
+# mkdir $dirname
+
+if ! [ -d "$HOME/lib/fm/dionysus" ]; then
+ echo "please mount webdav share first" >&2
+ exit 1
+fi
+
+if [ -d "$HOME/lib/annex/doc/dionysus/old/$(date +"%Y-%m-%d")" ]; then
+ echo "don't run me more than once per day" >&2
+ exit 1
+fi
+
+cd $HOME/lib/annex
+git annex unlock --exclude "doc/dionysus/old/*" doc/dionysus
+rsync -Pvrtc --delete --backup --exclude "old/" --exclude "Agenda/" --exclude "Org docs/" --exclude "Philos notes/" --backup-dir=$HOME/lib/annex/doc/dionysus/old/$(date +"%Y-%m-%d") $HOME/lib/fm/dionysus/ $HOME/lib/annex/doc/dionysus/
+git annex add doc/dionysus
+
+# # first sync read-only unannotated copies of files out of annex into
+# # webdav
+# rsync -vrLt --delete \
+# --exclude="* - annotated - flattened.pdf" \
+# --exclude="* - annotated.pdf" \
+# $HOME/lib/annex/doc/dionysus/ $HOME/lib/fm/dionysus/
+# # now sync any edited, annotated files from webdav into a tar file
+# # dumped into annex
+# rsync -vart \
+# --include="* - annotated - flattened.pdf" \
+# --include="* - annotated.pdf" \
+# --exclude="**" \
+# $HOME/lib/fm/dionysus/ $dirname/
+
+# tar cf ${dirname}.tar $dirname
+# mv ${dirname}.tar $HOME/lib/annex/old/annotationsbk/
+# rm -rf ${DIR}
diff --git a/archive/bin/greypdf b/archive/bin/greypdf
new file mode 100755
index 00000000..4735e0f5
--- /dev/null
+++ b/archive/bin/greypdf
@@ -0,0 +1,77 @@
+#!/bin/bash
+# convert pdf to grayscale, preserving metadata
+# "AFAIK graphicx has no feature for manipulating colorspaces. " http://groups.google.com/group/latexusersgroup/browse_thread/thread/5ebbc3ff9978af05
+# "> Is there an easy (or just standard) way with pdflatex to do a > conversion from color to grayscale when a PDF file is generated? No." ... "If you want to convert a multipage document then you better have pdftops from the xpdf suite installed because Ghostscript's pdf to ps doesn't produce nice Postscript." http://osdir.com/ml/tex.pdftex/2008-05/msg00006.html
+# "Converting a color EPS to grayscale" - http://en.wikibooks.org/wiki/LaTeX/Importing_Graphics
+# "\usepackage[monochrome]{color} .. I don't know of a neat automatic conversion to monochrome (there might be such a thing) although there was something in Tugboat a while back about mapping colors on the fly. I would probably make monochrome versions of the pictures, and name them consistently. Then conditionally load each one" http://newsgroups.derkeiler.com/Archive/Comp/comp.text.tex/2005-08/msg01864.html
+# "Here comes optional.sty. By adding \usepackage{optional} ... \opt{color}{\includegraphics[width=0.4\textwidth]{intro/benzoCompounds_color}} \opt{grayscale}{\includegraphics[width=0.4\textwidth]{intro/benzoCompounds}} " - http://chem-bla-ics.blogspot.com/2008/01/my-phd-thesis-in-color-and-grayscale.html
+# with gs:
+# http://handyfloss.net/2008.09/making-a-pdf-grayscale-with-ghostscript/
+# note - this strips metadata! so:
+# http://etutorials.org/Linux+systems/pdf+hacks/Chapter+5.+Manipulating+PDF+Files/Hack+64+Get+and+Set+PDF+Metadata/
+COLORFILENAME=$1
+OVERWRITE=$2
+FNAME=${COLORFILENAME%.pdf}
+# NOTE: pdftk does not work with logical page numbers / pagination;
+# gs kills it as well;
+# so check for existence of 'pdfmarks' file in calling dir;
+# if there, use it to correct gs logical pagination
+# for example, see
+# http://askubuntu.com/questions/32048/renumber-pages-of-a-pdf/65894#65894
+PDFMARKS=
+if [ -e pdfmarks ] ; then
+PDFMARKS="pdfmarks"
+echo "$PDFMARKS exists, using..."
+# convert to gray pdf - this strips metadata!
+gs -sOutputFile=$FNAME-gs-gray.pdf -sDEVICE=pdfwrite \
+-sColorConversionStrategy=Gray -dProcessColorModel=/DeviceGray \
+-dCompatibilityLevel=1.4 -dNOPAUSE -dBATCH "$COLORFILENAME" "$PDFMARKS"
+else # not really needed ?!
+gs -sOutputFile=$FNAME-gs-gray.pdf -sDEVICE=pdfwrite \
+-sColorConversionStrategy=Gray -dProcessColorModel=/DeviceGray \
+-dCompatibilityLevel=1.4 -dNOPAUSE -dBATCH "$COLORFILENAME"
+fi
+# dump metadata from original color pdf
+## pdftk $COLORFILENAME dump_data output $FNAME.data.txt
+# also: pdfinfo -meta $COLORFILENAME
+# grep to avoid BookmarkTitle/Level/PageNumber:
+pdftk $COLORFILENAME dump_data output | grep 'Info\|Pdf' > $FNAME.data.txt
+# "pdftk can take a plain-text file of these same key/value pairs and update a PDF's Info dictionary to match. Currently, it does not update the PDF's XMP stream."
+pdftk $FNAME-gs-gray.pdf update_info $FNAME.data.txt output $FNAME-gray.pdf
+# (http://wiki.creativecommons.org/XMP_Implementations : Exempi ... allows reading/writing XMP metadata for various file formats, including PDF ... )
+# clean up
+rm $FNAME-gs-gray.pdf
+rm $FNAME.data.txt
+if [ "$OVERWRITE" == "y" ] ; then
+echo "Overwriting $COLORFILENAME..."
+mv $FNAME-gray.pdf $COLORFILENAME
+fi
+# BUT NOTE:
+# Mixing TEX & PostScript : The GEX Model - http://www.tug.org/TUGboat/Articles/tb21-3/tb68kost.pdf
+# VTEX is a (commercial) extended version of TEX, sold by MicroPress, Inc. Free versions of VTEX have recently been made available, that work under OS/2 and Linux. This paper describes GEX, a fast fully-integrated PostScript interpreter which functions as part of the VTEX code-generator. Unless specified otherwise, this article describes the functionality in the free- ware version of the VTEX compiler, as available on CTAN sites in systems/vtex.
+# GEX is a graphics counterpart to TEX. .. Since GEX may exercise subtle influence on TEX (load fonts, or change TEX registers), GEX is op- tional in VTEX implementations: the default oper- ation of the program is with GEX off; it is enabled by a command-line switch.
+# \includegraphics[width=1.3in, colorspace=grayscale 256]{macaw.jpg}
+# http://mail.tug.org/texlive/Contents/live/texmf-dist/doc/generic/FAQ-en/html/FAQ-TeXsystems.html
+# A free version of the commercial VTeX extended TeX system is available for use under Linux, which among other things specialises in direct production of PDF from (La)TeX input. Sadly, it's no longer supported, and the ready-built images are made for use with a rather ancient Linux kernel.
+# NOTE: another way to capture metadata; if converting via ghostscript:
+# http://compgroups.net/comp.text.pdf/How-to-specify-metadata-using-Ghostscript
+# first:
+# grep -a 'Keywo' orig.pdf
+# /Author(xxx)/Title(ttt)/Subject()/Creator(LaTeX)/Producer(pdfTeX-1.40.12)/Keywords(kkkk)
+# then - copy this data in a file prologue.ini:
+#/pdfmark where {pop} {userdict /pdfmark /cleartomark load put} ifelse
+#[/Author(xxx)
+#/Title(ttt)
+#/Subject()
+#/Creator(LaTeX with hyperref package + gs w/ prologue)
+#/Producer(pdfTeX-1.40.12)
+#/Keywords(kkkk)
+#/DOCINFO pdfmark
+#
+# finally, call gs on the orig file,
+# asking to process pdfmarks in prologue.ini:
+# gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 \
+# -dPDFSETTINGS=/screen -dNOPAUSE -dQUIET -dBATCH -dDOPDFMARKS \
+# -sOutputFile=out.pdf in.pdf prologue.ini
+# then the metadata will be in output too (which is stripped otherwise;
+# note bookmarks are preserved, however).
diff --git a/archive/bin/hestia-checkup b/archive/bin/hestia-checkup
new file mode 100755
index 00000000..55db742e
--- /dev/null
+++ b/archive/bin/hestia-checkup
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+echo "hestia weather report . . .\n"
+
+uptime
+
+echo -n "\nrtorrent pid: "
+
+pgrep rtorrent
+
+echo -n "\ngit annex assistant pid: "
+
+pgrep git-annex
+
+echo -n "\n my IP address according to OpenDNS: "
+
+dig +short myip.opendns.com @resolver1.opendns.com
+
+echo "\nmight want to take a look if either of the PIDs are blank~~"
diff --git a/archive/bin/hestia-startup b/archive/bin/hestia-startup
new file mode 100755
index 00000000..ed251561
--- /dev/null
+++ b/archive/bin/hestia-startup
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+# starts-up rtorrent and the git annex assistant--note that due to
+# the MTA setup being used the output of this script probably won't
+# reach my mailbox
+
+if ! pgrep -u $LOGNAME rtorrent >/dev/null; then
+ echo ">>> Removing probably-stale dtach socket and starting rtorrent..."
+ rm -f $HOME/local/rt/session/dtach.socket
+ TERM=xterm /usr/bin/nice -n 19 /usr/bin/dtach -n $HOME/local/rt/session/dtach.socket /usr/bin/rtorrent && echo "... ok"
+fi
+
+if ! pgrep -u $LOGNAME git-annex >/dev/null; then
+ echo ">>> Firing up the git annex assistant..."
+ /usr/bin/git annex assistant --autostart && echo "... ok"
+fi
diff --git a/archive/bin/httphtmltitle.py b/archive/bin/httphtmltitle.py
new file mode 100755
index 00000000..287d7a56
--- /dev/null
+++ b/archive/bin/httphtmltitle.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+
+# take url on the command line, output the HTML page's title if we can
+
+import urllib2
+import sys
+
+url = sys.argv[1]
+response = urllib2.urlopen(url)
+html = response.read()
+
+post_title_opening_tag = html.split('<title>')[1]
+title = post_title_opening_tag.split('</title>')[0]
+
+print title
diff --git a/archive/bin/idlesshclear b/archive/bin/idlesshclear
new file mode 100755
index 00000000..8cfcf8f4
--- /dev/null
+++ b/archive/bin/idlesshclear
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+while true; do
+ if [ $(xprintidle) -ge 300000 ]; then
+ ssh-add -D 2>/dev/null
+ fi
+ sleep 300
+done
diff --git a/archive/bin/imap-pass b/archive/bin/imap-pass
new file mode 100755
index 00000000..49f0cd4b
--- /dev/null
+++ b/archive/bin/imap-pass
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# thanks to http://bloerg.net/2013/10/09/syncing-mails-with-mbsync-instead-of-offlineimap.html
+
+import argparse
+import keyring
+import getpass
+
+if __name__ == '__main__':
+ SERVICE = 'mbsync'
+
+ parser = argparse.ArgumentParser()
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--set', '-s', type=str, help='Account to save password')
+ group.add_argument('--get', '-g', type=str, help='Account to get password')
+
+ args = parser.parse_args()
+
+ if args.set:
+ password = getpass.getpass()
+ keyring.set_password(SERVICE, args.set, password)
+ else:
+ print(keyring.get_password(SERVICE, args.get))
diff --git a/archive/bin/imap-password b/archive/bin/imap-password
new file mode 100755
index 00000000..38d8b521
--- /dev/null
+++ b/archive/bin/imap-password
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+
+# http://www.kirya.net/articles/my-mail-client-setup/
+# http://www.clasohm.com/blog/one-entry?entry_id=90957
+
+import gtk
+import gnomekeyring as gkey
+import getpass
+
+class Keyring(object):
+ def __init__(self, name, server, protocol):
+ self._name = name
+ self._server = server
+ self._protocol = protocol
+ self._keyring = gkey.get_default_keyring_sync()
+
+ def has_credentials(self):
+ try:
+ attrs = {"server": self._server, "protocol": self._protocol}
+ items = gkey.find_items_sync(gkey.ITEM_NETWORK_PASSWORD, attrs)
+ return len(items) > 0
+ except gkey.DeniedError:
+ return False
+
+ def get_credentials(self):
+ attrs = {"server": self._server, "protocol": self._protocol}
+ items = gkey.find_items_sync(gkey.ITEM_NETWORK_PASSWORD, attrs)
+ return (items[0].attributes["user"], items[0].secret)
+
+ def set_credentials(self, (user, pw)):
+ attrs = {
+ "user": user,
+ "server": self._server,
+ "protocol": self._protocol,
+ }
+ gkey.item_create_sync(gkey.get_default_keyring_sync(),
+ gkey.ITEM_NETWORK_PASSWORD, self._name, attrs, pw, True)
+
+server = raw_input("Server: ")
+username = raw_input("Username: ")
+password = getpass.getpass(prompt="Password: ")
+print
+
+keyring = Keyring("offlineimap", server, "imap")
+
+keyring.set_credentials((username, password))
diff --git a/archive/bin/its-all-text-wrapper b/archive/bin/its-all-text-wrapper
new file mode 100755
index 00000000..805072ed
--- /dev/null
+++ b/archive/bin/its-all-text-wrapper
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+# It's All Text does not allow one to pass arguments to the editor
+# command, so need this wrapper
+
+. $HOME/.shenv
+
+emacsclient -c $@
diff --git a/archive/bin/jnest b/archive/bin/jnest
new file mode 100755
index 00000000..27b5681a
--- /dev/null
+++ b/archive/bin/jnest
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+. $HOME/.shenv
+
+unset PERL5LIB PKG_CONFIG_PATH LD_LIBRARY_PATH C_INCLUDE_PATH MODULEBUILDRC MODULEPATH MODULESHOME PERL_MM_OPT
+VISUAL=mg
+EDITOR=mg
+export PERL5LIB PKG_CONFIG_PATH LD_LIBRARY_PATH C_INCLUDE_PATH MODULEBUILDRC MODULEPATH MODULESHOME PERL_MM_OPT VISUAL EDITOR
+
+junest $@
diff --git a/archive/bin/kill-ssh-and-umount b/archive/bin/kill-ssh-and-umount
new file mode 100755
index 00000000..ccb8e3e0
--- /dev/null
+++ b/archive/bin/kill-ssh-and-umount
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+. "$HOME/.shenv"
+
+# TODO
+# lsof $1 | grep ssh | cut -d' ' -f
+sudo umount $1
diff --git a/archive/bin/laptopinput b/archive/bin/laptopinput
new file mode 100755
index 00000000..e4aa8186
--- /dev/null
+++ b/archive/bin/laptopinput
@@ -0,0 +1,109 @@
+#!/usr/bin/env runhaskell
+
+import Control.Applicative ((<$>))
+import Control.Monad (when)
+import Data.List (isInfixOf, isPrefixOf, isSuffixOf)
+import System.Environment (getArgs)
+import System.Process (readProcessWithExitCode, runCommand)
+
+-- constants
+
+-- | Keyboards in the output of lsusb(8) that are to be used instead
+-- of the laptops's built-in keyboard. If none are present, the
+-- laptop keyboard will not be disabled.
+pluggedKBs = ["04f2:0833 Chicony Electronics Co., Ltd "]
+
+-- | Pointing devices in the output of lsusb(8) that are to be used instead
+-- of the laptops's built-in pointing devices. If none are present,
+-- the laptop pointing devices will not be disabled.
+pluggedMice = ["045e:0053 Microsoft Corp. Optical Mouse"]
+
+-- | The laptop's built-in keyboard as listed by xinput(1).
+laptopKB = "AT Translated Set 2 keyboard"
+
+-- | The laptop's built-in pointing devices as listed by xinput(1).
+laptopMice = ["DualPoint Stick", "AlpsPS/2 ALPS DualPoint TouchPad"]
+
+-- string processing
+
+-- | Find a device's ID in the output of `xinput --list`
+findXInputID :: String -> String -> Maybe Int
+findXInputID device xinput = do
+ (line:[]) <- Just $ filter (device `isInfixOf`) $ lines xinput
+ (idW:[]) <- Just $ filter ("id=" `isPrefixOf`) $ words line
+ ((id, _):[]) <- Just $ reads $ drop 3 idW
+ return id
+
+-- | Determines whether a device is plugged in using lsusb(8) output.
+checkPlugged :: String -> String -> Bool
+checkPlugged device lsusb = length ls == 1
+ where
+ ls = filter (device `isSuffixOf`) $ lines lsusb
+
+-- | xinput(1) command to disable an input with a given id
+disableCommand :: Int -> String
+disableCommand id = "xinput set-int-prop " ++ show id ++ " \"Device Enabled\" 8 0"
+
+-- | xinput(1) command to enable an input with a given id
+enableCommand :: Int -> String
+enableCommand id = init (disableCommand id) ++ "1"
+
+-- IO actions to do the work
+
+enable :: IO ()
+enable = sequence_ $ enableInput <$> laptopKB : laptopMice
+
+-- disable :: IO ()
+-- disable = undefined
+
+disable :: IO ()
+disable = do
+ mousePlugged <- any (== True) <$> (sequence $ checkInput <$> pluggedMice)
+ when mousePlugged $ sequence_ $ disableInput <$> laptopMice
+ kbPlugged <- any (== True) <$> (sequence $ checkInput <$> pluggedKBs)
+ when kbPlugged $ disableInput laptopKB
+
+enableInput :: String -> IO ()
+enableInput device = do
+ xinput <- getXInputList
+ let id = findXInputID device xinput
+ case id of
+ Just i -> runCommand (enableCommand i) >> return ()
+ Nothing -> putStrLn $ "cannot enable input with xinput"
+
+disableInput :: String -> IO ()
+disableInput device = do
+ xinput <- getXInputList
+ let id = findXInputID device xinput
+ case id of
+ Just i -> runCommand (disableCommand i) >> return ()
+ Nothing -> putStrLn $ "cannot disable input with xinput"
+
+checkInput :: String -> IO Bool
+checkInput device = getUSBList >>= \lsusb ->
+ return $ checkPlugged device lsusb
+
+getXInputList :: IO String
+getXInputList = do
+ (_, output, _) <- readProcessWithExitCode "xinput" ["--list"] ""
+ return output
+
+getUSBList :: IO String
+getUSBList = do
+ (_, output, _) <- readProcessWithExitCode "lsusb" [] ""
+ return output
+
+-- handle command line arguments
+
+processArgs :: [String] -> IO ()
+processArgs [] = usage
+processArgs (o:_) =
+ case o of
+ "--enable" -> enable
+ "--maybe-disable" -> disable
+ _ -> usage
+
+main = getArgs >>= processArgs
+
+usage :: IO ()
+usage = putStrLn "usage: laptopinput --enable|--maybe-disable"
diff --git a/archive/bin/latesteconomist b/archive/bin/latesteconomist
new file mode 100755
index 00000000..769afe10
--- /dev/null
+++ b/archive/bin/latesteconomist
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+SMTPOPT="--relay localhost --encryption-method=NONE spw@sdf.org"
+WORKDIR=`mktemp -d` || exit 1
+
+nice -n 19 ebook-convert "The Economist.recipe" $WORKDIR/economist.mobi --output-profile kindle
+calibre-smtp --attachment $WORKDIR/economist.mobi $SMTPOPT spwhitt0n@free.kindle.com ""
+
+rm -r $WORKDIR
diff --git a/archive/bin/m b/archive/bin/m
new file mode 120000
index 00000000..b60d5938
--- /dev/null
+++ b/archive/bin/m
@@ -0,0 +1 @@
+bashmount \ No newline at end of file
diff --git a/archive/bin/ma_org_publish b/archive/bin/ma_org_publish
new file mode 100755
index 00000000..2faab6f9
--- /dev/null
+++ b/archive/bin/ma_org_publish
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+. $HOME/.shenv
+
+set -e
+
+lisp=$(cat <<EOF
+(progn
+ (org-batch-store-agenda-views)
+ (org-publish-project "spw-wiki")
+ (org-publish-project "philos")
+ (org-publish-project "spw-org")
+ (org-publish-project "spw-static")
+ (org-publish-project "blog"))
+EOF
+ )
+
+webdav_dir ()
+{
+ local dir=$1
+ cd "$dir"
+ local files="./*"
+ cadaver https://myfiles.messagingengine.com/ <<EOF
+cd "$dir"
+mput $files
+EOF
+}
+
+# assume throughout we're on the MetaArray
+[ "$(hostname -f)" = "ma.sdf.org" ] \
+ || ( echo >&2 "run this script only on MetaArray"; exit 1 )
+
+# 1. git update
+
+# Thanks to `set -e' above, any failure to fast-forward will mean I
+# get an e-mail from crond telling me to resolve the merge.
+for dir in "$HOME/doc" "$HOME/doc/www" "$HOME/doc/www/blog"; do
+ cd $dir
+ git pull --ff-only
+done
+
+# 2. prepare publishing destinations
+
+# this is why this script cannot be executed locally!
+mkdir -p $HOME/lib/fm/{,dionysus/Agenda}
+# [ "$(ls -A $HOME/lib/fm)" ] \
+# && echo >&2 "target dir not empty!" && exit 1
+
+# 3. have Org publish
+
+emacs -batch \
+ -l $HOME/.emacs.d/init.el \
+ -l $HOME/.emacs.d/init-org.el \
+ -eval "$lisp"
+
+# 4. cleanup
+
+cd $HOME/lib/fm
+webdav_dir "Philos notes"
+cd $HOME/lib/fm
+webdav_dir "dionysus/Agenda"
+rm -r $HOME/lib/fm/*
+
+rdate.py-dir $HOME/html/blog/entries
diff --git a/archive/bin/ma_reboot_check b/archive/bin/ma_reboot_check
new file mode 100755
index 00000000..70e04a7e
--- /dev/null
+++ b/archive/bin/ma_reboot_check
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+. $HOME/.shenv
+
+# @reboot cronjobs don't work on the MetaArray, so manually check if
+# we need to restart stuff
+
+# if I'm logged in then let me restart stuff manually
+who | grep spw && exit 0
+
+pgrep -u spw emacs || emacs --daemon
+pgrep -u spw bitlbee || bitlbee_startup
+sleep 10
+pgrep -u spw irssi || ii
diff --git a/archive/bin/mdns-do b/archive/bin/mdns-do
new file mode 100755
index 00000000..e5d0c932
--- /dev/null
+++ b/archive/bin/mdns-do
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# update mdns from SSH session
+
+ip=`echo $SSH_CLIENT | cut -f 1 -d' '`
+mdns $ip
diff --git a/archive/bin/mutt_bgrun b/archive/bin/mutt_bgrun
new file mode 100755
index 00000000..4d759d34
--- /dev/null
+++ b/archive/bin/mutt_bgrun
@@ -0,0 +1,118 @@
+#!/bin/sh
+# @(#) mutt_bgrun $Revision: 1.4 $
+
+# mutt_bgrun - run an attachment viewer from mutt in the background
+# Copyright (C) 1999-2002 Gary A. Johnson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+# SYNOPSIS
+# mutt_bgrun viewer [viewer options] file
+#
+# DESCRIPTION
+# Mutt invokes external attachment viewers by writing the
+# attachment to a temporary file, executing the pipeline specified
+# for that attachment type in the mailcap file, waiting for the
+# pipeline to terminate, writing nulls over the temporary file,
+# then deleting it. This causes problems when using graphical
+# viewers such as qvpview and acroread to view attachments.
+#
+# If qvpview, for example, is executed in the foreground, the mutt
+# user interface is hung until qvpview exits, so the user can't do
+# anything else with mutt until he or she finishes reading the
+# attachment and exits qvpview. This is especially annoying when
+# a message contains several MS Office attachments--one would like
+# to have them all open at once.
+#
+# If qvpview is executed in the background, it must be given
+# enough time to completely read the file before returning control
+# to mutt, since mutt will then obliterate the file. Qvpview is
+# so slow that this time can exceed 20 seconds, and the bound is
+# unknown. So this is again annoying.
+#
+# The solution provided here is to invoke the specified viewer
+# from this script after first copying mutt's temporary file to
+# another temporary file. This script can then quickly return
+# control to mutt while the viewer can take as much time as it
+# needs to read and render the attachment.
+#
+# EXAMPLE
+# To use qvpview to view MS Office attachments from mutt, add the
+# following lines to mutt's mailcap file.
+#
+# application/msword; mutt_bgrun qvpview %s
+# application/vnd.ms-excel; mutt_bgrun qvpview %s
+# application/vnd.ms-powerpoint; mutt_bgrun qvpview %s
+#
+# AUTHOR
+# Gary A. Johnson
+# <garyjohn@spk.agilent.com>
+#
+# EDIT BY SEAN
+# Redirect viewer stdout and stderr as soffice pumps junk into
+# mutt's ncurses display.
+#
+# ACKNOWLEDGEMENTS
+# My thanks to the people who have commented on this script and
+# offered solutions to shortcomings and bugs, especially Edmund
+# GRIMLEY EVANS <edmundo@rano.org> and Andreas Somogyi
+# <aso@somogyi.nu>.
+
+prog=${0##*/}
+
+# Check the arguments first.
+
+if [ "$#" -lt "2" ]
+then
+ echo "usage: $prog viewer [viewer options] file" >&2
+ exit 1
+fi
+
+# Separate the arguments. Assume the first is the viewer, the last is
+# the file, and all in between are options to the viewer.
+
+viewer="$1"
+shift
+
+while [ "$#" -gt "1" ]
+do
+ options="$options $1"
+ shift
+done
+
+file=$1
+
+# Create a temporary directory for our copy of the temporary file.
+#
+# This is more secure than creating a temporary file in an existing
+# directory.
+
+tmpdir=/tmp/$LOGNAME$$
+umask 077
+mkdir "$tmpdir" || exit 1
+tmpfile="$tmpdir/${file##*/}"
+
+# Copy mutt's temporary file to our temporary directory so that we can
+# let mutt overwrite and delete it when we exit.
+
+cp "$file" "$tmpfile"
+
+# Run the viewer in the background and delete the temporary files when done.
+
+(
+ "$viewer" $options "$tmpfile" 2>/dev/null >/dev/null
+ rm -f "$tmpfile"
+ rmdir "$tmpdir"
+) &
diff --git a/archive/bin/normalise-artemis b/archive/bin/normalise-artemis
new file mode 100755
index 00000000..ebe3aede
--- /dev/null
+++ b/archive/bin/normalise-artemis
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+EXT="VGA-1"
+INT="LVDS-1"
+EXTW="1680"
+EXTH="1050"
+INTW="1280"
+INTH="800"
+EXTL="us"
+INTL="gb"
+
+# if xrandr -q | grep "$EXT connected"; then
+# # desk mode
+# xrandr --output $INT --off
+# xrandr --output $EXT --off
+# xrandr --output $EXT --primary --mode $EXTW"x"$EXTH \
+# --output $INT --noprimary --mode $INTW"x"$INTH --pos 1680x250
+# setxkbmap -layout $EXTL -option ctrl:nocaps
+# numlockx on
+# else
+# # laptop mode
+# xrandr --output $INT --off
+# xrandr --output $EXT --off
+# xrandr --output $INT --mode $INTW"x"$INTH
+# setxkbmap -layout $INTL -option ctrl:nocaps
+# numlockx off
+# echo 13 | sudo tee /sys/class/backlight/acpi_video0/brightness
+# fi
+
+keyboard="ID 04f2:0833 Chicony Electronics Co., Ltd"
+
+if lsusb | grep -q "$keyboard"; then
+ setxkbmap -layout us -option ctrl:nocaps
+ numlockx on
+else
+ setxkbmap -layout gb -option ctrl:nocaps
+ numlockx off
+fi
+
+xmodmap ~/.Xmodmap
+# xfce4-panel --restart
+xset b off
diff --git a/archive/bin/offlineimap-run b/archive/bin/offlineimap-run
new file mode 100755
index 00000000..89add5b8
--- /dev/null
+++ b/archive/bin/offlineimap-run
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# Run offlineimap with the desktop session's D-Bus address exported, so
+# that it can reach session services (presumably the GNOME keyring used
+# by offlineimap.py -- confirm against the .offlineimaprc).  The address
+# is assumed to have been written to ~/.tmp-dbus-addr at session start.
+export DBUS_SESSION_BUS_ADDRESS=$(cat ~/.tmp-dbus-addr)
+offlineimap
diff --git a/archive/bin/offlineimap.py b/archive/bin/offlineimap.py
new file mode 100755
index 00000000..10282dd1
--- /dev/null
+++ b/archive/bin/offlineimap.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+
+# http://www.kirya.net/articles/my-mail-client-setup/
+# http://www.clasohm.com/blog/one-entry?entry_id=90957
+
+# Python 2 helper module: look up IMAP credentials for offlineimap in
+# the GNOME keyring.  get_username()/get_password() are the entry
+# points -- presumably called from .offlineimaprc via remote*eval
+# hooks; verify against the offlineimap configuration.
+
+import sys
+import gtk
+import gnomekeyring as gkey
+
+class Keyring(object):
+    # Thin wrapper around gnomekeyring "network password" items,
+    # keyed by (server, protocol) attributes.
+    def __init__(self, name, server, protocol):
+        # name: display name used when creating items; server and
+        # protocol: attributes used to find items again later.
+        self._name = name
+        self._server = server
+        self._protocol = protocol
+        self._keyring = gkey.get_default_keyring_sync()
+
+    def has_credentials(self):
+        # True iff at least one matching item exists and the keyring
+        # granted access; access denial is reported as "no credentials".
+        try:
+            attrs = {"server": self._server, "protocol": self._protocol}
+            items = gkey.find_items_sync(gkey.ITEM_NETWORK_PASSWORD, attrs)
+            return len(items) > 0
+        except gkey.DeniedError:
+            return False
+
+    def get_credentials(self):
+        # Return (user, secret) from the first matching item.  Raises if
+        # no item matches -- callers are expected to have stored one.
+        attrs = {"server": self._server, "protocol": self._protocol}
+        items = gkey.find_items_sync(gkey.ITEM_NETWORK_PASSWORD, attrs)
+        return (items[0].attributes["user"], items[0].secret)
+
+    def set_credentials(self, (user, pw)):
+        # Store/overwrite (True = update existing) a network-password
+        # item.  Note: tuple-unpacking parameter is Python 2-only syntax.
+        attrs = {
+            "user": user,
+            "server": self._server,
+            "protocol": self._protocol,
+        }
+        gkey.item_create_sync(gkey.get_default_keyring_sync(),
+            gkey.ITEM_NETWORK_PASSWORD, self._name, attrs, pw, True)
+
+def get_username(server):
+    # Convenience lookup: username stored for SERVER's imap entry.
+    keyring = Keyring("offlineimap", server, "imap")
+    (username, password) = keyring.get_credentials()
+    return username
+
+def get_password(server):
+    # Convenience lookup: password stored for SERVER's imap entry.
+    keyring = Keyring("offlineimap", server, "imap")
+    (username, password) = keyring.get_credentials()
+    return password
diff --git a/archive/bin/org-mairix-el-store-link b/archive/bin/org-mairix-el-store-link
new file mode 100755
index 00000000..4a0b7f4d
--- /dev/null
+++ b/archive/bin/org-mairix-el-store-link
@@ -0,0 +1,3 @@
+#!/bin/zsh
+
+# Extract the Message-ID of the mail piped in on stdin (angle brackets
+# stripped) and save it to ~/.org-mairix-el-link, for org-mairix.el to
+# turn into an Org link.
+# NOTE(review): `cat /dev/stdin |` is redundant -- grep could read
+# stdin directly -- and only the first sed handles the leading "<".
+cat /dev/stdin | grep -i "^Message-ID:" | sed "s/^Message-I[dD]: <//" | sed "s/>$//" > ~/.org-mairix-el-link
diff --git a/archive/bin/orgblosxom2ikiwiki.py b/archive/bin/orgblosxom2ikiwiki.py
new file mode 100755
index 00000000..6d177e1d
--- /dev/null
+++ b/archive/bin/orgblosxom2ikiwiki.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# coding=utf-8
+
+# One-shot Python 2 migration script: convert an org-mode PyBlosxom
+# blog tree (posts + XML comments) into ikiwiki markdown entries,
+# copying images into the wiki annex along the way.
+
+import os
+import random
+import re
+import shutil
+import string
+import subprocess
+import sys
+import xml.etree.ElementTree as ET
+from PIL import Image # apt-get install python-imaging
+
+# to run: cd ~/src/wiki && rm -rf blog/entry && g co blog/entry && cd
+# ~/lib/wikiannex && git clean -f && rm -rf
+# blog/img/{jhcoip,oldtech,oliscrot} && cd $HOME &&
+# orgblosxom2ikiwiki.py && rm
+# ~/lib/wikiannex/blog/img/{jhcoip,oldtech,oliscrot}/*thumb*
+
+# everything in Unicode please
+reload(sys)
+sys.setdefaultencoding('utf-8')
+
+# input
+POSTS = "/home/swhitton/local/big/blog"
+COMMENTS = "/home/swhitton/local/big/comments"
+# output
+ENTRIES = "/home/swhitton/src/wiki/blog/entry"
+IMAGES = "/home/swhitton/lib/wikiannex/blog/img"
+
+def strip_smarts(text):
+    """Replace typographic quotes, dashes and ellipses with ASCII."""
+    return text.replace(u"“", "\"").replace(u"”", "\"").replace(u"’", "\'").replace(u"‘", "\'").replace(u"—", "---").replace(u"–", "--").replace(u"…", "...")
+
+def fix_images(text):
+    """Rewrite old thumbnail-link image markup as ikiwiki [[!img]] directives.
+
+    The thumbnail file on disk supplies the displayed size; the link
+    path keeps a parent directory component only for dedicated image
+    directories (those containing no .org file)."""
+    fixed = []
+
+    for line in text.splitlines():
+        match = re.match(r'\[!\[\]\(http://spw.sdf.org/blog/(.*)\)\]\(http://spw.sdf.org/blog/(.*)\)', line)
+        if match:
+            thumb = match.group(1)
+            image = match.group(2)
+            contents = os.listdir(os.path.join(POSTS, os.path.dirname(image)))
+            exts = map(lambda x: os.path.splitext(x)[1], contents)
+            if ".org" not in exts: # dedicated image dir
+                link_path = os.path.join(os.path.dirname(image).rsplit("/", 1)[1], os.path.basename(image))
+            else:
+                link_path = os.path.basename(image)
+
+            # read the thumbnail's pixel size so the ikiwiki directive
+            # scales the full image the same way the old markup did
+            im = Image.open(os.path.join(POSTS, thumb))
+            im_width, im_height = im.size
+            dimensions = str(im_width) + "x" + str(im_height)
+            fixed.append("[[!img blog/img/" + link_path + " size=" + dimensions + "]]")
+        else:
+            fixed.append(line)
+
+    return "\n".join(fixed)
+
+def fix_more(text):
+    """Convert a BREAK marker (on its own line or inline) into ikiwiki's
+    [[!more]] directive; text without a marker passes through."""
+    before, more, after = map(lambda s: s.strip(), text.partition("BREAK"))
+    if "\nBREAK\n" in text:
+        return "\n".join([before + "\n", "[[!more linktext=\"continue reading this entry\" pages=\"!blog/entry/*\" text=\"\"\"", after, "\"\"\"]]"])
+    elif " BREAK " in text:
+        return before + " [[!more linktext=\"continue reading this entry\" pages=\"!blog/entry/*\" text=\"\"\"" + after + "\n\"\"\"]]"
+    else:
+        return text
+
+def convert_post(post):
+    """Convert one .org post file to an ikiwiki mdwn page string.
+
+    Title and date come from the two #+HTML header lines; tags from the
+    post's directory path; the body goes through pandoc org->markdown
+    plus the fixups above.  The caller decides where to save it."""
+    with open(post, 'r') as h:
+        org = h.read()
+
+    title = org.splitlines()[0].replace('#+HTML: ', '')
+    title = strip_smarts(title)
+    title = "[[!meta title=\"" + title + "\"]]"
+
+    date = org.splitlines()[1].replace('#+HTML: #published ', '')
+    date = "[[!meta date=\"" + date + "\"]]"
+
+    tags = os.path.dirname(post).replace(POSTS, "")[1:].replace("/", " ")
+    tags = "[[!tag imported_PyBlosxom " + tags + "]]"
+
+    # this file generates a pandoc error:
+    # /home/swhitton/local/big/blog/linkdump/novdec14.org
+    pandoc = subprocess.Popen(["pandoc", "-f", "org", "-t", "markdown_strict"],
+                              stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+    body, error = pandoc.communicate(input=org)
+
+    body = strip_smarts(body)
+    body = fix_images(body)
+    body = fix_more(body)
+
+    return "\n".join([date, title, tags, "", body])
+
+def convert_comment(comment):
+    """Convert one .cmt XML comment file into an ikiwiki _comment file
+    and write it next to the entry it belongs to (the slug is stored
+    inside the comment itself)."""
+    # print "attempting to parse", comment
+    tree = ET.parse(comment)
+    root = tree.getroot()
+
+    # reference:
+
+    # [[!comment format=mdwn
+    # username="spwhitton@171b57686690088a367b4b10ddf73c4ca6f16601"
+    # nickname="spwhitton"
+    # avatar="http://cdn.libravatar.org/avatar/40da86a5d03e6fa62515a9d762601ed2"
+    # subject="And a second one, gravatar free"
+    # date="2015-11-11T00:18:46Z"
+    # content="""
+    # Here it is
+    # """]]
+
+    slug = os.path.basename(root.find('parent').text)
+    address = root.find('email')
+    if address == None:
+        username = root.find('author').text
+    else:
+        username = address.text.partition('@')[0]
+
+    desc = root.find('description').text
+    pandoc = subprocess.Popen(["pandoc", "-f", "html", "-t", "markdown_strict"],
+                              stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+    desc, error = pandoc.communicate(input=desc)
+
+    comment = "\n".join([
+        "[[!comment format=mdwn",
+        " username=\"" + username + "\"",
+        " nickname=\"" + root.find('author').text + "\"",
+        " date=\"" + root.find('w3cdate').text + "\"",
+        " content=\"\"\"",
+        desc + "\"\"\"]]"
+        ])
+
+    the_dir = os.path.join(ENTRIES, slug)
+    if not os.path.exists(the_dir):
+        os.mkdir(the_dir, 0755)
+    # random suffix keeps multiple comments on one entry from colliding
+    rands = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
+    # ^ http://stackoverflow.com/a/2257449
+    if not comment.endswith("\n"):
+        comment = comment + "\n"
+    with open(os.path.join(ENTRIES, slug, "comment_" + rands + "._comment"), 'w') as h:
+        h.write(comment)
+
+def main():
+    """Walk the old blog tree converting posts, images and comments."""
+    for root, dirs, files in os.walk(POSTS):
+
+        # skip all the templates stored in root of blog
+        if root == POSTS or root.startswith("/home/swhitton/local/big/blog/.git"):
+            continue
+
+        # 1. If there's no .org in this dir and we're at the bottom of
+        # a tree, then it's a dir for images only, so copy it
+        # verbatim. And we know from inspection with old Haskell
+        # script that there are no conflicts other than inside these
+        # image-only directories
+        exts = map(lambda x: os.path.splitext(x)[1], files)
+        if ".org" not in exts and not any(dirs):
+            dest = os.path.join(IMAGES, os.path.basename(root))
+            if not os.path.exists(dest):
+                shutil.copytree(root, dest)
+        # 2. now convert posts and images in the usual way
+        else:
+            for f in files:
+                ext = os.path.splitext(f)[1]
+                if ext == ".org":
+                    # convert_post, unlike convert_comment, relies on
+                    # us to decide where to save it
+                    post = convert_post(os.path.join(root, f))
+                    fname = os.path.join(ENTRIES, os.path.splitext(f)[0] + ".mdwn")
+                    if os.path.exists(fname): # safety if inspection wrong
+                        print "uh oh! conflict! " + fname + " exists!"
+                        sys.exit()
+                    else:
+                        if not post.endswith("\n"):
+                            post = post + "\n"
+                        with open(fname, 'w') as h:
+                            h.write(post)
+                elif "thumb." not in f:
+                    if os.path.exists(os.path.join(IMAGES, f)): # safety if inspection wrong
+                        print "uh oh! conflict! " + os.path.join(IMAGES, f) + " exists!"
+                        sys.exit()
+                    else:
+                        shutil.copy(os.path.join(root, f), IMAGES)
+
+    for root, dirs, files in os.walk(COMMENTS):
+        for f in files:
+            if os.path.splitext(f)[1] == ".cmt":
+                # convert_comment does the saving since the post to which
+                # the comment is associated is stored within the comment
+                convert_comment(os.path.join(root, f))
+
+if __name__ == "__main__":
+    main()
diff --git a/archive/bin/planetnewspipe b/archive/bin/planetnewspipe
new file mode 100755
index 00000000..d9d4d507
--- /dev/null
+++ b/archive/bin/planetnewspipe
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+# Regenerate the Planet Venus feed aggregate, then run newspipe to mail
+# out new items.  Paths are SDF-specific (/meta/... home, /usr/pkg
+# interpreters suggest NetBSD pkgsrc); presumably run from cron.
+cd /meta/s/spw/local/src/venus
+/usr/pkg/bin/python2.7 ./planet.py /meta/s/spw/doc/conf/planet.ini
+/usr/bin/python /meta/s/spw/local/src/newspipe/newspipe.py -i /meta/s/spw/doc/conf/newspipe.ini
diff --git a/archive/bin/podcastsup b/archive/bin/podcastsup
new file mode 100755
index 00000000..27a3037a
--- /dev/null
+++ b/archive/bin/podcastsup
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# Register podcast episodes (URLs only, no content) from the feed list
+# into the git-annex podcasts repository.
+
+cd ~/lib/annex/big/audio/Podcasts
+
+# Add as much metadata as we can manage
+git config annex.genmetadata true
+
+# Add to the annex using addurl, but don't download any content.
+# Preferred content groupwanted expression for client group should
+# ensure that nothing gets downloaded automatically. Use ga get when
+# I want an episode
+xargs git-annex importfeed --fast < feeds
diff --git a/archive/bin/pomodoro b/archive/bin/pomodoro
new file mode 100755
index 00000000..9bb89df5
--- /dev/null
+++ b/archive/bin/pomodoro
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+"""Sean's pomodoro timer
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or (at
+your option) any later version.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+# Python 2 script (raw_input below); `spw` is a personal helper module
+# providing try_audible_notification() and print_same_line().
+
+import time
+import os
+import sys
+import spw
+
+# timer parameters, in minutes / pomodoros per block
+WORK_MINUTES = 25
+BREAK_MINUTES = 5
+LONG_BREAK_MINUTES = 30
+POMODOROS_BLOCK = 4
+
+def main():
+    """Pomodoro technique loop"""
+    os.system('clear')
+    pomodoros_completed = 0
+    move_on_wait()
+
+    while True:
+        if pomodoros_completed < POMODOROS_BLOCK:
+            count_down(WORK_MINUTES, 'work')
+            pomodoros_completed = pomodoros_completed + 1
+
+            # do a short break unless it's time for a long break,
+            # which'll happen when we break out of the main while loop
+            if pomodoros_completed < POMODOROS_BLOCK:
+                spw.try_audible_notification(str(BREAK_MINUTES)
+                                             + ' minute break')
+                count_down(BREAK_MINUTES, 'rest')
+                spw.try_audible_notification('Time to start working again')
+                t0 = time.time()
+                move_on_wait()
+                t1 = time.time()
+                # accidental longer break
+                # (>20 min at the prompt resets the block counter)
+                if t1 - t0 > 1200:
+                    pomodoros_completed = 0
+        else:
+            spw.try_audible_notification(str(LONG_BREAK_MINUTES)
+                                         + ' minute break.'
+                                         ' Go and do something else')
+            count_down(LONG_BREAK_MINUTES, 'rest')
+            pomodoros_completed = 0
+            move_on_wait()
+
+def count_down(total_minutes, kind):
+    """Count down TOTAL_MINUTES of KIND.
+
+    Updates a single status line once a minute; Ctrl-C exits the whole
+    program cleanly."""
+    left = 0
+    left_string = ''
+
+    for minutes in range(total_minutes):
+        left = total_minutes - minutes
+        left_string = ' minutes' if left > 1 else ' minute'
+        left_string += ' of ' + kind + ' left'
+        spw.print_same_line(str(left) + left_string)
+        try:
+            time.sleep(60)
+        except KeyboardInterrupt:
+            spw.print_same_line()
+            sys.exit()
+
+def move_on_wait():
+    # Block until the user presses enter, then move the cursor back up
+    # one line (ANSI escape) so the prompt is overwritten.
+    raw_input("Press enter when you are ready to move on")
+    sys.stdout.write("\033[F")
+
+if __name__ == "__main__":
+    main()
diff --git a/archive/bin/privoxy-blocklist.sh b/archive/bin/privoxy-blocklist.sh
new file mode 100755
index 00000000..0f91d6bf
--- /dev/null
+++ b/archive/bin/privoxy-blocklist.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+#
+######################################################################
+#
+# Author: Andrwe Lord Weber
+# Mail: lord-weber-andrwe<at>renona-studios<dot>org
+# Version: 0.2
+# URL: http://andrwe.dyndns.org/doku.php/blog/scripting/bash/privoxy-blocklist
+#
+##################
+#
+# Summary:
+# This script downloads, converts and installs
+# AdblockPlus lists into Privoxy
+#
+######################################################################
+
+######################################################################
+#
+# TODO:
+# - implement:
+# domain-based filter
+#
+######################################################################
+
+######################################################################
+#
+# script variables and functions
+#
+######################################################################
+
+# array of URL for AdblockPlus lists
+URLS=("http://adblockplus.mozdev.org/easylist/easylist.txt" "https://easylist-downloads.adblockplus.org/easyprivacy.txt" "https://secure.fanboy.co.nz/adblock/fanboy-adblock.txt" "https://secure.fanboy.co.nz/adblock/fanboy-tracking.txt" "https://secure.fanboy.co.nz/adblock/fanboy-addon.txt" "https://adversity.googlecode.com/hg/Adversity.txt" "https://adversity.googlecode.com/hg/Antisocial.txt" "https://mywebspace.wisc.edu/sanoonan/web/adblock/adblock.txt")
+# urls from https://github.com/airmanopus/puller/blob/master/puller.sh
+
+# privoxy config dir (default: /etc/privoxy/)
+CONFDIR=/etc/privoxy
+# directory for temporary files
+TMPDIR=/tmp/privoxy-blocklist
+TMPNAME=$(basename ${0})
+
+######################################################################
+#
+# No changes needed after this line.
+#
+######################################################################
+
+# print help text for the command-line options below
+function usage()
+{
+ echo "${TMPNAME} is a script to convert AdBlockPlus-lists into Privoxy-lists and install them."
+ echo " "
+ echo "Options:"
+ echo " -h: Show this help."
+ echo " -q: Don't give any output."
+ echo " -v 1: Enable verbosity 1. Show a little bit more output."
+ echo " -v 2: Enable verbosity 2. Show a lot more output."
+ echo " -v 3: Enable verbosity 3. Show all possible output and don't delete temporary files.(For debugging only!!)"
+ echo " -r: Remove all lists build by this script."
+}
+
+# must run as root: writes into ${CONFDIR}
+[ ${UID} -ne 0 ] && echo -e "Root privileges needed. Exit.\n\n" && usage && exit 1
+
+# check whether an instance is already running
+[ -e ${TMPDIR}/${TMPNAME}.lock ] && echo "An Instance of ${TMPNAME} is already running. Exit" && exit
+
+# verbosity level: -1 quiet .. 3 debug (set by -q / -v below)
+DBG=0
+
+# debug MESSAGE LEVEL: echo MESSAGE when current verbosity >= LEVEL
+function debug()
+{
+ [ ${DBG} -ge ${2} ] && echo -e "${1}"
+}
+
+# download every URL, convert it to a Privoxy action+filter file pair
+# via the sed programs below, and install both into ${CONFDIR},
+# registering them in ${CONFDIR}/config if not already present
+function main()
+{
+ cpoptions=""
+ [ ${DBG} -gt 0 ] && cpoptions="-v"
+
+ for url in ${URLS[@]}
+ do
+ debug "Processing ${url} ...\n" 0
+ file=${TMPDIR}/$(basename ${url})
+ actionfile=${file%\.*}.script.action
+ filterfile=${file%\.*}.script.filter
+ list=$(basename ${file%\.*})
+
+ # download list
+ debug "Downloading ${url} ..." 0
+ wget -t 3 --no-check-certificate -O ${file} ${url} >${TMPDIR}/wget-${url//\//#}.log 2>&1
+ debug "$(cat ${TMPDIR}/wget-${url//\//#}.log)" 2
+ debug ".. downloading done." 0
+ [ "$(grep -E '^\[Adblock.*\]$' ${file})" == "" ] && echo "The list recieved from ${url} isn't an AdblockPlus list. Skipped" && continue
+
+ # convert AdblockPlus list to Privoxy list
+ # blacklist of urls
+ debug "Creating actionfile for ${list} ..." 1
+ echo -e "{ +block{${list}} }" > ${actionfile}
+ sed '/^!.*/d;1,1 d;/^@@.*/d;/\$.*/d;/#/d;s/\./\\./g;s/\?/\\?/g;s/\*/.*/g;s/(/\\(/g;s/)/\\)/g;s/\[/\\[/g;s/\]/\\]/g;s/\^/[\/\&:\?=_]/g;s/^||/\./g;s/^|/^/g;s/|$/\$/g;/|/d' ${file} >> ${actionfile}
+ debug "... creating filterfile for ${list} ..." 1
+ echo "FILTER: ${list} Tag filter of ${list}" > ${filterfile}
+ # set filter for html elements
+ sed '/^#/!d;s/^##//g;s/^#\(.*\)\[.*\]\[.*\]*/s|<([a-zA-Z0-9]+)\\s+.*id=.?\1.*>.*<\/\\1>||g/g;s/^#\(.*\)/s|<([a-zA-Z0-9]+)\\s+.*id=.?\1.*>.*<\/\\1>||g/g;s/^\.\(.*\)/s|<([a-zA-Z0-9]+)\\s+.*class=.?\1.*>.*<\/\\1>||g/g;s/^a\[\(.*\)\]/s|<a.*\1.*>.*<\/a>||g/g;s/^\([a-zA-Z0-9]*\)\.\(.*\)\[.*\]\[.*\]*/s|<\1.*class=.?\2.*>.*<\/\1>||g/g;s/^\([a-zA-Z0-9]*\)#\(.*\):.*[:[^:]]*[^:]*/s|<\1.*id=.?\2.*>.*<\/\1>||g/g;s/^\([a-zA-Z0-9]*\)#\(.*\)/s|<\1.*id=.?\2.*>.*<\/\1>||g/g;s/^\[\([a-zA-Z]*\).=\(.*\)\]/s|\1^=\2>||g/g;s/\^/[\/\&:\?=_]/g;s/\.\([a-zA-Z0-9]\)/\\.\1/g' ${file} >> ${filterfile}
+ debug "... filterfile created - adding filterfile to actionfile ..." 1
+ echo "{ +filter{${list}} }" >> ${actionfile}
+ echo "*" >> ${actionfile}
+ debug "... filterfile added ..." 1
+ debug "... creating and adding whitlist for urls ..." 1
+ # whitelist of urls
+ echo "{ -block }" >> ${actionfile}
+ sed '/^@@.*/!d;s/^@@//g;/\$.*/d;/#/d;s/\./\\./g;s/\?/\\?/g;s/\*/.*/g;s/(/\\(/g;s/)/\\)/g;s/\[/\\[/g;s/\]/\\]/g;s/\^/[\/\&:\?=_]/g;s/^||/\./g;s/^|/^/g;s/|$/\$/g;/|/d' ${file} >> ${actionfile}
+ debug "... created and added whitelist - creating and adding image handler ..." 1
+ # whitelist of image urls
+ echo "{ -block +handle-as-image }" >> ${actionfile}
+ sed '/^@@.*/!d;s/^@@//g;/\$.*image.*/!d;s/\$.*image.*//g;/#/d;s/\./\\./g;s/\?/\\?/g;s/\*/.*/g;s/(/\\(/g;s/)/\\)/g;s/\[/\\[/g;s/\]/\\]/g;s/\^/[\/\&:\?=_]/g;s/^||/\./g;s/^|/^/g;s/|$/\$/g;/|/d' ${file} >> ${actionfile}
+ debug "... created and added image handler ..." 1
+ debug "... created actionfile for ${list}." 1
+
+ # install Privoxy actionsfile
+ cp ${cpoptions} ${actionfile} ${CONFDIR}
+ if [ "$(grep $(basename ${actionfile}) ${CONFDIR}/config)" == "" ]
+ then
+ debug "\nModifying ${CONFDIR}/config ..." 0
+ sed "s/^actionsfile user\.action/actionsfile $(basename ${actionfile})\nactionsfile user.action/" ${CONFDIR}/config > ${TMPDIR}/config
+ debug "... modification done.\n" 0
+ debug "Installing new config ..." 0
+ cp ${cpoptions} ${TMPDIR}/config ${CONFDIR}
+ debug "... installation done\n" 0
+ fi
+ # install Privoxy filterfile
+ cp ${cpoptions} ${filterfile} ${CONFDIR}
+ if [ "$(grep $(basename ${filterfile}) ${CONFDIR}/config)" == "" ]
+ then
+ debug "\nModifying ${CONFDIR}/config ..." 0
+ sed "s/^\(#*\)filterfile user\.filter/filterfile $(basename ${filterfile})\n\1filterfile user.filter/" ${CONFDIR}/config > ${TMPDIR}/config
+ debug "... modification done.\n" 0
+ debug "Installing new config ..." 0
+ cp ${cpoptions} ${TMPDIR}/config ${CONFDIR}
+ debug "... installation done\n" 0
+ fi
+
+ debug "... ${url} installed successfully.\n" 0
+ done
+}
+
+# create temporary directory and lock file
+mkdir -p ${TMPDIR}
+touch ${TMPDIR}/${TMPNAME}.lock
+
+# set command to be run on exit
+[ ${DBG} -le 2 ] && trap "rm -fr ${TMPDIR};exit" INT TERM EXIT
+
+# loop for options
+while getopts ":hrqv:" opt
+do
+ case "${opt}" in
+ "h")
+ usage
+ exit 0
+ ;;
+ "v")
+ DBG="${OPTARG}"
+ ;;
+ "q")
+ DBG=-1
+ ;;
+ "r")
+ echo "Do you really want to remove all build lists?(y/N)"
+ read choice
+ [ "${choice}" != "y" ] && exit 0
+ rm -rf ${CONFDIR}/*.script.{action,filter} && \
+ sed '/^actionsfile .*\.script\.action$/d;/^filterfile .*\.script\.filter$/d' -i ${CONFDIR}/config && \
+ echo "Lists removed." && exit 0
+ echo -e "An error occured while removing the lists.\nPlease have a look into ${CONFDIR} whether there are .script.* files and search for *.script.* in ${CONFDIR}/config."
+ exit 1
+ ;;
+ ":")
+ echo "${TMPNAME}: -${OPTARG} requires an argument" >&2
+ exit 1
+ ;;
+ esac
+done
+
+debug "URL-List: ${URLS}\nPrivoxy-Configdir: ${CONFDIR}\nTemporary directory: ${TMPDIR}" 2
+main
+
+# restore default exit command
+trap - INT TERM EXIT
+# keep ${TMPDIR} at verbosity 3 for post-mortem inspection
+[ ${DBG} -lt 2 ] && rm -r ${TMPDIR}
+[ ${DBG} -eq 2 ] && rm -vr ${TMPDIR}
+exit 0
diff --git a/archive/bin/rdate.py b/archive/bin/rdate.py
new file mode 100755
index 00000000..8f6ca902
--- /dev/null
+++ b/archive/bin/rdate.py
@@ -0,0 +1,141 @@
+#!/usr/pkg/bin/python2.7
+"""
+Remembers or restores the original posting date of entry.
+
+Filename may be given in commandline argument, if not so,
+program asks for it.
+
+If dealing with file first time, program reads the modified-time
+and stores it as a tag:
+
+#published %Y-%m-%d %H:%M:%S
+
+e.g.
+
+#published 2007-05-15 15:30:28
+
+right after entry title line. The current modified-time of file is
+preserved.
+
+If run on the same file again and if the modified-time has been
+changed since the time of tag storage, script restores the saved
+time (sets it as the current modified-time).
+
+That's all.
+
+Program is rather pyblosxom utility than plugin, so there is no
+reason to move it to plugins dir.
+
+There are many ways how to use the script. I personally use CVS as
+a primary storage for blog entries. But there is problem - CVS does
+not honor mtime. So, I run the rdate.py once on files before adding
+them to CVS. Then i update blog from the repository, but, we know,
+cvs does it without correct mtimes. So, after blog update I run the
+rdate.py on files once more to restore saved time from #published
+tags. Simple bash scripts does this automatically for me, so I care
+for blog entry times no more.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify,
+merge, publish, distribute, sublicense, and/or sell copies of the
+Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Copyright 2007 David Zejda
+"""
+__author__ = "David Zejda blogger at zejda dot net"
+__version__ = "rememberdate.py,v 0.1 2007/05/15 12:00:00 zejdad"
+__url__ = "http://www.zejda.net/"
+__description__ = "Remembers or restores the original posting date of entry for PyBlosxom."
+
+import os, datetime, time, sys
+
+# filename from argv[1], else prompt interactively (Python 2 raw_input)
+if len(sys.argv) > 1:
+    filename = sys.argv[1]
+else:
+    filename = raw_input("filename? ")
+
+if not filename:
+    print "No filename given. Quitting. :-|"
+    sys.exit(1)
+
+try:
+    filestats = os.stat(filename)
+except:
+    print "File not found. Quitting. :-( "
+    sys.exit(1)
+
+print "Targetted file:", filename
+filestats = os.stat(filename)
+# stat tuple slots 7 and 8 are st_atime and st_mtime
+atime, mtime = filestats[7:9]
+
+fmt = "#published %Y-%m-%d %H:%M:%S"
+
+fmtime = datetime.datetime.fromtimestamp(mtime)
+fmtime = fmtime.strftime(fmt)
+print " current mtime:", fmtime
+
+# NOTE(review): if open() itself fails, `f` is never bound and the
+# finally clause raises NameError instead of the intended message.
+try:
+    try:
+        f = open(filename, "r")
+        lines = f.readlines();
+    except Exception, e:
+        print "Failed to open file for reading.", e
+        print "Quitting :-("
+        sys.exit(1)
+finally:
+    f.close()
+
+# restore
+# NOTE(review): lines[1] is read before the len(lines) > 2 guard, so a
+# file with fewer than two lines raises IndexError here.
+remembered = lines[1].rstrip()
+if (len(lines) > 2) and ( remembered.find("#published") > -1):
+    print " saved mtime:", remembered
+    if remembered == fmtime:
+        print "Times are equal, quitting :-)"
+    else:
+        print "Trying to restore remembered mtime."
+        ttuple = time.strptime(remembered, fmt)
+        print " parsed tuple:", ttuple
+        epoch = time.mktime(ttuple)
+        print " epoch seconds:", epoch
+
+        # restore the remembered mtime, keeping the current atime
+        os.utime(filename, (atime, epoch))
+    print "Succesfully restored :-)"
+    sys.exit(0)
+
+# remember
+# Rewrite the file with the #published tag inserted as line 2.
+try:
+    try:
+        f = open(filename, "w")
+        i = 0
+        for line in lines:
+            i = i + 1
+            if i == 2:
+                f.write(fmtime);
+                f.write("\n");
+            f.write(line);
+    except Exception, e:
+        print "Failed to open file for writing.", e
+        print "Quitting :-("
+        sys.exit(1)
+finally:
+    f.close()
+
+# put back the original timestamps clobbered by the rewrite
+os.utime(filename, (atime, mtime))
+
+print "Successfully remembered :-)"
+
diff --git a/archive/bin/rdate.py-dir b/archive/bin/rdate.py-dir
new file mode 100755
index 00000000..6862c3f3
--- /dev/null
+++ b/archive/bin/rdate.py-dir
@@ -0,0 +1,13 @@
+#!/usr/pkg/bin/bash
+
+# Run ~/bin/rdate.py over every *txt file under each directory given on
+# the command line.
+# NOTE(review): $* and $arg are unquoted, so paths containing spaces
+# will be word-split; "$@" and "$arg" would be safer.
+
+shopt -s nullglob
+
+for arg in $*
+do
+ if [ ! -d "$arg" ]
+ then
+ echo "only specify dirs on the cmd line please"
+ exit 1
+ fi
+ find $arg -type f -name "*txt" -exec $HOME/bin/rdate.py '{}' \;
+done
diff --git a/archive/bin/reading.py b/archive/bin/reading.py
new file mode 100755
index 00000000..c4fc6361
--- /dev/null
+++ b/archive/bin/reading.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+
+"""reading.py -- adds stuff to my reading list"""
+
+# Python 2 script: fetch a URL, extract the readable article text,
+# save it as markdown, append a TODO entry to reading.org, convert to
+# mobi and send it to a Kindle, then pop up a desktop notification.
+
+import subprocess
+import sys
+import os
+from readability.readability import Document
+import html2text
+import urllib2
+import time
+import socket
+import tempfile
+import shutil
+
+# where markdown copies of articles are kept / org-mode reading list
+READINGDIR = "/home/swhitton/local/reading"
+READINGORG = "/home/swhitton/doc/org/reading.org"
+
+def main():
+    """Run the script"""
+    # URL to capture is the sole command-line argument; spoof a browser
+    # UA since some sites block the default urllib2 one
+    url = sys.argv[1]
+    req = urllib2.Request(url,
+                          headers={'User-Agent':
+                                   'Mozilla/5.0 (X11; Linux i686; rv:32.0)'
+                                   + 'Gecko/20100101 Firefox/32.0'
+                                   + 'Iceweasel/32.0'})
+    page = urllib2.urlopen(req)
+    unreadable_html = page.read()
+
+    # encoding stuff from
+    # http://cdn3.brettterpstra.com/downloads/Read2Text1.zip
+    try:
+        from feedparser import _getCharacterEncoding as enc
+    except ImportError:
+        enc = lambda x, y: ('utf-8', 1)
+
+    # readability strips boilerplate; non-ASCII is dropped here
+    readable_html = Document(unreadable_html).summary().encode('ascii',
+                                                               'ignore')
+    readable_title = Document(unreadable_html).short_title().encode('ascii',
+                                                                    'ignore')
+
+    encoding = enc(page.headers, readable_html)[0]
+    if encoding == 'us-ascii': encoding = 'utf-8'
+    data = readable_html.decode(encoding)
+    data_title = readable_title.decode(encoding)
+
+    h2t = html2text.HTML2Text()
+    h2t.ignore_links = True
+    markdown = h2t.handle(data)
+
+    # sanitised title + epoch seconds makes a unique filename
+    filename = (READINGDIR
+                + "/"
+                + "".join(x if x.isalnum() else "_" for x in readable_title)
+                + str(int(time.time()))
+                + ".md")
+
+    try:
+        os.mkdir(READINGDIR)
+    except OSError:
+        pass
+
+    with open(filename, 'w') as markdown_file:
+        markdown_file.write("## "
+                            + data_title.encode('utf8')
+                            + "\n\n"
+                            + markdown.encode('utf8'))
+
+    # org-mode TODO entry linking back to the URL and the local copy
+    org = """
+* TODO [[{url}][{title}]]
+:PROPERTIES:
+:markdown: [[file:{mdfile}]]
+:machine: {hostname}
+:END:""".format(url=url,
+                title=readable_title,
+                mdfile=filename,
+                hostname=socket.gethostname())
+
+    with open(READINGORG, 'a') as org_file:
+        org_file.write(org)
+
+    # makemobi/sendtokindle are personal scripts; work in a throwaway dir
+    workdir = tempfile.mkdtemp()
+    os.chdir(workdir)
+    subprocess.call(["pandoc", filename, "-o", "article.epub"])
+    subprocess.call(["makemobi", "article.epub", readable_title, ""])
+    subprocess.call(["sendtokindle", "article.mobi"])
+    shutil.rmtree(workdir)
+
+    # notify the desktop session; needs its D-Bus address (stashed in
+    # ~/.tmp-dbus-addr) since this may run outside the session
+    dbf = open('/home/swhitton/.tmp-dbus-addr', 'r')
+    dbv = dbf.readline()
+    dbf.close()
+    os.environ['DBUS_SESSION_BUS_ADDRESS'] = dbv
+    # NOTE(review): X display strings are normally ":0" -- "0:0" looks
+    # like a typo; confirm before relying on the notification appearing.
+    os.environ['DISPLAY'] = "0:0"
+    zenerr = open('/tmp/zenityerr', 'a')
+    subprocess.Popen(['/usr/bin/notify-send',
+                      '--hint=int:transient:1',
+                      filename], stderr=zenerr, env=os.environ)
+    zenerr.close()
+
+if __name__ == "__main__":
+    main()
diff --git a/archive/bin/readme-pull-request b/archive/bin/readme-pull-request
new file mode 100755
index 00000000..2b961996
--- /dev/null
+++ b/archive/bin/readme-pull-request
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# pull requests to update READMEs
+
+# Usage: readme-pull-request user/repo
+# Forks the repo with clean-github-pr.py, clones my fork, opens the
+# README in $EDITOR on an "apt-get" branch, commits a canned message
+# about Debian/Ubuntu packaging and force-pushes the branch.
+
+set -e
+
+# strip the "user/" prefix to get the bare repository name
+repo=$(echo $1 | cut -d'/' -f2)
+cd $HOME/tmp
+clean-github-pr.py $1
+git clone github:spwhitton/$repo
+cd $repo
+git checkout -b apt-get origin/master
+$EDITOR README*
+git commit -am "Debian and Ubuntu installation instructions
+
+This ELPA package is now available from the Debian and Ubuntu package repositories."
+git push origin +apt-get
+cd ..
+rm -rf $repo
diff --git a/archive/bin/sariulahk.ahk b/archive/bin/sariulahk.ahk
new file mode 100644
index 00000000..b4ea9618
--- /dev/null
+++ b/archive/bin/sariulahk.ahk
@@ -0,0 +1,130 @@
+; AutoHotkey bindings for the Windows machine at Sariul: F-keys raise
+; or launch common apps (cooperating with VirtuaWin virtual desktops),
+; plus Caps/Ctrl swapping and GB-layout conveniences for Emacs use.
+SetWorkingDir, C:\Users\swhitton
+
+; original source: http://lifehacker.com/5468862/create-a-shortcut-key-for-restoring-a-specific-window
+; but I've added TheExe parameter
+; Raise the window matching TheWindowTitle, or launch TheExe if absent.
+ToggleWinMinimize(TheWindowTitle, TheExe)
+{
+ ; If VirtuaWin is running, switch to top-left desktop before raising
+ ; or starting. Assumes VirtuaWin's desktop wrapping is turned off!
+ Process, Exist, VirtuaWin.exe
+ VirtuaWinPID = %ErrorLevel%
+ if VirtuaWinPID != 0
+ {
+ Send ^!{Up}
+ Send ^!{Left}
+ Sleep, 100
+ }
+
+ ; main function body
+ SetTitleMatchMode,2
+ DetectHiddenWindows, Off
+ ; IfWinActive, %TheWindowTitle%
+ ; {
+ ; WinMinimize, %TheWindowTitle%
+ ; }
+ ; Else
+ ; {
+ IfWinExist, %TheWindowTitle%
+ {
+ WinGet, winid, ID, %TheWindowTitle%
+ DllCall("SwitchToThisWindow", "UInt", winid, "UInt", 1)
+ }
+ Else
+ {
+ Run, %TheExe%
+ }
+ ; }
+ Return
+}
+
+; same but by class not title
+ToggleWinMinimizeByClass(TheWindowClass, TheExe)
+{
+ ; If VirtuaWin is running, switch to top-left desktop before raising
+ ; or starting. Assumes VirtuaWin's desktop wrapping is turned off!
+ Process, Exist, VirtuaWin.exe
+ VirtuaWinPID = %ErrorLevel%
+ if VirtuaWinPID != 0
+ {
+ Send ^!{Up}
+ Send ^!{Left}
+ Sleep, 100
+ }
+
+ ; main function body
+ SetTitleMatchMode,2
+ DetectHiddenWindows, Off
+ ; IfWinActive, %TheWindowTitle%
+ ; {
+ ; WinMinimize, %TheWindowTitle%
+ ; }
+ ; Else
+ ; {
+ IfWinExist, ahk_class %TheWindowClass%
+ {
+ WinGet, winid, ID, ahk_class %TheWindowClass%
+ DllCall("SwitchToThisWindow", "UInt", winid, "UInt", 1)
+ }
+ Else
+ {
+ Run, %TheExe%
+ }
+ ; }
+ Return
+}
+
+; Close the active window (Alt-F4), auto-confirming textbook-CD quit
+; dialogues.
+KillCurrent()
+{
+ Send !{F4}
+
+ ; accept the 'are you sure you want to quit' dialogue of textbook
+ ; CDs
+ IfWinActive, ahk_class TPlayerForm
+ {
+ Sleep, 100
+ Send !y
+ }
+}
+
+; Click through the Ice Messenger tray icon UI by locating it on
+; screen with an image search, then drive its menus by coordinates.
+IceMessenger()
+{
+ CoordMode Pixel
+ ; image file referenced in the below line should be a screenshot of
+ ; some central pixels from the Ice Messenger tray icon
+ ImageSearch, FoundX, FoundY, 0, 0, A_ScreenWidth, A_ScreenHeight, c:\Users\swhitton\Pictures\AHK\imtrayicon.png
+ CoordMode, Mouse, Screen
+ MouseMove, %FoundX%, %FoundY%
+ Click
+ WinWaitActive, Ice Messenger, , 2
+ CoordMode, Mouse, Relative
+ MouseMove, 130, 120
+ Click
+ MouseMove, 130, 370
+ Click right
+ Send, {Down}
+}
+
+; F9-F12: raise-or-launch Emacs, Git Bash, close window, Firefox
+F9::ToggleWinMinimizeByClass("Emacs", "c:\emacs\bin\runemacs.exe")
+; open a new Emacs client window
+#F9::Run, emacsclient -n -c
+F10::ToggleWinMinimize("MINGW32", "c:\Users\swhitton\Old shortcuts\Git Bash")
+F11::KillCurrent()
+F12::ToggleWinMinimize("Mozilla Firefox", "Firefox")
+
+; Shift+F9-F12: teaching apps (textbook CDs, remote shell, messenger)
++F9::ToggleWinMinimize("3~4", "C:\e-Book\start\TitleList.exe")
++F10::ToggleWinMinimize(" CD", "c:\Users\swhitton\Old shortcuts\grades5and6")
++F11::ToggleWinMinimize("spw@ma", "c:\Users\swhitton\Software\putty.exe -load ma")
++F12::IceMessenger()
+
+; empty the Recycle Bin
++#r::FileRecycleEmpty, C:\
+
+; for Emacs
+
+Capslock::Ctrl
+LCtrl::Capslock
+
+; some British keyboard layout conventions
+
+@::"
+"::@
diff --git a/archive/bin/sdfweb-post-update b/archive/bin/sdfweb-post-update
new file mode 100755
index 00000000..331a3b9e
--- /dev/null
+++ b/archive/bin/sdfweb-post-update
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+# git post-update hook for SDF: refresh the dumb-HTTP metadata and
+# mirror the bare repo into ~/html/git so it is web-cloneable.
+# NOTE(review): $SOURCE/$DEST are unquoted throughout -- fine for
+# space-free SDF paths, but fragile in general.
+
+SOURCE=`pwd`
+REPO=`basename $SOURCE`
+DEST=$HOME/html/git/$REPO
+
+# regenerate info/refs etc. so dumb HTTP clients can clone
+git update-server-info
+
+rm -rf $DEST
+cp -rf $SOURCE $DEST
+
+#chgrp -R www $DEST
+#for d in `find $DEST -type d`; do
+# chmod a+rx $d
+#done
+#for f in `find $DEST -type f`; do
+# chmod a+r $f
+#done
+# SDF-provided helper replaces the manual permission loop above
+/usr/local/bin/setwebperms
+
+echo "Updated http access"
diff --git a/archive/bin/searchmail b/archive/bin/searchmail
new file mode 100755
index 00000000..33d8bc4f
--- /dev/null
+++ b/archive/bin/searchmail
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+# searchmail --- Wrapper around mairix to open results in mutt
+#
+# Copyright (C) 2017 Sean Whitton
+#
+# searchmail is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# searchmail is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with searchmail. If not, see <http://www.gnu.org/licenses/>.
+
+us="$(basename $0)"
+USAGE="$us: usage: $us [-n] search-patterns"
+mairix_opts=""
+mutt_opts=""
+
+# check args
+if [ "$#" = "0" ]; then
+ echo >&2 "$USAGE"
+ exit 1
+fi
+
+# process the one arg this script doesn't pass on to mairix
+if [ "$1" = "-n" ]; then
+ shift
+
+ # we assume the user has "sort = threads" in their ~/.muttrc, and
+ # override that when not including whole threads
+ mutt_opts="$mutt_opts -e \"set sort=date\""
+else
+ mairix_opts="$mairix_opts --threads"
+fi
+
+# set up temporary maildir
+mfolder=$(mktemp --tmpdir -d ${us}XXXX)
+mkdir $mfolder/cur $mfolder/new $mfolder/tmp
+chmod 700 $mfolder
+
+# pass the mfolder to mairix and mutt
+mairix_opts="$mairix_opts --mfolder $mfolder"
+mutt_opts="$mutt_opts -Rf $mfolder"
+
+# run mairix and check if there were any search results. We use the
+# special "$@" to ensure that each argument is individually quoted in
+# case they contain spaces, but note that mairix search terms should
+# not contain spaces. Instead of `f:"Sean Whitton"` you should pass
+# `f:Sean f:Whitton`
+output=$(mairix $mairix_opts "$@")
+if ! [ "$output" = "Matched 0 messages" ]; then
+ # set terminal title to search query
+ xtitle "mairix results: $@"
+ # there were some results; open them in mutt
+ eval "mutt $mutt_opts"
+fi
+
+# clean up
+rm -rf $mfolder
diff --git a/archive/bin/sendmyip b/archive/bin/sendmyip
new file mode 100755
index 00000000..0f9c5c3a
--- /dev/null
+++ b/archive/bin/sendmyip
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+myip=$(dig +short myip.opendns.com @resolver1.opendns.com)
+
+heirloom-mailx -S smtp=athena.silentflame.com \
+ -S from=sean@silentflame.com \
+ -S to=sean@silentflame.com \
+ -s "hestia's current IP is $myip" ""
+
diff --git a/archive/bin/smtptun b/archive/bin/smtptun
new file mode 100755
index 00000000..4951dcef
--- /dev/null
+++ b/archive/bin/smtptun
@@ -0,0 +1,76 @@
+#!/bin/sh
+
+# fix $HOME when run from xinetd (which does run it as root...)
+if [ "$HOME" = "/" -o "$HOME" = "" ]; then
+ HOME="/root"
+ export HOME
+fi
+
+# set up standard environment
+. $HOME/.shenv
+
+# Make a tunnel with SSH to send e-mail via the SDF mail exchanger,
+# and then netcat to that tunnel. Designed to be run by xinetd.
+
+# An alternative approach is to just run `ssh foo@bar nc ..' but doing
+# it with a port-forward means that our SSH can be restricted to the
+# command rrsync and a particular port-forward, rather than netcat so
+# we can use one SSH key for both purposes.
+
+# Interesting notes for doing this on Windows:
+# <http://www.greenend.org.uk/rjk/sshfwd/>
+
+SOCKET="$HOME/tmp/smtptun_sock"
+ALTSOCKET="/tmp/ssh-swhitton-spw@ma.sdf.org:22"
+REMHOST="spw@ma.sdf.org"
+FORWARD="-L localhost:8025:mx.sdf.org:25"
+ID="-i $HOME/.ssh/id_rsa"
+
+# First see if I'm SSHing to the MetaArray already. Requires running
+# as root in order to be able to command the socket to add the port
+# forward.
+
+# if ssh -O check -S $ALTSOCKET $REMHOST 2>/dev/null; then
+# # Check the port isn't already bound ...
+# if ! ssh -O check -S $SOCKET $REMHOST 2>/dev/null; then
+# ssh -O forward $FORWARD -S $ALTSOCKET $REMHOST
+# fi
+# else
+
+ # Now see if another invocation of this script has already constructed a tunnel.
+ if ! ssh -O check -S $SOCKET $REMHOST 2>/dev/null; then
+
+ # Okay, we'd better set-up a tunnel. Make it only accessible from
+ # localhost and have it time out after two minutes of no e-mails
+ # getting sent down it.
+
+ # Due to an OpenSSH bug
+ # <https://bugzilla.mindrot.org/show_bug.cgi?id=1988>, we
+ # persist the socket for only five seconds. The following SSH
+ # process hangs on to STDERR, which means this script doesn't
+ # exit once the netcat command is finished.
+
+ ssh -M -S $SOCKET -o "ControlPersist=5s" \
+ -f -N $FORWARD $REMHOST $ID
+
+ fi
+# fi
+
+# Now connect to the tunnel we just made.
+
+nc localhost 8025
+
+# Remove forwarding from swhitton's connection if we added it, and no
+# other script invocations are using it (to prevent a situation where
+# the other script has started and passed the if-statement to not
+# construct the tunnel, but hasn't started netcat yet (if it's already
+# started netcat, then it's safe from the -O cancel we do here)). A
+# crude check.
+
+# if ssh -O check -S $ALTSOCKET $REMHOST 2>/dev/null; then
+# us=$(basename $0)
+# count=$(pgrep $us | wc -l)
+# if ! [ "$count" -gt 3 ]; then
+# ssh -O cancel $FORWARD -S $ALTSOCKET $REMHOST
+# fi
+# fi
diff --git a/archive/bin/spwd20 b/archive/bin/spwd20
new file mode 100755
index 00000000..8f9784b7
--- /dev/null
+++ b/archive/bin/spwd20
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+
+# this makes few enough assumptions to work for all d20 games except
+# possibly 5e -- one of the core rulebooks says somewhere that
+# monsters of the same type roll for initiative jointly and all take
+# their turn together
+
+import re
+import readline
+import subprocess
+import csv
+from os.path import expanduser
+from operator import itemgetter
+import random
+
+def main():
+ readline.parse_and_bind('tab: complete') # Emacs input editing
+ while True:
+ # input loop: get input
+ try:
+ cmd = raw_input(">>> ")
+ except (EOFError, KeyboardInterrupt):
+ print ""
+ break
+ # and act on it
+ if re.match(r'([0-9]*[d]{0,1}[0-9]+[+-])*[0-9]*[d]{0,1}[0-9]+', cmd):
+ print "\n " + doRoll(cmd) + "\n"
+ elif cmd == "init":
+ doInit()
+ elif cmd == "hit" or cmd == "h":
+ print randDescrip(1)
+ elif cmd == "miss" or cmd == "m":
+ print randDescrip(0)
+ else:
+ print "invalid input"
+
+def doRoll(str):
+ # currently calling my Haskell code to do the dice rolls. Ideally
+ # that Haskell will do everything this Python wrapper does
+ return subprocess.check_output(['spwd20-roll', str]).rstrip()
+
+def doInit():
+ inits = []
+
+ # first do the monster inits (while players are rolling)
+ i = 0
+ while True:
+ try:
+ name = raw_input("Monster (group) name: ")
+ modifier = raw_input("Monster (group) init modifier: ")
+ hp = raw_input("Monster (group) HD or HP (split into 1 + (n - 1)): ")
+ except (EOFError, KeyboardInterrupt):
+ print ""
+ break
+ if name == "" or modifier == "":
+ break
+
+ if hp == "":
+ hp = "0"
+ else:
+ hp = doRoll(hp)
+ hp = int(hp.partition(' ')[0])
+ if int(modifier) < 0:
+ roll = doRoll("1d20-" + modifier[1:])
+ else:
+ roll = doRoll("1d20+" + modifier)
+ roll = int(roll.partition(' ')[0])
+ inits.append({'name': name,
+ 'modifier': int(modifier),
+ 'roll': roll,
+ 'hp': int(hp)})
+
+ # now do the party inits
+ with open(expanduser("~") + '/.spwd20-party', 'rb') as csvfile:
+ partyreader = csv.reader(csvfile,
+ delimiter = ',')
+ # fieldnames = ["name", "init"])
+ for member in partyreader:
+ try:
+ roll = raw_input(member[0] + "'s (modified) initiative roll? ")
+ except (EOFError, KeyboardInterrupt):
+ print ""
+ break
+ inits.append({'name': member[0],
+ 'modifier': int(member[1]),
+ 'roll': int(roll),
+ 'hp': 0})
+
+ # sort them
+ inits.sort(key = itemgetter('roll', 'modifier'))
+ inits.reverse()
+
+ # output in an Org table
+ table = ["COMBAT ROUND TALLY: |", "",
+ "|Creature|Mod|Init|HP|Damage|Status|",
+ "|-"]
+
+ for creature in inits:
+ if creature['modifier'] > 0:
+ printmod = "+" + str(creature['modifier'])
+ else:
+ printmod = str(creature['modifier'])
+ if creature['hp'] == 0:
+ printhp = "-"
+ else:
+ printhp = str(creature['hp'])
+ table.append("|" + creature['name']
+ + "|" + printmod
+ + "|" + str(creature['roll'])
+ + "|" + printhp
+ + "| | |")
+
+ print "\n"
+ tabled = "\n".join(table)
+ print tabled
+
+ # copy to X selection
+ xsel = subprocess.Popen(["xsel", "-i"], stdin=subprocess.PIPE)
+ xsel.communicate(input = tabled)
+
+ print "\n copied to X selection; Emacs: S-insert/C-y C-c C-c"
+
+def randDescrip(index):
+ descriptions = []
+ descriptions.append(["Your opponent side-steps out of the way",
+ "You strike your opponent buts its armour/hide is not pierced",
+ "Your opponent parries your strike",
+ "Abruptly your opponent slides away from where you expect",
+ "Your opponent dodges left, but your attack doesn't catch up to it",
+ "Your weapon flies wildly missing your opponent",
+ "From the corner of your eye, you are distracted by movement and your attack sails wide",
+ "Your attack connects, but flances off your opponent's armour/hide",
+ "You misjudge your opponent's movement and your attack misses",
+ "Your opponent stops your attack with his shield/bracer",
+ "Your weapon seems unbalanced and doesn't move as you expect",
+ "Your opponent raises his weapon and successfully parries",
+ "Your opponent howls, and you flinch throwing your attack off",
+ "You nick your opponent, but the strike is so minor that no damage is done",
+ "A light catches your eye and throws off your attack",
+ "Your opponent changes his stance and you miss",
+                         "Your weapon only catches some of your adversary's clothes",
+ "Your attack hits only armour/hide",
+ "Timing your strike, your opponent shifts out of the way",
+ "Your opponent shifts his most heavily armoured/protected part to your weapon's path",
+ "The armour/hide of your opponent absorbs the attack",
+ "You change your grip to adjust to your opponent's move, but it it too little and too late",
+ "Your opponent's armour/hide is too strong for your strike",
+ "Your attack hits the most protected part of your opponent, doing no damage",
+ "A call from a party member distracts you and your attack does not connnect"])
+ descriptions.append(["You pierce the creature's armour/skin/scales",
+ "Your attack rings true",
+ "The creature winces in pain as your attack strikes",
+ "You successfully smash it",
+ "Your attack lands",
+ "You strike it",
+ "It cannot avoid your jab",
+ "Your weapon meets flesh",
+ "You find a soft spot in your foe's armour/skin/chitin",
+ "Your attack slip past your foe's parry and draws blood",
+ "Your strike tears into your opponent's flesh",
+ "You split the creature's hide",
+ "Your weapon bounces off the creature's parry and lands a blow",
+ "You guess the creature's feint and strike flesh",
+ "Your weapon sinks into the creature's flesh",
+ "Your weapon sneaks through the creature's armour/skin/scales",
+ "You strike glances off your opponent's shield but slides into the creature's flesh",
+ "Your weapon lands a heavy blow",
+ "Blood flies as your weapon strikes",
+ "Your opponent dodges--just as you expected and your weapon draws blood",
+ "The creature howls as your weapon leaves its mark",
+ "Your opponent begins to wheeze as your attack hits",
+ "Flesh falls as your attack lands",
+ "The creature stumbles from your latest successful hit",
+ "Your weapon finds flesh and bone"])
+
+ rand = random.randint(0, len(descriptions[index]) - 1)
+ return descriptions[index][rand]
+
+if __name__ == "__main__":
+ main()
diff --git a/archive/bin/spwd20-roll b/archive/bin/spwd20-roll
new file mode 100755
index 00000000..0c4506f1
--- /dev/null
+++ b/archive/bin/spwd20-roll
@@ -0,0 +1,113 @@
+#!/usr/bin/env runhaskell
+
+import Text.Regex.TDFA
+import Data.List.Split
+import qualified System.Random as Random
+-- import Control.Monad(when)
+import System.Environment
+
+-- *** functions for dice rolling ***
+
+parseRolls :: String -> [(Int, Bool)]
+parseRolls x
+ -- handle special case where we just have a number
+ | x =~ "^[0-9]+$"
+ = (read x :: Int, True):[]
+ | otherwise
+ = parseRollTerms $
+ if head x == '-' then l else "+":l
+ where l = split (oneOf "+-") . concat . words $ x
+
+parseRollTerms :: [String] -> [(Int, Bool)]
+parseRollTerms [] = []
+parseRollTerms [x] = [(read x, False)]
+
+parseRollTerms (x:y:xs)
+
+ -- failed attempt to handle special case
+ -- | y =~ "^[0-9]+$"
+ -- = (getSign x * read y :: Int, True):parseRollTerms xs
+
+ | y =~ "^[0-9]*d[0-9]+$"
+ = (take (read' a) $ repeat (getSign x * read b :: Int, True)) ++ parseRollTerms xs
+
+ | otherwise
+ = (getSign x * read y, False):parseRollTerms xs
+
+ where getSign "-" = -1
+ getSign _ = 1
+ [a, b] = splitOn "d" y
+ read' "" = 1
+ read' k = read k :: Int
+
+rolls :: (Random.RandomGen g) => g -> [(Int, Bool)] -> [Int]
+rolls _ [] = []
+rolls g (x:xs) = if b
+ then let (r,g') = Random.randomR (range a) g in
+ r:(rolls g' xs)
+ else a:(rolls g xs)
+ where a = fst x
+ b = snd x
+ range n = if n > 0 then (1,n) else (n,-1)
+
+doRoll :: (Random.RandomGen g) => g -> String -> String
+doRoll g s = (show $ sum r) ++ " = " ++ sumToStr r
+ where r = rolls g $ parseRolls $ s
+
+sumToStr :: (Show a, Num a, Ord a) => [a] -> String
+sumToStr [] = ""
+sumToStr xs = drop 3 $
+ foldr (\x a -> " "
+ ++ (if x > 0 then "+" else "-")
+ ++ " "
+ ++ (show . abs) x
+ ++ a) "" xs
+
+-- *** functions for initiative tracking ***
+
+-- rather skeletal
+
+-- getInit :: IO [String]
+-- getInit = sequence $
+-- [getLine]
+
+-- printInit :: [(String, Int, Int)] -> String
+-- printInit (x:xs) = "init table"
+
+-- parseInit :: [String] -> [(String, Int, Int)]
+-- parseInit (x:y:z:xs) = [("Andinicu", 2, 18)]
+
+-- *** input, output, execution ***
+
+-- disabled for non-interactive usage
+
+-- doInputLoop :: (Random.RandomGen g) => g -> IO ()
+-- doInputLoop g = do
+-- putStr ">>> "
+-- input <- getLine
+-- handleInput g input
+-- g' <- Random.newStdGen
+-- doInputLoop g'
+
+-- handleInput :: (Random.RandomGen g) => g -> String -> IO ()
+-- handleInput g input
+
+-- | input =~ "^([0-9]*[d]{0,1}[0-9]+[+-])*[0-9]*[d]{0,1}[0-9]+$"
+-- = putStrLn $ doRoll g input
+
+-- | input == "init" = do
+-- init <- getInit
+-- putStrLn $ printInit $ parseInit init
+
+-- | otherwise = putStrLn "unknown command"
+
+-- old main for interactive usage
+
+-- main = do
+-- g <- Random.getStdGen
+-- doInputLoop g
+
+main = do
+ g <- Random.getStdGen
+ args <- getArgs
+ putStrLn $ doRoll g (unwords args)
diff --git a/archive/bin/sscan b/archive/bin/sscan
new file mode 100755
index 00000000..33098d1d
--- /dev/null
+++ b/archive/bin/sscan
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+
+import time
+import tempfile
+import shutil
+import subprocess
+import re
+import datetime
+import hurry.filesize
+
+import os
+import sys
+import termios
+import fcntl
+
+global INCOMINGDIR
+INCOMINGDIR = "/home/swhitton/lib/annex/doc/incoming"
+
+global WARMED
+WARMED = True
+
+# Epson scanner in Tucson doesn't support --depth
+global SCANNER_SUPPORTS_DEPTH
+SCANNER_SUPPORTS_DEPTH = False
+
+# from: http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user
+def getch():
+ fd = sys.stdin.fileno()
+
+ oldterm = termios.tcgetattr(fd)
+ newattr = termios.tcgetattr(fd)
+ newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
+ termios.tcsetattr(fd, termios.TCSANOW, newattr)
+
+ oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
+
+ try:
+ while 1:
+ try:
+ c = sys.stdin.read(1)
+ break
+ except IOError: pass
+ finally:
+ termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
+ fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
+ return c
+
+def main():
+ desktop = 1
+ while True:
+ if not(desktop):
+ savestr = "incoming folder"
+ else:
+ savestr = "desktop"
+
+ operation = menu("Choose an operation:",
+ [("Single page B&W PDF", '1'),
+ ("Multi-page B&W PDF", '2'),
+ ("Single page colour PDF", '3'),
+ ("Multi-page colour PDF", '4'),
+ ("Single page OCR'd PDF, letter size", '5'), # greyscale 400dpi best for ocr http://web.archive.org/web/20080529012847/http://groundstate.ca/ocr
+ ("Multi-page OCR'd PDF, letter size", '6'),
+ ("Standard dpi full colour scan to PNG, autocrop", '7'),
+ ("High dpi full colour scan to PNG, cropped to printed photo size", '8'),
+ ("Single page colour PDF, letter size", '9'),
+ ("Multi-page colour PDF, letter size", '0'),
+ ("Single-page colour 300 dpi to PNG", 'a'),
+ ("Multi-page gray 150 dpi to PDF, no OCR, letter size (for handwritten notes)", 'h'),
+ ("Toggle save location (currently: " + savestr + ")", 'd'),
+ ("Quit", 'q')])
+
+ outdir = lambda desktop: subprocess.check_output(['xdg-user-dir', 'DESKTOP']).rstrip() if desktop else INCOMINGDIR
+ outdir = outdir(desktop)
+
+ if operation == 'q':
+ sys.exit()
+ elif operation == 'd':
+ if desktop:
+ desktop = 0
+ else:
+ desktop = 1
+
+ elif operation == '1':
+ scan(outdir, 150)
+ elif operation == '2':
+ scan(outdir, 150, True)
+ elif operation == '3':
+ scan(outdir, colour=True)
+ elif operation == '4':
+ scan(outdir, colour=True, multi=True)
+ elif operation == '9':
+ scan(outdir, colour=True, crop=3)
+ elif operation == '0':
+ scan(outdir, colour=True, multi=True, crop=3)
+ elif operation == 'a':
+ scan(outdir, colour=True, multi=False, crop=4)
+ elif operation == '8':
+ scan(outdir, dpi=600, multi=False, colour=True, depth=16, crop=2)
+ elif operation == '7':
+ scan(outdir, colour=True, crop=0)
+ elif operation == '5':
+ scan(outdir, gray=True, ocr=True, crop=3)
+ elif operation == '6':
+ scan(outdir, gray=True, ocr=True, crop=3, multi=True)
+ elif operation == 'h':
+ scan(outdir, gray=True, ocr=False, crop=3, multi=True, dpi=75)
+
+def scan(outdir, dpi = 300, multi = False, colour = False, gray = False, ocr = False, depth = 8, crop = 1, lineartFilter = "None"):
+ workdir = tempfile.mkdtemp()
+ print "scanning at " + str(dpi) + " dpi"
+
+ # build the command
+ scanimage = ["scanimage", "-vp", "--format=tiff"]
+ scanimage.append("--resolution=" + str(dpi))
+
+ if SCANNER_SUPPORTS_DEPTH:
+ scanimage.append("--depth=" + str(depth))
+
+ if colour:
+ scanimage.append("--mode=Color")
+ elif gray:
+ scanimage.append("--mode=Gray")
+ else:
+ scanimage = scanimage + ["--mode=Lineart",
+ # "--swdespeck=yes",
+ # "--color-filter=" + lineartFilter
+ ]
+
+ if crop == 2:
+ scanimage = scanimage + ['-x', '150', '-y', '100'] # dimensions of a standard photo
+ elif crop == 0:
+ scanimage.append("--swcrop=yes")
+ elif crop == 1:
+ scanimage = scanimage + ['-x', '210', '-y', '297'] # dimensions of A4
+ elif crop == 3:
+ scanimage = scanimage + ['-x', '215.9', '-y', '279.4'] # dimensions of American letter paper
+
+ # do the scan
+ i = 1
+ if multi:
+ while True:
+ print "\nscanning page #" + str(i)
+ doScan(scanimage, workdir, str(i))
+ i = 1 + i
+ print "hit q to quit, anything else to scan another image"
+ choice = getch()
+ if choice == 'q':
+ i = i - 1
+ break
+ else:
+ tiff = "1"
+ doScan(scanimage, workdir, tiff)
+
+ # post-processing
+
+ if crop == 1 or crop == 3: # A4/LTR PDF output
+ output = workdir + '/output.pdf'
+ pages = []
+ for j in range(i):
+ pages.append(workdir + '/' + str(j + 1))
+ # if output TIFF isn't in monochrome, try first mogrify -monochrome file.png
+ subprocess.call(['convert'] + pages + [output])
+
+ if ocr:
+ print "commencing OCR; please be patient ...",
+ subprocess.call(['ocrmypdf', '-c', '-i', '-r',
+ '--title', 'scan of ' + datetime.datetime.now().strftime("%Y-%b-%d").lower(),
+ '--author', 'spw',
+ output, workdir + '/process.pdf'])
+ shutil.move(workdir + '/process.pdf', output)
+ print " done"
+ else:
+ # set PDF metadata
+ metadata = workdir + '/metadata'
+ metadataf = open(metadata, 'w')
+ metadataf.write("InfoKey: Title\nInfoValue: scan of "
+ + datetime.datetime.now().strftime("%Y-%b-%d").lower()
+ + "\nInfoKey: Author\nInfoValue: spw\n")
+ metadataf.close()
+ subprocess.call(['pdftk', output, 'update_info', metadata, 'output', workdir + '/process.pdf'])
+ shutil.move(workdir + '/process.pdf', output)
+
+ # compress PDF by flattening it
+ # more aggressive compression is possible: http://stackoverflow.com/questions/5296667/pdftk-compression-option
+
+ # (ocrmypdf does this for us, hence wrapped in else clause)
+ subprocess.call(['qpdf', '--linearize', output, output + '~'])
+ shutil.move(output + '~', output)
+ else: # single image output
+ output = workdir + '/output.png'
+ # subprocess.call(['convert', workdir + '/1', "-crop 7090x4760+0+0", output])
+ subprocess.call(['convert', workdir + '/1', output])
+
+ # output the file
+ outfile = outdir + '/' + str(int(time.time())) + '.' + output.split('.')[-1]
+ if not os.path.exists(outdir):
+ os.mkdir(outdir, 755)
+ shutil.copyfile(output, outfile)
+ print "wrote", hurry.filesize.size(os.path.getsize(outfile)), "to", outfile
+ shutil.rmtree(workdir)
+ print "press any key to return to main menu, or q to quit"
+ choice = getch()
+ if choice == 'q':
+ sys.exit()
+
+def doScan(scanimage, filedir, filename):
+
+ # # work around genesys bug by resetting scanner, rather than physically replugging it
+ # # the following code only required when connected via a USB 3.0 port
+ # print "\nResetting scanner's USB connection ..."
+ # lsusb = subprocess.check_output(['lsusb'])
+ # match = re.search('Bus ([0-9]+) Device ([0-9]+): ID 04a9:190a Canon, Inc. CanoScan LiDE 210', lsusb)
+ # dev = '/dev/bus/usb/' + match.group(1) + '/' + match.group(2)
+ # subprocess.call(['usbreset', dev])
+
+ # # Debian Jessie's version of scanimage doesn't tend to work unless
+ # # we first call "scanimage -T" (which is oddly noisy). xsane,
+ # # though, seems to work right off the bat. Sometimes the scanner
+ # # gets caught up and xsane must be run to reset it (when it shows
+ # # a red LED and doesn't move). Possibly a hardware fault while
+ # # scanner has been in storage as sound it makes while scanning has
+ # # also changed.
+ # global WARMED
+ # if not WARMED:
+ # print "\nPlease wait, warming up the scanner for first scan ..."
+ # subprocess.call(["scanimage", "-T"], stdout = None, stderr = None)
+ # WARMED = True
+ # print "... done\n"
+
+ tiff = open(filedir + '/' + filename, 'w')
+ subprocess.call(scanimage, stdout = tiff)
+ # TODO: attach stderr to terminal more consistently
+ tiff.close()
+ time.sleep(1)
+
+def menu(title, options):
+ while True:
+ os.system('clear')
+ print "\n", # to deal with random char at start of
+ # first line after non-existent answer
+ print title
+ print "=" * len(title),"\n"
+ for o in options:
+ print o[1] + ") " + o[0]
+ print "\n" + 'Your choice? ',
+ choice = getch()
+ if choice in [o[1] for o in options]:
+ print choice, "\n"
+ return choice
+
+if __name__ == "__main__":
+ main()
diff --git a/archive/bin/ssleep b/archive/bin/ssleep
new file mode 100755
index 00000000..1a070f98
--- /dev/null
+++ b/archive/bin/ssleep
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+
+"""Sean's alarm clock
+
+For this to work, BIOS power settings will need to be set to allow
+wakeup events to occur without AC power.
+
+"""
+
+import os
+import sys
+import time
+import subprocess
+from datetime import datetime, timedelta
+
+SEAN_UP = 'tomorrow 5:30am'
+ALARM = '~/lib/annex/music/' \
+ 'Mainstream/John Mayer/Continuum/' \
+ '01 - Waiting On The World To Change.mp3'
+
+def main():
+ """Main procedure"""
+
+ # bail if AC connected
+ battery_string = subprocess.check_output(['acpi', '-a'])
+ if 'on-line' in battery_string:
+ print '''Make sure that your battery is inserted and charged,
+unplug your AC adapter and run this program again.'''
+ sys.exit(1)
+
+ # check for command line input
+ if len(sys.argv) == 2:
+ sean_up_string = 'tomorrow ' + sys.argv[1]
+ else:
+ sean_up_string = SEAN_UP
+
+ # make a datetime telling us when we're to wake up
+ sean_up = subprocess.check_output(['date', '-d', sean_up_string, '+%s'])
+ sean_up = datetime.fromtimestamp(int(sean_up))
+ computer_up = sean_up - timedelta(minutes=15)
+
+ # if midnight has passed, must subtract one day
+ now = datetime.now()
+ if now.hour < computer_up.hour:
+ computer_up = computer_up - timedelta(days=1)
+
+ # sync e-mail and docs
+ subprocess.call(['mr sync'])
+
+ # bring network down since NetworkManager sucks at bringing
+ # it back up itself
+ subprocess.call(['sudo', 'nmcli', 'nm', 'sleep', 'true'])
+ time.sleep(3)
+
+ # go to sleep
+ rtctime = str(int(time.mktime(computer_up.timetuple())))
+ subprocess.call(['sudo', 'rtcwake',
+ '-m', 'mem',
+ '-t', rtctime])
+
+ # make sure that we've woken up
+ time.sleep(5)
+
+ # bring networking back up, which takes a fair while
+ subprocess.call(['sudo', 'nmcli', 'nm', 'sleep', 'false'])
+ time.sleep(20)
+
+ # sync e-mail & docs
+ subprocess.call(['mr sync'])
+
+ # now wait until it's time to wake Sean up
+ while sean_up > datetime.today():
+ time.sleep(1)
+
+ # and play the alarm clock
+ alarm_file = os.path.expanduser(ALARM)
+ subprocess.call(['amixer', 'set', 'Master', '60%'])
+ subprocess.call(['amixer', 'set', 'Master', 'unmute'])
+ subprocess.call(['vlc', alarm_file])
+
+if __name__ == "__main__":
+ main()
diff --git a/archive/bin/sync-docs b/archive/bin/sync-docs
new file mode 100755
index 00000000..f03adb88
--- /dev/null
+++ b/archive/bin/sync-docs
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+. "$HOME/.shenv"
+. "$HOME/lib/spw.sh"
+. "$HOME/lib/tputfs.sh"
+
+# Sync ~/doc, taking account of how I access it from Emacs, both
+# locally and on athena
+
+set -e
+
+save-org-buffers
+(
+ cd $HOME/doc
+ if win32; then
+ cmd "/C doccheckin.bat" && git pull && git push
+ elif [ "$(hostname -s)" = "athena" ]; then
+ doccheckin && git pull && git push
+ else
+ status syncing on athena
+ athena_cmd doc "mr sync"
+ status syncing locally
+ doccheckin && git pull && git push
+ status syncing on athena again
+ athena_cmd doc "mr sync"
+ fi
+)
diff --git a/archive/bin/update-recoll-db b/archive/bin/update-recoll-db
new file mode 100755
index 00000000..a9e4bb4a
--- /dev/null
+++ b/archive/bin/update-recoll-db
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+. $HOME/.shenv
+
+RCLCRON_RCLINDEX= RECOLL_CONFDIR="$HOME/.recoll/" \
+ nice ionice -c 3 recollindex
diff --git a/archive/bin/urxvtma b/archive/bin/urxvtma
new file mode 100755
index 00000000..1cc61558
--- /dev/null
+++ b/archive/bin/urxvtma
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+urxvt ++iso14755 ++iso14755_52 -e /bin/sh -c 'ssh ma'
+sleep 10
+winid=`wmctrl -l | grep tmux | cut -d" " -f1`
+wmctrl -i -c $winid -b add,maximized_vert,maximized_horz
diff --git a/archive/bin/urxvttmux b/archive/bin/urxvttmux
new file mode 100755
index 00000000..720381ba
--- /dev/null
+++ b/archive/bin/urxvttmux
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+export MOSH_TITLE_NOPREFIX=true
+
+# check if username was supplied
+if [ "$1" = "${1/@/}" ]; then
+ input="$USER@$1"
+else
+ input="$1"
+fi
+
+openurxvt ()
+{
+ cmd="$@"
+ urxvt ++iso14755 ++iso14755_52 -title "$tmuxtitle" -e /bin/sh -c "$cmd" &
+}
+
+if [ "$input" = "swhitton@local" ]; then
+ tmuxtitle="$USER@$(hostname -s)"
+ launchcmd="tmux attach"
+else if [ "$input" = "spw@ma" ]; then
+ tmuxtitle="$input"
+ launchcmd="ssh -t $input /usr/pkg/bin/tmux attach"
+ else
+ tmuxtitle="$input"
+ launchcmd="ssh -t $input tmux attach"
+ fi
+fi
+
+currwin=$(xprop -id $(xprop -root _NET_ACTIVE_WINDOW | cut -d ' ' -f 5) WM_NAME | cut -d '"' -f 2)
+
+# if we're already looking at tmux, maximise it
+if [ "$currwin" = "$tmuxtitle" ]; then
+ sleep 0.2
+ wmctrl -r :ACTIVE: -b add,maximized_vert,maximized_horz
+else
+    # We're not looking at tmux. Try to raise it. If that fails, launch it.
+ if ! wmctrl -a "$tmuxtitle"; then
+ openurxvt $launchcmd
+ fi
+fi
diff --git a/archive/bin/urxvttmux-prompt b/archive/bin/urxvttmux-prompt
new file mode 100755
index 00000000..35a6a17f
--- /dev/null
+++ b/archive/bin/urxvttmux-prompt
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. $HOME/.shenv
+
+input=$(zenity --title "remote tmux" --text "[user@]host" --entry)
+
+if ! [ "$input" = "" ]; then
+ urxvttmux $input
+fi
diff --git a/archive/bin/weekly-backups b/archive/bin/weekly-backups
new file mode 100755
index 00000000..9e71b9f7
--- /dev/null
+++ b/archive/bin/weekly-backups
@@ -0,0 +1,58 @@
+#!/bin/sh
+
+# the idea is that if this script dies it can safely be re-run from the beginning
+
+. "$HOME/.shenv"
+. "$HOME/lib/try.sh"
+. "$HOME/lib/tputfs.sh"
+
+if ! mount | grep -q bkupsd && ! mount | grep -q m3; then
+ echo >&2 "$(basename $0): plug in and mount a cold backup drive"
+ exit 1
+fi
+
+cd "$HOME"
+
+zero ls "$HOME/tmp"
+zero ls /tmp/debuild
+zero ssh athena ls tmp
+mount "$HOME/lib/fm" || true
+zero ls "$HOME/lib/fm/tmp"
+
+# TODO: write a file to ~/.cache and if that file was edited today, skip these
+ssh athena df -h
+ssh athena uptime
+status Press enter to confirm that athena has enough free disc space
+status and it has not been unexpectedly rebooted recently \(see RepeatedTasks.org\)
+status Or hit C-c and deal with the situation
+read _
+
+try ssh -t athena "sudo apt-get update && sudo apt-get upgrade"
+
+status running src-unregistered check
+status unregistered repos need to have 'mr register' run in them
+status "(and tracking branches set up, and new alioth repos added to coldbkup script)"
+status periodically remove old projects from ~/src
+zero src-unregistered
+
+# status "running restow operation: failures indicate symlinks replaced by updated files"
+# status "move these into the relevant stow'd repository"
+zero mr -ms misstowed --delete-unmodified # use 'mr adopt' on the result
+try mr -ms restow
+
+try mr -ms autoci
+# `mr -ms isclean` checks for stuff to be checked in ...
+try mr -ms isclean
+try mr -s up
+try mr -s push origin --tags :
+# ... then `mr -ms status` finds unpushed branches
+zero mr -ms status
+try coldbkup
+
+try sudo apt-get update
+try sudo apt-get upgrade
+try sudo apt-get dist-upgrade
+try sudo apt-get autoremove
+try git-pbuilder update --override-config
+
+zero check-mailqs
diff --git a/archive/bin/win32setup.bat b/archive/bin/win32setup.bat
new file mode 100644
index 00000000..c0e77342
--- /dev/null
+++ b/archive/bin/win32setup.bat
@@ -0,0 +1,23 @@
+@echo off
+REM See comproc.org for when to use this script
+REM ---- BEGIN WORK
+
+CD %HOMEPATH%
+mklink /J .emacs.d src\dotfiles\.emacs.d
+mklink /J bin src\dotfiles\bin
+mklink /J tmp Desktop
+copy /y src\dotfiles\home-mrconfig .mrconfig
+copy /y src\dotfiles\.gitconfig .gitconfig
+copy /y src\dotfiles\.globgitignore .globgitignore
+copy /y src\dotfiles\.bashrc .bashrc
+copy /y src\dotfiles\.shenv .shenv
+mkdir %APPDATA%\cabal
+REM copy /y src\dotfiles\.cabal\config %APPDATA%\cabal\config
+
+REM Could generalise the following for all dirs under dotfiles/AppData/Roaming
+mklink /J %APPDATA%\VirtuaWin src\dotfiles\AppData\Roaming\VirtuaWin
+
+REM ---- END WORK
+REM ECHO.
+REM ECHO That should be everything set-up.
+REM pause
diff --git a/archive/bin/workstation-uninstallable b/archive/bin/workstation-uninstallable
new file mode 100755
index 00000000..acb538c1
--- /dev/null
+++ b/archive/bin/workstation-uninstallable
@@ -0,0 +1,43 @@
+#!/usr/bin/perl
+
+# Run this script to find packages that I might want to uninstall, or
+# at least mark as automatically installed. We find all manually
+# installed packages that aren't part of a standard Debian system and
+# aren't depended on by anything else we have installed. Then we
+# filter out a list of packages we always want, and packages that
+# propellor keeps installed.
+
+use strict;
+use warnings;
+use autodie;
+
+use Array::Utils qw{ array_minus };
+use Path::Class;
+
+# manually installed packages that aren't part of a standard Debian
+# system and aren't depended on by anything else we have installed
+my @installed = split "\n", `aptitude search '~i !~M !~pstandard !~pimportant !~prequired !?reverse-Depends(?installed)' -F'%p' --disable-columns`;
+
+# packages we definitely want to keep
+my @wanted = qw/acpi acpi-support-base/;
+
+# packages propellor installs (roughly)
+my $dir = dir("$ENV{HOME}/src/propellor/src/Propellor/Property/SiteSpecific");
+my $file = $dir->file("SPW.hs");
+my $file_handle = $file->openr();
+while( my $line = $file_handle->getline() ) {
+ if ( $line =~ /[,\[] "([a-z0-9-]+)"$/ ) {
+ push @wanted, $1;
+ }
+}
+
+# these are all the packages I might want to remove from my
+# workstation, or at least `apt-mark auto`
+my @might_want_to_remove_or_mark_auto = array_minus( @installed, @wanted );
+
+# remove task-* packages since I probs. don't want to remove those
+@might_want_to_remove_or_mark_auto = grep { $_ !~ /^task-/ } @might_want_to_remove_or_mark_auto;
+
+# output
+print join "\n", @might_want_to_remove_or_mark_auto;
+print "\n";
diff --git a/archive/bin/workstation-uninstallable-alt b/archive/bin/workstation-uninstallable-alt
new file mode 100755
index 00000000..4f5e302c
--- /dev/null
+++ b/archive/bin/workstation-uninstallable-alt
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# orig: http://askubuntu.com/a/630674
+
+# make this into a Haskell program that sorts this info in a sensible
+# way (e.g. by date). and possibly excludes packages installed by
+# propellor.
+
+# List of all packages currently installed
+current=$(dpkg -l | awk '{print $2}' | sort | uniq)
+
+# List of all packages that were installed with the system
+pre=$(sed -n 's/^Package: //p' /var/log/installer/status | sort | uniq)
+
+# List of packages that were explicitly (manually) installed
+manual=$(apt-mark showmanual | sort | uniq)
+
+# (Current - Pre) ∩ (Manual)
+packages=$(comm -12 <(comm -23 <(echo "$current") <(echo "$pre")) <(echo "$manual") )
+
+for pack in $packages; do
+    # strip any :architecture suffix to get the bare package name
+    packname=${pack%%:*}
+    desc=$(apt-cache search "^$packname$" | sed -E 's/.* - (.*)/\1/')
+    # mtime of the package's dpkg file list approximates the install date
+    date=$(date -r "/var/lib/dpkg/info/$pack.list")
+
+    echo "# $desc"
+    echo "# $date"
+    echo "sudo apt-get install $pack"
+    echo
+done
diff --git a/archive/bin/xlaunch b/archive/bin/xlaunch
new file mode 100755
index 00000000..acf9b5ae
--- /dev/null
+++ b/archive/bin/xlaunch
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+# xlaunch COMMAND -- focus an application's window, starting the
+# application first if there is no window to focus
+
+. "$HOME/.shenv"
+
+case $1 in
+    www)
+        # raise Firefox, or start it if wmctrl finds no window
+        wmctrl -a Firefox || firefox &
+        ;;
+    emacs)
+        # ensure the daemon is up, then raise or create a frame
+        if ! pgrep emacs; then
+            emacs --daemon
+        fi
+        wmctrl -a "emacs@$(hostname -f)" || emacsclient -c -F "((fullscreen . maximized))" &
+        ;;
+    ma)
+        urxvttmux "spw@ma"
+        ;;
+    term)
+        urxvttmux local
+        ;;
+esac
diff --git a/archive/bin/yankfmailpw b/archive/bin/yankfmailpw
new file mode 100755
index 00000000..85c75d8d
--- /dev/null
+++ b/archive/bin/yankfmailpw
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+source /root/.shenv
+
+# yankfmailpw USER, run as root, tries to grab the fastmail password
+# from ~user/local/auth and put it in /etc/postfix for use with
+# postfix sasl client
+
+if [ -z "$1" ]; then
+    echo "usage: yankfmailpw USER" >&2
+    exit 1
+fi
+
+if [ -e "/etc/postfix/sasl_passwd" ]; then
+    echo "yankfmailpw: Looks like you already ran this script." >&2
+    exit 1
+fi
+
+if [[ $EUID -ne 0 ]]; then
+    echo "yankfmailpw: This script must be run as root." >&2
+    exit 1
+fi
+
+# resolve USER's home directory without eval'ing user input
+theirhome=$(getent passwd "$1" | cut -d: -f6)
+
+if ! [ -e "$theirhome/local/auth/fmailsyncpass" ]; then
+    echo "yankfmailpw: $1 doesn't have the password in their homedir." >&2
+    exit 1
+fi
+
+fmailpass=$(cat "$theirhome/local/auth/fmailsyncpass")
+# create the file 0600 from the start so the password is never
+# readable by other users, even briefly
+umask 077
+echo "[127.0.0.1]:11565 spwhitton#fastmail.com:${fmailpass}" > /etc/postfix/sasl_passwd
+chmod 600 /etc/postfix/sasl_passwd
+postmap /etc/postfix/sasl_passwd
diff --git a/archive/bin/yt b/archive/bin/yt
new file mode 100755
index 00000000..2208ffad
--- /dev/null
+++ b/archive/bin/yt
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+# yt URL -- stream a web video in VLC via quvi
+
+if [ -z "$1" ]; then
+    echo "usage: yt URL" >&2
+    exit 1
+fi
+
+quvi "$1" --exec "vlc %u"
diff --git a/archive/texmf/bibtex/bst/spwchicago/spwchicago.bst b/archive/texmf/bibtex/bst/spwchicago/spwchicago.bst
new file mode 100644
index 00000000..b227d2fd
--- /dev/null
+++ b/archive/texmf/bibtex/bst/spwchicago/spwchicago.bst
@@ -0,0 +1,1662 @@
+%%% Chicago hacked around a little to be more like
+%%% http://www.gs.cornell.edu/philrev/info/stylesheet.html
+
+%%% diff against
+%%% /usr/share/texmf-texlive/bibtex/bst/chicago/chicago/bst on Debian
+
+%%% TODO: volume numbers and translators don't come out correctly
+
+%%% ====================================================================
+%%% @BibTeX-style-file{
+%%% author = "Glenn Paulley",
+%%% version = "4",
+%%% date = "28 August 1992",
+%%% time = "10:23:39 199",
+%%% filename = "chicago.bst",
+%%% address = "Data Structuring Group
+%%% Department of Computer Science
+%%% University of Waterloo
+%%% Waterloo, Ontario, Canada
+%%% N2L 3G1",
+%%% telephone = "(519) 885-1211",
+%%% FAX = "(519) 885-1208",
+%%% checksum = "26323 1654 5143 37417",
+%%% email = "gnpaulle@bluebox.uwaterloo.ca",
+%%% codetable = "ISO/ASCII",
+%%% keywords = "",
+%%% supported = "yes",
+%%% abstract = "A BibTeX bibliography style that follows the
+%%% `B' reference style of the 13th Edition of
+%%% the Chicago Manual of Style. A detailed
+%%% feature list is given below.",
+%%% docstring = "The checksum field above contains a CRC-16
+%%% checksum as the first value, followed by the
+%%% equivalent of the standard UNIX wc (word
+%%% count) utility output of lines, words, and
+%%% characters. This is produced by Robert
+%%% Solovay's checksum utility.",
+%%% }
+%%% ====================================================================
+%
+% "Chicago" BibTeX style, chicago.bst
+% ===================================
+%
+% BibTeX `chicago' style file for BibTeX version 0.99c, LaTeX version 2.09
+% Place it in a file called chicago.bst in the BibTeX search path.
+% You need to include chicago.sty as a \documentstyle option.
+% (Placing it in the same directory as the LaTeX document should also work.)
+% This "chicago" style is based on newapa.bst (American Psych. Assoc.)
+% found at ymir.claremont.edu.
+%
+% Citation format: (author-last-name year)
+% (author-last-name and author-last-name year)
+% (author-last-name, author-last-name, and author-last-name year)
+% (author-last-name et al. year)
+% (author-last-name)
+% author-last-name (year)
+% (author-last-name and author-last-name)
+% (author-last-name et al.)
+% (year) or (year,year)
+% year or year,year
+%
+% Reference list ordering: alphabetical by author or whatever passes
+% for author in the absence of one.
+%
+% This BibTeX style has support for abbreviated author lists and for
+% year-only citations. This is done by having the citations
+% actually look like
+%
+% \citeauthoryear{full-author-info}{abbrev-author-info}{year}
+%
+% The LaTeX style has to have the following (or similar)
+%
+% \let\@internalcite\cite
+% \def\fullcite{\def\citeauthoryear##1##2##3{##1, ##3}\@internalcite}
+% \def\fullciteA{\def\citeauthoryear##1##2##3{##1}\@internalcite}
+% \def\shortcite{\def\citeauthoryear##1##2##3{##2, ##3}\@internalcite}
+% \def\shortciteA{\def\citeauthoryear##1##2##3{##2}\@internalcite}
+% \def\citeyear{\def\citeauthoryear##1##2##3{##3}\@internalcite}
+%
+% These TeX macro definitions are found in chicago.sty. Additional
+% commands to manipulate different components of a citation can be defined
+% so that, for example, you can list author's names without parentheses
+% if using a citation as a noun or object in a sentence.
+%
+% This file was originally copied from newapa.bst at ymir.claremont.edu.
+%
+% Features of chicago.bst:
+% =======================
+%
+% - full names used in citations, but abbreviated citations are available
+% (see above)
+% - if an entry has a "month", then the month and year are also printed
+% as part of that bibitem.
+% - all conjunctions use "and" instead of "\&"
+% - major modification from Chicago Manual of Style (13th ed.) is that
+% only the first author in a reference appears last name first-
+% additional authors appear as J. Q. Public.
+% - pages are listed as "pp. xx-xx" in all entry types except
+% article entries.
+% - book, inbook, and manual use "location: publisher" (or organization)
+% for address and publisher. All other types list publishers separately.
+% - "pp." are used to identify page numbers for all entry types except
+% articles.
+% - organization is used as a citation label if neither author nor editor
+% is present (for manuals).
+% - "et al." is used for long author and editor lists, or when "others"
+% is used.
+%
+% Modifications and bug fixes from newapa.bst:
+% ===========================================
+%
+% - added month, year to bib entries if month is present
+% - fixed bug with In proceedings, added necessary comma after title
+% - all conjunctions changed to "and" from "\&"
+% - fixed bug with author labels in my.full.label: "et al." now is
+% generated when "others" is an author name
+% - major modification from Chicago Manual of Style (13th ed.) is that
+% only the first author in a reference appears last name first-
+% additional authors appear as J. Q. Public.
+% - pages are listed as "pp. xx-xx" in all entry types except
+% article entries. Unnecessary (IMHO) "()" around page numbers
+% were removed, and page numbers now don't end with a period.
+% - created chicago.sty for use with this bibstyle (required).
+% - fixed bugs in FUNCTION {format.vol.num.pages} for missing volume,
+% number, and /or pages. Renamed to format.jour.vol.
+% - fixed bug in formatting booktitles: additional period an error if
+% book has a volume.
+% - fixed bug: editors usually given redundant period before next clause
+% (format.editors.dot) removed.
+% - added label support for organizations, if both author and editor
+% are missing (from alpha.bst). If organization is too long, then
+% the key field is used for abbreviated citations.
+% - In proceedings or books of several volumes, no comma was written
+% between the "Volume x" and the page numbers (this was intentional
+% in newapa.bst). Fixed.
+% - Some journals may not have volumes/numbers, only month/year (eg.
+% IEEE Computer). Fixed bug in article style that assumed volume/number
+% was always present.
+%
+% Original documentation for newapa.sty:
+% =====================================
+%
+% This version was made by modifying the master file made by
+% Oren Patashnik (PATASHNIK@SCORE.STANFORD.EDU), and the 'named' BibTeX
+% style of Peter F. Patel-Schneider.
+%
+% Copyright (C) 1985, all rights reserved.
+% Copying of this file is authorized only if either
+% (1) you make absolutely no changes to your copy, including name, or
+% (2) if you do make changes, you name it something other than 'newapa.bst'.
+% There are undoubtably bugs in this style. If you make bug fixes,
+% improvements, etc. please let me know. My e-mail address is:
+% spencer@cgrg.ohio.state.edu or 71160.3141@compuserve.com
+%
+% This style was made from 'plain.bst', 'named.bst', and 'apalike.bst',
+% with lots of tweaking to make it look like APA style, along with tips
+% from Young Ryu and Brian Reiser's modifications of 'apalike.bst'.
+
+ENTRY
+ { address
+ author
+ booktitle
+ chapter
+ edition
+ editor
+ howpublished
+ institution
+ journal
+ key
+ month
+ note
+ number
+ organization
+ pages
+ publisher
+ school
+ series
+ title
+ type
+ volume
+ year
+ }
+ {}
+ { label.year extra.label sort.year sort.label }
+
+INTEGERS { output.state before.all mid.sentence after.sentence after.block }
+
+FUNCTION {init.state.consts}
+{ #0 'before.all :=
+ #1 'mid.sentence :=
+ #2 'after.sentence :=
+ #3 'after.block :=
+}
+
+STRINGS { s t u }
+
+FUNCTION {output.nonnull}
+{ 's :=
+ output.state mid.sentence =
+ { ", " * write$ }
+ { output.state after.block =
+ { add.period$ write$
+ newline$
+ "\newblock " write$
+ }
+ { output.state before.all =
+ 'write$
+ { add.period$ " " * write$ }
+ if$
+ }
+ if$
+ mid.sentence 'output.state :=
+ }
+ if$
+ s
+}
+
+% Use a colon to separate output. Used only for address/publisher
+% combination in book/inbook types, address/institution for manuals,
+% and organization:publisher for proceedings (inproceedings).
+%
+FUNCTION {output.nonnull.colon}
+{ 's :=
+ output.state mid.sentence =
+ { ": " * write$ }
+ { output.state after.block =
+ { add.period$ write$
+ newline$
+ "\newblock " write$
+ }
+ { output.state before.all =
+ 'write$
+ { add.period$ " " * write$ }
+ if$
+ }
+ if$
+ mid.sentence 'output.state :=
+ }
+ if$
+ s
+}
+
+FUNCTION {output}
+{ duplicate$ empty$
+ 'pop$
+ 'output.nonnull
+ if$
+}
+
+FUNCTION {output.colon}
+{ duplicate$ empty$
+ 'pop$
+ 'output.nonnull.colon
+ if$
+}
+
+FUNCTION {output.check}
+{ 't :=
+ duplicate$ empty$
+ { pop$ "empty " t * " in " * cite$ * warning$ }
+ 'output.nonnull
+ if$
+}
+
+FUNCTION {output.check.colon}
+{ 't :=
+ duplicate$ empty$
+ { pop$ "empty " t * " in " * cite$ * warning$ }
+ 'output.nonnull.colon
+ if$
+}
+
+FUNCTION {output.year.check}
+{ year empty$
+ { "empty year in " cite$ * warning$ }
+ { write$
+ " " year * extra.label *
+ % month empty$
+ % { "." * }
+ % { ", " * month * "." * }
+ % if$
+ mid.sentence 'output.state :=
+ }
+ if$
+}
+
+
+FUNCTION {fin.entry}
+{ add.period$
+ write$
+ newline$
+}
+
+FUNCTION {new.block}
+{ output.state before.all =
+ 'skip$
+ { after.block 'output.state := }
+ if$
+}
+
+FUNCTION {new.sentence}
+{ output.state after.block =
+ 'skip$
+ { output.state before.all =
+ 'skip$
+ { after.sentence 'output.state := }
+ if$
+ }
+ if$
+}
+
+FUNCTION {not}
+{ { #0 }
+ { #1 }
+ if$
+}
+
+FUNCTION {and}
+{ 'skip$
+ { pop$ #0 }
+ if$
+}
+
+FUNCTION {or}
+{ { pop$ #1 }
+ 'skip$
+ if$
+}
+
+FUNCTION {new.block.checka}
+{ empty$
+ 'skip$
+ 'new.block
+ if$
+}
+
+FUNCTION {new.block.checkb}
+{ empty$
+ swap$ empty$
+ and
+ 'skip$
+ 'new.block
+ if$
+}
+
+FUNCTION {new.sentence.checka}
+{ empty$
+ 'skip$
+ 'new.sentence
+ if$
+}
+
+FUNCTION {new.sentence.checkb}
+{ empty$
+ swap$ empty$
+ and
+ 'skip$
+ 'new.sentence
+ if$
+}
+
+FUNCTION {field.or.null}
+{ duplicate$ empty$
+ { pop$ "" }
+ 'skip$
+ if$
+}
+
+%
+% Emphasize the top string on the stack.
+%
+FUNCTION {emphasize}
+{ duplicate$ empty$
+ { pop$ "" }
+ { "{\em " swap$ * "}" * }
+ if$
+}
+
+%
+% Emphasize the top string on the stack, but add a trailing space.
+%
+FUNCTION {emphasize.space}
+{ duplicate$ empty$
+ { pop$ "" }
+ { "{\em " swap$ * "\/}" * }
+ if$
+}
+
+INTEGERS { nameptr namesleft numnames }
+%
+% Format bibliographical entries with the first author last name first,
+% and subsequent authors with initials followed by last name.
+% All names are formatted in this routine.
+%
+FUNCTION {format.names}
+{ 's :=
+ #1 'nameptr := % nameptr = 1;
+ s num.names$ 'numnames := % numnames = num.name$(s);
+ numnames 'namesleft :=
+ { namesleft #0 > }
+
+ { nameptr #1 =
+ {s nameptr "{vv~}{ll}{, jj}{, ff}" format.name$ 't := }
+ {s nameptr "{f.~}{vv~}{ll}{, jj}" format.name$ 't := }
+ if$
+ nameptr #1 >
+ { namesleft #1 >
+ { ", " * t * }
+ { numnames #2 >
+ { "," * }
+ 'skip$
+ if$
+ t "others" =
+ { " et~al." * }
+ { " and " * t * } % from Chicago Manual of Style
+ if$
+ }
+ if$
+ }
+ 't
+ if$
+ nameptr #1 + 'nameptr := % nameptr += 1;
+ namesleft #1 - 'namesleft := % namesleft =- 1;
+ }
+ while$
+}
+
+FUNCTION {my.full.label}
+{ 's :=
+ #1 'nameptr := % nameptr = 1;
+ s num.names$ 'numnames := % numnames = num.name$(s);
+ numnames 'namesleft :=
+ { namesleft #0 > }
+
+ { s nameptr "{vv~}{ll}" format.name$ 't := % get the next name
+ nameptr #1 >
+ { namesleft #1 >
+ { ", " * t * }
+ { numnames #2 >
+ { "," * }
+ 'skip$
+ if$
+ t "others" =
+ { " et~al." * }
+ { " and " * t * } % from Chicago Manual of Style
+ if$
+ }
+ if$
+ }
+ 't
+ if$
+ nameptr #1 + 'nameptr := % nameptr += 1;
+ namesleft #1 - 'namesleft := % namesleft =- 1;
+ }
+ while$
+
+}
+
+FUNCTION {format.names.fml}
+%
+% Format names in "familiar" format, with first initial followed by
+% last name. Like format.names, ALL names are formatted.
+%
+{ 's :=
+ #1 'nameptr := % nameptr = 1;
+ s num.names$ 'numnames := % numnames = num.name$(s);
+ numnames 'namesleft :=
+ { namesleft #0 > }
+
+ { s nameptr "{f.~}{vv~}{ll}{, jj}" format.name$ 't :=
+
+ nameptr #1 >
+ { namesleft #1 >
+ { ", " * t * }
+ { numnames #2 >
+ { "," * }
+ 'skip$
+ if$
+ t "others" =
+ { " et~al." * }
+ { " and " * t * }
+% { " \& " * t * }
+ if$
+ }
+ if$
+ }
+ 't
+ if$
+ nameptr #1 + 'nameptr := % nameptr += 1;
+ namesleft #1 - 'namesleft := % namesleft =- 1;
+ }
+ while$
+}
+
+FUNCTION {format.authors}
+{ author empty$
+ { "" }
+ { author format.names * "." }
+ if$
+}
+
+FUNCTION {format.key}
+{ empty$
+ { key field.or.null }
+ { "" }
+ if$
+}
+
+%
+% Format editor names for use in the "in" types: inbook, incollection,
+% inproceedings: first initial, then last names. When editors are the
+% LABEL for an entry, then format.editor is used which lists editors
+% by last name first.
+%
+FUNCTION {format.editors.fml}
+{ editor empty$
+ { "" }
+ { editor format.names.fml
+ editor num.names$ #1 >
+ { "" * }
+ { "" * }
+ if$
+ }
+ if$
+}
+
+%
+% Format editor names for use in labels, last names first.
+%
+FUNCTION {format.editors}
+{ editor empty$
+ { "" }
+ { editor format.names
+ editor num.names$ #1 >
+ { "" * }
+ { "" * }
+ if$
+ }
+ if$
+}
+
+% FUNCTION {format.title}
+% { title empty$
+% { "" }
+% { title "t" change.case$ }
+% if$
+% }
+
+% Note that the APA style requires case changes
+% in article titles. The following does not
+% change cases. If you prefer it, uncomment the
+% following and comment out the above.
+
+FUNCTION {format.title}
+{ title empty$
+ { "" }
+ { title }
+ if$
+}
+
+FUNCTION {n.dashify}
+{ 't :=
+ ""
+ { t empty$ not }
+ { t #1 #1 substring$ "-" =
+ { t #1 #2 substring$ "--" = not
+ { "--" *
+ t #2 global.max$ substring$ 't :=
+ }
+ { { t #1 #1 substring$ "-" = }
+ { "-" *
+ t #2 global.max$ substring$ 't :=
+ }
+ while$
+ }
+ if$
+ }
+ { t #1 #1 substring$ *
+ t #2 global.max$ substring$ 't :=
+ }
+ if$
+ }
+ while$
+}
+
+FUNCTION {format.btitle}
+{ edition empty$
+ { title emphasize }
+ { title empty$
+ { title emphasize }
+ { editor empty$ % spw - check for editor, then don't need period TODO: volume too
+ { "{\em " title * "\/} (" * edition * " ed.)" * "." * }
+ { "{\em " title * "\/} (" * edition * " ed.)" * }
+ if$
+ }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.emphasize.booktitle}
+{ edition empty$
+ { booktitle emphasize }
+ { booktitle empty$
+ { booktitle emphasize }
+    { editor empty$ % spw - extra period an error if book has an editor TODO: if it has a volume, too
+ { "{\em " booktitle * "\/} (" * edition * " ed.)" * "." *}
+ { "{\em " booktitle * "\/} (" * edition * " ed.)" * }
+ if$
+ }
+ if$
+ }
+ if$
+ }
+
+
+FUNCTION {tie.or.space.connect}
+{ duplicate$ text.length$ #3 <
+ { " " }
+ { " " }
+ if$
+ swap$ * *
+}
+
+FUNCTION {either.or.check}
+{ empty$
+ 'pop$
+ { "can't use both " swap$ * " fields in " * cite$ * warning$ }
+ if$
+}
+
+FUNCTION {format.bvolume}
+{ volume empty$
+ { "" }
+ { "volume" volume tie.or.space.connect
+ series empty$
+ 'skip$
+ { " of " * series emphasize * }
+ if$
+ "volume and number" number either.or.check
+ }
+ if$
+}
+
+FUNCTION {format.number.series}
+{ volume empty$
+ { number empty$
+ { series field.or.null }
+ { output.state mid.sentence =
+ { "number" }
+ { "Number" }
+ if$
+ number tie.or.space.connect
+ series empty$
+ { "there's a number but no series in " cite$ * warning$ }
+ { " in " * series * }
+ if$
+ }
+ if$
+ }
+ { "" }
+ if$
+}
+
+INTEGERS { multiresult }
+
+FUNCTION {multi.page.check}
+{ 't :=
+ #0 'multiresult :=
+ { multiresult not
+ t empty$ not
+ and
+ }
+ { t #1 #1 substring$
+ duplicate$ "-" =
+ swap$ duplicate$ "," =
+ swap$ "+" =
+ or or
+ { #1 'multiresult := }
+ { t #2 global.max$ substring$ 't := }
+ if$
+ }
+ while$
+ multiresult
+}
+
+FUNCTION {format.pages}
+{ pages empty$
+ { "" }
+ { pages multi.page.check
+ { "" pages n.dashify tie.or.space.connect } % gnp - removed ()
+ { "" pages tie.or.space.connect }
+ if$
+ }
+ if$
+}
+
+% By Young (and Spencer)
+% GNP - fixed bugs with missing volume, number, and/or pages
+%
+% Format journal, volume, number, pages for article types.
+%
+FUNCTION {format.jour.vol}
+{ journal empty$
+ { "no journal in " cite$ * warning$
+ "" }
+ { journal emphasize.space }
+ if$
+ % number empty$
+ % { volume empty$
+ volume empty$
+ { "no number and no volume in " cite$ * warning$
+ "" * }
+ { " " * Volume * }
+ if$
+ % }
+ % { volume empty$
+ % {"no volume for " cite$ * warning$
+ % ":" * number * }
+ % { "~" *
+ % volume
+ % ":" * number * * }
+ % if$
+ % }
+ % if$
+ pages empty$
+ {"page numbers missing in " cite$ * warning$
+ "" * } % gnp - place a null string on the stack for output
+ { duplicate$ empty$
+ { pop$ format.pages }
+ { ":" * pages n.dashify * } % gnp - removed pp. for articles
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.chapter.pages}
+{ chapter empty$
+ 'format.pages
+ { type empty$
+ { "chapter" }
+ { type "t" change.case$ }
+ if$
+ chapter tie.or.space.connect
+ pages empty$
+ {"page numbers missing in " cite$ * warning$} % gnp - added check
+ { ", " * format.pages * }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.in.ed.booktitle}
+{ booktitle empty$
+ { "" }
+ { editor empty$
+ { "In " format.emphasize.booktitle * }
+ { "In " format.emphasize.booktitle * ", edited by " * format.editors.fml * }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.thesis.type}
+{ type empty$
+ 'skip$
+ { pop$
+ type "t" change.case$
+ }
+ if$
+}
+
+FUNCTION {format.tr.number}
+{ type empty$
+ { "Technical Report" }
+ 'type
+ if$
+ number empty$
+ { "t" change.case$ }
+ { number tie.or.space.connect }
+ if$
+}
+
+FUNCTION {format.article.crossref}
+{ "See"
+ "\citeN{" * crossref * "}" *
+}
+
+FUNCTION {format.crossref.editor}
+{ editor #1 "{vv~}{ll}" format.name$
+ editor num.names$ duplicate$
+ #2 >
+ { pop$ " et~al." * }
+ { #2 <
+ 'skip$
+ { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
+ { " et~al." * }
+ { " and " * editor #2 "{vv~}{ll}" format.name$ * }
+ if$
+ }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.book.crossref}
+{ volume empty$
+ { "empty volume in " cite$ * "'s crossref of " * crossref * warning$
+ "In "
+ }
+ { "volume" volume tie.or.space.connect
+ " of " *
+ }
+ if$
+ editor empty$
+ editor field.or.null author field.or.null =
+ or
+ { key empty$
+ { series empty$
+ { "need editor, key, or series for " cite$ * " to crossref " *
+ crossref * warning$
+ "" *
+ }
+ { "{\em " * series * "\/}" * }
+ if$
+ }
+ { key * }
+ if$
+ }
+ { format.crossref.editor * }
+ if$
+ " \citeN{" * crossref * "}" *
+}
+
+FUNCTION {format.incoll.inproc.crossref}
+{ "See"
+ " \citeN{" * crossref * "}" *
+}
+
+% format.lab.names:
+%
+% determines "short" names for the abbreviated author information.
+% "Long" labels are created in calc.label, using the routine my.full.label
+% to format author and editor fields.
+%
+% There are 4 cases for labels. (n=3 in the example)
+% a) one author Foo
+% b) one to n Foo, Bar and Baz
+% c) use of "and others" Foo, Bar et al.
+% d) more than n Foo et al.
+%
+FUNCTION {format.lab.names}
+{ 's :=
+ s num.names$ 'numnames :=
+ numnames #2 > % change number to number of others allowed before
+ % forcing "et al".
+ { s #1 "{vv~}{ll}" format.name$ " et~al." * }
+ {
+ numnames #1 - 'namesleft :=
+ #2 'nameptr :=
+ s #1 "{vv~}{ll}" format.name$
+ { namesleft #0 > }
+ { nameptr numnames =
+ { s nameptr "{ff }{vv }{ll}{ jj}" format.name$ "others" =
+ { " et~al." * }
+ { " and " * s nameptr "{vv~}{ll}" format.name$ * }
+ if$
+ }
+ { ", " * s nameptr "{vv~}{ll}" format.name$ * }
+ if$
+ nameptr #1 + 'nameptr :=
+ namesleft #1 - 'namesleft :=
+ }
+ while$
+ }
+ if$
+}
+
+FUNCTION {author.key.label}
+{ author empty$
+ { key empty$
+ { "no key, author in " cite$ * warning$
+ cite$ #1 #3 substring$ }
+ 'key
+ if$
+ }
+ { author format.lab.names }
+ if$
+}
+
+FUNCTION {editor.key.label}
+{ editor empty$
+ { key empty$
+ { "no key, editor in " cite$ * warning$
+ cite$ #1 #3 substring$ }
+ 'key
+ if$
+ }
+ { editor format.lab.names }
+ if$
+}
+
+FUNCTION {author.key.organization.label}
+%
+% added - gnp. Provide label formatting by organization if author is null.
+%
+{ author empty$
+ { organization empty$
+ { key empty$
+ { "no key, author or organization in " cite$ * warning$
+ cite$ #1 #3 substring$ }
+ 'key
+ if$
+ }
+ { organization }
+ if$
+ }
+ { author format.lab.names }
+ if$
+}
+
+FUNCTION {editor.key.organization.label}
+%
+% added - gnp. Provide label formatting by organization if editor is null.
+%
+{ editor empty$
+ { organization empty$
+ { key empty$
+ { "no key, editor or organization in " cite$ * warning$
+ cite$ #1 #3 substring$ }
+ 'key
+ if$
+ }
+ { organization }
+ if$
+ }
+ { editor format.lab.names }
+ if$
+}
+
+FUNCTION {author.editor.key.label}
+{ author empty$
+ { editor empty$
+ { key empty$
+ { "no key, author, or editor in " cite$ * warning$
+ cite$ #1 #3 substring$ }
+ 'key
+ if$
+ }
+ { editor format.lab.names }
+ if$
+ }
+ { author format.lab.names }
+ if$
+}
+
+FUNCTION {calc.label}
+%
+% Changed - GNP. See also author.organization.sort, editor.organization.sort
+% Form label for BibTeX entry. The classification of which fields are used
+% for which type of entry (book, inbook, etc.) are taken from alpha.bst.
+% The change here from newapa is to also include organization as a
+% citation label if author or editor is missing.
+%
+{ type$ "book" =
+ type$ "inbook" =
+ or
+ 'author.editor.key.label
+ { type$ "proceedings" =
+ 'editor.key.organization.label
+ { type$ "manual" =
+ 'author.key.organization.label
+ 'author.key.label
+ if$
+ }
+ if$
+ }
+ if$
+
+ author empty$ % generate the full label citation information.
+ { editor empty$
+ { organization empty$
+ { "no author, editor, or organization in " cite$ * warning$
+ "??" }
+ { organization }
+ if$
+ }
+ { editor my.full.label }
+ if$
+ }
+ { author my.full.label }
+ if$
+
+% leave label on the stack, to be popped when required.
+
+ "}{" * swap$ * "}{" *
+% year field.or.null purify$ #-1 #4 substring$ *
+%
+% save the year for sort processing afterwards (adding a, b, c, etc.)
+%
+ year field.or.null purify$ #-1 #4 substring$
+ 'label.year :=
+}
+
+FUNCTION {output.bibitem}
+{ newline$
+
+ "\bibitem[\protect\citeauthoryear{" write$
+ calc.label write$
+ sort.year write$
+ "}]{" write$
+
+ cite$ write$
+ "}" write$
+ newline$
+ ""
+ before.all 'output.state :=
+}
+
+FUNCTION {article}
+{ output.bibitem
+ format.authors
+ "author" output.check
+ author format.key output % added
+ output.year.check % added
+ new.block
+ format.title
+ "title" output.check
+ new.block
+ crossref missing$
+ { format.jour.vol output
+ }
+ { format.article.crossref output.nonnull
+ format.pages output
+ }
+ if$
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {book}
+{ output.bibitem
+ author empty$
+ { format.editors
+ "author and editor" output.check }
+ { format.authors
+ output.nonnull
+ crossref missing$
+ { "author and editor" editor either.or.check }
+ 'skip$
+ if$
+ }
+ if$
+ output.year.check % added
+ new.block
+ format.btitle
+ "title" output.check
+ crossref missing$
+ { format.bvolume output
+ new.block
+ format.number.series output
+ new.sentence
+ address output
+ publisher "publisher" output.check.colon
+ }
+ { new.block
+ format.book.crossref output.nonnull
+ }
+ if$
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {booklet}
+{ output.bibitem
+ format.authors output
+ author format.key output % added
+ output.year.check % added
+ new.block
+ format.title
+ "title" output.check
+ new.block
+ howpublished output
+ address output
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {inbook}
+{ output.bibitem
+ author empty$
+ { format.editors
+ "author and editor" output.check
+ }
+ { format.authors output.nonnull
+ crossref missing$
+ { "author and editor" editor either.or.check }
+ 'skip$
+ if$
+ }
+ if$
+ output.year.check % added
+ new.block
+ format.btitle
+ "title" output.check
+ crossref missing$
+ { format.bvolume output
+ format.chapter.pages
+ "chapter and pages" output.check
+ new.block
+ format.number.series output
+ new.sentence
+ address output
+ publisher
+ "publisher" output.check.colon
+ }
+ { format.chapter.pages "chapter and pages" output.check
+ new.block
+ format.book.crossref output.nonnull
+ }
+ if$
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {incollection}
+{ output.bibitem
+ format.authors
+ "author" output.check
+ author format.key output % added
+ output.year.check % added
+ new.block
+ format.title
+ "title" output.check
+ new.block
+ crossref missing$
+ { format.in.ed.booktitle
+ "booktitle" output.check
+ format.bvolume output
+ format.number.series output
+ format.chapter.pages output % gnp - was special.output.nonnull
+% left out comma before page numbers
+ new.sentence
+ address output
+ publisher "publisher" output.check.colon
+ }
+ { format.incoll.inproc.crossref
+ output.nonnull
+ format.chapter.pages output
+ }
+ if$
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {inproceedings}
+{ output.bibitem
+ format.authors
+ "author" output.check
+ author format.key output % added
+ output.year.check % added
+ new.block
+ format.title
+ "title" output.check
+ new.block
+ crossref missing$
+ { format.in.ed.booktitle
+ "booktitle" output.check
+ format.bvolume output
+ format.number.series output
+ address output
+ format.pages output
+ new.sentence
+ organization output
+ publisher output.colon
+ }
+ { format.incoll.inproc.crossref output.nonnull
+ format.pages output
+ }
+ if$
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {conference} { inproceedings }
+
+FUNCTION {manual}
+{ output.bibitem
+ author empty$
+ { editor empty$
+ { organization "organization" output.check
+ organization format.key output } % if all else fails, use key
+ { format.editors "author and editor" output.check }
+ if$
+ }
+ { format.authors output.nonnull }
+ if$
+ output.year.check % added
+ new.block
+ format.btitle
+ "title" output.check
+ organization address new.block.checkb
+% Reversed the order of "address" and "organization", added the ":".
+ address output
+ organization "organization" output.check.colon
+% address output
+% ":" output
+% organization output
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {mastersthesis}
+{ output.bibitem
+ format.authors
+ "author" output.check
+ author format.key output % added
+ output.year.check % added
+ new.block
+ format.title
+ "title" output.check
+ new.block
+ "Master's thesis" format.thesis.type output.nonnull
+ school "school" output.check
+ address output
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {misc}
+{ output.bibitem
+ format.authors output
+ author format.key output % added
+ output.year.check % added
+ title howpublished new.block.checkb
+ format.title output
+ new.block
+ howpublished output
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {phdthesis}
+{ output.bibitem
+ format.authors
+ "author" output.check
+ author format.key output % added
+ output.year.check % added
+ new.block
+ format.btitle
+ "title" output.check
+ new.block
+ "Ph.\ D. thesis" format.thesis.type output.nonnull
+ school "school" output.check
+ address output
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {proceedings}
+{ output.bibitem
+ editor empty$
+ { organization output
+ organization format.key output } % gnp - changed from author format.key
+ { format.editors output.nonnull }
+ if$
+% author format.key output % gnp - removed (should be either
+% editor or organization
+ output.year.check % added (newapa)
+ new.block
+ format.btitle
+ "title" output.check
+ format.bvolume output
+ format.number.series output
+ address output
+ new.sentence
+ organization output
+ publisher output.colon
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {techreport}
+{ output.bibitem
+ format.authors
+ "author" output.check
+ author format.key output % added
+ output.year.check % added
+ new.block
+ format.title
+ "title" output.check
+ new.block
+ format.tr.number output.nonnull
+ institution
+ "institution" output.check
+ address output
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {unpublished}
+{ output.bibitem
+ format.authors
+ "author" output.check
+ author format.key output % added
+ output.year.check % added
+ new.block
+ format.title
+ "title" output.check
+ new.block
+ note "note" output.check
+ fin.entry
+}
+
+FUNCTION {default.type} { misc }
+
+MACRO {jan} {"January"}
+
+MACRO {feb} {"February"}
+
+MACRO {mar} {"March"}
+
+MACRO {apr} {"April"}
+
+MACRO {may} {"May"}
+
+MACRO {jun} {"June"}
+
+MACRO {jul} {"July"}
+
+MACRO {aug} {"August"}
+
+MACRO {sep} {"September"}
+
+MACRO {oct} {"October"}
+
+MACRO {nov} {"November"}
+
+MACRO {dec} {"December"}
+
+MACRO {acmcs} {"ACM Computing Surveys"}
+
+MACRO {acta} {"Acta Informatica"}
+
+MACRO {ai} {"Artificial Intelligence"}
+
+MACRO {cacm} {"Communications of the ACM"}
+
+MACRO {ibmjrd} {"IBM Journal of Research and Development"}
+
+MACRO {ibmsj} {"IBM Systems Journal"}
+
+MACRO {ieeese} {"IEEE Transactions on Software Engineering"}
+
+MACRO {ieeetc} {"IEEE Transactions on Computers"}
+
+MACRO {ieeetcad}
+ {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"}
+
+MACRO {ipl} {"Information Processing Letters"}
+
+MACRO {jacm} {"Journal of the ACM"}
+
+MACRO {jcss} {"Journal of Computer and System Sciences"}
+
+MACRO {scp} {"Science of Computer Programming"}
+
+MACRO {sicomp} {"SIAM Journal on Computing"}
+
+MACRO {tocs} {"ACM Transactions on Computer Systems"}
+
+MACRO {tods} {"ACM Transactions on Database Systems"}
+
+MACRO {tog} {"ACM Transactions on Graphics"}
+
+MACRO {toms} {"ACM Transactions on Mathematical Software"}
+
+MACRO {toois} {"ACM Transactions on Office Information Systems"}
+
+MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"}
+
+MACRO {tcs} {"Theoretical Computer Science"}
+
+READ
+
+FUNCTION {sortify}
+{ purify$
+ "l" change.case$
+}
+
+INTEGERS { len }
+
+FUNCTION {chop.word}
+{ 's :=
+ 'len :=
+ s #1 len substring$ =
+ { s len #1 + global.max$ substring$ }
+ 's
+ if$
+}
+
+
+
+FUNCTION {sort.format.names}
+{ 's :=
+ #1 'nameptr :=
+ ""
+ s num.names$ 'numnames :=
+ numnames 'namesleft :=
+ { namesleft #0 > }
+ { nameptr #1 >
+ { " " * }
+ 'skip$
+ if$
+ s nameptr "{vv{ } }{ll{ }}{ f{ }}{ jj{ }}" format.name$ 't :=
+ nameptr numnames = t "others" = and
+ { " et~al" * }
+ { t sortify * }
+ if$
+ nameptr #1 + 'nameptr :=
+ namesleft #1 - 'namesleft :=
+ }
+ while$
+}
+
+FUNCTION {sort.format.title}
+{ 't :=
+ "A " #2
+ "An " #3
+ "The " #4 t chop.word
+ chop.word
+ chop.word
+ sortify
+ #1 global.max$ substring$
+}
+
+FUNCTION {author.sort}
+{ author empty$
+ { key empty$
+ { "to sort, need author or key in " cite$ * warning$
+ "" }
+ { key sortify }
+ if$
+ }
+ { author sort.format.names }
+ if$
+}
+
+FUNCTION {editor.sort}
+{ editor empty$
+ { key empty$
+ { "to sort, need editor or key in " cite$ * warning$
+ ""
+ }
+ { key sortify }
+ if$
+ }
+ { editor sort.format.names }
+ if$
+}
+
+FUNCTION {author.editor.sort}
+{ author empty$
+ { "missing author in " cite$ * warning$
+ editor empty$
+ { key empty$
+ { "to sort, need author, editor, or key in " cite$ * warning$
+ ""
+ }
+ { key sortify }
+ if$
+ }
+ { editor sort.format.names }
+ if$
+ }
+ { author sort.format.names }
+ if$
+}
+
+FUNCTION {author.organization.sort}
+%
+% added - GNP. Stack author or organization for sorting (from alpha.bst).
+% Unlike alpha.bst, we need entire names, not abbreviations
+%
+{ author empty$
+ { organization empty$
+ { key empty$
+ { "to sort, need author, organization, or key in " cite$ * warning$
+ ""
+ }
+ { key sortify }
+ if$
+ }
+ { organization sortify }
+ if$
+ }
+ { author sort.format.names }
+ if$
+}
+
+FUNCTION {editor.organization.sort}
+%
+% added - GNP. Stack editor or organization for sorting (from alpha.bst).
+% Unlike alpha.bst, we need entire names, not abbreviations
+%
+{ editor empty$
+ { organization empty$
+ { key empty$
+ { "to sort, need editor, organization, or key in " cite$ * warning$
+ ""
+ }
+ { key sortify }
+ if$
+ }
+ { organization sortify }
+ if$
+ }
+ { editor sort.format.names }
+ if$
+}
+
+FUNCTION {presort}
+%
+% Presort creates the bibentry's label via a call to calc.label, and then
+% sorts the entries based on entry type. Chicago.bst adds support for
+% including organizations as the sort key; the following is stolen from
+% alpha.bst.
+%
+{ calc.label sortify % recalculate bibitem label
+ year field.or.null purify$ #-1 #4 substring$ * % add year
+ " "
+ *
+ type$ "book" =
+ type$ "inbook" =
+ or
+ 'author.editor.sort
+ { type$ "proceedings" =
+ 'editor.organization.sort
+ { type$ "manual" =
+ 'author.organization.sort
+ 'author.sort
+ if$
+ }
+ if$
+ }
+ if$
+ #1 entry.max$ substring$ % added for newapa
+ 'sort.label := % added for newapa
+ sort.label % added for newapa
+ *
+ " "
+ *
+ title field.or.null
+ sort.format.title
+ *
+ #1 entry.max$ substring$
+ 'sort.key$ :=
+}
+
+ITERATE {presort}
+
+SORT % by label, year, author/editor, title
+
+STRINGS { last.label next.extra }
+
+INTEGERS { last.extra.num }
+
+FUNCTION {initialize.extra.label.stuff}
+{ #0 int.to.chr$ 'last.label :=
+ "" 'next.extra :=
+ #0 'last.extra.num :=
+}
+
+FUNCTION {forward.pass}
+%
+% Pass through all entries, comparing current entry to last one.
+% Need to concatenate year to the stack (done by calc.label) to determine
+% if two entries are the same (see presort)
+%
+{ last.label
+ calc.label year field.or.null purify$ #-1 #4 substring$ * % add year
+ #1 entry.max$ substring$ = % are they equal?
+ { last.extra.num #1 + 'last.extra.num :=
+ last.extra.num int.to.chr$ 'extra.label :=
+ }
+ { "a" chr.to.int$ 'last.extra.num :=
+ "" 'extra.label :=
+ calc.label year field.or.null purify$ #-1 #4 substring$ * % add year
+ #1 entry.max$ substring$ 'last.label := % assign to last.label
+ }
+ if$
+}
+
+FUNCTION {reverse.pass}
+{ next.extra "b" =
+ { "a" 'extra.label := }
+ 'skip$
+ if$
+ label.year extra.label * 'sort.year :=
+ extra.label 'next.extra :=
+}
+
+EXECUTE {initialize.extra.label.stuff}
+
+ITERATE {forward.pass}
+
+REVERSE {reverse.pass}
+
+FUNCTION {bib.sort.order}
+{ sort.label
+ " "
+ *
+ year field.or.null sortify
+ *
+ " "
+ *
+ title field.or.null
+ sort.format.title
+ *
+ #1 entry.max$ substring$
+ 'sort.key$ :=
+}
+
+ITERATE {bib.sort.order}
+
+SORT % by sort.label, year, title --- giving final bib. order.
+
+FUNCTION {begin.bib}
+
+{ preamble$ empty$
+ 'skip$
+ { preamble$ write$ newline$ }
+ if$
+ "\begin{thebibliography}{}" write$ newline$
+}
+
+
+EXECUTE {begin.bib}
+
+EXECUTE {init.state.consts}
+
+ITERATE {call.type$}
+
+FUNCTION {end.bib}
+{ newline$
+ "\end{thebibliography}" write$ newline$
+}
+
+EXECUTE {end.bib}
+
diff --git a/archive/texmf/tex/latex/spwdnd/spwdnd.cls b/archive/texmf/tex/latex/spwdnd/spwdnd.cls
new file mode 100644
index 00000000..ac1b817c
--- /dev/null
+++ b/archive/texmf/tex/latex/spwdnd/spwdnd.cls
@@ -0,0 +1,46 @@
+\ProvidesClass{spwdnd}
+\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}}
+\ProcessOptions
+\LoadClass[12pt,a4paper,twoside,twocolumn]{article}
+
+\setlength{\columnsep}{1cm}
+
+\RequirePackage[protrusion=true,expansion=true]{microtype}
+\RequirePackage{geometry}
+\geometry{includehead,rmargin=1.5in,lmargin=.4in,tmargin=.7in,bmargin=1.6in,headheight=.36in,footskip=1.96cm}
+\RequirePackage{fancyhdr}
+\RequirePackage{ifthen}
+\RequirePackage{amssymb}
+\RequirePackage[charter]{mathdesign}
+\RequirePackage[scaled]{berasans}
+\RequirePackage[sf,bf,compact,medium]{titlesec}
+\RequirePackage{textcomp}
+\RequirePackage[onehalf]{spwtitle}
+\RequirePackage{ellipsis}
+\RequirePackage{stmaryrd}
+\RequirePackage{amsmath}
+
+\RequirePackage{framed}
+\RequirePackage{color}
+\definecolor{shadecolor}{gray}{0.8}
+
+%% allow lots and lots of images
+\RequirePackage{etex}
+\reserveinserts{18}
+\RequirePackage{morefloats}
+
+\pagestyle{fancy}
+
+\fancyhf{}
+\fancyhead[EL]{\hskip 0.75in \nouppercase{\leftmark}}
+\fancyhead[OR]{\nouppercase{\leftmark} \hskip 0.7in}
+\fancyfoot[EL]{\hskip -1.1cm \LARGE\textsf{\thepage}}
+\fancyfoot[OR]{\LARGE\textsf{\thepage} \hskip -1.35cm}
+\renewcommand{\headrulewidth}{0pt}
+
+\fancypagestyle{plain}{ %
+ \fancyhf{} % remove everything
+ \fancyhead[OR]{\nouppercase{\leftmark} \hskip 0.7in}
+ \fancyfoot[OR]{\LARGE\textsf{\thepage} \hskip -1.204cm}
+\renewcommand{\headrulewidth}{0pt} % remove lines as well
+\renewcommand{\footrulewidth}{0pt}}
diff --git a/archive/texmf/tex/latex/spwdoc/spwdoc.cls b/archive/texmf/tex/latex/spwdoc/spwdoc.cls
new file mode 100644
index 00000000..1c365f5e
--- /dev/null
+++ b/archive/texmf/tex/latex/spwdoc/spwdoc.cls
@@ -0,0 +1,126 @@
+\ProvidesClass{spwdoc}
+\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}}
+\ProcessOptions
+\LoadClass[12pt,a4paper]{article}
+
+\RequirePackage[protrusion=true,expansion=true]{microtype}
+\RequirePackage{geometry}
+\geometry{includeheadfoot,lmargin=1in,rmargin=1in,tmargin=.7in,bmargin=.7in}
+\RequirePackage{fancyhdr}
+\RequirePackage{ifthen}
+\RequirePackage{amssymb}
+%\RequirePackage[charter]{mathdesign}
+%\RequirePackage[scaled]{berasans}
+\RequirePackage[sf,bf,compact,medium]{titlesec}
+\RequirePackage{textcomp}
+\RequirePackage[onehalf]{spwtitle}
+%\RequirePackage{ellipsis}
+\RequirePackage{stmaryrd}
+\RequirePackage{amsmath}
+
+%%% BEGIN BIBTEX STUFF
+
+\RequirePackage[%
+authorformat=smallcaps,%
+titleformat=italic,%
+titleformat=commasep,%
+commabeforerest,%
+ibidem=nostrict,% % used to be strictdoublepage
+citefull=first,%
+oxford,%
+pages=format,% pages=test doesn't seem to work but is otherwise better to use
+idem,%
+super,%
+opcit,%
+% human,%
+bibformat=ibidem
+]{jurabib}
+\makeatletter
+\jb@dotfalse
+\makeatother
+
+\AddTo\bibsenglish{%
+ \def\edbyname{ed.}%
+ \def\editorname{(ed.)}%
+ \def\editorsname{(eds.)}%
+ \def\incollinname{in}%
+ \def\inname{in}%
+ \def\transby{$\!\!$}%
+ \def\bibchaptername{ch.}%
+ \def\Bibchaptername{Ch.}%
+ \def\Volumename{Vol.}%
+ \def\volumename{vol.}%
+}
+
+\bibliographystyle{jox}
+
+\renewcommand{\jbbtasep}{ \& }
+\renewcommand{\jbbstasep}{ \& }
+\renewcommand{\jbbtesep}{ \& }
+\renewcommand{\jbbstesep}{ \& }
+\renewcommand{\bibbtasep}{ \& }
+\renewcommand{\bibbstasep}{ \& }
+\renewcommand{\bibbtesep}{ \& }
+\renewcommand{\bibbstesep}{ \& }
+
+% to use this put this at end of essay: \nobibliography{/home/swhitton/doc/swhittonfhs}
+
+%%% END BIBTEX STUFF
+
+%%% BEGIN JURABIB HACKING
+
+% want translator's name in smallcaps
+% much commenting out here to fix some pdflatex errors, and since I
+% don't do anything in German
+\DeclareRobustCommand{\translator}[3]{%
+ \unskip\unskip\space%
+ \ifthenelse{\equal{#1}{}}{% translator missing
+ \ifjboxford\transfrom\else\Transfrom\fi\jbflanguage{#2}%
+ }{% translator given
+ % \ifthenelse{\equal{\bbl@main@language}{german}}{% main language german
+ % \ifthenelse{\equal{#2}{}}{% language missing
+ % {\"U}bers. \transby{} #1%
+ % }{% language given
+ % \ifjboxford\transfrom\else\Transfrom\fi\jbflanguage{#2} ^^fcbers. \transby{} #1%
+ % }%
+ % }{%
+ \ifthenelse{\equal{#2}{}}{% language missing
+ \trans{} \transby{} \textsc{#1}%
+ }{% language given
+ \trans{} \transfrom{}\jbflanguage{#2} \transby{} \textsc{#1}%
+ }%
+ % }%
+ }% dot or not?
+ \ifjbchicago
+ \ifthenelse{\equal{#3}{1}}{% \(type=incollection \and editor given\) \or type=article
+ \bibatsep{}%
+ }{}%
+ \else
+ \ifthenelse{\equal{#3}{1}\and\not\boolean{jboxford}}{%
+ \bibatsep{}%
+ }{}%
+ \fi
+}%
+
+%%% END JURABIB HACKING
+
+\makeatletter
+\newcommand*{\textoverline}[1]{$\overline{\hbox{#1}}\m@th$}
+\makeatother
+
+% from http://stackoverflow.com/questions/2522173/how-to-get-the-value-of-the-document-title-in-latex
+\makeatletter\def\title#1{\gdef\@title{#1}\gdef\THETITLE{#1}}\makeatother
+\makeatletter\def\author#1{\gdef\@author{#1}\gdef\THEAUTHOR{#1}}\makeatother
+\makeatletter\def\date#1{\gdef\@date{#1}\gdef\THEDATE{#1}}\makeatother
+
+\pagestyle{fancy}
+
+\fancyhead{}
+\fancyhead[R]{\textsf{\thepage}}
+\fancyfoot{}
+\renewcommand{\headrulewidth}{0pt}
+
+\fancypagestyle{plain}{ %
+\fancyhf{} % remove everything
+\renewcommand{\headrulewidth}{0pt} % remove lines as well
+\renewcommand{\footrulewidth}{0pt}}
diff --git a/archive/texmf/tex/latex/spwessay/spwessay.cls b/archive/texmf/tex/latex/spwessay/spwessay.cls
new file mode 100644
index 00000000..a35ed680
--- /dev/null
+++ b/archive/texmf/tex/latex/spwessay/spwessay.cls
@@ -0,0 +1,186 @@
+\ProvidesClass{spwessay}
+\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}}
+\ProcessOptions
+\LoadClass[twoside,a4paper]{article}
+%\setlength{\evensidemargin}{63pt}
+%\setlength{\oddsidemargin}{\evensidemargin}
+\usepackage[margin=1.75in]{geometry}
+
+\RequirePackage[protrusion=true,expansion=true]{microtype}
+\RequirePackage{fancyhdr}
+\RequirePackage{ifthen}
+\RequirePackage{amssymb} % for $\blacksquare$ at end
+
+%\RequirePackage[charter]{mathdesign}
+\RequirePackage{pslatex}
+%\RequirePackage[scaled]{berasans}
+
+\RequirePackage[sf,bf,compact,medium]{titlesec}
+\RequirePackage{textcomp}
+\RequirePackage[onehalf]{spwtitle}
+\RequirePackage[norule,splitrule,stable,multiple]{footmisc}
+\RequirePackage[utf8]{inputenc}
+\RequirePackage{ellipsis}
+\RequirePackage{stmaryrd}
+\RequirePackage{amsmath}
+
+%%% BEGIN BIBTEX STUFF
+
+\RequirePackage[%
+%authorformat=smallcaps,%
+titleformat=italic,%
+titleformat=commasep,%
+commabeforerest,%
+ibidem=nostrict,% % used to be strictdoublepage
+citefull=first,%
+oxford,%
+pages=format,% pages=test doesn't seem to work but is otherwise better to use
+%idem,%
+super,%
+opcit,%
+% human,%
+bibformat=ibidem
+]{jurabib}
+\makeatletter
+\jb@dotfalse
+\makeatother
+
+\AddTo\bibsenglish{%
+ \def\edbyname{ed.}%
+ \def\editorname{(ed.)}%
+ \def\editorsname{(eds.)}%
+ \def\incollinname{in}%
+ \def\inname{in}%
+ \def\transby{$\!\!$}%
+ \def\bibchaptername{ch.}%
+ \def\Bibchaptername{Ch.}%
+ \def\Volumename{Vol.}%
+ \def\volumename{vol.}%
+}
+
+\bibliographystyle{jox}
+
+\renewcommand{\jbbtasep}{ \& }
+\renewcommand{\jbbstasep}{ \& }
+\renewcommand{\jbbtesep}{ \& }
+\renewcommand{\jbbstesep}{ \& }
+\renewcommand{\bibbtasep}{ \& }
+\renewcommand{\bibbstasep}{ \& }
+\renewcommand{\bibbtesep}{ \& }
+\renewcommand{\bibbstesep}{ \& }
+
+% to use this put this at end of essay: \nobibliography{/home/swhitton/doc/swhittonfhs}
+
+%%% END BIBTEX STUFF
+
+%%% BEGIN BIBLIOGRAPHY HACKING
+
+\makeatletter
+\renewenvironment{thebibliography}[1]
+{\subsection*{\bibname}% <-- this line was changed from \chapter* to \section*
+ \@mkboth{\MakeUppercase\bibname}{\MakeUppercase\bibname}%
+ \list{\@biblabel{\@arabic\c@enumiv}}%
+ {\settowidth\labelwidth{\@biblabel{#1}}%
+ \leftmargin\labelwidth
+ \advance\leftmargin\labelsep
+ \@openbib@code
+ \usecounter{enumiv}%
+ \let\p@enumiv\@empty
+ \renewcommand\theenumiv{\@arabic\c@enumiv}}%
+ \sloppy
+ \clubpenalty4000
+ \@clubpenalty \clubpenalty
+ \widowpenalty4000%
+ \sfcode`\.\@m}
+{\def\@noitemerr
+ {\@latex@warning{Empty `thebibliography' environment}}%
+ \endlist}
+\makeatother
+
+\newcommand{\bibname}{Bibliography}
+
+%%% END BIBLIOGRAPHY HACKING
+
+%%% BEGIN JURABIB HACKING
+
+% % want translator's name in smallcaps
+% % much commenting out here to fix some pdflatex errors, and since I
+% % don't do anything in German
+% \DeclareRobustCommand{\translator}[3]{%
+% \unskip\unskip\space%
+% \ifthenelse{\equal{#1}{}}{% translator missing
+% \ifjboxford\transfrom\else\Transfrom\fi\jbflanguage{#2}%
+% }{% translator given
+% % \ifthenelse{\equal{\bbl@main@language}{german}}{% main language german
+% % \ifthenelse{\equal{#2}{}}{% language missing
+% % {\"U}bers. \transby{} #1%
+% % }{% language given
+% % \ifjboxford\transfrom\else\Transfrom\fi\jbflanguage{#2} ^^fcbers. \transby{} #1%
+% % }%
+% % }{%
+% \ifthenelse{\equal{#2}{}}{% language missing
+% \trans{} \transby{} \textsc{#1}%
+% }{% language given
+% \trans{} \transfrom{}\jbflanguage{#2} \transby{} \textsc{#1}%
+% }%
+% % }%
+% }% dot or not?
+% \ifjbchicago
+% \ifthenelse{\equal{#3}{1}}{% \(type=incollection \and editor given\) \or type=article
+% \bibatsep{}%
+% }{}%
+% \else
+% \ifthenelse{\equal{#3}{1}\and\not\boolean{jboxford}}{%
+% \bibatsep{}%
+% }{}%
+% \fi
+% }%
+
+%%% END JURABIB HACKING
+
+\makeatletter
+\newcommand*{\textoverline}[1]{$\overline{\hbox{#1}}\m@th$}
+\makeatother
+
+% from http://stackoverflow.com/questions/2522173/how-to-get-the-value-of-the-document-title-in-latex
+\makeatletter\def\title#1{\gdef\@title{#1}\gdef\THETITLE{#1}}\makeatother
+\makeatletter\def\author#1{\gdef\@author{#1}\gdef\THEAUTHOR{#1}}\makeatother
+\makeatletter\def\date#1{\gdef\@date{#1}\gdef\THEDATE{#1}}\makeatother
+
+\pagestyle{fancy}
+
+\fancyhead{}
+\fancyhead[LE]{\textsf{\thepage} \hfill \textsl{\textsf{\THEAUTHOR}} \hfill}
+\fancyhead[RO]{\hfill \textsf{\THETITLE} \hfill \textsf{\thepage}}
+\fancyfoot{}
+%\fancyfoot[LE,RO]{\textsf{\thepage}}
+
+\renewcommand{\headrulewidth}{0pt}
+
+\fancypagestyle{plain}{ %
+\fancyhf{} % remove everything
+\renewcommand{\headrulewidth}{0pt} % remove lines as well
+\renewcommand{\footrulewidth}{0pt}}
+
+%\renewcommand{\section}[1]{\addtocounter{section}{1}\textbf{\textsection \thesection.} $\quad$\ifthenelse{\equal{#1}{}}{}{ \textsc{#1} $\quad$}}
+
+% following is the above with no blank line - as it should be, rather
+% than requiring one to not end a para
+% http://tex.stackexchange.com/questions/8657/prevent-a-paragraph-break-after-a-section-heading
+\renewcommand{\section}[1]{%
+ \par
+ \ifthenelse{\equal{\thesection}{0}}{}{\bigskip}
+ \pagebreak[2]%
+ \refstepcounter{section}%
+ \everypar={%
+ {\setbox0=\lastbox}% Remove the indentation
+% \addcontentsline{toc}{section}{%
+% {\protect\makebox[0.3in][r]{\textsection \thesection.} \hspace*{3pt}}}%
+% \textbf{\textsection \thesection.} $\quad$% old version with \textsection symbol
+ \thesection. $\quad$%
+ \ifthenelse{\equal{#1}{}}{}{ \textsl{#1} $\quad$}% textsl here used to be textsc
+% \textsc{\thesubsection\space\space{#1} }%
+ \everypar={}%
+ }%
+ \ignorespaces
+}
diff --git a/archive/texmf/tex/latex/spworg/spworg.sty b/archive/texmf/tex/latex/spworg/spworg.sty
new file mode 100644
index 00000000..3117bd5a
--- /dev/null
+++ b/archive/texmf/tex/latex/spworg/spworg.sty
@@ -0,0 +1,50 @@
+\ProvidesPackage{spworg}
+
+\usepackage[protrusion=true,expansion=true]{microtype}
+\usepackage{multicol}
+\usepackage[a4paper]{geometry}
+\geometry{a4paper,verbose,includehead,tmargin=30pt,headsep=15pt,bmargin=50pt,lmargin=60pt,rmargin=80pt}
+\usepackage{fancyhdr} % this has to go after the geometry or the header appears too short
+%\pagestyle{fancy}
+%\usepackage{spwmaths}
+\usepackage[charter]{mathdesign}
+\usepackage[scaled]{berasans}
+\usepackage[sf,bf,compact,medium]{titlesec}
+%\setlength{\parindent}{0pt} % not sure why this was here and it was
+% meaning no indent on top level - be willing to uncomment
+
+\setlength{\headheight}{15pt}
+
+\pagestyle{fancy}
+\renewcommand{\chaptermark}[1]{\markboth{#1}{}}
+\renewcommand{\sectionmark}[1]{\markright{#1}{}}
+
+\usepackage[onehalf]{spwtitle}
+
+\fancyhead{}
+\fancyhead[LE,RO]{\textsf{\thepage}}
+\makeatletter\fancyhead[CE]{\textsc{\nouppercase{\@title}}}\makeatother
+\fancyhead[CO]{\textsc{\nouppercase{\rightmark}}}
+\fancyfoot{}
+
+\fancypagestyle{plain}{ %
+\fancyhf{} % remove everything
+\renewcommand{\headrulewidth}{0pt} % remove lines as well
+\renewcommand{\footrulewidth}{0pt}}
+
+\setlength{\columnsep}{25pt}
+\setlength{\columnseprule}{.5pt}
+
+\newenvironment{lowitemize}{\begin{list}{$\quad\boldsymbol{\star}\;$}{%
+\setlength{\itemsep}{0pt}%
+\setlength{\parsep}{3pt}%
+\setlength{\leftmargin}{20pt}%
+}}{\end{list}}
+\renewenvironment{itemize}{\begin{list}{--}{%
+\setlength{\itemsep}{0pt}%
+\setlength{\parsep}{3pt}%
+\setlength{\leftmargin}{40pt}%
+}}{\end{list}}
+
+\renewcommand{\subsubsection}[1]{\par\bigskip\textbf{#1}\bigskip\par}
+%\renewcommand{\paragraph}[1]{\par\bigskip\textsc{#1}\bigskip\par}
diff --git a/archive/texmf/tex/latex/spwoutline/spwoutline.cls b/archive/texmf/tex/latex/spwoutline/spwoutline.cls
new file mode 100644
index 00000000..cdf9a807
--- /dev/null
+++ b/archive/texmf/tex/latex/spwoutline/spwoutline.cls
@@ -0,0 +1,172 @@
+% spwoutline.cls - export Org-mode outlines to LaTeX in an indented hierarchy
+% Copyright (C) 2010-2012 Sean Whitton
+
+% This program is free software; you can redistribute it and/or
+% modify it under the terms of the GNU General Public License
+% as published by the Free Software Foundation; either version 2
+% of the License, or (at your option) any later version.
+
+% This program is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+% Org-mode config to make use of this class:
+% (add-to-list 'org-export-latex-classes
+% '("spwoutline"
+% "\\documentclass{spwoutline}"
+% ("\\section{%s}" . "\\section*{%s}")
+% ("\\subsection{%s}" . "\\subsection*{%s}")
+% ("\\subsubsection{%s}" . "\\subsubsection*{%s}")))
+% (setq org-export-latex-low-levels '("\\begin{lowitemize}\\setlength{\\parindent}{2em}" "\\end{lowitemize}" "\\item \\textbf{%s}\\indent %s")) ; four slashes used to be a \\par
+
+% ; used after things like e.g. to prevent a double space
+% (setq org-entities-user '(("space" "\\ " nil " " " " " " " ")))
+
+\ProvidesClass{spwoutline}
+\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}}
+\ProcessOptions
+\LoadClass[a4paper]{article}
+
+\RequirePackage[protrusion=true,expansion=true]{microtype}
+\RequirePackage{multicol}
+\RequirePackage[a4paper]{geometry}
+\geometry{a4paper,verbose,includehead,tmargin=30pt,headsep=15pt,bmargin=50pt,lmargin=60pt,rmargin=80pt}
+\RequirePackage{fancyhdr} % this has to go after the geometry or the header appears too short
+%\pagestyle{fancy}
+%\RequirePackage{spwmaths}
+\RequirePackage[charter]{mathdesign}
+\RequirePackage[scaled]{berasans}
+\RequirePackage[sf,bf,compact,medium]{titlesec}
+%\setlength{\parindent}{0pt} % not sure why this was here and it was
+% meaning no indent on top level - be willing to uncomment
+\RequirePackage{ellipsis}
+\RequirePackage{amsbsy} % boldsymbol
+\RequirePackage[norule,splitrule,stable,multiple]{footmisc}
+
+%%% BEGIN BIBTEX STUFF
+
+\RequirePackage[%
+authorformat=smallcaps,%
+titleformat=italic,%
+titleformat=commasep,%
+commabeforerest,%
+ibidem=strictdoublepage,%
+citefull=first,%
+oxford,%
+pages=format,% pages=test doesn't seem to work but is otherwise better to use
+idem,%
+super,%
+opcit,%
+% human,%
+bibformat=ibidem
+]{jurabib}
+\makeatletter
+\jb@dotfalse
+\makeatother
+
+\AddTo\bibsenglish{%
+ \def\edbyname{ed.}%
+ \def\editorname{(ed.)}%
+ \def\editorsname{(eds.)}%
+ \def\incollinname{in}%
+ \def\inname{in}%
+ \def\transby{$\!\!$}%
+ \def\bibchaptername{ch.}%
+ \def\Bibchaptername{Ch.}%
+ \def\Volumename{Vol.}%
+ \def\volumename{vol.}%
+}
+
+\bibliographystyle{jox}
+
+\renewcommand{\jbbtasep}{ \& }
+\renewcommand{\jbbstasep}{ \& }
+\renewcommand{\jbbtesep}{ \& }
+\renewcommand{\jbbstesep}{ \& }
+\renewcommand{\bibbtasep}{ \& }
+\renewcommand{\bibbstasep}{ \& }
+\renewcommand{\bibbtesep}{ \& }
+\renewcommand{\bibbstesep}{ \& }
+
+% to use this put this at end of essay: \nobibliography{/home/swhitton/doc/swhittonfhs}
+
+%%% END BIBTEX STUFF
+
+%%% BEGIN JURABIB HACKING
+
+% want translator's name in smallcaps
+% much commenting out here to fix some pdflatex errors, and since I
+% don't do anything in German
+\DeclareRobustCommand{\translator}[3]{%
+ \unskip\unskip\space%
+ \ifthenelse{\equal{#1}{}}{% translator missing
+ \ifjboxford\transfrom\else\Transfrom\fi\jbflanguage{#2}%
+ }{% translator given
+ % \ifthenelse{\equal{\bbl@main@language}{german}}{% main language german
+ % \ifthenelse{\equal{#2}{}}{% language missing
+ % {\"U}bers. \transby{} #1%
+ % }{% language given
+ % \ifjboxford\transfrom\else\Transfrom\fi\jbflanguage{#2} ^^fcbers. \transby{} #1%
+ % }%
+ % }{%
+ \ifthenelse{\equal{#2}{}}{% language missing
+ \trans{} \transby{} \textsc{#1}%
+ }{% language given
+ \trans{} \transfrom{}\jbflanguage{#2} \transby{} \textsc{#1}%
+ }%
+ % }%
+ }% dot or not?
+ \ifjbchicago
+ \ifthenelse{\equal{#3}{1}}{% \(type=incollection \and editor given\) \or type=article
+ \bibatsep{}%
+ }{}%
+ \else
+ \ifthenelse{\equal{#3}{1}\and\not\boolean{jboxford}}{%
+ \bibatsep{}%
+ }{}%
+ \fi
+}%
+
+%%% END JURABIB HACKING
+
+\setlength{\headheight}{15pt}
+
+\pagestyle{fancy}
+%\renewcommand{\chaptermark}[1]{\markboth{#1}{}}
+\renewcommand{\sectionmark}[1]{\markright{#1}{}}
+\newcommand{\chaptermark}[1]{\markboth{#1}{}}
+
+\RequirePackage[onehalf]{spwtitle}
+
+\fancyhead{}
+\fancyhead[LE,RO]{\textsf{\thepage}}
+\makeatletter\fancyhead[CE]{\textsf{\nouppercase{\@title}}}\makeatother
+\fancyhead[CO]{\textsf{\nouppercase{\rightmark}}}
+\fancyfoot{}
+
+\fancypagestyle{plain}{ %
+\fancyhf{} % remove everything
+\renewcommand{\headrulewidth}{0pt} % remove lines as well
+\renewcommand{\footrulewidth}{0pt}}
+
+\setlength{\columnsep}{25pt}
+\setlength{\columnseprule}{.5pt}
+
+\newenvironment{lowitemize}{\begin{list}{$\quad\boldsymbol{\star}\;$}{%
+\setlength{\itemsep}{0pt}%
+\setlength{\parsep}{3pt}%
+\setlength{\leftmargin}{20pt}%
+}}{\end{list}}
+\renewenvironment{itemize}{\begin{list}{--}{%
+\setlength{\itemsep}{0pt}%
+\setlength{\parsep}{3pt}%
+\setlength{\leftmargin}{40pt}%
+}}{\end{list}}
+
+\renewcommand{\subsubsection}[1]{\par\bigskip\textbf{#1}\bigskip\par}
+%\renewcommand{\paragraph}[1]{\par\bigskip\textsc{#1}\bigskip\par}
diff --git a/archive/texmf/tex/latex/spwpaper/spwpaper.cls b/archive/texmf/tex/latex/spwpaper/spwpaper.cls
new file mode 100644
index 00000000..fe1a1281
--- /dev/null
+++ b/archive/texmf/tex/latex/spwpaper/spwpaper.cls
@@ -0,0 +1,198 @@
+\ProvidesClass{spwpaper}
+
+\newcommand{\initialdouble}{\RequirePackage[doublespacing]{setspace}\setstretch{2}}
+\newcommand{\startdouble}{\doublespacing\setstretch{2}}
+\newcommand{\finishdouble}{\onehalfspacing}
+\DeclareOption{onehalf}{\renewcommand{\initialdouble}{\RequirePackage{setspace}\onehalfspacing}\renewcommand{\startdouble}{}}
+\DeclareOption{pseudodouble}{\renewcommand{\initialdouble}{\RequirePackage[doublespacing]{setspace}\onehalfspacing}\renewcommand{\startdouble}{\doublespacing}}
+\DeclareOption{single}{\renewcommand{\initialdouble}{\RequirePackage{setspace}}\renewcommand{\startdouble}{}\renewcommand{\finishdouble}{}}
+
+\newcommand{\papersettings}{\LoadClass[12pt,a4paper]{article}\RequirePackage[a4paper,headheight=0.5in,margin=1in,nofoot]{geometry}}
+\DeclareOption{letterpaper}{\renewcommand{\papersettings}{\LoadClass[12pt,letterpaper]{article}\RequirePackage[letterpaper,headheight=0.5in,margin=1in,nofoot]{geometry}}}
+
+\newcommand{\notesandrefs}[1]{\noteshere
+ \bibhere{#1}}
+\newcommand{\doendnotes}{
+ \renewcommand{\footnote}[1]{\endnote{##1}}}
+\DeclareOption{footnotes}{\renewcommand{\notesandrefs}[1]{\bibhere{#1}}\renewcommand{\doendnotes}{}}
+
+\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}}
+\ProcessOptions
+\papersettings
+% \RequirePackage[headings]{fullpage}
+
+\RequirePackage{amsmath}
+
+\RequirePackage{enumitem}
+\setenumerate[1]{resume,label={\arabic*.},ref={(\arabic*)},itemindent=0.30in,labelsep=1em,itemsep=-1em,before=\vspace{-0.5em},after=\vspace{-0.5em}}
+
+\initialdouble % replaces below four lines to allow it to be disabled
+ % in class options
+%%% \RequirePackage[doublespacing]{setspace}
+%%% % \doublespacing
+%%% % \renewcommand{\baselinestretch}{2}
+%%% \setstretch{2} % setspace *doesn't* get doublespacing right
+\setlength\parindent{.40in}
+
+\RequirePackage[protrusion=true,expansion=true]{microtype}
+\RequirePackage{fancyhdr}
+\RequirePackage{ifthen}
+
+\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
+\DeclareMathSymbol{\strictif}{\mathrel}{symbolsC}{74}
+\DeclareMathSymbol{\boxright}{\mathrel}{symbolsC}{128} % from
+% http://www.latex-community.org/forum/viewtopic.php?f=48&t=6262
+\RequirePackage{wasysym}
+
+\RequirePackage{pslatex}
+%\RequirePackage[scaled]{berasans}
+
+\RequirePackage[sf,bf,compact,medium]{titlesec}
+\RequirePackage{textcomp}
+\RequirePackage[norule,splitrule,stable,multiple]{footmisc}
+\RequirePackage[utf8]{inputenc}
+\RequirePackage{ellipsis}
+\RequirePackage{stmaryrd}
+
+\RequirePackage{endnotes}
+\doendnotes
+
+\RequirePackage[all]{nowidow}
+
+\pagestyle{fancy}
+
+\fancyhf{}
+\fancyhead[R]{\textsf{\thepage}}
+
+\fancypagestyle{plain}{ %
+ \fancyhf{} % remove everything
+ \renewcommand{\headrulewidth}{0pt} % remove lines as well
+ \renewcommand{\footrulewidth}{0pt}}
+
+\renewcommand{\headrulewidth}{0pt}
+
+\makeatletter\renewcommand{\maketitle}{\finishdouble\thispagestyle{plain}
+ \begin{center}\textsf{\@title}
+
+
+ \textsf{\@author}\ifthenelse{\equal{\@date}{}}{}{$\qquad\quad$}\textsf{\@date}
+
+ % \hfill\large\textsf{\@author}\par\hfill\large\textsf{\@date}\normalsize
+ % \vspace{8.5pt}
+\end{center}\startdouble
+}\makeatother
+
+% following is the above with no blank line - as it should be, rather
+% than requiring one to not end a para
+% http://tex.stackexchange.com/questions/8657/prevent-a-paragraph-break-after-a-section-heading
+\renewcommand{\section}[1]{%
+ \par
+ \ifthenelse{\equal{\thesection}{0}}{}{\bigskip}
+ \pagebreak[2]%
+ \refstepcounter{section}%
+ \everypar={%
+ {\setbox0=\lastbox}% Remove the indentation
+% \addcontentsline{toc}{section}{%
+% {\protect\makebox[0.3in][r]{\textsection \thesection.} \hspace*{3pt}}}%
+% \textbf{\textsection \thesection.} $\quad$% old version with \textsection symbol
+ \thesection. $\quad$%
+ \ifthenelse{\equal{#1}{}}{}{ \textsl{#1} $\quad$}% textsl here used to be textsc
+% \textsc{\thesubsection\space\space{#1} }%
+ \everypar={}%
+ }%
+ \ignorespaces
+}
+
+% endnote and quotation formatting, some from Gregory Wheeler's
+% philosophy.sty
+\makeatletter
+\renewcommand{\@makeenmark}{\hbox{$^{\tt\@theenmark}$}}
+
+\newcommand{\noteshere}{%
+ \begingroup
+ \singlespacing
+ \parindent 0pt
+ \parskip 2ex
+ \def\enotesize{\normalsize}
+ \def\enoteformat{\rightskip=.75in \leftskip\z@ \parindent=0em
+ \leavevmode\llap{\hbox{$^{\tt \@theenmark}$}}}%(tt) added, from AC's brain_damage.sty
+ \theendnotes
+ \endgroup}
+\renewcommand{\enoteformat}{\singlespacing\rightskip\z@ \leftskip\z@ \parindent=.40in
+ \leavevmode\llap{\hbox{$^{\@theenmark}$}}}
+\renewcommand{\enoteheading}{\subsection*{\textsc{\notesname}
+ \@mkboth{\uppercase{\notesname}}{\uppercase{\notesname}}}}
+ % \leavevmode\par\vskip-\baselineskip}
+\makeatother
+
+\renewenvironment{quote}% No extra indent on first line
+{\list{}%
+ \small\item[]}
+{\endlist}
+
+% bibliography management
+% \RequirePackage{bibtex}
+\RequirePackage[authoryear,sort,elide]{natbib}
+% \RequirePackage{chicago}
+\bibliographystyle{spwchicago}
+
+\makeatletter \let\citeN\citealt \makeatother % fixes situations where
+ % citing multiple articles from one
+ % edited collection
+
+\newcommand{\bibhere}[1]{%
+ \begingroup
+ \singlespacing
+ \parindent 0pt
+ \parskip 2ex
+ \bibliography{#1}
+ \endgroup}
+
+\setlength{\itemindent}{5em}
+\makeatletter
+\def\thebibliography#1{\subsection*{\textsc{\refname}\@mkboth
+ {\uppercase{\refname}}{\uppercase{\refname}}}\list
+ {[\arabic{enumi}]}{\settowidth\labelwidth{[#1]}
+ \leftmargin\labelwidth
+ \advance\leftmargin\labelsep
+ % \advance\leftmargin\bibindent
+ \advance\leftmargin.35em
+ \itemindent -\bibindent
+ \listparindent \itemindent
+ \parsep \z@
+ \usecounter{enumi}}
+ \def\newblock{}
+ \sloppy
+ \sfcode`\.=1000\relax}
+\makeatother
+%\setcitestyle{aysep={}}
+\bibpunct{(}{)}{;}{a}{}{,}
+
+%% get resizeable Quine Corner Quotes
+\makeatletter
+\DeclareFontFamily{OMX}{MnSymbolE}{}
+\DeclareSymbolFont{MnLargeSymbols}{OMX}{MnSymbolE}{m}{n}
+\SetSymbolFont{MnLargeSymbols}{bold}{OMX}{MnSymbolE}{b}{n}
+\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{
+ <-6> MnSymbolE5
+ <6-7> MnSymbolE6
+ <7-8> MnSymbolE7
+ <8-9> MnSymbolE8
+ <9-10> MnSymbolE9
+ <10-12> MnSymbolE10
+ <12-> MnSymbolE12
+}{}
+\DeclareFontShape{OMX}{MnSymbolE}{b}{n}{
+ <-6> MnSymbolE-Bold5
+ <6-7> MnSymbolE-Bold6
+ <7-8> MnSymbolE-Bold7
+ <8-9> MnSymbolE-Bold8
+ <9-10> MnSymbolE-Bold9
+ <10-12> MnSymbolE-Bold10
+ <12-> MnSymbolE-Bold12
+}{}
+\DeclareMathDelimiter{\ulcorner}
+ {\mathopen}{MnLargeSymbols}{'036}{MnLargeSymbols}{'036}
+\DeclareMathDelimiter{\urcorner}
+ {\mathclose}{MnLargeSymbols}{'043}{MnLargeSymbols}{'043}
+\makeatother
diff --git a/archive/texmf/tex/latex/spwtitle/spwtitle.sty b/archive/texmf/tex/latex/spwtitle/spwtitle.sty
new file mode 100644
index 00000000..fd9a9653
--- /dev/null
+++ b/archive/texmf/tex/latex/spwtitle/spwtitle.sty
@@ -0,0 +1,26 @@
+\ProvidesPackage{spwtitle}
+
+% onehalf option is for documents that want onehalfspacing in general.
+% This has to be set here because we force onehalf for the title and
+% need to know whether to switch it off or not afterwards
+\RequirePackage{setspace}
+\RequirePackage{ifthen}
+\newcommand{\spaceafter}{\singlespacing}
+\DeclareOption{onehalf}{\renewcommand{\spaceafter}{}}
+\ProcessOptions
+
+\usepackage{setspace}
+
+\makeatletter\renewcommand{\maketitle}{\onehalfspacing\thispagestyle{plain}
+%\noindent\rule{\columnwidth}{2pt}
+%\begin{center}\textnormal{\LARGE\@title}
+\noindent\textnormal{\LARGE\textsf{\@title}}
+
+% \hfill\large\@author$\qquad\quad$\@date$\qquad$\normalsize
+\vspace{5pt}
+\hfill\large\textsf{\@author}\par\hfill\large\textsf{\@date}\normalsize
+\vspace{8.5pt}
+%\rule{\columnwidth}{1.5pt}
+%\end{center}
+\spaceafter
+}\makeatother