--- /dev/null
+#
+# OpenWrt feed Makefile for python-beanstalkc, a beanstalkd client library.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=python-beanstalkc
+PKG_VERSION:=v0.4.0
+PKG_RELEASE:=1
+
+# Fetched from git; the "v"-prefixed tag doubles as checkout ref and path part.
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
+PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
+PKG_SOURCE_URL:=https://github.com/earl/beanstalkc.git
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_VERSION:=$(PKG_VERSION)
+
+# Defined exactly once (the original assigned it twice, identically).
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
+PKG_BUILD_DEPENDS:=python python-setuptools
+
+include $(INCLUDE_DIR)/package.mk
+$(call include_mk, python-package.mk)
+
+define Package/python-beanstalkc
+ SECTION:=language-python
+ CATEGORY:=Languages
+ SUBMENU:=Python
+ TITLE:=Python Beanstalkc
+ URL:=https://github.com/earl/beanstalkc
+ DEPENDS:=+python
+endef
+
+define Package/python-beanstalkc/description
+ A simple beanstalkd client library for Python.
+endef
+
+# The third PyMod argument is extra environment placed immediately before the
+# interpreter invocation; it must NOT end in ";" or PYTHONPATH becomes a plain
+# (unexported) shell variable instead of part of the command environment.
+define Build/Compile
+	$(call Build/Compile/PyMod,., \
+		install --prefix="/usr" --root="$(PKG_INSTALL_DIR)", \
+		PYTHONPATH="$(PYTHON_LIB_DIR):$(STAGING_DIR_ROOT)/$(PYTHON_PKG_DIR)" \
+	)
+endef
+
+# Copy the staged site-packages tree into the package install root.
+define Package/python-beanstalkc/install
+	$(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
+	$(CP) \
+	    $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+	    $(1)$(PYTHON_PKG_DIR)/
+endef
+
+$(eval $(call BuildPackage,python-beanstalkc))
--- /dev/null
+#
+# Copyright (C) 2009-2010 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=python-cython
+PKG_VERSION:=0.20.2
+PKG_RELEASE:=1
+
+PKG_SOURCE:=Cython-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=http://www.cython.org/release/
+PKG_BUILD_DIR:=$(BUILD_DIR)/Cython-$(PKG_VERSION)
+PKG_MD5SUM:=7fc13e1c665bdf7cea19ad08906af91f
+
+# The target package depends on the host-built cython so the build never has
+# to execute target binaries on the build machine.
+PKG_BUILD_DEPENDS:=python python-cython/host
+HOST_BUILD_DEPENDS:=python
+
+HOST_BUILD_DIR:=$(BUILD_DIR_HOST)/Cython-$(PKG_VERSION)
+
+include $(INCLUDE_DIR)/host-build.mk
+include $(INCLUDE_DIR)/package.mk
+$(call include_mk, python-package.mk)
+
+define Package/python-cython
+ SUBMENU:=Python
+ SECTION:=lang
+ CATEGORY:=Languages
+ TITLE:=python-cython
+ URL:=http://www.cython.org
+ DEPENDS:=+python
+endef
+
+define Package/python-cython/description
+ Cython is a language that should make writing C extensions for the Python language easier
+endef
+
+# --no-cython-compile is required: without it setup.py calls the target gcc to
+# create .so files which obviously can't be used by the host python.
+define Build/Compile
+	$(call Build/Compile/PyMod,,install --single-version-externally-managed --no-cython-compile --prefix="/usr" --root="$(PKG_INSTALL_DIR)")
+endef
+
+# Stage the installed Cython modules both for dependent target packages
+# ($(1) = target staging dir) and for the host staging area.
+# NOTE(review): the host-side destination still hard-codes python2.7 —
+# confirm it matches the host interpreter version.
+define Build/InstallDev
+	$(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
+	$(CP) \
+	    $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+	    $(1)$(PYTHON_PKG_DIR)
+	$(CP) \
+	    $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+	    $(STAGING_DIR_HOST)/lib/python2.7/site-packages/
+endef
+
+# TODO: this should be moved to python package
+# $(1) => build subdir
+# $(2) => additional arguments to setup.py
+# $(3) => additional variables
+define Host/Compile/PyMod
+	$(call HostPython, \
+		cd $(HOST_BUILD_DIR)/$(strip $(1)); \
+		CFLAGS="$(HOST_CFLAGS)" \
+		CPPFLAGS="$(HOST_CPPFLAGS)" \
+		LDFLAGS="$(HOST_LDFLAGS)" \
+		$(3) \
+		, \
+		./setup.py $(2) \
+	)
+endef
+
+define Host/Compile
+	$(call Host/Compile/PyMod,,install --single-version-externally-managed --no-cython-compile --prefix="/usr" --root="$(STAGING_DIR_HOST)")
+endef
+
+# Host files are already placed under STAGING_DIR_HOST by Host/Compile via
+# --root, so there is nothing left to do here.
+define Host/Install
+endef
+
+$(eval $(call HostBuild))
+
+$(eval $(call BuildPackage,python-cython))
--- /dev/null
+
+include $(TOPDIR)/rules.mk
+
+# Pinned to the Django 1.4 LTS line; the package name carries the branch so
+# it can coexist with newer Django packages in the same feed.
+PKG_NAME:=python-django-1.4
+PKG_VERSION:=1.4.14
+PKG_RELEASE:=1
+
+PKG_SOURCE:=Django-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=https://pypi.python.org/packages/source/D/Django/
+PKG_MD5SUM:=80dc1b9866487afc2ab3f774e29181bc
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/Django-$(PKG_VERSION)
+PKG_BUILD_DEPENDS:=python
+
+include $(INCLUDE_DIR)/package.mk
+$(call include_mk, python-package.mk)
+
+define Package/python-django-1.4
+ SECTION:=language-python
+ CATEGORY:=Languages
+ SUBMENU:=Python
+ TITLE:=Django Web framework v1.4
+ URL:=https://www.djangoproject.com/
+ DEPENDS:=+python
+endef
+
+define Package/python-django-1.4/description
+ Django is a high-level Python Web framework that encourages rapid
+ development and clean, pragmatic design.
+endef
+
+# Install into PKG_INSTALL_DIR with a /usr prefix so the site-packages
+# layout matches the target filesystem.
+define Build/Compile
+ $(call Build/Compile/PyMod,., \
+ install --prefix="/usr" --root="$(PKG_INSTALL_DIR)" \
+ )
+endef
+
+# Copy the staged site-packages tree into the package install root.
+define Package/python-django-1.4/install
+ $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
+ $(CP) \
+ $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+ $(1)$(PYTHON_PKG_DIR)/
+endef
+
+$(eval $(call BuildPackage,python-django-1.4))
--- /dev/null
+#
+# Copyright (C) 2013 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=python-gevent
+PKG_VERSION:=2014-12-14-72119c8c3e704b75a2d91d8555ca8673928ebabb
+PKG_RELEASE:=1
+
+# Pinned to an exact upstream commit; the date-plus-hash version string
+# keeps generated tarball names unique.
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_URL:=https://github.com/surfly/gevent.git
+PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
+PKG_SOURCE_VERSION:=72119c8c3e704b75a2d91d8555ca8673928ebabb
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
+
+PKG_BUILD_DEPENDS:=python python-cython
+PKG_USE_MIPS16:=0
+
+include $(INCLUDE_DIR)/package.mk
+$(call include_mk, python-package.mk)
+
+define Package/python-gevent
+ SUBMENU:=Python
+ SECTION:=libs
+ CATEGORY:=Libraries
+ TITLE:=gevent
+ URL:=http://www.gevent.org/
+ DEPENDS:=+python +python-greenlet
+endef
+
+# This is required in addition to PKG_USE_MIPS16:=0 because otherwise MIPS16
+# flags are inherited from the Python base package (via sysconfig module)
+ifdef CONFIG_USE_MIPS16
+TARGET_CFLAGS += -mno-mips16 -mno-interlink-mips16
+endif
+
+define Package/python-gevent/description
+ gevent is a coroutine-based Python networking library.
+endef
+
+# First run configure for the bundled libev (cross-compile flags set inside a
+# subshell), then build the Python extension through the shared PyMod helper.
+# CYTHON points setup.py at the host cython binary so no target code runs.
+# NOTE(review): only configure is executed under libev/ — presumably setup.py
+# drives the actual libev compilation; confirm the embedded-libev build path.
+define Build/Compile
+ (cd $(PKG_BUILD_DIR)/libev; \
+ $(TARGET_CONFIGURE_OPTS) \
+ CFLAGS="$(TARGET_CFLAGS)" \
+ CPPFLAGS="-I$(STAGING_DIR)/usr/include -I$(STAGING_DIR)/include" \
+ LDFLAGS="-L$(STAGING_DIR)/usr/lib -L$(STAGING_DIR)/lib" \
+ $(PKG_BUILD_DIR)/libev/configure \
+ --target=$(GNU_TARGET_NAME) \
+ --host=$(GNU_TARGET_NAME) \
+ --build=$(GNU_HOST_NAME) \
+ --prefix=/usr \
+ --without-libiconv-prefix \
+ --without-libintl-prefix \
+ --disable-nls \
+ );
+ $(call Build/Compile/PyMod,., \
+ install --prefix="/usr" --root="$(PKG_INSTALL_DIR)", \
+ CYTHON="$(STAGING_DIR_HOST)/usr/bin/cython" \
+ )
+endef
+
+# Copy the staged site-packages tree into the package install root.
+define Package/python-gevent/install
+ $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
+ $(CP) \
+ $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+ $(1)$(PYTHON_PKG_DIR)
+endef
+
+$(eval $(call BuildPackage,python-gevent))
--- /dev/null
+#
+# Copyright (C) 2013 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=python-greenlet
+PKG_VERSION:=0.4.2
+PKG_RELEASE:=1
+
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
+PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
+PKG_SOURCE_URL:=https://github.com/python-greenlet/greenlet.git
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_VERSION:=$(PKG_VERSION)
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
+PKG_BUILD_DEPENDS:=python
+
+include $(INCLUDE_DIR)/package.mk
+$(call include_mk, python-package.mk)
+
+define Package/python-greenlet
+ SUBMENU:=Python
+ SECTION:=lang
+ CATEGORY:=Languages
+ TITLE:=greenlet
+ URL:=https://github.com/python-greenlet/greenlet
+ DEPENDS:=+python
+endef
+
+define Package/python-greenlet/description
+ The greenlet package is a spin-off of Stackless, a version of CPython
+ that supports micro-threads called "tasklets". Tasklets run
+ pseudo-concurrently (typically in a single or a few OS-level threads)
+ and are synchronized with data exchanges on "channels".
+
+ A "greenlet", on the other hand, is a still more primitive notion of
+ micro-thread with no implicit scheduling; coroutines, in other
+ words. This is useful when you want to control exactly when your code
+ runs. You can build custom scheduled micro-threads on top of greenlet;
+ however, it seems that greenlets are useful on their own as a way to
+ make advanced control flow structures. For example, we can recreate
+ generators; the difference with Python's own generators is that our
+ generators can call nested functions and the nested functions can
+ yield values too. Additionally, you don't need a "yield" keyword. See
+ the example in tests/test_generator.py.
+
+ Greenlets are provided as a C extension module for the regular
+ unmodified interpreter.
+
+ Greenlets are lightweight coroutines for in-process concurrent
+ programming.
+endef
+
+# Use the same --prefix=/usr + --root=PKG_INSTALL_DIR convention as the other
+# python packages in this feed; the original used --prefix=PKG_INSTALL_DIR/usr
+# and then created $(1)$(PYTHON_PKG_DIR) while copying into the never-created
+# $(1)/usr/, leaving the install recipe internally inconsistent.
+define Build/Compile
+	$(call Build/Compile/PyMod,,install --prefix="/usr" --root="$(PKG_INSTALL_DIR)")
+endef
+
+# Copy the staged site-packages tree into the package install root.
+define Package/python-greenlet/install
+	$(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
+	$(CP) \
+	    $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+	    $(1)$(PYTHON_PKG_DIR)/
+endef
+
+$(eval $(call BuildPackage,python-greenlet))
--- /dev/null
+#
+# OpenWrt feed Makefile for python-gunicorn (Green Unicorn WSGI server).
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=python-gunicorn
+PKG_VERSION:=19.2.0
+PKG_RELEASE:=1
+
+PKG_SOURCE:=gunicorn-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=https://pypi.python.org/packages/source/g/gunicorn/
+PKG_MD5SUM:=010e23e52324375ed9e2265d6e3be692
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/gunicorn-$(PKG_VERSION)
+PKG_BUILD_DEPENDS:=python python-setuptools
+
+include $(INCLUDE_DIR)/package.mk
+$(call include_mk, python-package.mk)
+
+define Package/python-gunicorn
+ SECTION:=language-python
+ CATEGORY:=Languages
+ SUBMENU:=Python
+ TITLE:=Python Gunicorn
+ URL:=https://pypi.python.org/pypi/gunicorn/
+ DEPENDS:=+python-setuptools
+endef
+
+define Package/python-gunicorn/description
+ Gunicorn 'Green Unicorn' is a Python WSGI HTTP Server for UNIX.
+ It's a pre-fork worker model ported from Ruby's Unicorn project.
+ The Gunicorn server is broadly compatible with various web frameworks,
+ simply implemented, light on server resources, and fairly speedy.
+endef
+
+# The third PyMod argument is extra environment placed immediately before the
+# interpreter invocation; it must NOT end in ";" or PYTHONPATH becomes a plain
+# (unexported) shell variable instead of part of the command environment.
+define Build/Compile
+	$(call Build/Compile/PyMod,., \
+		install --prefix="/usr" --root="$(PKG_INSTALL_DIR)", \
+		PYTHONPATH="$(PYTHON_LIB_DIR):$(STAGING_DIR_ROOT)/$(PYTHON_PKG_DIR)" \
+	)
+endef
+
+define Package/python-gunicorn/install
+	$(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
+	$(CP) \
+	    $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+	    $(1)$(PYTHON_PKG_DIR)/
+
+	$(INSTALL_DIR) $(1)/usr/bin
+	$(CP) \
+	    $(PKG_INSTALL_DIR)/usr/bin/gunicorn \
+	    $(1)/usr/bin/
+	# Rewrite only the shebang (line 1) to the target interpreter path. The
+	# original unanchored 's/#!.*/.../' would rewrite ANY line containing "#!".
+	sed -i '1s|^#!.*|#!/usr/bin/python|' $(1)/usr/bin/gunicorn
+endef
+
+$(eval $(call BuildPackage,python-gunicorn))
--- /dev/null
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=python-requests
+PKG_VERSION:=v2.3.0
+PKG_RELEASE:=1
+
+# Fetched from git; the "v"-prefixed tag doubles as checkout ref and path
+# component.
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
+PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
+PKG_SOURCE_URL:=https://github.com/kennethreitz/requests.git
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_VERSION:=$(PKG_VERSION)
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
+PKG_BUILD_DEPENDS:=python
+
+include $(INCLUDE_DIR)/package.mk
+$(call include_mk, python-package.mk)
+
+define Package/python-requests
+ SECTION:=language-python
+ CATEGORY:=Languages
+ SUBMENU:=Python
+ TITLE:=Python Requests
+ URL:=http://docs.python-requests.org/en/latest/
+endef
+
+define Package/python-requests/description
+ Requests is an elegant and simple HTTP library for Python, built for human beings.
+endef
+
+# Install into PKG_INSTALL_DIR with a /usr prefix so the site-packages
+# layout matches the target filesystem.
+define Build/Compile
+ $(call Build/Compile/PyMod,., \
+ install --prefix="/usr" --root="$(PKG_INSTALL_DIR)" \
+ )
+endef
+
+# Copy the staged site-packages tree into the package install root.
+define Package/python-requests/install
+ $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
+ $(CP) \
+ $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+ $(1)$(PYTHON_PKG_DIR)/
+endef
+
+$(eval $(call BuildPackage,python-requests))
-#
-# Copyright (C) 2014 OpenWrt.org
-#
-# This is free software, licensed under the GNU General Public License v2.
-# See /LICENSE for more information.
-#
include $(TOPDIR)/rules.mk
PKG_NAME:=python-setuptools
-PKG_VERSION:=7.0
+# NOTE(review): intentional downgrade from 7.0 to 5.2; the MD5 below must
+# match the 5.2 tarball — confirm.
+PKG_VERSION:=5.2
PKG_RELEASE:=1
PKG_SOURCE:=setuptools-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://pypi.python.org/packages/source/s/setuptools/
-PKG_MD5SUM:=6245d6752e2ef803c365f560f7f2f940
+PKG_MD5SUM:=ea6ed9ab1a4abe978dade73a592a229c
PKG_BUILD_DIR:=$(BUILD_DIR)/setuptools-$(PKG_VERSION)
+PKG_BUILD_DEPENDS:=python
include $(INCLUDE_DIR)/package.mk
$(call include_mk, python-package.mk)
define Package/python-setuptools
- SUBMENU:=Python
- SECTION:=lang
+ SECTION:=language-python
 CATEGORY:=Languages
- TITLE:=Tool for installing Python packages.
- URL:=https://bitbucket.org/pypa/setuptools
- DEPENDS:=+python
+ SUBMENU:=Python
+ TITLE:=Python Setuptools
+ URL:=https://pypi.python.org/pypi/setuptools/
endef
define Package/python-setuptools/description
- Easily download, build, install, upgrade, and uninstall Python packages
+ Easily download, build, install, upgrade, and uninstall Python packages.
endef
define Build/Compile
- $(call Build/Compile/PyMod,,\
- install --prefix="$(PKG_INSTALL_DIR)/usr" \
+ $(call Build/Compile/PyMod,., \
+ install --prefix="/usr" --root="$(PKG_INSTALL_DIR)" \
 )
endef
-define Build/InstallDev
- $(INSTALL_DIR) $(PYTHON_LIB_DIR)
+define Package/python-setuptools/install
+ $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
 $(CP) \
 $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
- $(PYTHON_LIB_DIR)
+ $(1)$(PYTHON_PKG_DIR)/
endef
+# NOTE(review): the PyPackage split and the /usr/bin script install are
+# dropped below, so easy_install console scripts are no longer shipped —
+# confirm that is intended.
-define PyPackage/python-setuptools/install
- $(INSTALL_DIR) $(1)/usr/bin
- $(CP) $(PKG_INSTALL_DIR)/usr/bin/* $(1)/usr/bin/
-endef
-
-$(eval $(call PyPackage,python-setuptools))
$(eval $(call BuildPackage,python-setuptools))
-
--- /dev/null
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=python-six
+PKG_VERSION:=1.7.2
+PKG_RELEASE:=1
+
+PKG_SOURCE:=six-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=https://pypi.python.org/packages/source/s/six/
+PKG_MD5SUM:=4c26276583b01dfc73474cb32327af91
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/six-$(PKG_VERSION)
+PKG_BUILD_DEPENDS:=python
+
+include $(INCLUDE_DIR)/package.mk
+$(call include_mk, python-package.mk)
+
+define Package/python-six
+ SECTION:=language-python
+ CATEGORY:=Languages
+ SUBMENU:=Python
+ TITLE:=Python Six
+ URL:=https://pypi.python.org/pypi/six
+endef
+
+define Package/python-six/description
+ Python 2 and 3 compatibility utilities.
+endef
+
+# Install into PKG_INSTALL_DIR with a /usr prefix so the site-packages
+# layout matches the target filesystem.
+define Build/Compile
+ $(call Build/Compile/PyMod,., \
+ install --prefix="/usr" --root="$(PKG_INSTALL_DIR)" \
+ )
+endef
+
+# Copy the staged site-packages tree into the package install root.
+define Package/python-six/install
+ $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
+ $(CP) \
+ $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+ $(1)$(PYTHON_PKG_DIR)/
+endef
+
+$(eval $(call BuildPackage,python-six))
--- /dev/null
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=python-unidecode
+PKG_VERSION:=0.04.16
+PKG_RELEASE:=1
+
+PKG_SOURCE:=Unidecode-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=https://pypi.python.org/packages/source/U/Unidecode/
+PKG_MD5SUM:=cd6e265fd61010a1cbfeb9dd42c6bcce
+
+# NOTE(review): the doubled Unidecode-$(PKG_VERSION)/Unidecode-$(PKG_VERSION)
+# path assumes the tarball unpacks to a nested directory — confirm against
+# the actual sdist layout before relying on it.
+PKG_BUILD_DIR:=$(BUILD_DIR)/Unidecode-$(PKG_VERSION)/Unidecode-$(PKG_VERSION)
+PKG_BUILD_DEPENDS:=python
+
+include $(INCLUDE_DIR)/package.mk
+$(call include_mk, python-package.mk)
+
+define Package/python-unidecode
+ SECTION:=language-python
+ CATEGORY:=Languages
+ SUBMENU:=Python
+ TITLE:=Python Unidecode
+ URL:=https://pypi.python.org/pypi/Unidecode
+endef
+
+define Package/python-unidecode/description
+ ASCII transliterations of Unicode text.
+endef
+
+# Install into PKG_INSTALL_DIR with a /usr prefix so the site-packages
+# layout matches the target filesystem.
+define Build/Compile
+ $(call Build/Compile/PyMod,., \
+ install --prefix="/usr" --root="$(PKG_INSTALL_DIR)" \
+ )
+endef
+
+# Copy the staged site-packages tree into the package install root.
+define Package/python-unidecode/install
+ $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
+ $(CP) \
+ $(PKG_INSTALL_DIR)$(PYTHON_PKG_DIR)/* \
+ $(1)$(PYTHON_PKG_DIR)/
+endef
+
+$(eval $(call BuildPackage,python-unidecode))
--- /dev/null
+#
+# OpenWrt package Makefile for the Exim 4 mail transfer agent.
+#
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=exim4
+PKG_VERSION:=4.85
+PKG_RELEASE:=1
+
+PKG_SOURCE:=exim-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=http://mirror.switch.ch/ftp/mirror/exim/exim/exim4/
+PKG_MD5SUM:=56c1840a1491e03b3bff25855de3c17c
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/exim-$(PKG_VERSION)
+
+# NOTE(review): this assignment is never consumed by Exim's build (and the
+# quotes become part of the value); the compiler is actually selected via CC
+# in files/Makefile — confirm whether this line can be removed.
+CC="$(TARGET_CC)"
+
+# NOTE(review): the architecture is hard-coded to i386 (here and in the
+# build-Linux-<arch> output directory); this package will not build for other
+# targets as-is.
+EXIM_ARCH:=i386
+EXIM_BUILD_OUT:=$(PKG_BUILD_DIR)/build-Linux-$(EXIM_ARCH)
+
+# All binaries produced by the Exim build that are shipped in /usr/exim/bin.
+EXIM_BINARIES:=exim exim_dumpdb exim_fixdb exim_tidydb exinext exiwhat \
+	exim_dbmbuild exicyclog exigrep eximstats exipick exiqgrep exiqsumm \
+	exim_lock exim_checkaccess
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/exim4
+ SECTION:=mail
+ CATEGORY:=Mail
+ TITLE:=Exim Internet Mailer Version 4
+ DEPENDS:=+libdb47 +libiconv +libpcre +@CCACHE
+ URL:=http://www.exim.org/
+endef
+
+define Package/exim4/description
+ Exim is a mail transfer agent used on Unix-like operating systems.
+ Exim is free software distributed under the terms of the GNU General Public
+ License, and it aims to be a general and flexible mailer with extensive
+ facilities for checking incoming e-mail.
+endef
+
+# Exim is configured through Local/Makefile rather than ./configure.
+define Build/Configure
+	cp ./files/Makefile $(PKG_BUILD_DIR)/Local/Makefile
+endef
+
+# Use $(MAKE), not a literal "make", so the jobserver (-j) and dry-run (-n)
+# flags propagate into the sub-make.
+define Build/Compile
+	cd $(PKG_BUILD_DIR) && EXIM_ARCHTYPE=$(EXIM_ARCH) $(MAKE)
+endef
+
+define Package/exim4/install
+	$(INSTALL_DIR) $(1)/usr/exim/bin
+	$(foreach bin,$(EXIM_BINARIES), \
+		$(INSTALL_BIN) $(EXIM_BUILD_OUT)/$(bin) $(1)/usr/exim/bin/$(bin); \
+	)
+
+	$(INSTALL_DIR) $(1)/usr/sbin
+	$(INSTALL_BIN) $(EXIM_BUILD_OUT)/exim $(1)/usr/sbin/exim4
+
+	$(INSTALL_DIR) $(1)/etc/init.d
+	$(INSTALL_BIN) ./files/exim.init $(1)/etc/init.d/exim
+endef
+
+$(eval $(call BuildPackage,exim4))
--- /dev/null
+##################################################
+# The Exim mail transport agent #
+##################################################
+
+# This is the template for Exim's main build-time configuration file. It
+# contains settings that are independent of any operating system. These are
+# things that are mostly sysadmin choices. The items below are divided into
+# those you must specify, those you probably want to specify, those you might
+# often want to specify, and those that you almost never need to mention.
+
+# Edit this file and save the result to a file called Local/Makefile within the
+# Exim distribution directory before running the "make" command.
+
+# Things that depend on the operating system have default settings in
+# OS/Makefile-Default, but these are overridden for some OS by files called
+# OS/Makefile-<osname>. You can further override these by creating files
+# called Local/Makefile-<osname>, where "<osname>" stands for the name of your
+# operating system - look at the names in the OS directory to see which names
+# are recognized.
+
+# However, if you are building Exim for a single OS only, you don't need to
+# worry about setting up Local/Makefile-<osname>. Any build-time configuration
+# settings you require can in fact be placed in the one file called
+# Local/Makefile. It is only if you are building for several OS from the same
+# source files that you need to worry about splitting off your own OS-dependent
+# settings into separate files. (There's more explanation about how this all
+# works in the toplevel README file, under "Modifying the building process", as
+# well as in the Exim specification.)
+
+# One OS-specific thing that may need to be changed is the command for running
+# the C compiler; the overall default is gcc, but some OS Makefiles specify cc.
+# You can override anything that is set by putting CC=whatever in your
+# Local/Makefile.
+CC=ccache_cc
+
+# NOTE: You should never need to edit any of the distributed Makefiles; all
+# overriding can be done in your Local/Makefile(s). This will make it easier
+# for you when the next release comes along.
+
+# The location of the X11 libraries is something else that is quite variable
+# even between different versions of the same operating system (and indeed
+# there are different versions of X11 as well, of course). The four settings
+# concerned here are X11, XINCLUDE, XLFLAGS (linking flags) and X11_LD_LIB
+# (dynamic run-time library). You need not worry about X11 unless you want to
+# compile the Exim monitor utility. Exim itself does not use X11.
+
+# Another area of variability between systems is the type and location of the
+# DBM library package. Exim has support for ndbm, gdbm, tdb, and Berkeley DB.
+# By default the code assumes ndbm; this often works with gdbm or DB, provided
+# they are correctly installed, via their compatibility interfaces. However,
+# Exim can also be configured to use the native calls for Berkeley DB (obsolete
+# versions 1.85, 2.x, 3.x, or the current 4.x version) and also for gdbm.
+
+# For some operating systems, a default DBM library (other than ndbm) is
+# selected by a setting in the OS-specific Makefile. Most modern OS now have
+# a DBM library installed as standard, and in many cases this will be selected
+# for you by the OS-specific configuration. If Exim compiles without any
+# problems, you probably do not have to worry about the DBM library. If you
+# do want or need to change it, you should first read the discussion in the
+# file doc/dbm.discuss.txt, which also contains instructions for testing Exim's
+# interface to the DBM library.
+
+# In Local/Makefiles blank lines and lines starting with # are ignored. It is
+# also permitted to use the # character to add a comment to a setting, for
+# example
+#
+# EXIM_GID=42 # the "mail" group
+#
+# However, with some versions of "make" this works only if there is no white
+# space between the end of the setting and the #, so perhaps it is best
+# avoided. A consequence of this facility is that it is not possible to have
+# the # character present in any setting, but I can't think of any cases where
+# this would be wanted.
+###############################################################################
+
+
+
+###############################################################################
+# THESE ARE THINGS YOU MUST SPECIFY #
+###############################################################################
+
+# Exim will not build unless you specify BIN_DIRECTORY, CONFIGURE_FILE, and
+# EXIM_USER. You also need EXIM_GROUP if EXIM_USER specifies a uid by number.
+
+# If you don't specify SPOOL_DIRECTORY, Exim won't fail to build. However, it
+# really is a very good idea to specify it here rather than at run time. This
+# is particularly true if you let the logs go to their default location in the
+# spool directory, because it means that the location of the logs is known
+# before Exim has read the run time configuration file.
+
+#------------------------------------------------------------------------------
+# BIN_DIRECTORY defines where the exim binary will be installed by "make
+# install". The path is also used internally by Exim when it needs to re-invoke
+# itself, either to send an error message, or to recover root privilege. Exim's
+# utility binaries and scripts are also installed in this directory. There is
+# no "standard" place for the binary directory. Some people like to keep all
+# the Exim files under one directory such as /usr/exim; others just let the
+# Exim binaries go into an existing directory such as /usr/sbin or
+# /usr/local/sbin. The installation script will try to create this directory,
+# and any superior directories, if they do not exist.
+
+BIN_DIRECTORY=/usr/exim/bin
+
+
+#------------------------------------------------------------------------------
+# CONFIGURE_FILE defines where Exim's run time configuration file is to be
+# found. It is the complete pathname for the file, not just a directory. The
+# location of all other run time files and directories can be changed in the
+# run time configuration file. There is a lot of variety in the choice of
+# location in different OS, and in the preferences of different sysadmins. Some
+# common locations are in /etc or /etc/mail or /usr/local/etc or
+# /usr/local/etc/mail. Another possibility is to keep all the Exim files under
+# a single directory such as /usr/exim. Whatever you choose, the installation
+# script will try to make the directory and any superior directories if they
+# don't exist. It will also install a default runtime configuration if this
+# file does not exist.
+
+CONFIGURE_FILE=/usr/exim/configure
+
+# It is possible to specify a colon-separated list of files for CONFIGURE_FILE.
+# In this case, Exim will use the first of them that exists when it is run.
+# However, if a list is specified, the installation script no longer tries to
+# make superior directories or to install a default runtime configuration.
+
+
+#------------------------------------------------------------------------------
+# The Exim binary must normally be setuid root, so that it starts executing as
+# root, but (depending on the options with which it is called) it does not
+# always need to retain the root privilege. These settings define the user and
+# group that is used for Exim processes when they no longer need to be root. In
+# particular, this applies when receiving messages and when doing remote
+# deliveries. (Local deliveries run as various non-root users, typically as the
+# owner of a local mailbox.) Specifying these values as root is not supported.
+
+EXIM_USER=110
+
+# If you specify EXIM_USER as a name, this is looked up at build time, and the
+# uid number is built into the binary. However, you can specify that this
+# lookup is deferred until runtime. In this case, it is the name that is built
+# into the binary. You can do this by a setting of the form:
+
+# EXIM_USER=ref:exim
+
+# In other words, put "ref:" in front of the user name. If you set EXIM_USER
+# like this, any value specified for EXIM_GROUP is also passed "by reference".
+# Although this costs a bit of resource at runtime, it is convenient to use
+# this feature when building binaries that are to be run on multiple systems
+# where the name may refer to different uids. It also allows you to build Exim
+# on a system where there is no Exim user defined.
+
+# If the setting of EXIM_USER is numeric (e.g. EXIM_USER=42), there must
+# also be a setting of EXIM_GROUP. If, on the other hand, you use a name
+# for EXIM_USER (e.g. EXIM_USER=exim), you don't need to set EXIM_GROUP unless
+# you want to use a group other than the default group for the given user.
+
+EXIM_GROUP=110
+
+# Many sites define a user called "exim", with an appropriate default group,
+# and use
+#
+# EXIM_USER=exim
+#
+# while leaving EXIM_GROUP unspecified (commented out).
+
+
+#------------------------------------------------------------------------------
+# SPOOL_DIRECTORY defines the directory where all the data for messages in
+# transit is kept. It is strongly recommended that you define it here, though
+# it is possible to leave this till the run time configuration.
+
+# Exim creates the spool directory if it does not exist. The owner and group
+# will be those defined by EXIM_USER and EXIM_GROUP, and this also applies to
+# all the files and directories that are created in the spool directory.
+
+# Almost all installations choose this:
+
+#TODO
+SPOOL_DIRECTORY=/var/spool/exim
+
+
+
+###############################################################################
+# THESE ARE THINGS YOU PROBABLY WANT TO SPECIFY #
+###############################################################################
+
+# If you need extra header file search paths on all compiles, put the -I
+# options in INCLUDE. If you want the extra searches only for certain
+# parts of the build, see more specific xxx_INCLUDE variables below.
+
+# INCLUDE=-I/example/include
+
+# You need to specify some routers and transports if you want the Exim that you
+# are building to be capable of delivering mail. You almost certainly need at
+# least one type of lookup. You should consider whether you want to build
+# the Exim monitor or not.
+
+
+#------------------------------------------------------------------------------
+# These settings determine which individual router drivers are included in the
+# Exim binary. There are no defaults in the code; those routers that are wanted
+# must be defined here by setting the appropriate variables to the value "yes".
+# Including a router in the binary does not cause it to be used automatically.
+# It has also to be configured in the run time configuration file. By
+# commenting out those you know you don't want to use, you can make the binary
+# a bit smaller. If you are unsure, leave all of these included for now.
+
+ROUTER_ACCEPT=yes
+ROUTER_DNSLOOKUP=yes
+ROUTER_IPLITERAL=yes
+ROUTER_MANUALROUTE=yes
+ROUTER_QUERYPROGRAM=yes
+ROUTER_REDIRECT=yes
+
+# This one is very special-purpose, so is not included by default.
+
+# ROUTER_IPLOOKUP=yes
+
+
+#------------------------------------------------------------------------------
+# These settings determine which individual transport drivers are included in
+# the Exim binary. There are no defaults; those transports that are wanted must
+# be defined here by setting the appropriate variables to the value "yes".
+# Including a transport in the binary does not cause it to be used
+# automatically. It has also to be configured in the run time configuration
+# file. By commenting out those you know you don't want to use, you can make
+# the binary a bit smaller. If you are unsure, leave all of these included for
+# now.
+
+TRANSPORT_APPENDFILE=yes
+TRANSPORT_AUTOREPLY=yes
+TRANSPORT_PIPE=yes
+TRANSPORT_SMTP=yes
+
+# This one is special-purpose, and commonly not required, so it is not
+# included by default.
+
+# TRANSPORT_LMTP=yes
+
+
+#------------------------------------------------------------------------------
+# The appendfile transport can write messages to local mailboxes in a number
+# of formats. The code for three specialist formats, maildir, mailstore, and
+# MBX, is included only when requested. If you do not know what this is about,
+# leave these settings commented out.
+
+SUPPORT_MAILDIR=yes
+# SUPPORT_MAILSTORE=yes
+# SUPPORT_MBX=yes
+
+
+#------------------------------------------------------------------------------
+# See below for dynamic lookup modules.
+#
+# If not using package management but using this anyway, then think about how
+# you perform upgrades and revert them. You should consider the benefit of
+# embedding the Exim version number into LOOKUP_MODULE_DIR, so that you can
+# maintain two concurrent sets of modules.
+#
+# *BEWARE*: ability to modify the files in LOOKUP_MODULE_DIR is equivalent to
+# the ability to modify the Exim binary, which is often setuid root! The Exim
+# developers only intend this functionality be used by OS software packagers
+# and we suggest that such packagings' integrity checks should be paranoid
+# about the permissions of the directory and the files within.
+
+# LOOKUP_MODULE_DIR=/usr/lib/exim/lookups/
+
+# To build a module dynamically, you'll need to define CFLAGS_DYNAMIC for
+# your platform. Eg:
+# CFLAGS_DYNAMIC=-shared -rdynamic
+# CFLAGS_DYNAMIC=-shared -rdynamic -fPIC
+
+#------------------------------------------------------------------------------
+# These settings determine which file and database lookup methods are included
+# in the binary. See the manual chapter entitled "File and database lookups"
+# for discussion. DBM and lsearch (linear search) are included by default. If
+# you are unsure about the others, leave them commented out for now.
+# LOOKUP_DNSDB does *not* refer to general mail routing using the DNS. It is
+# for the specialist case of using the DNS as a general database facility (not
+# common).
+# If set to "2" instead of "yes" then the corresponding lookup will be
+# built as a module and must be installed into LOOKUP_MODULE_DIR. You need to
+# add -export-dynamic -rdynamic to EXTRALIBS. You may also need to add -ldl to
+# EXTRALIBS so that dlopen() is available to Exim. You need to define
+# LOOKUP_MODULE_DIR above so the exim binary actually loads dynamic lookup
+# modules.
+# Also, instead of adding all the libraries/includes to LOOKUP_INCLUDE and
+# LOOKUP_LIBS, add them to the respective LOOKUP_*_INCLUDE and LOOKUP_*_LIBS
+# (where * is the name as given here in this list). That ensures that only
+# the dynamic library and not the exim binary will be linked against the
+# library.
+# NOTE: LDAP cannot be built as a module!
+#
+# If your system has pkg-config then the _INCLUDE/_LIBS setting can be
+# handled for you automatically by also defining the _PC variable to reference
+# the name of the pkg-config package, if such is available.
+
+# NOTE(review): LOOKUP_DBM=no deviates from the upstream default (the text
+# above states that DBM and lsearch are included by default) -- presumably
+# to avoid a DBM library dependency on the target; confirm this is
+# intentional.
+LOOKUP_DBM=no
+LOOKUP_LSEARCH=yes
+LOOKUP_DNSDB=yes
+
+# LOOKUP_CDB=yes
+# LOOKUP_DSEARCH=yes
+# LOOKUP_IBASE=yes
+# LOOKUP_LDAP=yes
+# LOOKUP_MYSQL=yes
+# LOOKUP_NIS=yes
+# LOOKUP_NISPLUS=yes
+# LOOKUP_ORACLE=yes
+# LOOKUP_PASSWD=yes
+# LOOKUP_PGSQL=yes
+# LOOKUP_SQLITE=yes
+# LOOKUP_SQLITE_PC=sqlite3
+# LOOKUP_WHOSON=yes
+
+# These two settings are obsolete; all three lookups are compiled when
+# LOOKUP_LSEARCH is enabled. However, we retain these for backward
+# compatibility. Setting one forces LOOKUP_LSEARCH if it is not set.
+
+# LOOKUP_WILDLSEARCH=yes
+# LOOKUP_NWILDLSEARCH=yes
+
+
+#------------------------------------------------------------------------------
+# If you have set LOOKUP_LDAP=yes, you should set LDAP_LIB_TYPE to indicate
+# which LDAP library you have. Unfortunately, though most of their functions
+# are the same, there are minor differences. Currently Exim knows about four
+# LDAP libraries: the one from the University of Michigan (also known as
+# OpenLDAP 1), OpenLDAP 2, the Netscape SDK library, and the library that comes
+# with Solaris 7 onwards. Uncomment whichever of these you are using.
+
+# LDAP_LIB_TYPE=OPENLDAP1
+# LDAP_LIB_TYPE=OPENLDAP2
+# LDAP_LIB_TYPE=NETSCAPE
+# LDAP_LIB_TYPE=SOLARIS
+
+# If you don't set any of these, Exim assumes the original University of
+# Michigan (OpenLDAP 1) library.
+
+
+#------------------------------------------------------------------------------
+# The PCRE library is required for exim. There is no longer an embedded
+# version of the PCRE library included with the source code, instead you
+# must use a system library or build your own copy of PCRE.
+# In either case you must specify the library link info here. If the
+# PCRE header files are not in the standard search path you must also
+# modify the INCLUDE path (above)
+#
+# Use PCRE_CONFIG to query the pcre-config command (first found in $PATH)
+# to find the include files and libraries, else use PCRE_LIBS and set INCLUDE
+# too if needed.
+
+# Locate the PCRE headers and libraries via the pcre-config command found
+# first in $PATH at build time, instead of hard-coding PCRE_LIBS.
+PCRE_CONFIG=yes
+# PCRE_LIBS=-lpcre
+
+
+#------------------------------------------------------------------------------
+# Additional libraries and include directories may be required for some
+# lookup styles (e.g. LDAP, MYSQL or PGSQL). LOOKUP_LIBS is included only on
+# the command for linking Exim itself, not on any auxiliary programs. You
+# don't need to set LOOKUP_INCLUDE if the relevant directories are already
+# specified in INCLUDE. The settings below are just examples; -lpq is for
+# PostgreSQL, -lgds is for Interbase, -lsqlite3 is for SQLite.
+#
+# You do not need to use this for any lookup information added via pkg-config.
+
+# LOOKUP_INCLUDE=-I /usr/local/ldap/include -I /usr/local/mysql/include -I /usr/local/pgsql/include
+# LOOKUP_LIBS=-L/usr/local/lib -lldap -llber -lmysqlclient -lpq -lgds -lsqlite3
+
+
+#------------------------------------------------------------------------------
+# Compiling the Exim monitor: If you want to compile the Exim monitor, a
+# program that requires an X11 display, then EXIM_MONITOR should be set to the
+# value "eximon.bin". Comment out this setting to disable compilation of the
+# monitor. The locations of various X11 directories for libraries and include
+# files are defaulted in the OS/Makefile-Default file, but can be overridden in
+# local OS-specific make files.
+
+#EXIM_MONITOR=eximon.bin
+
+
+#------------------------------------------------------------------------------
+# Compiling Exim with content scanning support: If you want to compile Exim
+# with support for message body content scanning, set WITH_CONTENT_SCAN to
+# the value "yes". This will give you malware and spam scanning in the DATA ACL,
+# and the MIME ACL. Please read the documentation to learn more about these
+# features.
+
+# WITH_CONTENT_SCAN=yes
+
+# If you want to use the deprecated "demime" condition in the DATA ACL,
+# uncomment the line below. Doing so will also explicitly turn on the
+# WITH_CONTENT_SCAN option. If possible, use the MIME ACL instead of
+# the "demime" condition.
+
+# WITH_OLD_DEMIME=yes
+
+# If you're using ClamAV and are backporting fixes to an old version, instead
+# of staying current (which is the more usual approach) then you may need to
+# use an older API which uses a STREAM command, now deprecated, instead of
+# zINSTREAM. If you need to set this, please let the Exim developers know, as
+# if nobody reports a need for it, we'll remove this option and clean up the
+# code. zINSTREAM was introduced with ClamAV 0.95.
+#
+# WITH_OLD_CLAMAV_STREAM=yes
+
+#------------------------------------------------------------------------------
+# By default Exim includes code to support DKIM (DomainKeys Identified
+# Mail, RFC4871) signing and verification. Verification of signatures is
+# turned on by default. See the spec for information on conditionally
+# disabling it. To disable the inclusion of the entire feature, set
+# DISABLE_DKIM to "yes"
+
+# DISABLE_DKIM=yes
+
+
+#------------------------------------------------------------------------------
+# By default, Exim has support for checking the AD bit in a DNS response, to
+# determine if DNSSEC validation was successful. If your system libraries
+# do not support that bit, then set DISABLE_DNSSEC to "yes"
+
+# DISABLE_DNSSEC=yes
+
+
+#------------------------------------------------------------------------------
+# Compiling Exim with experimental features. These are documented in
+# experimental-spec.txt. "Experimental" means that the way these features are
+# implemented may still change. Backward compatibility is not guaranteed.
+
+# Uncomment the following line to add support for talking to dccifd. This
+# defaults the socket path to /usr/local/dcc/var/dccifd.
+
+# EXPERIMENTAL_DCC=yes
+
+# Uncomment the following lines to add SPF support. You need to have libspf2
+# installed on your system (www.libspf2.org). Depending on where it is installed
+# you may have to edit the CFLAGS and LDFLAGS lines.
+
+# EXPERIMENTAL_SPF=yes
+# CFLAGS += -I/usr/local/include
+# LDFLAGS += -lspf2
+
+# Uncomment the following lines to add SRS (Sender rewriting scheme) support.
+# You need to have libsrs_alt installed on your system (srs.mirtol.com).
+# Depending on where it is installed you may have to edit the CFLAGS and
+# LDFLAGS lines.
+
+# EXPERIMENTAL_SRS=yes
+# CFLAGS += -I/usr/local/include
+# LDFLAGS += -lsrs_alt
+
+# Uncomment the following lines to add Brightmail AntiSpam support. You need
+# to have the Brightmail client SDK installed. Please check the experimental
+# documentation for implementation details. You need to edit the CFLAGS and
+# LDFLAGS lines.
+
+# EXPERIMENTAL_BRIGHTMAIL=yes
+# CFLAGS += -I/opt/brightmail/bsdk-6.0/include
+# LDFLAGS += -lxml2_single -lbmiclient_single -L/opt/brightmail/bsdk-6.0/lib
+
+# Uncomment the following line to add OCSP stapling support in TLS, if Exim
+# was built using OpenSSL.
+
+# EXPERIMENTAL_OCSP=yes
+
+# Uncomment the following line to add DMARC checking capability, implemented
+# using libopendmarc libraries.
+# EXPERIMENTAL_DMARC=yes
+# CFLAGS += -I/usr/local/include
+# LDFLAGS += -lopendmarc
+
+# Uncomment the following line to add Per-Recipient-Data-Response support.
+# EXPERIMENTAL_PRDR=yes
+
+# Uncomment the following line to support Transport post-delivery actions,
+# eg. for logging to a database.
+# EXPERIMENTAL_TPDA=yes
+
+# Uncomment the following line to add Redis lookup support
+# You need to have hiredis installed on your system (https://github.com/redis/hiredis).
+# Depending on where it is installed you may have to edit the CFLAGS and LDFLAGS lines.
+# EXPERIMENTAL_REDIS=yes
+# CFLAGS += -I/usr/local/include
+# LDFLAGS += -lhiredis
+
+
+###############################################################################
+# THESE ARE THINGS YOU MIGHT WANT TO SPECIFY #
+###############################################################################
+
+# The items in this section are those that are commonly changed according to
+# the sysadmin's preferences, but whose defaults are often acceptable. The
+# first five are concerned with security issues, where differing levels of
+# paranoia are appropriate in different environments. Sysadmins also vary in
+# their views on appropriate levels of defence in these areas. If you do not
+# understand these issues, go with the defaults, which are used by many sites.
+
+
+#------------------------------------------------------------------------------
+# Although Exim is normally a setuid program, owned by root, it refuses to run
+# local deliveries as root by default. There is a runtime option called
+# "never_users" which lists the users that must never be used for local
+# deliveries. There is also the setting below, which provides a list that
+# cannot be overridden at runtime. This guards against problems caused by
+# unauthorized changes to the runtime configuration. You are advised not to
+# remove "root" from this option, but you can add other users if you want. The
+# list is colon-separated. It must NOT contain any spaces.
+
+# FIXED_NEVER_USERS=root:bin:daemon
+# Build-time list of users that may never run local deliveries; unlike the
+# runtime "never_users" option this cannot be overridden. Only root is
+# blocked here.
+FIXED_NEVER_USERS=root
+
+
+#------------------------------------------------------------------------------
+# By default, Exim insists that its configuration file be owned by root. You
+# can specify one additional permitted owner here.
+
+# CONFIGURE_OWNER=
+
+# If the configuration file is group-writeable, Exim insists by default that it
+# is owned by root. You can specify one additional permitted group owner here.
+
+# CONFIGURE_GROUP=
+
+# If you specify CONFIGURE_OWNER or CONFIGURE_GROUP as a name, this is looked
+# up at build time, and the uid or gid number is built into the binary.
+# However, you can specify that the lookup is deferred until runtime. In this
+# case, it is the name that is built into the binary. You can do this by a
+# setting of the form:
+
+# CONFIGURE_OWNER=ref:mail
+# CONFIGURE_GROUP=ref:sysadmin
+
+# In other words, put "ref:" in front of the user or group name. Although this
+# costs a bit of resource at runtime, it is convenient to use this feature when
+# building binaries that are to be run on multiple systems where the names may
+# refer to different uids or gids. It also allows you to build Exim on a system
+# where the relevant user or group is not defined.
+
+
+#------------------------------------------------------------------------------
+# The -C option allows Exim to be run with an alternate runtime configuration
+# file. When this is used by root, root privilege is retained by the binary
+# (for any other caller including the Exim user, it is dropped). You can
+# restrict the location of alternate configurations by defining a prefix below.
+# Any file used with -C must then start with this prefix (except that /dev/null
+# is also permitted if the caller is root, because that is used in the install
+# script). If the prefix specifies a directory that is owned by root, a
+# compromise of the Exim account does not permit arbitrary alternate
+# configurations to be used. The prefix can be more restrictive than just a
+# directory (the second example).
+
+# ALT_CONFIG_PREFIX=/some/directory/
+# ALT_CONFIG_PREFIX=/some/directory/exim.conf-
+
+
+#------------------------------------------------------------------------------
+# When a user other than root uses the -C option to override the configuration
+# file (including the Exim user when re-executing Exim to regain root
+# privileges for local message delivery), this will normally cause Exim to
+# drop root privileges. The TRUSTED_CONFIG_LIST option, specifies a file which
+# contains a list of trusted configuration filenames, one per line. If the -C
+# option is used by the Exim user or by the user specified in the
+# CONFIGURE_OWNER setting, to specify a configuration file which is listed in
+# the TRUSTED_CONFIG_LIST file, then root privileges are not dropped by Exim.
+
+# TRUSTED_CONFIG_LIST=/usr/exim/trusted_configs
+
+
+#------------------------------------------------------------------------------
+# Uncommenting this option disables the use of the -D command line option,
+# which changes the values of macros in the runtime configuration file.
+# This is another protection against somebody breaking into the Exim account.
+
+# DISABLE_D_OPTION=yes
+
+
+#------------------------------------------------------------------------------
+# By contrast, you might be maintaining a system which relies upon the ability
+# to override values with -D and assumes that these will be passed through to
+# the delivery processes. As of Exim 4.73, this is no longer the case by
+# default. Going forward, we strongly recommend that you use a shim Exim
+# configuration file owned by root stored under TRUSTED_CONFIG_LIST.
+# That shim can set macros before .include'ing your main configuration file.
+#
+# As a strictly transient measure to ease migration to 4.73, the
+# WHITELIST_D_MACROS value defines a colon-separated list of macro-names
+# which are permitted to be overridden from the command-line which will be
+# honoured by the Exim user. So these are macros that can persist to delivery
+# time.
+# Examples might be -DTLS or -DSPOOL=/some/dir. The values on the
+# command-line are filtered to only permit: [A-Za-z0-9_/.-]*
+#
+# This option is highly likely to be removed in a future release. It exists
+# only to make 4.73 as easy as possible to migrate to. If you use it, we
+# encourage you to schedule time to rework your configuration to not depend
+# upon it. Most people should not need to use this.
+#
+# By default, no macros are whitelisted for -D usage.
+
+# WHITELIST_D_MACROS=TLS:SPOOL
+
+#------------------------------------------------------------------------------
+# Exim has support for the AUTH (authentication) extension of the SMTP
+# protocol, as defined by RFC 2554. If you don't know what SMTP authentication
+# is, you probably won't want to include this code, so you should leave these
+# settings commented out. If you do want to make use of SMTP authentication,
+# you must uncomment at least one of the following, so that appropriate code is
+# included in the Exim binary. You will then need to set up the run time
+# configuration to make use of the mechanism(s) selected.
+
+# SMTP AUTH mechanisms compiled in: CRAM-MD5 and PLAINTEXT. The remaining
+# mechanisms (Cyrus SASL, Dovecot, GNU SASL, Heimdal GSSAPI, SPA) are left
+# out -- presumably to keep the binary small and avoid extra libraries.
+AUTH_CRAM_MD5=yes
+# AUTH_CYRUS_SASL=yes
+# AUTH_DOVECOT=yes
+# AUTH_GSASL=yes
+# AUTH_GSASL_PC=libgsasl
+# AUTH_HEIMDAL_GSSAPI=yes
+# AUTH_HEIMDAL_GSSAPI_PC=heimdal-gssapi
+AUTH_PLAINTEXT=yes
+# AUTH_SPA=yes
+
+
+#------------------------------------------------------------------------------
+# If you specified AUTH_CYRUS_SASL above, you should ensure that you have the
+# Cyrus SASL library installed before trying to build Exim, and you probably
+# want to uncomment the first line below.
+# Similarly for GNU SASL, unless pkg-config is used via AUTH_GSASL_PC.
+# Ditto for AUTH_HEIMDAL_GSSAPI(_PC).
+
+# AUTH_LIBS=-lsasl2
+# AUTH_LIBS=-lgsasl
+# AUTH_LIBS=-lgssapi -lheimntlm -lkrb5 -lhx509 -lcom_err -lhcrypto -lasn1 -lwind -lroken -lcrypt
+
+
+#------------------------------------------------------------------------------
+# When Exim is decoding MIME "words" in header lines, most commonly for use
+# in the $header_xxx expansion, it converts any foreign character sets to the
+# one that is set in the headers_charset option. The default setting is
+# defined by this setting:
+
+HEADERS_CHARSET="ISO-8859-1"
+
+# If you are going to make use of $header_xxx expansions in your configuration
+# file, or if your users are going to use them in filter files, and the normal
+# character set on your host is something other than ISO-8859-1, you might
+# like to specify a different default here. This value can be overridden in
+# the runtime configuration, and it can also be overridden in individual filter
+# files.
+#
+# IMPORTANT NOTE: The iconv() function is needed for character code
+# conversions. Please see the next item...
+
+
+#------------------------------------------------------------------------------
+# Character code conversions are possible only if the iconv() function is
+# installed on your operating system. There are two places in Exim where this
+# is relevant: (a) The $header_xxx expansion (see the previous item), and (b)
+# the Sieve filter support. For those OS where iconv() is known to be installed
+# as standard, the file in OS/Makefile-xxxx contains
+#
+# HAVE_ICONV=yes
+#
+# If you are not using one of those systems, but have installed iconv(), you
+# need to uncomment that line above. In some cases, you may find that iconv()
+# and its header file are not in the default places. You might need to use
+# something like this:
+#
+# iconv() is not assumed to be present on the target, so charset conversion
+# support is disabled.
+HAVE_ICONV=no
+# CFLAGS=-O -I/usr/local/include
+# EXTRALIBS_EXIM=-L/usr/local/lib -liconv
+#
+# but of course there may need to be other things in CFLAGS and EXTRALIBS_EXIM
+# as well.
+#CFLAGS="-Os -pipe -march=i486 -fno-caller-saves -fhonour-copts -Wno-error=unused-but-set-variable"
+#CFLAGS="$(TARGET_CFLAGS)"
+# todo: Makefile.arch with -march=XYZ if it doesn't compile correctly
+# NOTE(review): -I/usr/local/include references a host path, which looks
+# wrong for an OpenWrt cross-build; the commented-out TARGET_CFLAGS line
+# above suggests toolchain flags were meant to be injected instead. Confirm
+# that the package Makefile overrides CFLAGS on the make command line
+# (command-line assignments take precedence over this file assignment).
+CFLAGS=-O -I/usr/local/include
+
+
+#------------------------------------------------------------------------------
+# The passwords for user accounts are normally encrypted with the crypt()
+# function. Comparisons with encrypted passwords can be done using Exim's
+# "crypteq" expansion operator. (This is commonly used as part of the
+# configuration of an authenticator for use with SMTP AUTH.) At least one
+# operating system has an extended function called crypt16(), which uses up to
+# 16 characters of a password (the normal crypt() uses only the first 8). Exim
+# supports the use of crypt16() as well as crypt() but note the warning below.
+
+# You can always indicate a crypt16-encrypted password by preceding it with
+# "{crypt16}". If you want the default handling (without any preceding
+# indicator) to use crypt16(), uncomment the following line:
+
+# DEFAULT_CRYPT=crypt16
+
+# If you do that, you can still access the basic crypt() function by preceding
+# an encrypted password with "{crypt}". For more details, see the description
+# of the "crypteq" condition in the manual chapter on string expansions.
+
+# Some operating systems do not include a crypt16() function, so Exim has one
+# of its own, which it uses unless HAVE_CRYPT16 is defined. Normally, that will
+# be set in an OS-specific Makefile for the OS that have such a function, so
+# you should not need to bother with it.
+
+# *** WARNING *** WARNING *** WARNING *** WARNING *** WARNING ***
+# It turns out that the above is not entirely accurate. As well as crypt16()
+# there is a function called bigcrypt() that some operating systems have. This
+# may or may not use the same algorithm, and both of them may be different to
+# Exim's built-in crypt16() that is used unless HAVE_CRYPT16 is defined.
+#
+# However, since there is now a move away from the traditional crypt()
+# functions towards using SHA1 and other algorithms, tidying up this area of
+# Exim is seen as very low priority. In practice, if you need to, you can
+# define DEFAULT_CRYPT to the name of any function that has the same interface
+# as the traditional crypt() function.
+# *** WARNING *** WARNING *** WARNING *** WARNING *** WARNING ***
+
+
+#------------------------------------------------------------------------------
+# Exim can be built to support the SMTP STARTTLS command, which implements
+# Transport Layer Security using SSL (Secure Sockets Layer). To do this, you
+# must install the OpenSSL library package or the GnuTLS library. Exim contains
+# no cryptographic code of its own. Uncomment the following lines if you want
+# to build Exim with TLS support. If you don't know what this is all about,
+# leave these settings commented out.
+
+# This setting is required for any TLS support (either OpenSSL or GnuTLS)
+# SUPPORT_TLS=yes
+
+# Uncomment one of these settings if you are using OpenSSL; pkg-config vs not
+# USE_OPENSSL_PC=openssl
+# TLS_LIBS=-lssl -lcrypto
+
+# Uncomment the first and either the second or the third of these if you
+# are using GnuTLS. If you have pkg-config, then the second, else the third.
+# USE_GNUTLS=yes
+# USE_GNUTLS_PC=gnutls
+# TLS_LIBS=-lgnutls -ltasn1 -lgcrypt
+
+# The security fix we provide with the gnutls_allow_auto_pkcs11 option
+# (4.82 PP/09) introduces a compatibility regression. The symbol is
+# not available if GnuTLS is build without p11-kit (--without-p11-kit
+# configure option). In this case use AVOID_GNUTLS_PKCS11=yes when
+# building Exim.
+# AVOID_GNUTLS_PKCS11=yes
+
+# If you are running Exim as a server, note that just building it with TLS
+# support is not all you need to do. You also need to set up a suitable
+# certificate, and tell Exim about it by means of the tls_certificate
+# and tls_privatekey run time options. You also need to set tls_advertise_hosts
+# to specify the hosts to which Exim advertises TLS support. On the other hand,
+# if you are running Exim only as a client, building it with TLS support
+# is all you need to do.
+
+# If you are using pkg-config then you should not need to worry where the
+# libraries and headers are installed, as the pkg-config .pc specification
+# should include all -L/-I information necessary. If not using pkg-config
+# then you might need to specify the locations too.
+
+# Additional libraries and include files are required for both OpenSSL and
+# GnuTLS. The TLS_LIBS settings above assume that the libraries are installed
+# with all your other libraries. If they are in a special directory, you may
+# need something like
+
+# TLS_LIBS=-L/usr/local/openssl/lib -lssl -lcrypto
+# or
+# TLS_LIBS=-L/opt/gnu/lib -lgnutls -ltasn1 -lgcrypt
+
+# TLS_LIBS is included only on the command for linking Exim itself, not on any
+# auxiliary programs. If the include files are not in a standard place, you can
+# set TLS_INCLUDE to specify where they are, for example:
+
+# TLS_INCLUDE=-I/usr/local/openssl/include/
+# or
+# TLS_INCLUDE=-I/opt/gnu/include
+
+# You don't need to set TLS_INCLUDE if the relevant directories are already
+# specified in INCLUDE.
+
+
+#------------------------------------------------------------------------------
+# The default distribution of Exim contains only the plain text form of the
+# documentation. Other forms are available separately. If you want to install
+# the documentation in "info" format, first fetch the Texinfo documentation
+# sources from the ftp directory and unpack them, which should create files
+# with the extension "texinfo" in the doc directory. You may find that the
+# version number of the texinfo files is different to your Exim version number,
+# because the main documentation isn't updated as often as the code. For
+# example, if you have Exim version 4.43, the source tarball unpacks into a
+# directory called exim-4.43, but the texinfo tarball unpacks into exim-4.40.
+# In this case, move the contents of exim-4.40/doc into exim-4.43/doc after you
+# have unpacked them. Then set INFO_DIRECTORY to the location of your info
+# directory. This varies from system to system, but is often /usr/share/info.
+# Once you have done this, "make install" will build the info files and
+# install them in the directory you have defined.
+
+# INFO_DIRECTORY=/usr/share/info
+
+
+#------------------------------------------------------------------------------
+# Exim log directory and files: Exim creates several log files inside a
+# single log directory. You can define the directory and the form of the
+# log file name here. If you do not set anything, Exim creates a directory
+# called "log" inside its spool directory (see SPOOL_DIRECTORY above) and uses
+# the filenames "mainlog", "paniclog", and "rejectlog". If you want to change
+# this, you can set LOG_FILE_PATH to a path name containing one occurrence of
+# %s. This will be replaced by one of the strings "main", "panic", or "reject"
+# to form the final file names. Some installations may want something like this:
+
+# LOG_FILE_PATH=/var/log/exim_%slog
+
+# which results in files with names /var/log/exim_mainlog, etc. The directory
+# in which the log files are placed must exist; Exim does not try to create
+# it for itself. It is also your responsibility to ensure that Exim is capable
+# of writing files using this path name. The Exim user (see EXIM_USER above)
+# must be able to create and update files in the directory you have specified.
+
+# You can also configure Exim to use syslog, instead of or as well as log
+# files, by settings such as these
+
+# LOG_FILE_PATH=syslog
+# LOG_FILE_PATH=syslog:/var/log/exim_%slog
+
+# The first of these uses only syslog; the second uses syslog and also writes
+# to log files. Do not include white space in such a setting as it messes up
+# the building process.
+
+
+#------------------------------------------------------------------------------
+# When logging to syslog, the following option caters for syslog replacements
+# that are able to accept log entries longer than the 1024 characters allowed
+# by RFC 3164. It is up to you to make sure your syslog daemon can handle this.
+# Non-printable characters are usually unacceptable regardless, so log entries
+# are still split on newline characters.
+
+# SYSLOG_LONG_LINES=yes
+
+# If you are not interested in the process identifier (pid) of the Exim that is
+# making the call to syslog, then comment out the following line.
+
+SYSLOG_LOG_PID=yes
+
+
+#------------------------------------------------------------------------------
+# Cycling log files: this variable specifies the maximum number of old
+# log files that are kept by the exicyclog log-cycling script. You don't have
+# to use exicyclog. If your operating system has other ways of cycling log
+# files, you can use them instead. The exicyclog script isn't run by default;
+# you have to set up a cron job for it if you want it.
+
+EXICYCLOG_MAX=10
+
+
+#------------------------------------------------------------------------------
+# The compress command is used by the exicyclog script to compress old log
+# files. Both the name of the command and the suffix that it adds to files
+# need to be defined here. See also the EXICYCLOG_MAX configuration.
+
+# Compressor used by exicyclog on rotated log files, and the suffix it adds.
+# NOTE(review): verify that gzip actually lives at /usr/bin/gzip on the
+# target image (BusyBox-based systems often install it at /bin/gzip).
+COMPRESS_COMMAND=/usr/bin/gzip
+COMPRESS_SUFFIX=gz
+
+
+#------------------------------------------------------------------------------
+# If the exigrep utility is fed compressed log files, it tries to uncompress
+# them using this command.
+
+ZCAT_COMMAND=/usr/bin/zcat
+
+
+#------------------------------------------------------------------------------
+# Compiling in support for embedded Perl: If you want to be able to
+# use Perl code in Exim's string manipulation language and you have Perl
+# (version 5.004 or later) installed, set EXIM_PERL to perl.o. Using embedded
+# Perl costs quite a lot of resources. Only do this if you really need it.
+
+# EXIM_PERL=perl.o
+
+
+#------------------------------------------------------------------------------
+# Support for dynamically-loaded string expansion functions via ${dlfunc. If
+# you are using gcc the dynamically-loaded object must be compiled with the
+# -shared option, and you will need to add -export-dynamic to EXTRALIBS so
+# that the local_scan API is made available by the linker. You may also need
+# to add -ldl to EXTRALIBS so that dlopen() is available to Exim.
+
+# EXPAND_DLFUNC=yes
+
+
+#------------------------------------------------------------------------------
+# Exim has support for PAM (Pluggable Authentication Modules), a facility
+# which is available in the latest releases of Solaris and in some GNU/Linux
+# distributions (see http://ftp.kernel.org/pub/linux/libs/pam/). The Exim
+# support, which is intended for use in conjunction with the SMTP AUTH
+# facilities, is included only when requested by the following setting:
+
+# SUPPORT_PAM=yes
+
+# You probably need to add -lpam to EXTRALIBS, and in some releases of
+# GNU/Linux -ldl is also needed.
+
+
+#------------------------------------------------------------------------------
+# Support for authentication via Radius is also available. The Exim support,
+# which is intended for use in conjunction with the SMTP AUTH facilities,
+# is included only when requested by setting the following parameter to the
+# location of your Radius configuration file:
+
+# RADIUS_CONFIG_FILE=/etc/radiusclient/radiusclient.conf
+# RADIUS_CONFIG_FILE=/etc/radius.conf
+
+# If you have set RADIUS_CONFIG_FILE, you should also set one of these to
+# indicate which RADIUS library is used:
+
+# RADIUS_LIB_TYPE=RADIUSCLIENT
+# RADIUS_LIB_TYPE=RADIUSCLIENTNEW
+# RADIUS_LIB_TYPE=RADLIB
+
+# RADIUSCLIENT is the radiusclient library; you probably need to add
+# -lradiusclient to EXTRALIBS.
+#
+# The API for the radiusclient library was changed at release 0.4.0.
+# Unfortunately, the header file does not define a version number that clients
+# can use to support both the old and new APIs. If you are using version 0.4.0
+# or later of the radiusclient library, you should use RADIUSCLIENTNEW.
+#
+# RADLIB is the Radius library that comes with FreeBSD (the header file is
+# called radlib.h); you probably need to add -lradius to EXTRALIBS.
+#
+# If you do not set RADIUS_LIB_TYPE, Exim assumes the radiusclient library,
+# using the original API.
+
+
+#------------------------------------------------------------------------------
+# Support for authentication via the Cyrus SASL pwcheck daemon is available.
+# Note, however, that pwcheck is now deprecated in favour of saslauthd (see
+# next item). The Exim support for pwcheck, which is intended for use in
+# conjunction with the SMTP AUTH facilities, is included only when requested by
+# setting the following parameter to the location of the pwcheck daemon's
+# socket.
+#
+# There is no need to install all of SASL on your system. You just need to run
+# ./configure --with-pwcheck, cd to the pwcheck directory within the sources,
+# make and make install. You must create the socket directory (default
+# /var/pwcheck) and chown it to exim's user and group. Once you have installed
+# pwcheck, you should arrange for it to be started by root at boot time.
+
+# CYRUS_PWCHECK_SOCKET=/var/pwcheck/pwcheck
+
+
+#------------------------------------------------------------------------------
+# Support for authentication via the Cyrus SASL saslauthd daemon is available.
+# The Exim support, which is intended for use in conjunction with the SMTP AUTH
+# facilities, is included only when requested by setting the following
+# parameter to the location of the saslauthd daemon's socket.
+#
+# There is no need to install all of SASL on your system. You just need to run
+# ./configure --with-saslauthd (and any other options you need, for example, to
+# select or deselect authentication mechanisms), cd to the saslauthd directory
+# within the sources, make and make install. You must create the socket
+# directory (default /var/state/saslauthd) and chown it to exim's user and
+# group. Once you have installed saslauthd, you should arrange for it to be
+# started by root at boot time.
+
+# CYRUS_SASLAUTHD_SOCKET=/var/state/saslauthd/mux
+
+
+#------------------------------------------------------------------------------
+# TCP wrappers: If you want to use tcpwrappers from within Exim, uncomment
+# this setting. See the manual section entitled "Use of tcpwrappers" in the
+# chapter on building and installing Exim.
+#
+# USE_TCP_WRAPPERS=yes
+#
+# You may well also have to specify a local "include" file and an additional
+# library for TCP wrappers, so you probably need something like this:
+#
+# USE_TCP_WRAPPERS=yes
+# CFLAGS=-O -I/usr/local/include
+# EXTRALIBS_EXIM=-L/usr/local/lib -lwrap
+#
+# but of course there may need to be other things in CFLAGS and EXTRALIBS_EXIM
+# as well.
+#
+# To use a name other than exim in the tcpwrappers config file,
+# e.g. if you're running multiple daemons with different access lists,
+# or multiple MTAs with the same access list, define
+# TCP_WRAPPERS_DAEMON_NAME accordingly
+#
+# TCP_WRAPPERS_DAEMON_NAME="exim"
+
+
+#------------------------------------------------------------------------------
+# The default action of the exim_install script (which is run by "make
+# install") is to install the Exim binary with a unique name such as
+# exim-4.43-1, and then set up a symbolic link called "exim" to reference it,
+# moving the symbolic link from any previous version. If you define NO_SYMLINK
+# (the value doesn't matter), the symbolic link is not created or moved. You
+# will then have to "turn Exim on" by setting up the link manually.
+
+# NO_SYMLINK=yes
+
+
+#------------------------------------------------------------------------------
+# Another default action of the install script is to install a default runtime
+# configuration file if one does not exist. This configuration has a router for
+# expanding system aliases. The default assumes that these aliases are kept
+# in the traditional file called /etc/aliases. If such a file does not exist,
+# the installation script creates one that contains just comments (no actual
+# aliases). The following setting can be changed to specify a different
+# location for the system alias file.
+
+SYSTEM_ALIASES_FILE=/etc/aliases
+
+
+#------------------------------------------------------------------------------
+# There are some testing options (-be, -bt, -bv) that read data from the
+# standard input when no arguments are supplied. By default, the input lines
+# are read using the standard fgets() function. This does not support line
+# editing during interactive input (though the terminal's "erase" character
+# works as normal). If your operating system has the readline() function, and
+# in addition supports dynamic loading of library functions, you can cause
+# Exim to use readline() for the -be testing option (only) by uncommenting the
+# following setting. Dynamic loading is used so that the library is loaded only
+# when the -be testing option is given; by the time the loading occurs,
+# Exim has given up its root privilege and is running as the calling user. This
+# is the reason why readline() is NOT supported for -bt and -bv, because Exim
+# runs as root or as exim, respectively, for those options. When USE_READLINE
+# is "yes", as well as supporting line editing, a history of input lines in the
+# current run is maintained.
+
+# USE_READLINE=yes
+
+# You may need to add -ldl to EXTRALIBS when you set USE_READLINE=yes.
+# Note that this option adds to the size of the Exim binary, because the
+# dynamic loading library is not otherwise included.
+
+
+#------------------------------------------------------------------------------
+# Uncomment this setting to include IPv6 support.
+
+HAVE_IPV6=yes
+
+###############################################################################
+# THINGS YOU ALMOST NEVER NEED TO MENTION #
+###############################################################################
+
+# The settings in this section are available for use in special circumstances.
+# In the vast majority of installations you need not change anything below.
+
+
+#------------------------------------------------------------------------------
+# The following commands live in different places in some OS. Either the
+# ultimate default settings, or the OS-specific files should already point to
+# the right place, but they can be overridden here if necessary. These settings
+# are used when building various scripts to ensure that the correct paths are
+# used when the scripts are run. They are not used in the Makefile itself. Perl
+# is not necessary for running Exim unless you set EXIM_PERL (see above) to get
+# it embedded, but there are some utilities that are Perl scripts. If you
+# haven't got Perl, Exim will still build and run; you just won't be able to
+# use those utilities.
+
+# CHOWN_COMMAND=/usr/bin/chown
+# CHGRP_COMMAND=/usr/bin/chgrp
+# CHMOD_COMMAND=/usr/bin/chmod
+# MV_COMMAND=/bin/mv
+# RM_COMMAND=/bin/rm
+# TOUCH_COMMAND=/usr/bin/touch
+# PERL_COMMAND=/usr/bin/perl
+
+
+#------------------------------------------------------------------------------
+# The following macro can be used to change the command for building a library
+# of functions. By default the "ar" command is used, with options "cq".
+# Only in rare circumstances should you need to change this.
+
+# AR=ar cq
+
+
+#------------------------------------------------------------------------------
+# In some operating systems, the value of the TMPDIR environment variable
+# controls where temporary files are created. Exim does not make use of
+# temporary files, except when delivering to MBX mailboxes. However, if Exim
+# calls any external libraries (e.g. DBM libraries), they may use temporary
+# files, and thus be influenced by the value of TMPDIR. For this reason, when
+# Exim starts, it checks the environment for TMPDIR, and if it finds it is set,
+# it replaces the value with what is defined here. Commenting this setting
+# suppresses the check altogether.
+
+TMPDIR="/tmp"
+
+
+#------------------------------------------------------------------------------
+# The following macros can be used to change the default modes that are used
+# by the appendfile transport. In most installations the defaults are just
+# fine, and in any case, you can change particular instances of the transport
+# at run time if you want.
+
+# APPENDFILE_MODE=0600
+# APPENDFILE_DIRECTORY_MODE=0700
+# APPENDFILE_LOCKFILE_MODE=0600
+
+
+#------------------------------------------------------------------------------
+# In some installations there may be multiple machines sharing file systems,
+# where a different configuration file is required for Exim on the different
+# machines. If CONFIGURE_FILE_USE_NODE is defined, then Exim will first look
+# for a configuration file whose name is that defined by CONFIGURE_FILE,
+# with the node name obtained by uname() tacked on the end, separated by a
+# period (for example, /usr/exim/configure.host.in.some.domain). If this file
+# does not exist, then the bare configuration file name is tried.
+
+# CONFIGURE_FILE_USE_NODE=yes
+
+
+#------------------------------------------------------------------------------
+# In some esoteric configurations two different versions of Exim are run,
+# with different setuid values, and different configuration files are required
+# to handle the different cases. If CONFIGURE_FILE_USE_EUID is defined, then
+# Exim will first look for a configuration file whose name is that defined
+# by CONFIGURE_FILE, with the effective uid tacked on the end, separated by
+# a period (for example, /usr/exim/configure.0). If this file does not exist,
+# then the bare configuration file name is tried. In the case when both
+# CONFIGURE_FILE_USE_EUID and CONFIGURE_FILE_USE_NODE are set, four files
+# are tried: <name>.<euid>.<node>, <name>.<node>, <name>.<euid>, and <name>.
+
+# CONFIGURE_FILE_USE_EUID=yes
+
+
+#------------------------------------------------------------------------------
+# The size of the delivery buffers: These specify the sizes (in bytes) of
+# the buffers that are used when copying a message from the spool to a
+# destination. There is rarely any need to change these values.
+
+# DELIVER_IN_BUFFER_SIZE=8192
+# DELIVER_OUT_BUFFER_SIZE=8192
+
+
+#------------------------------------------------------------------------------
+# The mode of the database directory: Exim creates a directory called "db"
+# in its spool directory, to hold its databases of hints. This variable
+# determines the mode of the created directory. The default value in the
+# source is 0750.
+
+# EXIMDB_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# Database file mode: The mode of files created in the "db" directory defaults
+# to 0640 in the source, and can be changed here.
+
+# EXIMDB_MODE=0640
+
+
+#------------------------------------------------------------------------------
+# Database lock file mode: The mode of zero-length files created in the "db"
+# directory to use for locking purposes defaults to 0640 in the source, and
+# can be changed here.
+
+# EXIMDB_LOCKFILE_MODE=0640
+
+
+#------------------------------------------------------------------------------
+# This parameter sets the maximum length of the header portion of a message
+# that Exim is prepared to process. The default setting is one megabyte. The
+# limit exists in order to catch rogue mailers that might connect to your SMTP
+# port, start off a header line, and then just pump junk at it for ever. The
+# message_size_limit option would also catch this, but it may not be set.
+# The value set here is the default; it can be changed at runtime.
+
+# HEADER_MAXSIZE="(1024*1024)"
+
+
+#------------------------------------------------------------------------------
+# The mode of the input directory: The input directory is where messages are
+# kept while awaiting delivery. Exim creates it if necessary, using a mode
+# which can be defined here (default 0750).
+
+# INPUT_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# The mode of Exim's log directory, when it is created by Exim inside the spool
+# directory, defaults to 0750 but can be changed here.
+
+# LOG_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# The log files themselves are created as required, with a mode that defaults
+# to 0640, but which can be changed here.
+
+# LOG_MODE=0640
+
+
+#------------------------------------------------------------------------------
+# The TESTDB lookup is for performing tests on the handling of lookup results,
+# and is not useful for general running. It should be included only when
+# debugging the code of Exim.
+
+# LOOKUP_TESTDB=yes
+
+
+#------------------------------------------------------------------------------
+# /bin/sh is used by default as the shell in which to run commands that are
+# defined in the makefiles. This can be changed if necessary, by uncommenting
+# this line and specifying another shell, but note that a Bourne-compatible
+# shell is expected.
+
+# MAKE_SHELL=/bin/sh
+
+
+#------------------------------------------------------------------------------
+# The maximum number of named lists of each type (address, domain, host, and
+# local part) can be increased by changing this value. It should be set to
+# a multiple of 16.
+
+# MAX_NAMED_LIST=16
+
+
+#------------------------------------------------------------------------------
+# Network interfaces: Unless you set the local_interfaces option in the runtime
+# configuration file to restrict Exim to certain interfaces only, it will run
+# code to find all the interfaces there are on your host. Unfortunately,
+# the call to the OS that does this requires a buffer large enough to hold
+# data for all the interfaces - it was designed in the days when a host rarely
+# had more than three or four interfaces. Nowadays hosts can have very many
+# virtual interfaces running on the same hardware. If you have more than 250
+# virtual interfaces, you will need to uncomment this setting and increase the
+# value.
+
+# MAXINTERFACES=250
+
+
+#------------------------------------------------------------------------------
+# Per-message logs: While a message is in the process of being delivered,
+# comments on its progress are written to a message log, for the benefit of
+# human administrators. These logs are held in a directory called "msglog"
+# in the spool directory. Its mode defaults to 0750, but can be changed here.
+# The message log directory is also used for storing files that are used by
+# transports for returning data to a message's sender (see the "return_output"
+# option for transports).
+
+# MSGLOG_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# There are three options which are used when compiling the Perl interface and
+# when linking with Perl. The default values for these are placed automatically
+# at the head of the Makefile by the script which builds it. However, if you
+# want to override them, you can do so here.
+
+# PERL_CC=
+# PERL_CCOPTS=
+# PERL_LIBS=
+
+
+#------------------------------------------------------------------------------
+# If you wish to disable valgrind in the binary, define NVALGRIND=1.
+# This should not be needed.
+
+# NVALGRIND=1
+
+#------------------------------------------------------------------------------
+# Identifying the daemon: When an Exim daemon starts up, it writes its pid
+# (process id) to a file so that it can easily be identified. The path of the
+# file can be specified here. Some installations may want something like this:
+
+PID_FILE_PATH=/var/run/exim.pid
+
+# If PID_FILE_PATH is not defined, Exim writes a file in its spool directory
+# using the name "exim-daemon.pid".
+
+# If you start up a daemon without the -bd option (for example, with just
+# the -q15m option), a pid file is not written. Also, if you override the
+# configuration file with the -oX option, no pid file is written. In other
+# words, the pid file is written only for a "standard" daemon.
+
+
+#------------------------------------------------------------------------------
+# If Exim creates the spool directory, it is given this mode, defaulting in the
+# source to 0750.
+
+# SPOOL_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# The mode of files on the input spool which hold the contents of messages can
+# be changed here. The default is 0640 so that information from the spool is
+# available to anyone who is a member of the Exim group.
+
+# SPOOL_MODE=0640
+
+
+#------------------------------------------------------------------------------
+# Moving frozen messages: If the following is uncommented, Exim is compiled
+# with support for automatically moving frozen messages out of the main spool
+# directory, a facility that is found useful by some large installations. A
+# run time option is required to cause the moving actually to occur. Such
+# messages become "invisible" to the normal management tools.
+
+# SUPPORT_MOVE_FROZEN_MESSAGES=yes
+
+
+#------------------------------------------------------------------------------
+# Expanding match_* second parameters: BE CAREFUL IF ENABLING THIS!
+# It has proven too easy in practice for administrators to configure security
+# problems into their Exim install, by treating match_domain{}{} and friends
+# as a form of string comparison, where the second string comes from untrusted
+# data. Because these options take lists, which can include lookup;LOOKUPDATA
+# style elements, a foe can then cause Exim to, eg, execute an arbitrary MySQL
+# query, dropping tables.
+# From Exim 4.77 onwards, the second parameter is not expanded; it can still
+# be a list literal, or a macro, or a named list reference. There is also
+# the new expansion condition "inlisti" which does expand the second parameter,
+# but treats it as a list of strings; also, there's "eqi" which is probably
+# what is normally wanted.
+#
+# If you really need to have the old behaviour, know what you are doing and
+# will not complain if your system is compromised as a result of doing so, then
+# uncomment this option to get the old behaviour back.
+
+# EXPAND_LISTMATCH_RHS=yes
+
+#------------------------------------------------------------------------------
+# Disabling the use of fsync(): DO NOT UNCOMMENT THE FOLLOWING LINE unless you
+# really, really, really know what you are doing. And even then, think again.
+# You should never uncomment this when compiling a binary for distribution.
+# Use it only when compiling Exim for your own use.
+#
+# Uncommenting this line enables the use of a runtime option called
+# disable_fsync, which can be used to stop Exim using fsync() to ensure that
+# files are written to disc before proceeding. When this is disabled, crashes
+# and hardware problems such as power outages can cause data to be lost. This
+# feature should only be used in very exceptional circumstances. YOU HAVE BEEN
+# WARNED.
+
+# ENABLE_DISABLE_FSYNC=yes
+
+# End of EDITME for Exim 4.
--- /dev/null
+#!/bin/sh /etc/rc.common
+# Exim
+
+START=88
+STOP=12
+PIDFILE=/var/run/exim.pid
+
+start() {
+	# Restrict the runtime configuration to root:exim, group read-only.
+	chown root:exim /usr/exim/configure
+	chmod 640 /usr/exim/configure
+	# -bd: listening SMTP daemon; -q30m: queue runner every 30 minutes.
+	# Exim writes the pid file itself (PID_FILE_PATH in the build EDITME).
+	/usr/exim/bin/exim -bd -q30m
+}
+
+stop() {
+ kill $(cat "$PIDFILE")
+ rm "$PIDFILE"
+}
+
+restart() {
+	# Full stop/start cycle, invoked by /etc/rc.common for "restart".
+	stop
+	start
+}
+
--- /dev/null
+--- a/OS/Makefile-Base
++++ b/OS/Makefile-Base
+@@ -112,8 +112,8 @@ allexim: config.h $(EXIM_MONITOR) exicyclog exinext exiwhat \
+
+ # Targets for special-purpose configuration header builders
+ buildconfig: buildconfig.c
+- @echo "$(CC) buildconfig.c"
+- $(FE)$(CC) $(CFLAGS) $(INCLUDE) -o buildconfig buildconfig.c $(LIBS)
++ @echo "$(HOSTCC_WRAPPER) buildconfig.c"
++ $(FE)$(HOSTCC_WRAPPER) $(CFLAGS) $(INCLUDE) -o buildconfig buildconfig.c $(LIBS)
+
+
+ # Target for the exicyclog utility script
--- /dev/null
+include $(TOPDIR)/rules.mk
+
+# OpenWrt package metadata: fetch beanstalkd tag v1.9 directly from git.
+PKG_NAME:=beanstalkd
+PKG_VERSION:=v1.9
+PKG_RELEASE:=1
+
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
+PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
+PKG_SOURCE_URL:=https://github.com/kr/beanstalkd.git
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_VERSION:=$(PKG_VERSION)
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
+
+# NOTE(review): this top-level assignment keeps the quotes as part of the
+# value and is shadowed by the explicit CC= passed on the compile command
+# line below; it looks dead — confirm before relying on it.
+CC="$(TARGET_CC)"
+
+include $(INCLUDE_DIR)/package.mk
+
+# Menuconfig entry for the beanstalkd daemon package.
+define Package/beanstalkd
+ SECTION:=net
+ CATEGORY:=Network
+ TITLE:=Beanstalk
+endef
+
+define Package/beanstalkd/description
+ Beanstalk is a simple, fast work queue.
+endef
+
+# Upstream ships a plain Makefile with no configure step; override the
+# default Build/Configure with a no-op.
+define Build/Configure
+endef
+
+# Cross-compile in the unpacked source tree. Use $(MAKE) instead of a bare
+# "make" so the jobserver (-j) and dry-run (-n) flags propagate to the
+# sub-make; the leading "+" marks the line as a recursive make invocation.
+# -C replaces the fragile "cd dir && make" pattern.
+define Build/Compile
+	+$(MAKE) -C $(PKG_BUILD_DIR) CC="$(TARGET_CC)" CFLAGS="$(TARGET_CFLAGS)"
+endef
+
+define Package/beanstalkd/install
+	# Ship the daemon binary and its rc.common init script.
+	$(INSTALL_DIR) $(1)/usr/bin
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/beanstalkd $(1)/usr/bin/beanstalkd
+
+	$(INSTALL_DIR) $(1)/etc/init.d
+	$(INSTALL_BIN) ./files/beanstalkd.init $(1)/etc/init.d/beanstalkd
+endef
+
+$(eval $(call BuildPackage,beanstalkd))
--- /dev/null
+#!/bin/sh /etc/rc.common
+# Beanstalk
+
+START=89
+STOP=11
+
+start() {
+	# Backgrounded with "&": no daemonize flag is passed on the command line.
+	/usr/bin/beanstalkd &
+}
+
+stop() {
+	# No pid file is kept for this service, so stop by process name.
+	killall beanstalkd
+}
+
+restart() {
+	stop
+	start
+}
+
--- /dev/null
+--- a/linux.c
++++ b/linux.c
+@@ -19,7 +19,7 @@ static int epfd;
+ int
+ rawfalloc(int fd, int len)
+ {
+- return posix_fallocate(fd, 0, len);
++ return ftruncate(fd, len);
+ }
+
+
--- /dev/null
+
+choice
+ depends on PACKAGE_cfengine-promises
+ prompt "Network profile"
+ default NETWORK_PROFILE_APU
+
+ config NETWORK_PROFILE_ALIX
+ bool "ALIX"
+
+ config NETWORK_PROFILE_APU
+ bool "APU"
+
+ config NETWORK_PROFILE_RASPI
+	bool "Raspberry Pi"
+
+endchoice
+
--- /dev/null
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=cfengine-promises
+PKG_VERSION:=1.3.22
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/cfengine-promises
+ SECTION:=administration
+ CATEGORY:=Enigmabox
+ TITLE:=CFEngine promises
+ DEPENDS:=+cfengine
+endef
+
+# Extra menuconfig options (the network-profile choice) live in Config.in.
+define Package/cfengine-promises/config
+	source "$(SOURCE)/Config.in"
+endef
+
+define Package/cfengine-promises/description
+ The promises for CFEngine which configure the Enigmabox.
+endef
+
+# Pure file-drop package: nothing to configure or compile.
+define Build/Configure
+endef
+
+define Build/Compile
+endef
+
+# Install promises, helper scripts, init scripts and the network-profile
+# marker chosen at menuconfig time.
+define Package/cfengine-promises/install
+	$(INSTALL_DIR) $(1)/opt/cfengine
+	$(INSTALL_BIN) ./files/root/bootstrap.cf $(1)/opt/cfengine/bootstrap.cf
+	$(INSTALL_BIN) ./files/root/site.cf $(1)/opt/cfengine/site.cf
+
+	$(INSTALL_DIR) $(1)/opt/enigmabox/cfengine-promises
+	cp -rv ./files/root/* $(1)/opt/enigmabox/cfengine-promises/
+
+	$(INSTALL_DIR) $(1)/usr/sbin
+	$(INSTALL_BIN) ./files/cfengine-apply $(1)/usr/sbin/cfengine-apply
+	$(INSTALL_BIN) ./files/init-mysql $(1)/usr/sbin/init-mysql
+
+	$(INSTALL_DIR) $(1)/usr/bin
+	$(INSTALL_BIN) ./files/hello $(1)/usr/bin/hello
+	$(INSTALL_BIN) ./files/speedtest $(1)/usr/bin/speedtest
+
+	$(INSTALL_DIR) $(1)/etc/init.d
+	$(INSTALL_BIN) ./files/cfengine-promises.init $(1)/etc/init.d/cfengine-promises
+	$(INSTALL_BIN) ./files/rebuild-iptables.init $(1)/etc/init.d/rebuild-iptables
+	$(INSTALL_BIN) ./files/setup-cjdns-networking.init $(1)/etc/init.d/setup-cjdns-networking
+
+	$(INSTALL_DIR) $(1)/etc
+	$(INSTALL_DATA) ./files/resolv.conf $(1)/etc/resolv.conf.enigmabox
+
+	# network profile: recipe lines run under /bin/sh, whose "[" only
+	# supports "=" for string equality — "==" is a bashism that fails on
+	# dash/ash, silently skipping the profile file.
+	$(INSTALL_DIR) $(1)/etc/enigmabox
+	if [ "$(CONFIG_NETWORK_PROFILE_ALIX)" = "y" ]; then echo alix > $(1)/etc/enigmabox/network-profile; fi
+	if [ "$(CONFIG_NETWORK_PROFILE_APU)" = "y" ]; then echo apu > $(1)/etc/enigmabox/network-profile; fi
+	if [ "$(CONFIG_NETWORK_PROFILE_RASPI)" = "y" ]; then echo raspi > $(1)/etc/enigmabox/network-profile; fi
+
+	# create empty crontab so that the cron service starts
+	$(INSTALL_DIR) $(1)/etc/crontabs
+	touch $(1)/etc/crontabs/root
+endef
+
+$(eval $(call BuildPackage,cfengine-promises))
--- /dev/null
+#!/bin/ash
+
+#set -xv
+
+# variables
+cfengine_site="/box/.cf-site.json"
+cfagent="/opt/cfengine/bin/cf-agent"
+webinterface="http://127.0.0.1:8000"
+pidfile="/var/run/cfengine-apply.pid"
+dynamic_output="/tmp/dynamic_output"
+dynamic_status="/tmp/apply-in-progress"
+
+
+
+# Fetch the current site document from the web interface and run cf-agent.
+# With a "background" argument the agent output is streamed to
+# $dynamic_output and the run is detached; otherwise it runs in the
+# foreground. $dynamic_status exists while a run is in progress.
+run() {
+	background=""
+	if [[ "$1" == "background" || "$2" == "background" ]]; then
+		# truncate the live-output file for the new run
+		> "$dynamic_output"
+		background=1
+	fi
+
+	touch "$dynamic_status"
+
+	echo "running cfengine and applying promises..."
+	curl "$webinterface/cfengine/site.json" > "$cfengine_site"
+
+	if [[ "$background" == 1 ]]; then
+		(
+		cfengine_run > "$dynamic_output"
+		inform_webinterface
+		) &
+	else
+		cfengine_run && inform_webinterface
+	fi
+
+}
+
+# Tell the web interface the pending config change has been applied and
+# clear the in-progress marker.
+# NOTE(review): "&>" and "[[ ]]" are bash extensions; the BusyBox ash on
+# the target apparently accepts them — confirm if the shebang ever changes.
+inform_webinterface() {
+	curl --data "key=config_changed&value=False" -X POST "$webinterface/api/v1/set_option" &> /dev/null
+	rm "$dynamic_status"
+}
+
+# Single cf-agent run against the Enigmabox promises entry point.
+cfengine_run() {
+	"$cfagent" --no-lock --inform --color=always /opt/enigmabox/cfengine-promises/site.cf
+}
+
+
+
+# Single-instance guard: if the recorded pid is still alive, bail out;
+# otherwise drop the stale pid file and record our own pid.
+kill -0 $(cat "$pidfile" 2> /dev/null) 2> /dev/null
+if [[ "$?" == "0" ]]; then
+	echo "Script is already running, exiting"
+	exit 1
+else
+	[[ -f "$pidfile" ]] && rm "$pidfile"
+fi
+echo $$ > "$pidfile"
+
+
+
+# Option parsing: -s <seconds> delays the run, -b detaches it.
+while getopts "s:b" opt; do
+	case "$opt" in
+		s)
+			sleep="$OPTARG"
+			;;
+		b)
+			background="background"
+			;;
+		:)
+			echo "Option -$OPTARG requires an argument."
+			exit 1
+			;;
+	esac
+done
+
+
+
+if [[ -n "$sleep" ]]; then
+	echo "sleeping for $sleep seconds..."
+	sleep "$sleep"
+fi
+
+# $background is deliberately unquoted: when unset, run() gets no argument.
+run $background
+
+rm "$pidfile"
+
--- /dev/null
+#!/bin/sh /etc/rc.common
+# cfengine promises apply service
+
+START=92
+
+start() {
+	# Apply the promises once at boot, then a minute later send an Asterisk
+	# SIP notify ("gsreboot") — presumably to make attached phones
+	# re-register/reboot; confirm against the dialplan.
+	/usr/sbin/cfengine-apply
+	(sleep 60 && /usr/sbin/asterisk -rx 'sip notify gsreboot 100') &
+}
+
--- /dev/null
+#!/bin/ash
+################################################################################
+less << EOF
+Hello.
+
+Thank you for joining us in our mission to provide the infrastructure
+for an independent, decentralized network built on secure protocols
+to protect humanity against tyranny and oppressive states.
+
+This is the Enigmabox, providing ready to use applications like telephony
+and email for use over cjdns networks.
+
+
+
+Enigmabox - folder structure
+============================
+
+/box - personal data
+/etc/enigmabox - enigmabox specific data
+/opt/enigmabox - enigmabox applications
+/opt/enigmabox/cfengine-promises - system configuration templates
+
+
+
+Enigmabox - commands
+====================
+
+cfengine-apply - run CFEngine, apply the configuration
+rebuild-iptables - apply firewall rules
+setup-cjdns-networking - manually run network setup script
+speedtest - conduct a bandwidth test
+
+
+The following commands are available for subscribers:
+
+addressbook pull - get global addressbook
+addressbook push - publish your address in the directory
+updater check - check for updates
+updater install [package] - install a package
+updater apply - apply updates - this will reboot your box!
+upgrader download - download newest firmware image
+upgrader verify - verify firmware image
+upgrader write - write firmware image
+
+
+
+cjdns - tools
+=============
+
+cjdnslog '' - watch cjdns at work
+dumptable - dump all nodes known to this cjdns instance
+findnodes - details about nodes, e.g. link quality, cjdns version, path
+peerStats - statistics about your direct peers
+
+EOF
--- /dev/null
+#!/bin/ash
+
+# One-time MySQL bootstrap: create the system tables, start and enable the
+# service, then set the root password.
+/usr/bin/mysql_install_db --force
+
+/etc/init.d/mysqld start
+/etc/init.d/mysqld enable
+
+# NOTE(review): hard-coded root password — acceptable only if mysqld is
+# bound to localhost on this appliance; confirm it is not network-reachable.
+mysqladmin -u root password 'root'
+
--- /dev/null
+#!/bin/sh /etc/rc.common
+# rebuild iptables
+
+START=86
+
+start() {
+	# Regenerate and load the firewall rules at boot.
+	/usr/sbin/rebuild-iptables
+}
+
--- /dev/null
+domain box
+search box
+nameserver ::1
+nameserver 127.0.0.1
+nameserver 4.2.2.1
+nameserver 4.2.2.2
+nameserver 4.2.2.3
+nameserver 4.2.2.4
+nameserver 8.8.8.8
--- /dev/null
+
+# Render all cjdns-related configuration from the site JSON document and
+# restart cjdns whenever one of the relevant files was repaired.
+bundle agent app_cjdns
+{
+    vars:
+        # Parsed site configuration (path supplied by the "g" bundle).
+        "json"
+            data => readjson("$(g.site)", 64000);
+
+    classes:
+        # Select which cjdroute build to deploy, per site config.
+        "cjdns_master"
+            expression => regcmp("master", "$(json[cjdns_version])");
+
+        "cjdns_v6"
+            expression => regcmp("v6", "$(json[cjdns_version])");
+
+    files:
+        # Main cjdns config; a repair raises the restart_cjdns class.
+        # NOTE(review): readjson() re-reads the same document for every
+        # template promise below instead of reusing $(json) — redundant
+        # but harmless as far as this file shows; confirm before changing.
+        "/box/cjdroute.conf"
+            create => "true",
+            template_method => "mustache",
+            template_data => readjson("$(g.site)", 64000),
+            edit_template => "$(this.promise_dirname)/templates/cjdroute.conf.mustache",
+            edit_defaults => no_backup,
+            classes => if_repaired("restart_cjdns");
+
+        # Credentials for the cjdns admin API.
+        "/root/.cjdnsadmin"
+            create => "true",
+            template_method => "mustache",
+            template_data => readjson("$(g.site)", 64000),
+            edit_template => "$(this.promise_dirname)/templates/cjdnsadmin.mustache",
+            edit_defaults => no_backup;
+
+        # Ensure the wpa_supplicant config directory exists.
+        "/etc/wpa_supplicant/."
+            create => "true";
+
+        "/etc/wpa_supplicant/wpa_supplicant.conf"
+            create => "true",
+            template_method => "mustache",
+            template_data => readjson("$(g.site)", 64000),
+            edit_template => "$(this.promise_dirname)/templates/wpa_supplicant.conf.mustache",
+            edit_defaults => no_backup;
+
+        # Networking helper script, rendered with executable perms ("script").
+        "/usr/sbin/setup-cjdns-networking"
+            create => "true",
+            perms => script,
+            template_method => "mustache",
+            template_data => readjson("$(g.site)", 64000),
+            edit_template => "$(this.promise_dirname)/templates/setup-cjdns-networking.mustache",
+            edit_defaults => no_backup;
+
+        # Install the cjdroute binary matching the selected version.
+        cjdns_master::
+            "/usr/sbin/cjdroute"
+                copy_from => local_copy("/usr/sbin/cjdroute.master"),
+                classes => if_repaired("restart_cjdns");
+
+        cjdns_v6::
+            "/usr/sbin/cjdroute"
+                copy_from => local_copy("/usr/sbin/cjdroute.v6"),
+                classes => if_repaired("restart_cjdns");
+
+    commands:
+        restart_cjdns::
+            "/etc/init.d/cjdns restart";
+
+    reports:
+        "checking cjdns: done";
+}
+
--- /dev/null
+{
+ "addr": "127.0.0.1",
+ "port": 11234,
+ "password": "6v60ggy58qu0x0013vh64gzsu2lch0y"
+}
--- /dev/null
+// Autogenerated cjdns.conf
+
+{
+ "privateKey": "{{cjdns_private_key}}",
+ "publicKey": "{{cjdns_public_key}}",
+ "ipv6": "{{cjdns_ipv6}}",
+ "authorizedPasswords":
+ [
+{{#if_peering_password}}
+{"password": "{{peering_password}}"}
+{{/if_peering_password}}
+ ],
+
+ "admin":
+ {
+ "bind": "127.0.0.1:11234",
+ "password": "6v60ggy58qu0x0013vh64gzsu2lch0y"
+ },
+
+    // Interfaces to connect to the switch core.
+    "interfaces":
+    {
+        "UDPInterface":
+        [
+            {
+                "bind": "0.0.0.0:{{peering_port}}",
+                "connectTo":
+                {
+{{#if_peerings}}
+{{#peerings}}
+                    "{{address}}":{"password":"{{password}}","publicKey":"{{public_key}}"},
+{{/peerings}}
+{{/if_peerings}}
+                }
+            }
+        ],
+        // NOTE: the comma after the UDPInterface list is required; without
+        // it two dict members sit adjacent and the generated config may
+        // fail to parse (cjdns tolerates trailing commas, not missing ones).
+        "ETHInterface":
+        [
+{{#autopeering}}
+            {
+                "bind": "{{interface}}",
+                "beacon": 2,
+            },
+{{/autopeering}}
+{{#if_meshmode}}
+            {
+                "bind": "wlan0",
+                "beacon": 2,
+            },
+{{/if_meshmode}}
+        ]
+    },
+
+ // Configuration for the router.
+ "router":
+ {
+ "interface":
+ {
+ "type": "TUNInterface"
+ //"tunDevice": "tun0"
+ },
+ "ipTunnel":
+ {
+ "allowedConnections":
+ [
+ ],
+ "outgoingConnections":
+ [
+{{#if_internet_gateway}}
+{{#internet_gateway}}
+ "{{public_key}}",
+{{/internet_gateway}}
+{{/if_internet_gateway}}
+ ]
+ }
+ },
+
+ "resetAfterInactivitySeconds": 100,
+ //"pidFile": "cjdroute.pid",
+ "security":
+ [
+ "nofiles",
+ {
+ "setuser": "nobody",
+ "exemptAngel": 1
+ }
+ ],
+ "logging":
+ {
+ // "logTo":"stdout"
+ }
+}
--- /dev/null
+#!/bin/ash
+export LC_ALL=en_US.UTF-8
+
+# TODO
+# * take DNS and gateway from the DHCP server; only set them once an IP
+#   address has been obtained
+
+# vars
+
+# Clearnet IPs of all configured peers and mission-control hosts
+# (template-expanded, space separated); each gets a host route via the
+# original gateway later on.
+remoteaddresses="{{#peerings}}{{ip}} {{/peerings}}{{#missioncontrol}}{{ip}} {{/missioncontrol}}"
+
+# Caches the pre-VPN default gateway across runs.
+orggatewayfile="/tmp/cjdns_org_gw"
+
+# Map the hardware profile to the interface facing the clearnet.
+network_profile="$(cat /etc/enigmabox/network-profile)"
+[[ "$network_profile" = "alix" ]] && clearnet_interface="eth0"
+[[ "$network_profile" = "apu" ]] && clearnet_interface="eth2"
+[[ "$network_profile" = "raspi" ]] && clearnet_interface="eth1"
+
+ping="ping -c 5 -W 5"
+cjdns_request_tries="/tmp/cjdns_request_tries"
+netstat_file="/tmp/netstat"
+pidfile="/tmp/setup-cjdns-networking.pid"
+# WLAN settings, filled in at template render time.
+opmode="{{wlan_opmode}}"
+ssid="{{wlan_ssid}}"
+wep_pass="{{wlan_pass}}"
+security="{{wlan_security}}"
+dynamic_output="/tmp/dynamic_output"
+
+{{#if_internet_gateway}}
+request_internet="yes"
+{{/if_internet_gateway}}
+
+{{^if_internet_gateway}}
+request_internet="no"
+{{/if_internet_gateway}}
+
+
+
+# check if its already running
+# (the guard is skipped for "startwifi", which re-invokes this script)
+if [[ "$1" != "startwifi" ]]; then
+	kill -0 $(cat "$pidfile" 2> /dev/null) 2> /dev/null
+	if [[ "$?" == "0" ]]; then
+		echo "script is already running"
+		exit 0
+	fi
+
+	echo $$ > "$pidfile"
+fi
+
+
+
+# functions
+
+# Print a blank-line-separated progress message to stderr.
+e() {
+	echo 1>&2
+	echo 1>&2
+	echo "$1" 1>&2
+}
+
+# Re-render the configuration via cfengine-apply, then re-run this script;
+# progress is streamed to $dynamic_output for the web UI.
+startwifi() {
+	echo "please wait, configuring system..." > "$dynamic_output"
+	/usr/sbin/cfengine-apply &> /dev/null
+	echo "done" > "$dynamic_output"
+
+	/usr/sbin/setup-cjdns-networking &> "$dynamic_output"
+
+	exit 0 #the script should end here
+}
+
+# Bring the clearnet interface up and request a DHCP lease — but only when
+# no default route exists yet.
+dhcp() {
+	ifconfig "$clearnet_interface" up
+
+	if [[ $( route -n | grep ^0.0.0.0 | wc -l ) -eq 0 ]]; then
+		e "dhcp request $clearnet_interface"
+		udhcpc -i "$clearnet_interface" --now
+	fi
+}
+
+# Restart wpa_supplicant on wlan0 from a clean state (WPA networks).
+start_wpa() {
+	e "start WPA session"
+
+	ifconfig wlan0 down
+	killall wpa_supplicant
+
+	# make sure wpa_supplicant is absent
+	rm "/var/run/wpa_supplicant/wlan0" 2> /dev/null
+
+	sleep 2
+
+	ifconfig wlan0 up
+	wpa_supplicant -i wlan0 -D wext -c /etc/wpa_supplicant/wpa_supplicant.conf -B
+
+	sleep 2
+}
+
+# Associate wlan0 via iwconfig: WEP when a key is configured, open AP
+# otherwise (the branch is chosen at template render time).
+start_wep() {
+{{#if_wlan_pass}}
+	e "start WEP session"
+	ifconfig wlan0 up
+	iwconfig wlan0 essid "$ssid"
+	iwconfig wlan0 key "d:0:$wep_pass"
+{{/if_wlan_pass}}
+{{^if_wlan_pass}}
+	e "connecting to AP"
+	ifconfig wlan0 up
+	iwconfig wlan0 essid "$ssid"
+{{/if_wlan_pass}}
+}
+
+# request_cjdns_internet -- escalate attempts to obtain internet over
+# cjdns. A counter persisted in $cjdns_request_tries survives between
+# runs: from the 2nd try an alternative country server is selected, from
+# the 3rd try the request-internet helper is additionally triggered.
+# The counter is reset (file removed) by the main logic on success.
+request_cjdns_internet() {
+ # a missing counter file yields an empty string; $((+1)) evaluates to 1
+ try=$(cat "$cjdns_request_tries" 2> /dev/null)
+ try=$(($try+1))
+ echo "$try" > "$cjdns_request_tries"
+
+ # try another countryserver after the 2nd try
+ if [[ "$try" -ge 2 ]]; then
+ e "switching to an alternative server"
+ curl http://127.0.0.1:8000/api/v1/set_next_country &> /dev/null
+ /usr/sbin/cfengine-apply
+ fi
+
+ # request cjdns internet via script after the 3rd try
+ if [[ "$try" -ge 3 ]]; then
+ e "request cjdns internet"
+ /usr/sbin/request-internet
+ fi
+}
+
+start_cjdns() {
+ if [[ "$(/etc/init.d/cjdns status)" != "running" ]]; then
+ e "starting cjdns"
+ /etc/init.d/cjdns start
+ fi
+}
+
+# restart_cjdns -- unconditionally restart the cjdns daemon (used after
+# the mesh interface was reconfigured or internet access was requested).
+restart_cjdns() {
+ e "restarting cjdns"
+ /etc/init.d/cjdns restart
+}
+
+# get_vpn_gateway -- print the IPv4 address assigned to the cjdns tun0
+# interface, or nothing when the interface is absent.
+get_vpn_gateway() {
+ ifconfig tun0 2> /dev/null | awk -F: '/inet addr/ { split($2, parts, " "); print parts[1] }'
+}
+
+# get_original_gateway -- print the pre-VPN default gateway. The value is
+# cached in $orggatewayfile on first use so it stays available after the
+# default route has been pointed at tun0.
+# NOTE(review): $orggatewayfile is not set in this section -- presumably
+# defined earlier in the script; confirm.
+get_original_gateway() {
+ if [[ -f "$orggatewayfile" ]]; then
+ org_gw=$(cat "$orggatewayfile")
+ else
+ org_gw=$(route -n | grep '^0.0.0.0' | awk '{ print $2 }')
+ echo "$org_gw" > "$orggatewayfile"
+ fi
+ echo "$org_gw"
+}
+
+gateway_is_up() {
+ vpn_gateway=$(get_vpn_gateway)
+ if [[ "$vpn_gateway" != "" ]]; then
+ echo true
+ fi
+}
+
+interface_dhcp_success() {
+ if [[ "$(ifconfig "$clearnet_interface" | grep 'inet addr' | wc -l)" -gt 0 ]]; then
+ echo true
+ fi
+}
+
+# mtu -- force the tun0 MTU to 1300 (leaves headroom for the cjdns
+# encapsulation overhead) unless it is already set to that value.
+mtu() {
+ # field 6 of the matching ifconfig line is expected to be "MTU:<n>" --
+ # TODO confirm; when tun0 is absent the comparison sees an empty string.
+ if [[ "$(ifconfig tun0 2> /dev/null | grep -i mtu | awk '{ print $6 }' | cut -d: -f 2)" -ne 1300 ]]; then
+ e "setting mtu"
+ ifconfig tun0 mtu 1300
+ fi
+}
+
+# original_gateway -- keep each remote VPN server ($remoteaddresses,
+# defined earlier in the script) routed via the original clearnet
+# gateway, so the tunnel traffic itself never loops through tun0.
+original_gateway() {
+ original_gateway=$(get_original_gateway)
+ for remoteaddress in $remoteaddresses; do
+ # only add the host route when it is not already present
+ if [[ "$(route -n | egrep "$remoteaddress.*?$original_gateway" | wc -l)" -eq 0 ]]; then
+ e "setting route $remoteaddress via $original_gateway dev $clearnet_interface"
+ route add "$remoteaddress" gw "$original_gateway" "$clearnet_interface"
+ fi
+ done
+}
+
+# defaultroute -- point the default route at the VPN gateway on tun0
+# unless it already goes there.
+defaultroute() {
+ # NOTE(review): $original_gateway is unused below, but the call caches
+ # the pre-VPN gateway to disk as a side effect -- kept on purpose.
+ original_gateway=$(get_original_gateway)
+ vpn_gateway=$(get_vpn_gateway)
+ if [[ "$(route -n | egrep "0.0.0.0.*?$vpn_gateway" | wc -l)" -eq 0 ]]; then
+ e "setting defaultroute"
+ route del default
+ route add default gw "$vpn_gateway" tun0
+ fi
+}
+
+# set_network_parameters -- apply all routing/interface tweaks needed
+# once the VPN gateway is up: tun0 MTU, host routes to the VPN servers
+# via the original gateway, and the default route through tun0.
+set_network_parameters() {
+ mtu
+ original_gateway
+ defaultroute
+}
+
+# check_for_internet -- print "true" as soon as one of the directly
+# routed remote servers answers a ping; reaching a single one is enough
+# to conclude that a clearnet internet connection exists.
+check_for_internet() {
+ for remoteaddress in $remoteaddresses; do
+ if [[ -n "$($ping "$remoteaddress" | grep 'bytes from')" ]]; then
+ echo true
+ return
+ fi
+ done
+}
+
+# set_status KEY VALUE -- publish a status flag for other tools by
+# writing VALUE into "$netstat_file-KEY" (e.g. /tmp/netstat-dhcp).
+set_status() {
+ key=$1
+ val=$2
+ echo "$val" > "$netstat_file-$key"
+}
+
+
+
+# params: the "startwifi" sub-command delegates to the web-UI helper,
+# which never returns; all other invocations fall through to the logic.
+[[ "$1" == "startwifi" ]] && startwifi "$2"
+
+
+
+# logic
+
+# ensure the dhcpd daemon is running (serves LAN clients)
+if [[ $( pidof dhcpd | wc -l ) -eq 0 ]]; then
+ /etc/init.d/dhcpd restart
+fi
+
+# ensure the radvd daemon is running (IPv6 router advertisements)
+if [[ "$(pidof radvd | wc -l)" -eq 0 ]]; then
+ /etc/init.d/radvd restart
+fi
+
+# setup wifi if available
+if [[ -e "/sys/class/net/wlan0" ]]; then
+ e "wifi detected"
+
+ if [[ "$opmode" = "mesh" ]]; then
+ e "opmode: mesh"
+
+ # mesh mode wants wlan0 in ad-hoc mode with ESSID "cjdns";
+ # reconfigure (and restart cjdns) only when that is not yet the case
+ if [[ "$(iwconfig wlan0 | grep 'ESSID' | grep 'cjdns' | wc -l)" -eq 0 \
+ || "$(iwconfig wlan0 | grep 'Mode:' | grep 'Ad-Hoc' | wc -l)" -eq 0 ]]; then
+ e "starting ad-hoc mesh"
+ ifconfig wlan0 down
+ iwconfig wlan0 mode ad-hoc
+ iwconfig wlan0 essid cjdns
+ ifconfig wlan0 up
+ restart_cjdns
+ else
+ e "ad-hoc mesh is running fine"
+ fi
+ fi
+
+ if [[ "$opmode" = "client" ]]; then
+ e "opmode: client"
+
+ # in client mode the wifi uplink replaces the wired clearnet interface
+ clearnet_interface=wlan0
+
+ # (re)associate only when wlan0 has no address or lost its AP
+ if [[ "$(ifconfig wlan0 | grep 'inet addr' | wc -l)" -eq 0 \
+ || "$(iwconfig wlan0 | grep 'Access Point: Not-Associated' | wc -l)" -gt 0 ]]; then
+ if [[ "$security" = "WPA" ]]; then
+ start_wpa
+ else
+ start_wep
+ fi
+ else
+ e "wlan client is running fine"
+ fi
+ #TODO: connect to unencrypted wifi
+ fi
+fi
+
+# happy path: the VPN gateway is already up -- set the routes and verify
+# internet reachability through the cjdns tunnel.
+if [[ "$(gateway_is_up)" == "true" ]]; then
+ set_network_parameters
+ e "checking internet connectivity over cjdns"
+ if [[ "$($ping 8.8.8.8 | grep 'bytes from')" ]]; then
+ echo "We have internet. Good."
+ set_status "dhcp" 1
+ set_status "internet" 1
+ set_status "cjdns" 1
+ set_status "cjdns_internet" 1
+ # this run succeeded: reset the escalation counter
+ rm "$cjdns_request_tries" 2> /dev/null
+ exit
+ fi
+fi
+
+# fall back: re-check the clearnet link from scratch
+echo "No internet via cjdns. Checking for regular internet connection..."
+set_status "dhcp" 0
+set_status "internet" 0
+set_status "cjdns" 0
+set_status "cjdns_internet" 0
+
+# request dhcp
+dhcp
+
+if [[ "$(interface_dhcp_success)" == "true" ]]; then
+ set_status "dhcp" 1
+fi
+
+wehaveinternet="no"
+if [[ "$(check_for_internet)" == "true" ]]; then
+ set_status "internet" 1
+ wehaveinternet="yes"
+fi
+
+# with clearnet internet available (and internet-gateway duty enabled in
+# the template), escalate the cjdns internet request and wait up to 60s
+# for the tunnel gateway to come up
+if [[ "$wehaveinternet" == "yes" && "$request_internet" == "yes" ]]; then
+ request_cjdns_internet
+ restart_cjdns
+ set_status "cjdns" 1
+ for i in $(seq 60 -1 1); do
+ echo "waiting $i seconds for gateway to come up..."
+ if [[ "$(gateway_is_up)" == "true" ]]; then
+ e "gateway is up."
+ set_network_parameters
+ e "checking internet connectivity over cjdns"
+ if [[ "$($ping 8.8.8.8 | grep 'bytes from')" ]]; then
+ echo "We have internet. Good."
+ set_status "dhcp" 1
+ set_status "internet" 1
+ set_status "cjdns" 1
+ set_status "cjdns_internet" 1
+ rm "$cjdns_request_tries" 2> /dev/null
+ exit
+ else
+ echo "Gateway is up, but no internet. Requesting..."
+ /usr/sbin/request-internet
+ exit
+ fi
+ fi
+ sleep 1
+ done
+else
+ e "no internet via cjdns."
+ # just ensure that cjdns is running, but DO NOT restart it!
+ # since local phone calls may be active.
+ start_cjdns
+ set_status "cjdns" 1
+fi
+
--- /dev/null
+
+# docs: http://wiki.ubuntuusers.de/WLAN/wpa_supplicant
+
+ctrl_interface=/var/run/wpa_supplicant
+eapol_version=1
+ap_scan=1
+
+network={
+ ssid="{{wlan_ssid}}"
+ scan_ssid=1
+ proto=RSN
+ key_mgmt=WPA-PSK
+ group={{wlan_group}}
+ pairwise={{wlan_pairwise}}
+ psk="{{wlan_pass}}"
+}
+
--- /dev/null
+
+# app_database -- keep the MySQL configuration and data directory in
+# place, and (re)initialise the database whenever the config file had to
+# be created or repaired.
+bundle agent app_database
+{
+ files:
+ # render /etc/my.cnf from the bundled template; a repair raises the
+ # init_mysql class, which triggers the commands: promise below
+ "/etc/my.cnf"
+ create => "true",
+ edit_template => "$(this.promise_dirname)/templates/my.cnf",
+ edit_defaults => no_backup,
+ classes => if_repaired("init_mysql");
+
+ # ensure the datadir referenced by my.cnf exists
+ "/www/db/mysql/."
+ create => "true";
+
+ commands:
+ init_mysql::
+ "/usr/sbin/init-mysql";
+
+ reports:
+ "checking database: done";
+}
+
--- /dev/null
+[client]
+port = 3306
+socket = /var/run/mysqld.sock
+
+[mysqld]
+user = root
+socket = /var/run/mysqld.sock
+port = 3306
+basedir = /usr
+
+############ Don't put this on the NAND #############
+# Figure out where you are going to put the databases
+# And run mysql_install_db --force
+datadir = /www/db/mysql/
+
+######### This should also not go on the NAND #######
+tmpdir = /tmp
+
+skip-external-locking
+
+bind-address = 127.0.0.1
+
+# Fine Tuning
+key_buffer = 16M
+max_allowed_packet = 16M
+thread_stack = 192K
+thread_cache_size = 8
+
+# Here you can see queries with especially long duration
+#log_slow_queries = /var/log/mysql/mysql-slow.log
+#long_query_time = 2
+#log-queries-not-using-indexes
+
+# The following can be used as easy to replay backup logs or for replication.
+#server-id = 1
+#log_bin = /var/log/mysql/mysql-bin.log
+#expire_logs_days = 10
+#max_binlog_size = 100M
+#binlog_do_db = include_database_name
+#binlog_ignore_db = include_database_name
+
+
+[mysqldump]
+quick
+quote-names
+max_allowed_packet = 16M
+
+[mysql]
+#no-auto-rehash # faster start of mysql but no tab completion
+
+[isamchk]
+key_buffer = 16M
+
+
--- /dev/null
+
+# app_email -- provision the mail stack: system users/groups, rendered
+# exim/dovecot/php configuration files and the vmail spool, restarting
+# the affected daemon only when its file actually changed.
+bundle agent app_email
+{
+ vars:
+ # passwd(5)/group(5) lines appended verbatim when missing (consumed by
+ # the append_users_starting/append_groups_starting edits below)
+ "pwd[uucp]" string => "uucp:x:10:10:uucp:/var/spool/uucp:/bin/sh";
+ "pwd[mail]" string => "mail:x:8:8:mail:/var/mail:/bin/sh";
+ "pwd[exim]" string => "exim:x:110:110:exim:/var/mail:/bin/sh";
+ "pwd[dovecot]" string => "dovecot:x:106:106:Dovecot mail server:/usr/lib/dovecot:/bin/false";
+ "pwd[dovenull]" string => "dovenull:x:107:107:Dovecot login user:/nonexistent:/bin/false";
+ #"pwd[vmail]" string => "vmail:x:5000:5000:virtual mail user:/box/vmail:/bin/sh";
+ "grp[uucp]" string => "uucp:x:10:";
+ "grp[mail]" string => "mail:x:8:";
+ "grp[exim]" string => "exim:x:110:";
+ "grp[dovecot]" string => "dovecot:x:106:";
+ #"grp[vmail]" string => "vmail:x:5000:";
+
+ files:
+ "/etc/passwd"
+ edit_line => append_users_starting("app_email.pwd");
+
+ "/etc/group"
+ edit_line => append_groups_starting("app_email.grp");
+
+ # exim config rendered from mustache with site-wide settings taken
+ # from the JSON file at $(g.site)
+ "/usr/exim/configure"
+ create => "true",
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/exim4.conf.mustache",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_exim");
+
+ "/etc/dovecot/dovecot.conf"
+ create => "true",
+ edit_template => "$(this.promise_dirname)/templates/dovecot.conf",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_dovecot");
+
+ # virtual-mail spool, owned by exim (see body perms vmail)
+ "/box/vmail/."
+ create => "true",
+ perms => vmail;
+
+ # dovecot user database, also rendered from the site JSON
+ "/etc/dovecot/users.conf"
+ create => "true",
+ perms => file,
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/users.conf.mustache",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_dovecot");
+
+ "/etc/php.ini"
+ create => "true",
+ edit_template => "$(this.promise_dirname)/templates/php.ini",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_php");
+
+ commands:
+ # restart handlers, triggered by the if_repaired classes above
+ restart_exim::
+ "/etc/init.d/exim restart";
+
+ restart_dovecot::
+ "/etc/init.d/dovecot restart";
+
+ restart_php::
+ "/etc/init.d/php5-fastcgi restart";
+
+ reports:
+ "checking email: done";
+}
+
+# vmail -- ownership/permissions for the virtual-mail spool: the exim
+# user/group owns the tree, mode 755.
+body perms vmail
+{
+ mode => "755";
+ owners => { "exim" };
+ groups => { "exim" };
+}
+
--- /dev/null
+auth_mechanisms = plain login
+disable_plaintext_auth = no
+auth_verbose = yes
+first_valid_gid = 110
+first_valid_uid = 110
+last_valid_gid = 110
+last_valid_uid = 110
+log_timestamp = "%Y-%m-%d %H:%M:%S "
+mail_location = maildir:/box/vmail/%n/Maildir
+mail_privileged_group = exim
+passdb {
+ args = scheme=SHA1 /etc/dovecot/users.conf
+ driver = passwd-file
+}
+protocols = imap pop3
+
+# SSL is intentionally disabled: it is too slow on this hardware and is
+# not needed for this deployment.
+ssl = no
+ssl_parameters_regenerate = 0
+
+userdb {
+ args = uid=110 gid=110 home=/box/vmail/%n allow_all_users=yes
+ driver = static
+}
+protocol lda {
+ auth_socket_path = /var/run/dovecot/auth-master
+ log_path =
+ mail_plugins = sieve
+ postmaster_address = postmaster@example.org
+}
--- /dev/null
+#####################################################
+### main/01_exim4-config_listmacrosdefs
+#####################################################
+######################################################################
+# Runtime configuration file for Exim 4 (Debian Packaging) #
+######################################################################
+
+######################################################################
+# /etc/exim4/exim4.conf.template is only used with the non-split
+# configuration scheme.
+# /etc/exim4/conf.d/main/01_exim4-config_listmacrosdefs is only used
+# with the split configuration scheme.
+# If you find this comment anywhere else, somebody copied it there.
+# Documentation about the Debian exim4 configuration scheme can be
+# found in /usr/share/doc/exim4-base/README.Debian.gz.
+######################################################################
+
+######################################################################
+# MAIN CONFIGURATION SETTINGS #
+######################################################################
+
+# Just for reference and scripts.
+# On Debian systems, the main binary is installed as exim4 to avoid
+# conflicts with the exim 3 packages.
+exim_path = /usr/sbin/exim4
+
+# Macro defining the main configuration directory.
+# We do not use absolute paths.
+.ifndef CONFDIR
+CONFDIR = /etc/exim4
+.endif
+
+MAIN_HARDCODE_PRIMARY_HOSTNAME = enigma.box
+MAIN_LOCAL_DOMAINS = @[]
+MAIN_RELAY_NETS = 127.0.0.1
+MAIN_RELAY_TO_DOMAINS = *
+ETC_MAILNAME = localhost
+MAIN_ALLOW_DOMAIN_LITERALS = 1
+REMOTE_SMTP_HEADERS_REWRITE = mail@* "mail@[${lookup dnsdb{aaaa=$domain}}]" tcb
+MESSAGE_SIZE_LIMIT = 150M
+
+# debconf-driven macro definitions get inserted after this line
+UPEX4CmacrosUPEX4C = 1
+
+# Create domain and host lists for relay control
+# '@' refers to 'the name of the local host'
+
+# List of domains considered local for exim. Domains not listed here
+# need to be deliverable remotely.
+domainlist local_domains = MAIN_LOCAL_DOMAINS
+
+# List of recipient domains to relay _to_. Use this list if you're -
+# for example - fallback MX or mail gateway for domains.
+domainlist relay_to_domains = MAIN_RELAY_TO_DOMAINS
+
+# List of sender networks (IP addresses) to _unconditionally_ relay
+# _for_. If you intend to be SMTP AUTH server, you do not need to enter
+# anything here.
+hostlist relay_from_hosts = MAIN_RELAY_NETS
+
+
+# Decide which domain to use to add to all unqualified addresses.
+# If MAIN_PRIMARY_HOSTNAME_AS_QUALIFY_DOMAIN is defined, the primary
+# hostname is used. If not, but MAIN_QUALIFY_DOMAIN is set, the value
+# of MAIN_QUALIFY_DOMAIN is used. If both macros are not defined,
+# the first line of /etc/mailname is used.
+.ifndef MAIN_PRIMARY_HOSTNAME_AS_QUALIFY_DOMAIN
+.ifndef MAIN_QUALIFY_DOMAIN
+qualify_domain = ETC_MAILNAME
+.else
+qualify_domain = MAIN_QUALIFY_DOMAIN
+.endif
+.endif
+
+# listen on all interfaces?
+.ifdef MAIN_LOCAL_INTERFACES
+local_interfaces = MAIN_LOCAL_INTERFACES
+.endif
+
+.ifndef LOCAL_DELIVERY
+# The default transport, set in /etc/exim4/update-exim4.conf.conf,
+# defaulting to mail_spool. See CONFDIR/conf.d/transport/ for possibilities
+LOCAL_DELIVERY=mail_spool
+.endif
+
+# The gecos field in /etc/passwd holds not only the name. see passwd(5).
+gecos_pattern = ^([^,:]*)
+gecos_name = $1
+
+# define macros to be used in acl/30_exim4-config_check_rcpt to check
+# recipient local parts for strange characters.
+
+# This macro definition really should be in
+# acl/30_exim4-config_check_rcpt but cannot be there due to
+# http://www.exim.org/bugzilla/show_bug.cgi?id=101 as of exim 4.62.
+
+# These macros are documented in acl/30_exim4-config_check_rcpt,
+# can be changed here or overridden by a locally added configuration
+# file as described in README.Debian chapter 2.1.2
+
+.ifndef CHECK_RCPT_LOCAL_LOCALPARTS
+CHECK_RCPT_LOCAL_LOCALPARTS = ^[.] : ^.*[@%!/|`#&?]
+.endif
+
+.ifndef CHECK_RCPT_REMOTE_LOCALPARTS
+CHECK_RCPT_REMOTE_LOCALPARTS = ^[./|] : ^.*[@%!`#&?] : ^.*/\\.\\./
+.endif
+
+# always log tls_peerdn as we use TLS for outgoing connects by default
+.ifndef MAIN_LOG_SELECTOR
+MAIN_LOG_SELECTOR = +tls_peerdn
+.endif
+#####################################################
+### end main/01_exim4-config_listmacrosdefs
+#####################################################
+#####################################################
+### main/02_exim4-config_options
+#####################################################
+
+### main/02_exim4-config_options
+#################################
+
+
+# Defines the access control list that is run when an
+# SMTP MAIL command is received.
+#
+.ifndef MAIN_ACL_CHECK_MAIL
+MAIN_ACL_CHECK_MAIL = acl_check_mail
+.endif
+acl_smtp_mail = MAIN_ACL_CHECK_MAIL
+
+
+# Defines the access control list that is run when an
+# SMTP RCPT command is received.
+#
+.ifndef MAIN_ACL_CHECK_RCPT
+MAIN_ACL_CHECK_RCPT = acl_check_rcpt
+.endif
+acl_smtp_rcpt = MAIN_ACL_CHECK_RCPT
+
+
+# Defines the access control list that is run when an
+# SMTP DATA command is received.
+#
+.ifndef MAIN_ACL_CHECK_DATA
+MAIN_ACL_CHECK_DATA = acl_check_data
+.endif
+acl_smtp_data = MAIN_ACL_CHECK_DATA
+
+
+# Message size limit. The default (used when MESSAGE_SIZE_LIMIT
+# is unset) is 50 MB
+.ifdef MESSAGE_SIZE_LIMIT
+message_size_limit = MESSAGE_SIZE_LIMIT
+.endif
+
+
+# If you are running exim4-daemon-heavy or a custom version of Exim that
+# was compiled with the content-scanning extension, you can cause incoming
+# messages to be automatically scanned for viruses. You have to modify the
+# configuration in two places to set this up. The first of them is here,
+# where you define the interface to your scanner. This example is typical
+# for ClamAV; see the manual for details of what to set for other virus
+# scanners. The second modification is in the acl_check_data access
+# control list.
+
+# av_scanner = clamd:/var/run/clamav/clamd.ctl
+
+
+# For spam scanning, there is a similar option that defines the interface to
+# SpamAssassin. You do not need to set this if you are using the default, which
+# is shown in this commented example. As for virus scanning, you must also
+# modify the acl_check_data access control list to enable spam scanning.
+
+# spamd_address = 127.0.0.1 783
+
+# Domain used to qualify unqualified recipient addresses
+# If this option is not set, the qualify_domain value is used.
+# qualify_recipient = <value of qualify_domain>
+
+
+# Allow Exim to recognize addresses of the form "user@[10.11.12.13]",
+# where the domain part is a "domain literal" (an IP address) instead
+# of a named domain. The RFCs require this facility, but it is disabled
+# in the default config since it is seldom used and frequently abused.
+# Domain literal support also needs a special router, which is automatically
+# enabled if you use the enable macro MAIN_ALLOW_DOMAIN_LITERALS.
+# Additionally, you might want to make your local IP addresses (or @[])
+# local domains.
+.ifdef MAIN_ALLOW_DOMAIN_LITERALS
+allow_domain_literals
+.endif
+
+
+# Do a reverse DNS lookup on all incoming IP calls, in order to get the
+# true host name. If you feel this is too expensive, the networks for
+# which a lookup is done can be listed here.
+.ifndef DC_minimaldns
+.ifndef MAIN_HOST_LOOKUP
+MAIN_HOST_LOOKUP = *
+.endif
+host_lookup = MAIN_HOST_LOOKUP
+.endif
+
+
+# In a minimaldns setup, update-exim4.conf guesses the hostname and
+# dumps it here to avoid DNS lookups being done at Exim run time.
+.ifdef MAIN_HARDCODE_PRIMARY_HOSTNAME
+primary_hostname = MAIN_HARDCODE_PRIMARY_HOSTNAME
+.endif
+
+# The settings below, which are actually the same as the defaults in the
+# code, cause Exim to make RFC 1413 (ident) callbacks for all incoming SMTP
+# calls. You can limit the hosts to which these calls are made, and/or change
+# the timeout that is used. If you set the timeout to zero, all RFC 1413 calls
+# are disabled. RFC 1413 calls are cheap and can provide useful information
+# for tracing problem messages, but some hosts and firewalls are
+# misconfigured to drop the requests instead of either answering or
+# rejecting them. This can result in a timeout instead of an immediate refused
+# connection, leading to delays on starting up SMTP sessions. (The default was
+# reduced from 30s to 5s for release 4.61.)
+# rfc1413_hosts = *
+rfc1413_query_timeout = 0s
+
+# When using an external relay tester (such as rt.njabl.org and/or the
+# currently defunct relay-test.mail-abuse.org), the test may be aborted
+# since exim complains about "too many nonmail commands". If you want
+# the test to complete, add the host from where "your" relay tester
+# connects from to the MAIN_SMTP_ACCEPT_MAX_NOMAIL_HOSTS macro.
+# Please note that a non-empty setting may cause extra DNS lookups to
+# happen, which is the reason why this option is commented out in the
+# default settings.
+# MAIN_SMTP_ACCEPT_MAX_NOMAIL_HOSTS = !rt.njabl.org
+.ifdef MAIN_SMTP_ACCEPT_MAX_NOMAIL_HOSTS
+smtp_accept_max_nonmail_hosts = MAIN_SMTP_ACCEPT_MAX_NOMAIL_HOSTS
+.endif
+
+# By default, exim forces a Sender: header containing the local
+# account name at the local host name in all locally submitted messages
+# that don't have the local account name at the local host name in the
+# From: header, deletes any Sender: header present in the submitted
+# message and forces the envelope sender of all locally submitted
+# messages to the local account name at the local host name.
+# The following settings allow local users to specify their own envelope sender
+# in a locally submitted message. Sender: headers existing in a locally
+# submitted message are not removed, and no automatic Sender: headers
+# are added. These settings are fine for most hosts.
+# If you run exim on a classical multi-user systems where all users
+# have local mailboxes that can be reached via SMTP from the Internet
+# with the local FQDN as the domain part of the address, you might want
+# to disable the following three lines for traceability reasons.
+.ifndef MAIN_FORCE_SENDER
+local_from_check = false
+local_sender_retain = true
+untrusted_set_sender = *
+.endif
+
+
+# By default, Exim expects all envelope addresses to be fully qualified, that
+# is, they must contain both a local part and a domain. Configure exim
+# to accept unqualified addresses from certain hosts. When this is done,
+# unqualified addresses are qualified using the settings of qualify_domain
+# and/or qualify_recipient (see above).
+# sender_unqualified_hosts = <unset>
+# recipient_unqualified_hosts = <unset>
+
+
+# Configure Exim to support the "percent hack" for certain domains.
+# The "percent hack" is the feature by which mail addressed to x%y@z
+# (where z is one of the domains listed) is locally rerouted to x@y
+# and sent on. If z is not one of the "percent hack" domains, x%y is
+# treated as an ordinary local part. The percent hack is rarely needed
+# nowadays but frequently abused. You should not enable it unless you
+# are sure that you really need it.
+# percent_hack_domains = <unset>
+
+
+# Bounce handling
+.ifndef MAIN_IGNORE_BOUNCE_ERRORS_AFTER
+MAIN_IGNORE_BOUNCE_ERRORS_AFTER = 2d
+.endif
+ignore_bounce_errors_after = MAIN_IGNORE_BOUNCE_ERRORS_AFTER
+
+.ifndef MAIN_TIMEOUT_FROZEN_AFTER
+MAIN_TIMEOUT_FROZEN_AFTER = 7d
+.endif
+timeout_frozen_after = MAIN_TIMEOUT_FROZEN_AFTER
+
+.ifndef MAIN_FREEZE_TELL
+MAIN_FREEZE_TELL = postmaster
+.endif
+freeze_tell = MAIN_FREEZE_TELL
+
+
+# Define spool directory
+.ifndef SPOOLDIR
+SPOOLDIR = /var/spool/exim4
+.endif
+spool_directory = SPOOLDIR
+
+
+# trusted users can set envelope-from to arbitrary values
+.ifndef MAIN_TRUSTED_USERS
+MAIN_TRUSTED_USERS = uucp
+.endif
+trusted_users = MAIN_TRUSTED_USERS
+.ifdef MAIN_TRUSTED_GROUPS
+trusted_groups = MAIN_TRUSTED_GROUPS
+.endif
+
+
+# users in admin group can do many other things
+# admin_groups = <unset>
+
+
+# SMTP Banner. The example includes the Debian version in the SMTP dialog
+# MAIN_SMTP_BANNER = "${primary_hostname} ESMTP Exim ${version_number} (Debian package MAIN_PACKAGE_VERSION) ${tod_full}"
+# smtp_banner = $smtp_active_hostname ESMTP Exim $version_number $tod_full
+smtp_banner = Enigmabox ESMTP Stealth Mailer $tod_full
+
+#####################################################
+### end main/02_exim4-config_options
+#####################################################
+#####################################################
+### main/03_exim4-config_tlsoptions
+#####################################################
+
+### main/03_exim4-config_tlsoptions
+#################################
+
+# TLS/SSL configuration for exim as an SMTP server.
+# See /usr/share/doc/exim4-base/README.Debian.gz for explanations.
+
+.ifdef MAIN_TLS_ENABLE
+# Defines what hosts to 'advertise' STARTTLS functionality to. The
+# default, *, will advertise to all hosts that connect with EHLO.
+.ifndef MAIN_TLS_ADVERTISE_HOSTS
+MAIN_TLS_ADVERTISE_HOSTS = *
+.endif
+tls_advertise_hosts = MAIN_TLS_ADVERTISE_HOSTS
+
+
+# Full paths to Certificate and Private Key. The Private Key file
+# must be kept 'secret' and should be owned by root.Debian-exim mode
+# 640 (-rw-r-----). exim-gencert takes care of these prerequisites.
+# Normally, exim4 looks for certificate and key in different files:
+# MAIN_TLS_CERTIFICATE - path to certificate file,
+# CONFDIR/exim.crt if unset
+# MAIN_TLS_PRIVATEKEY - path to private key file
+# CONFDIR/exim.key if unset
+# You can also configure exim to look for certificate and key in the
+# same file, set MAIN_TLS_CERTKEY to that file to enable. This takes
+# precedence over all other settings regarding certificate and key file.
+.ifdef MAIN_TLS_CERTKEY
+tls_certificate = MAIN_TLS_CERTKEY
+.else
+.ifndef MAIN_TLS_CERTIFICATE
+MAIN_TLS_CERTIFICATE = CONFDIR/exim.crt
+.endif
+tls_certificate = MAIN_TLS_CERTIFICATE
+
+.ifndef MAIN_TLS_PRIVATEKEY
+MAIN_TLS_PRIVATEKEY = CONFDIR/exim.key
+.endif
+tls_privatekey = MAIN_TLS_PRIVATEKEY
+.endif
+
+# Pointer to the CA Certificates against which client certificates are
+# checked. This is controlled by the `tls_verify_hosts' and
+# `tls_try_verify_hosts' lists below.
+# If you want to check server certificates, you need to add an
+# tls_verify_certificates statement to the smtp transport.
+# /etc/ssl/certs/ca-certificates.crt is generated by
+# the "ca-certificates" package's update-ca-certificates(8) command.
+.ifndef MAIN_TLS_VERIFY_CERTIFICATES
+MAIN_TLS_VERIFY_CERTIFICATES = ${if exists{/etc/ssl/certs/ca-certificates.crt}\
+ {/etc/ssl/certs/ca-certificates.crt}\
+ {/dev/null}}
+.endif
+tls_verify_certificates = MAIN_TLS_VERIFY_CERTIFICATES
+
+
+# A list of hosts which are constrained by `tls_verify_certificates'. A host
+# that matches `tls_verify_host' must present a certificate that is
+# verifyable through `tls_verify_certificates' in order to be accepted as an
+# SMTP client. If it does not, the connection is aborted.
+.ifdef MAIN_TLS_VERIFY_HOSTS
+tls_verify_hosts = MAIN_TLS_VERIFY_HOSTS
+.endif
+
+# A weaker form of checking: if a client matches `tls_try_verify_hosts' (but
+# not `tls_verify_hosts'), request a certificate and check it against
+# `tls_verify_certificates' but do not abort the connection if there is no
+# certificate or if the certificate presented does not match. (This
+# condition can be tested for in ACLs through `verify = certificate')
+# By default, this check is done for all hosts. It is known that some
+# clients (including incredimail's version downloadable in February
+# 2008) choke on this. To disable, set MAIN_TLS_TRY_VERIFY_HOSTS to an
+# empty value.
+.ifdef MAIN_TLS_TRY_VERIFY_HOSTS
+tls_try_verify_hosts = MAIN_TLS_TRY_VERIFY_HOSTS
+.endif
+
+.endif
+#####################################################
+### end main/03_exim4-config_tlsoptions
+#####################################################
+#####################################################
+### main/90_exim4-config_log_selector
+#####################################################
+
+### main/90_exim4-config_log_selector
+#################################
+
+# uncomment this for debugging
+# MAIN_LOG_SELECTOR == MAIN_LOG_SELECTOR +all -subject -arguments
+
+.ifdef MAIN_LOG_SELECTOR
+log_selector = MAIN_LOG_SELECTOR
+.endif
+#####################################################
+### end main/90_exim4-config_log_selector
+#####################################################
+#####################################################
+### acl/00_exim4-config_header
+#####################################################
+
+######################################################################
+# ACL CONFIGURATION #
+# Specifies access control lists for incoming SMTP mail #
+######################################################################
+begin acl
+
+
+#####################################################
+### end acl/00_exim4-config_header
+#####################################################
+#####################################################
+### acl/20_exim4-config_local_deny_exceptions
+#####################################################
+
+### acl/20_exim4-config_local_deny_exceptions
+#################################
+
+# This is used to determine whitelisted senders and hosts.
+# It checks for CONFDIR/host_local_deny_exceptions and
+# CONFDIR/sender_local_deny_exceptions.
+#
+# It is meant to be used from some other acl entry.
+#
+# See exim4-config_files(5) for details.
+#
+# If the files do not exist, the white list never matches, which is
+# the desired behaviour.
+#
+# The old file names CONFDIR/local_host_whitelist and
+# CONFDIR/local_sender_whitelist will continue to be honored for a
+# transition period. Their use is deprecated.
+
+acl_local_deny_exceptions:
+ accept
+ hosts = ${if exists{CONFDIR/host_local_deny_exceptions}\
+ {CONFDIR/host_local_deny_exceptions}\
+ {}}
+ accept
+ senders = ${if exists{CONFDIR/sender_local_deny_exceptions}\
+ {CONFDIR/sender_local_deny_exceptions}\
+ {}}
+ accept
+ hosts = ${if exists{CONFDIR/local_host_whitelist}\
+ {CONFDIR/local_host_whitelist}\
+ {}}
+ accept
+ senders = ${if exists{CONFDIR/local_sender_whitelist}\
+ {CONFDIR/local_sender_whitelist}\
+ {}}
+
+ # This hook allows you to hook in your own ACLs without having to
+ # modify this file. If you do it like we suggest, you'll end up with
+ # a small performance penalty since there is an additional file being
+ # accessed. This doesn't happen if you leave the macro unset.
+ .ifdef LOCAL_DENY_EXCEPTIONS_LOCAL_ACL_FILE
+ .include LOCAL_DENY_EXCEPTIONS_LOCAL_ACL_FILE
+ .endif
+
+ # this is still supported for a transition period and is deprecated.
+ .ifdef WHITELIST_LOCAL_DENY_LOCAL_ACL_FILE
+ .include WHITELIST_LOCAL_DENY_LOCAL_ACL_FILE
+ .endif
+#####################################################
+### end acl/20_exim4-config_local_deny_exceptions
+#####################################################
+#####################################################
+### acl/30_exim4-config_check_mail
+#####################################################
+
+### acl/30_exim4-config_check_mail
+#################################
+
+# This access control list is used for every MAIL command in an incoming
+# SMTP message. The tests are run in order until the address is either
+# accepted or denied.
+#
+acl_check_mail:
+ .ifdef CHECK_MAIL_HELO_ISSUED
+ deny
+ message = no HELO given before MAIL command
+ condition = ${if def:sender_helo_name {no}{yes}}
+ .endif
+
+ accept
+#####################################################
+### end acl/30_exim4-config_check_mail
+#####################################################
+#####################################################
+### acl/30_exim4-config_check_rcpt
+#####################################################
+
+### acl/30_exim4-config_check_rcpt
+#################################
+
+# This access control list is used for every RCPT command in an incoming
+# SMTP message. The tests are run in order until the address is either
+# accepted or denied.
+#
+acl_check_rcpt:
+
+ # Accept if the source is local SMTP (i.e. not over TCP/IP). We do this by
+ # testing for an empty sending host field.
+ accept
+ hosts = :
+ control = dkim_disable_verify
+
+ # Do not try to verify DKIM signatures of incoming mail if DC_minimaldns
+ # or DISABLE_DKIM_VERIFY are set.
+.ifdef DC_minimaldns
+ warn
+ control = dkim_disable_verify
+.else
+.ifdef DISABLE_DKIM_VERIFY
+ warn
+ control = dkim_disable_verify
+.endif
+.endif
+
+ # The following section of the ACL is concerned with local parts that contain
+ # certain non-alphanumeric characters. Dots in unusual places are
+ # handled by this ACL as well.
+ #
+ # Non-alphanumeric characters other than dots are rarely found in genuine
+ # local parts, but are often tried by people looking to circumvent
+ # relaying restrictions. Therefore, although they are valid in local
+ # parts, these rules disallow certain non-alphanumeric characters, as
+ # a precaution.
+ #
+ # Empty components (two dots in a row) are not valid in RFC 2822, but Exim
+ # allows them because they have been encountered. (Consider local parts
+ # constructed as "firstinitial.secondinitial.familyname" when applied to
+ # a name without a second initial.) However, a local part starting
+ # with a dot or containing /../ can cause trouble if it is used as part of a
+ # file name (e.g. for a mailing list). This is also true for local parts that
+ # contain slashes. A pipe symbol can also be troublesome if the local part is
+ # incorporated unthinkingly into a shell command line.
+ #
+ # These ACL components will block recipient addresses that are valid
+ # from an RFC2822 point of view. We chose to have them blocked by
+ # default for security reasons.
+ #
+ # If you feel that your site should have less strict recipient
+ # checking, please feel free to change the default values of the macros
+ # defined in main/01_exim4-config_listmacrosdefs or override them from a
+ # local configuration file.
+ #
+ # Two different rules are used. The first one has a quite strict
+ # default, and is applied to messages that are addressed to one of the
+ # local domains handled by this host.
+
+ # The default value of CHECK_RCPT_LOCAL_LOCALPARTS is defined in
+ # main/01_exim4-config_listmacrosdefs:
+ # CHECK_RCPT_LOCAL_LOCALPARTS = ^[.] : ^.*[@%!/|`#&?]
+ # This blocks local parts that begin with a dot or contain a quite
+ # broad range of non-alphanumeric characters.
+ .ifdef CHECK_RCPT_LOCAL_LOCALPARTS
+ # Reject risky local parts in our own domains (strict default, see the
+ # macro documentation above).
+ deny
+ domains = +local_domains
+ local_parts = CHECK_RCPT_LOCAL_LOCALPARTS
+ message = restricted characters in address
+ .endif
+
+
+ # The second rule applies to all other domains, and its default is
+ # considerably less strict.
+
+ # The default value of CHECK_RCPT_REMOTE_LOCALPARTS is defined in
+ # main/01_exim4-config_listmacrosdefs:
+ # CHECK_RCPT_REMOTE_LOCALPARTS = ^[./|] : ^.*[@%!`#&?] : ^.*/\\.\\./
+
+ # It allows local users to send outgoing messages to sites
+ # that use slashes and vertical bars in their local parts. It blocks
+ # local parts that begin with a dot, slash, or vertical bar, but allows
+ # these characters within the local part. However, the sequence /../ is
+ # barred. The use of some other non-alphanumeric characters is blocked.
+ # Single quotes might probably be dangerous as well, but they're
+ # allowed by the default regexps to avoid rejecting mails to Ireland.
+ # The motivation here is to prevent local users (or local users' malware)
+ # from mounting certain kinds of attack on remote sites.
+ .ifdef CHECK_RCPT_REMOTE_LOCALPARTS
+ # Reject risky local parts for non-local domains (laxer default than the
+ # local-domain rule above).
+ deny
+ domains = !+local_domains
+ local_parts = CHECK_RCPT_REMOTE_LOCALPARTS
+ message = restricted characters in address
+ .endif
+
+
+ # Accept mail to postmaster in any local domain, regardless of the source,
+ # and without verifying the sender.
+ #
+ accept
+ # CHECK_RCPT_POSTMASTER may override the default "postmaster" local part
+ # list (e.g. to add "abuse").
+ .ifndef CHECK_RCPT_POSTMASTER
+ local_parts = postmaster
+ .else
+ local_parts = CHECK_RCPT_POSTMASTER
+ .endif
+ domains = +local_domains : +relay_to_domains
+
+
+ # Deny unless the sender address can be verified.
+ #
+ # This is disabled by default so that DNSless systems don't break. If
+ # your system can do DNS lookups without delay or cost, you might want
+ # to enable this feature.
+ #
+ # This feature does not work in smarthost and satellite setups as
+ # with these setups all domains pass verification. See spec.txt chapter
+ # 39.31 with the added information that a smarthost/satellite setup
+ # routes all non-local e-mail to the smarthost.
+ .ifdef CHECK_RCPT_VERIFY_SENDER
+ # Addresses/hosts matching acl_local_deny_exceptions are exempt.
+ deny
+ message = Sender verification failed
+ !acl = acl_local_deny_exceptions
+ !verify = sender
+ .endif
+
+ # Verify senders listed in local_sender_callout with a callout.
+ #
+ # In smarthost and satellite setups, this causes the callout to be
+ # done to the smarthost. Verification will thus only be reliable if the
+ # smarthost does reject illegal addresses in the SMTP dialog.
+ # If CONFDIR/local_sender_callout does not exist, the senders list expands
+ # to empty and this statement matches nothing.
+ deny
+ !acl = acl_local_deny_exceptions
+ senders = ${if exists{CONFDIR/local_sender_callout}\
+ {CONFDIR/local_sender_callout}\
+ {}}
+ !verify = sender/callout
+
+
+ # Accept if the message comes from one of the hosts for which we are an
+ # outgoing relay. It is assumed that such hosts are most likely to be MUAs,
+ # so we set control=submission to make Exim treat the message as a
+ # submission. It will fix up various errors in the message, for example, the
+ # lack of a Date: header line. If you are actually relaying out from
+ # MTAs, you may want to disable this. If you are handling both relaying from
+ # MTAs and submissions from MUAs you should probably split them into two
+ # lists, and handle them differently.
+
+ # Recipient verification is omitted here, because in many cases the clients
+ # are dumb MUAs that don't cope well with SMTP error responses. If you are
+ # actually relaying out from MTAs, you should probably add recipient
+ # verification here.
+
+ # Note that, by putting this test before any DNS black list checks, you will
+ # always accept from these hosts, even if they end up on a black list. The
+ # assumption is that they are your friends, and if they get onto a black
+ # list, it is a mistake.
+ # submission/sender_retain: fix up MUA messages but keep the Sender: header.
+ accept
+ hosts = +relay_from_hosts
+ control = submission/sender_retain
+ control = dkim_disable_verify
+
+
+ # Accept if the message arrived over an authenticated connection, from
+ # any host. Again, these messages are usually from MUAs, so recipient
+ # verification is omitted, and submission mode is set. And again, we do this
+ # check before any black list tests.
+ # "authenticated = *" matches any successfully authenticated connection.
+ accept
+ authenticated = *
+ control = submission/sender_retain
+ control = dkim_disable_verify
+
+
+ # Insist that any other recipient address that we accept is either in one of
+ # our local domains, or is in a domain for which we explicitly allow
+ # relaying. Any other domain is rejected as being unacceptable for relaying.
+ # Everything past this point must be for a local or relay domain; this is
+ # the anti-open-relay check.
+ require
+ message = relay not permitted
+ domains = +local_domains : +relay_to_domains
+
+
+ # We also require all accepted addresses to be verifiable. This check will
+ # do local part verification for local domains, but only check the domain
+ # for remote domains.
+ # Recipient must verify (local part checked for local domains only).
+ require
+ verify = recipient
+
+
+ # Verify recipients listed in local_rcpt_callout with a callout.
+ # This is especially handy for forwarding MX hosts (secondary MX or
+ # mail hubs) of domains that receive a lot of spam to non-existent
+ # addresses. The only way to check local parts for remote relay
+ # domains is to use a callout (add /callout), but please read the
+ # documentation about callouts before doing this.
+ # As with the sender callout above: a missing file expands to an empty
+ # recipients list, so this matches nothing by default.
+ deny
+ !acl = acl_local_deny_exceptions
+ recipients = ${if exists{CONFDIR/local_rcpt_callout}\
+ {CONFDIR/local_rcpt_callout}\
+ {}}
+ !verify = recipient/callout
+
+
+ # CONFDIR/local_sender_blacklist holds a list of envelope senders that
+ # should have their access denied to the local host. Incoming messages
+ # with one of these senders are rejected at RCPT time.
+ #
+ # The explicit white lists are honored as well as negative items in
+ # the black list. See exim4-config_files(5) for details.
+ # Reject envelope senders listed in CONFDIR/local_sender_blacklist (file is
+ # optional; absent file means an empty list).
+ deny
+ message = sender envelope address $sender_address is locally blacklisted here. If you think this is wrong, get in touch with postmaster
+ !acl = acl_local_deny_exceptions
+ senders = ${if exists{CONFDIR/local_sender_blacklist}\
+ {CONFDIR/local_sender_blacklist}\
+ {}}
+
+
+ # deny bad sites (IP address)
+ # CONFDIR/local_host_blacklist holds a list of host names, IP addresses
+ # and networks (CIDR notation) that should have their access denied to
+ # The local host. Messages coming in from a listed host will have all
+ # RCPT statements rejected.
+ #
+ # The explicit white lists are honored as well as negative items in
+ # the black list. See exim4-config_files(5) for details.
+ # Reject connections from hosts listed in CONFDIR/local_host_blacklist
+ # (file is optional; absent file means an empty list).
+ deny
+ message = sender IP address $sender_host_address is locally blacklisted here. If you think this is wrong, get in touch with postmaster
+ !acl = acl_local_deny_exceptions
+ hosts = ${if exists{CONFDIR/local_host_blacklist}\
+ {CONFDIR/local_host_blacklist}\
+ {}}
+
+
+ # Warn if the sender host does not have valid reverse DNS.
+ #
+ # If your system can do DNS lookups without delay or cost, you might want
+ # to enable this.
+ # If sender_host_address is defined, it's a remote call. If
+ # sender_host_name is not defined, then reverse lookup failed. Use
+ # this instead of !verify = reverse_host_lookup to catch deferrals
+ # as well as outright failures.
+ .ifdef CHECK_RCPT_REVERSE_DNS
+ # Tag (not reject) messages from hosts whose PTR lookup failed or deferred.
+ warn
+ condition = ${if and{{def:sender_host_address}{!def:sender_host_name}}\
+ {yes}{no}}
+ add_header = X-Host-Lookup-Failed: Reverse DNS lookup failed for $sender_host_address (${if eq{$host_lookup_failed}{1}{failed}{deferred}})
+ .endif
+
+
+ # Use spfquery to perform a pair of SPF checks (for details, see
+ # http://www.openspf.org/)
+ #
+ # This is quite costly in terms of DNS lookups (~6 lookups per mail). Do not
+ # enable if that's an issue. Also note that if you enable this, you must
+ # install "spf-tools-perl" which provides the spfquery command.
+ # Missing spf-tools-perl will trigger the "Unexpected error in
+ # SPF check" warning.
+ .ifdef CHECK_RCPT_SPF
+ # spfquery return codes, as mapped by the expansions below:
+ # 0=pass 1=fail 2=softfail 3=neutral 4=permerror 5=temperror 6=none
+ # Only rc==1 (hard fail) causes a deny; rc==5 defers; others only add a
+ # Received-SPF: header.
+ deny
+ message = [SPF] $sender_host_address is not allowed to send mail from \
+ ${if def:sender_address_domain {$sender_address_domain}{$sender_helo_name}}. \
+ Please see \
+ http://www.openspf.org/Why?scope=${if def:sender_address_domain \
+ {mfrom}{helo}};identity=${if def:sender_address_domain \
+ {$sender_address}{$sender_helo_name}};ip=$sender_host_address
+ log_message = SPF check failed.
+ !acl = acl_local_deny_exceptions
+ condition = ${run{/usr/bin/spfquery.mail-spf-perl --ip \
+ ${quote:$sender_host_address} --identity \
+ ${if def:sender_address_domain \
+ {--scope mfrom --identity ${quote:$sender_address}}\
+ {--scope helo --identity ${quote:$sender_helo_name}}}}\
+ {no}{${if eq {$runrc}{1}{yes}{no}}}}
+
+ defer
+ message = Temporary DNS error while checking SPF record. Try again later.
+ !acl = acl_local_deny_exceptions
+ condition = ${if eq {$runrc}{5}{yes}{no}}
+
+ warn
+ condition = ${if <={$runrc}{6}{yes}{no}}
+ add_header = Received-SPF: ${if eq {$runrc}{0}{pass}\
+ {${if eq {$runrc}{2}{softfail}\
+ {${if eq {$runrc}{3}{neutral}\
+ {${if eq {$runrc}{4}{permerror}\
+ {${if eq {$runrc}{6}{none}{error}}}}}}}}}\
+ } client-ip=$sender_host_address; \
+ ${if def:sender_address_domain \
+ {envelope-from=${sender_address}; }{}}\
+ helo=$sender_helo_name
+
+ # rc > 6 means spfquery itself failed (e.g. spf-tools-perl not installed).
+ warn
+ log_message = Unexpected error in SPF check.
+ condition = ${if >{$runrc}{6}{yes}{no}}
+ .endif
+
+
+ # Check against classic DNS "black" lists (DNSBLs) which list
+ # sender IP addresses
+ .ifdef CHECK_RCPT_IP_DNSBLS
+ # Tag-only policy: a listing adds a header and log entry, never rejects.
+ warn
+ dnslists = CHECK_RCPT_IP_DNSBLS
+ add_header = X-Warning: $sender_host_address is listed at $dnslist_domain ($dnslist_value: $dnslist_text)
+ log_message = $sender_host_address is listed at $dnslist_domain ($dnslist_value: $dnslist_text)
+ .endif
+
+
+ # Check against DNSBLs which list sender domains, with an option to locally
+ # whitelist certain domains that might be blacklisted.
+ #
+ # Note: If you define CHECK_RCPT_DOMAIN_DNSBLS, you must append
+ # "/$sender_address_domain" after each domain. For example:
+ # CHECK_RCPT_DOMAIN_DNSBLS = rhsbl.foo.org/$sender_address_domain \
+ # : rhsbl.bar.org/$sender_address_domain
+ .ifdef CHECK_RCPT_DOMAIN_DNSBLS
+ # Tag-only, like the IP DNSBL check above; senders in the optional
+ # whitelist file are skipped.
+ warn
+ !senders = ${if exists{CONFDIR/local_domain_dnsbl_whitelist}\
+ {CONFDIR/local_domain_dnsbl_whitelist}\
+ {}}
+ dnslists = CHECK_RCPT_DOMAIN_DNSBLS
+ add_header = X-Warning: $sender_address_domain is listed at $dnslist_domain ($dnslist_value: $dnslist_text)
+ log_message = $sender_address_domain is listed at $dnslist_domain ($dnslist_value: $dnslist_text)
+ .endif
+
+
+ # This hook allows you to hook in your own ACLs without having to
+ # modify this file. If you do it like we suggest, you'll end up with
+ # a small performance penalty since there is an additional file being
+ # accessed. This doesn't happen if you leave the macro unset.
+ .ifdef CHECK_RCPT_LOCAL_ACL_FILE
+ # Site-local additions are spliced in here at configuration read time.
+ .include CHECK_RCPT_LOCAL_ACL_FILE
+ .endif
+
+
+ # disable relaying from foreign hosts
+ # NOTE(review): custom (cjdns) rule, not part of the stock Debian exim4
+ # config. It denies RCPTs from hosts in fc00::/8 (the cjdns range) unless
+ # the recipient domain, with square brackets stripped by ${sg}, matches the
+ # list built from cjdns_ipv6. The "{{cjdns_ipv6}}" double-brace syntax looks
+ # unusual -- TODO confirm it expands to the intended host/domain list.
+ deny
+ hosts = <; fc00::/8
+ !domains = <; ${if match_ip{${sg{$domain}{\\[|\\]}{}}}{<;{{cjdns_ipv6}}}{*}{}}
+
+
+ #############################################################################
+ # This check is commented out because it is recognized that not every
+ # sysadmin will want to do it. If you enable it, the check performs
+ # Client SMTP Authorization (csa) checks on the sending host. These checks
+ # do DNS lookups for SRV records. The CSA proposal is currently (May 2005)
+ # an Internet draft. You can, of course, add additional conditions to this
+ # ACL statement to restrict the CSA checks to certain hosts only.
+ #
+ # require verify = csa
+ #############################################################################
+
+
+ # Accept if the address is in a domain for which we are an incoming relay,
+ # but again, only if the recipient can be verified.
+ # "endpass" means: if a condition below it fails, the RCPT is rejected
+ # instead of falling through to the next ACL statement.
+
+ accept
+ domains = +relay_to_domains
+ endpass
+ verify = recipient
+
+
+ # At this point, the address has passed all the checks that have been
+ # configured, so we accept it unconditionally. (An unconditional accept is
+ # the required terminator: falling off the end of an ACL means deny.)
+
+ accept
+#####################################################
+### end acl/30_exim4-config_check_rcpt
+#####################################################
+#####################################################
+### acl/40_exim4-config_check_data
+#####################################################
+
+### acl/40_exim4-config_check_data
+#################################
+
+# This ACL is used after the contents of a message have been received. This
+# is the ACL in which you can test a message's headers or body, and in
+# particular, this is where you can invoke external virus or spam scanners.
+
+acl_check_data:
+
+ # Deny unless the address list headers are syntactically correct.
+ #
+ # If you enable this, you might reject legitimate mail.
+ .ifdef CHECK_DATA_VERIFY_HEADER_SYNTAX
+ deny
+ message = Message headers fail syntax check
+ !acl = acl_local_deny_exceptions
+ !verify = header_syntax
+ .endif
+
+
+ # require that there is a verifiable sender address in at least
+ # one of the "Sender:", "Reply-To:", or "From:" header lines.
+ .ifdef CHECK_DATA_VERIFY_HEADER_SENDER
+ # Requires a verifiable address in Sender:, Reply-To: or From:.
+ deny
+ message = No verifiable sender address in message headers
+ !acl = acl_local_deny_exceptions
+ !verify = header_sender
+ .endif
+
+
+ # Deny if the message contains malware. Before enabling this check, you
+ # must install a virus scanner and set the av_scanner option in the
+ # main configuration.
+ #
+ # exim4-daemon-heavy must be used for this section to work.
+ #
+ # deny
+ # malware = *
+ # message = This message was detected as possible malware ($malware_name).
+
+
+ # Add headers to a message if it is judged to be spam. Before enabling this,
+ # you must install SpamAssassin. You also need to set the spamd_address
+ # option in the main configuration.
+ #
+ # exim4-daemon-heavy must be used for this section to work.
+ #
+ # Please note that this is only suitable as an example. There are
+ # multiple issues with this configuration method. For example, if you go
+ # this way, you'll give your spamassassin daemon write access to the
+ # entire exim spool which might be a security issue in case of a
+ # spamassassin exploit.
+ #
+ # See the exim docs and the exim wiki for more suitable examples.
+ #
+ # warn
+ # spam = Debian-exim:true
+ # add_header = X-Spam_score: $spam_score\n\
+ # X-Spam_score_int: $spam_score_int\n\
+ # X-Spam_bar: $spam_bar\n\
+ # X-Spam_report: $spam_report
+
+
+ # This hook allows you to hook in your own ACLs without having to
+ # modify this file. If you do it like we suggest, you'll end up with
+ # a small performance penalty since there is an additional file being
+ # accessed. This doesn't happen if you leave the macro unset.
+ .ifdef CHECK_DATA_LOCAL_ACL_FILE
+ # Site-local DATA-time additions are spliced in here.
+ .include CHECK_DATA_LOCAL_ACL_FILE
+ .endif
+
+
+ # accept otherwise (falling off the end of an ACL would mean deny)
+ accept
+#####################################################
+### end acl/40_exim4-config_check_data
+#####################################################
+#####################################################
+### router/00_exim4-config_header
+#####################################################
+
+######################################################################
+# ROUTERS CONFIGURATION #
+# Specifies how addresses are handled #
+######################################################################
+# THE ORDER IN WHICH THE ROUTERS ARE DEFINED IS IMPORTANT! #
+# An address is passed to each router in turn until it is accepted. #
+######################################################################
+
+begin routers
+
+#####################################################
+### end router/00_exim4-config_header
+#####################################################
+#####################################################
+### router/100_exim4-config_domain_literal
+#####################################################
+
+### router/100_exim4-config_domain_literal
+#################################
+
+# This router handles e-mail addresses in "domain literal" form like
+# <user@[10.11.12.13]>. The RFCs require this facility, but it is disabled
+# in the default config since it is seldom used and frequently abused.
+# Domain literal support also needs to be enabled in the main config,
+# which is automatically done if you use the enable macro
+# MAIN_ALLOW_DOMAIN_LITERALS.
+
+.ifdef MAIN_ALLOW_DOMAIN_LITERALS
+# Routes <user@[1.2.3.4]> style addresses directly to that IP.
+domain_literal:
+ debug_print = "R: domain_literal for $local_part@$domain"
+ driver = ipliteral
+ domains = ! +local_domains
+ transport = remote_smtp
+.endif
+#####################################################
+### end router/100_exim4-config_domain_literal
+#####################################################
+
+# NOTE(review): custom router, not part of the stock Debian exim4 config.
+# Redirects each recipient to mail@[<AAAA record of $domain>], i.e. an IPv6
+# domain literal -- presumably so mail to cjdns peers is delivered straight
+# to their fc00::/8 address. This needs domain-literal support (see
+# MAIN_ALLOW_DOMAIN_LITERALS above) and, having no preconditions, is tried
+# for every address that reaches it -- TODO confirm both points.
+route_friends:
+ driver = redirect
+ data = mail@[${lookup dnsdb{aaaa=$domain}}]
+
+# NOTE(review): custom router. Accepts any address that reaches this point
+# and hands it to the "my_mailboxes" transport (defined elsewhere, not
+# visible in this file chunk); no_more stops router processing here, so no
+# later router is tried for these addresses.
+my_domains:
+ driver = accept
+ transport = my_mailboxes
+ no_more
+
+#####################################################
+### router/150_exim4-config_hubbed_hosts
+#####################################################
+
+# router/150_exim4-config_hubbed_hosts
+#################################
+
+# route specific domains manually.
+#
+# see exim4-config_files(5) and spec.txt chapter 20.3 through 20.7 for
+# more detailed documentation.
+
+# Manual routing for domains listed in the optional CONFDIR/hubbed_hosts
+# file; the domains precondition expands to "fail" (router skipped) when the
+# file is absent.
+hubbed_hosts:
+ debug_print = "R: hubbed_hosts for $domain"
+ driver = manualroute
+ domains = "${if exists{CONFDIR/hubbed_hosts}\
+ {partial-lsearch;CONFDIR/hubbed_hosts}\
+ fail}"
+ same_domain_copy_routing = yes
+ route_data = ${lookup{$domain}partial-lsearch{CONFDIR/hubbed_hosts}}
+ transport = remote_smtp
+#####################################################
+### end router/150_exim4-config_hubbed_hosts
+#####################################################
+#####################################################
+### router/200_exim4-config_primary
+#####################################################
+
+### router/200_exim4-config_primary
+#################################
+# This file holds the primary router, responsible for nonlocal mails
+
+.ifdef DCconfig_internet
+# configtype=internet
+#
+# deliver mail to the recipient if recipient domain is a domain we
+# relay for. We do not ignore any target hosts here since delivering to
+# a site local or even a link local address might be wanted here, and if
+# such an address has found its way into the MX record of such a domain,
+# the local admin is probably in a place where that broken MX record
+# could be fixed.
+
+# DNS-routes mail for relay domains; no ignore_target_hosts on purpose (see
+# the rationale in the comment block above).
+dnslookup_relay_to_domains:
+ debug_print = "R: dnslookup_relay_to_domains for $local_part@$domain"
+ driver = dnslookup
+ domains = ! +local_domains : +relay_to_domains
+ transport = remote_smtp
+ same_domain_copy_routing = yes
+ no_more
+
+# deliver mail directly to the recipient. This router is only reached
+# for domains that we do not relay for. Since we most probably can't
+# have broken MX records pointing to site local or link local IP
+# addresses fixed, we ignore target hosts pointing to these addresses.
+
+# General outbound DNS routing for all remaining non-local domains.
+dnslookup:
+ debug_print = "R: dnslookup for $local_part@$domain"
+ driver = dnslookup
+ domains = ! +local_domains
+ transport = remote_smtp
+ same_domain_copy_routing = yes
+ # ignore private rfc1918 and APIPA addresses (a remote MX pointing at one
+ # of these is certainly broken and unreachable from here)
+ ignore_target_hosts = 0.0.0.0 : 127.0.0.0/8 : 192.168.0.0/16 :\
+ 172.16.0.0/12 : 10.0.0.0/8 : 169.254.0.0/16 :\
+ 255.255.255.255
+ no_more
+
+.endif
+
+
+.ifdef DCconfig_local
+# configtype=local
+#
+# Stand-alone system, so generate an error for mail to a non-local domain
+# Bounces any non-local address with a permanent :fail: redirect.
+nonlocal:
+ debug_print = "R: nonlocal for $local_part@$domain"
+ driver = redirect
+ domains = ! +local_domains
+ allow_fail
+ data = :fail: Mailing to remote domains not supported
+ no_more
+
+.endif
+
+
+.ifdef DCconfig_smarthost DCconfig_satellite
+# configtype=smarthost or configtype=satellite
+#
+# Send all non-local mail to a single other machine (smarthost).
+#
+# This means _ALL_ non-local mail goes to the smarthost. This will most
+# probably not do what you want for domains that are listed in
+# relay_domains. The most typical use for relay_domains is to control
+# relaying for incoming e-mail on secondary MX hosts. In that case,
+# it doesn't make sense to send the mail to the smarthost since the
+# smarthost will probably send the message right back here, causing a
+# loop.
+#
+# If you want to use a smarthost while being secondary MX for some
+# domains, you'll need to copy the dnslookup_relay_to_domains router
+# here so that mail to relay_domains is handled separately.
+
+# Routes all non-local mail to the DCsmarthost host list; host_find_failed =
+# defer keeps mail queued (rather than bounced) if the smarthost name does
+# not resolve.
+smarthost:
+ debug_print = "R: smarthost for $local_part@$domain"
+ driver = manualroute
+ domains = ! +local_domains
+ transport = remote_smtp_smarthost
+ route_list = * DCsmarthost byname
+ host_find_failed = defer
+ same_domain_copy_routing = yes
+ no_more
+
+.endif
+
+
+# The "no_more" above means that all later routers are for
+# domains in the local_domains list, i.e. just like Exim 3 directors.
+#####################################################
+### end router/200_exim4-config_primary
+#####################################################
+#####################################################
+### router/300_exim4-config_real_local
+#####################################################
+
+### router/300_exim4-config_real_local
+#################################
+
+# This router allows reaching a local user while avoiding local
+# processing. This can be used to inform a user of a broken .forward
+# file, for example. The userforward router does this.
+
+# Expands to 1 when the sending host is empty (local submission) or the
+# loopback/local addresses in the ":@[]" list, 0 otherwise.
+COND_LOCAL_SUBMITTER = "\
+ ${if match_ip{$sender_host_address}{:@[]}\
+ {1}{0}\
+ }"
+
+# Delivers real-<user> addresses straight to the local mailbox, bypassing
+# aliasing and .forward processing; only available to local submitters.
+real_local:
+ debug_print = "R: real_local for $local_part@$domain"
+ driver = accept
+ domains = +local_domains
+ condition = COND_LOCAL_SUBMITTER
+ local_part_prefix = real-
+ check_local_user
+ transport = LOCAL_DELIVERY
+
+#####################################################
+### end router/300_exim4-config_real_local
+#####################################################
+#####################################################
+### router/400_exim4-config_system_aliases
+#####################################################
+
+### router/400_exim4-config_system_aliases
+#################################
+
+# This router handles aliasing using a traditional /etc/aliases file.
+#
+##### NB You must ensure that /etc/aliases exists. It used to be the case
+##### NB that every Unix had that file, because it was the Sendmail default.
+##### NB These days, there are systems that don't have it. Your aliases
+##### NB file should at least contain an alias for "postmaster".
+#
+# This router handles the local part in a case-insensitive way which
+# satisfies the RFCs requirement that postmaster be reachable regardless
+# of case. If you decide to handle /etc/aliases in a caseful way, you
+# need to make arrangements for a caseless postmaster.
+#
+# Delivery to arbitrary directories, files, and piping to programs in
+# /etc/aliases is disabled per default.
+# If that is a problem for you, see
+# /usr/share/doc/exim4-base/README.Debian.gz
+# for explanation and some workarounds.
+
+system_aliases:
+ debug_print = "R: system_aliases for $local_part@$domain"
+ driver = redirect
+ domains = +local_domains
+ allow_fail
+ allow_defer
+ data = ${lookup{$local_part}lsearch{/etc/aliases}}
+ # The optional macros below let the local admin re-enable delivery to
+ # files/pipes/directories from /etc/aliases (disabled by default).
+ .ifdef SYSTEM_ALIASES_USER
+ user = SYSTEM_ALIASES_USER
+ .endif
+ .ifdef SYSTEM_ALIASES_GROUP
+ group = SYSTEM_ALIASES_GROUP
+ .endif
+ .ifdef SYSTEM_ALIASES_FILE_TRANSPORT
+ file_transport = SYSTEM_ALIASES_FILE_TRANSPORT
+ .endif
+ .ifdef SYSTEM_ALIASES_PIPE_TRANSPORT
+ pipe_transport = SYSTEM_ALIASES_PIPE_TRANSPORT
+ .endif
+ .ifdef SYSTEM_ALIASES_DIRECTORY_TRANSPORT
+ directory_transport = SYSTEM_ALIASES_DIRECTORY_TRANSPORT
+ .endif
+#####################################################
+### end router/400_exim4-config_system_aliases
+#####################################################
+#####################################################
+### router/500_exim4-config_hubuser
+#####################################################
+
+### router/500_exim4-config_hubuser
+#################################
+
+.ifdef DCconfig_satellite
+# This router is only used for configtype=satellite.
+# It takes care to route all mail targeted to <somelocaluser@this.machine>
+# to the host where we read our mail
+#
+hub_user:
+ debug_print = "R: hub_user for $local_part@$domain"
+ driver = redirect
+ domains = +local_domains
+ data = ${local_part}@DCreadhost
+ check_local_user
+
+# Grab the redirected mail and deliver it.
+# This is a duplicate of the smarthost router, needed because
+# DCreadhost might end up as part of +local_domains
+hub_user_smarthost:
+ debug_print = "R: hub_user_smarthost for $local_part@$domain"
+ driver = manualroute
+ domains = DCreadhost
+ transport = remote_smtp_smarthost
+ route_list = * DCsmarthost byname
+ host_find_failed = defer
+ same_domain_copy_routing = yes
+ check_local_user
+.endif
+
+
+#####################################################
+### end router/500_exim4-config_hubuser
+#####################################################
+#####################################################
+### router/600_exim4-config_userforward
+#####################################################
+
+### router/600_exim4-config_userforward
+#################################
+
+# This router handles forwarding using traditional .forward files in users'
+# home directories. It also allows mail filtering with a forward file
+# starting with the string "# Exim filter" or "# Sieve filter".
+#
+# The no_verify setting means that this router is skipped when Exim is
+# verifying addresses. Similarly, no_expn means that this router is skipped if
+# Exim is processing an EXPN command.
+#
+# The check_ancestor option means that if the forward file generates an
+# address that is an ancestor of the current one, the current one gets
+# passed on instead. This covers the case where A is aliased to B and B
+# has a .forward file pointing to A.
+#
+# The four transports specified at the end are those that are used when
+# forwarding generates a direct delivery to a directory, or a file, or to a
+# pipe, or sets up an auto-reply, respectively.
+#
+userforward:
+ debug_print = "R: userforward for $local_part@$domain"
+ driver = redirect
+ domains = +local_domains
+ check_local_user
+ file = $home/.forward
+ require_files = $local_part:$home/.forward
+ no_verify
+ no_expn
+ check_ancestor
+ allow_filter
+ forbid_smtp_code = true
+ directory_transport = address_directory
+ file_transport = address_file
+ pipe_transport = address_pipe
+ reply_transport = address_reply
+ # Syntax errors in the .forward file bounce a notice to real-<user>, which
+ # the real_local router above delivers without re-running this router.
+ skip_syntax_errors
+ syntax_errors_to = real-$local_part@$domain
+ syntax_errors_text = \
+ This is an automatically generated message. An error has\n\
+ been found in your .forward file. Details of the error are\n\
+ reported below. While this error persists, you will receive\n\
+ a copy of this message for every message that is addressed\n\
+ to you. If your .forward file is a filter file, or if it is\n\
+ a non-filter file containing no valid forwarding addresses,\n\
+ a copy of each incoming message will be put in your normal\n\
+ mailbox. If a non-filter file contains at least one valid\n\
+ forwarding address, forwarding to the valid addresses will\n\
+ happen, and those will be the only deliveries that occur.
+
+#####################################################
+### end router/600_exim4-config_userforward
+#####################################################
+#####################################################
+### router/700_exim4-config_procmail
+#####################################################
+
+# Pipes mail through procmail when a system-wide or per-user procmailrc
+# exists and /usr/bin/procmail is installed.
+procmail:
+ debug_print = "R: procmail for $local_part@$domain"
+ driver = accept
+ domains = +local_domains
+ check_local_user
+ transport = procmail_pipe
+ # emulate OR with "if exists"-expansion
+ require_files = ${local_part}:\
+ ${if exists{/etc/procmailrc}\
+ {/etc/procmailrc}{${home}/.procmailrc}}:\
+ +/usr/bin/procmail
+ no_verify
+ no_expn
+
+#####################################################
+### end router/700_exim4-config_procmail
+#####################################################
+#####################################################
+### router/800_exim4-config_maildrop
+#####################################################
+
+### router/800_exim4-config_maildrop
+#################################
+
+# Pipes mail through maildrop when the user has a ~/.mailfilter and
+# /usr/bin/maildrop is installed (the "+" makes the binary check mandatory).
+maildrop:
+ debug_print = "R: maildrop for $local_part@$domain"
+ driver = accept
+ domains = +local_domains
+ check_local_user
+ transport = maildrop_pipe
+ require_files = ${local_part}:${home}/.mailfilter:+/usr/bin/maildrop
+ no_verify
+ no_expn
+
+#####################################################
+### end router/800_exim4-config_maildrop
+#####################################################
+#####################################################
+### router/850_exim4-config_lowuid
+#####################################################
+
+### router/850_exim4-config_lowuid
+#################################
+
+# With the default of 0 the condition below never matches, so this router is
+# effectively disabled unless FIRST_USER_ACCOUNT_UID is raised.
+.ifndef FIRST_USER_ACCOUNT_UID
+FIRST_USER_ACCOUNT_UID = 0
+.endif
+
+.ifndef DEFAULT_SYSTEM_ACCOUNT_ALIAS
+DEFAULT_SYSTEM_ACCOUNT_ALIAS = :fail: no mail to system accounts
+.endif
+
+# 1 when the submitter is remote AND the local account UID is below
+# FIRST_USER_ACCOUNT_UID (i.e. a system account), 0 otherwise.
+COND_SYSTEM_USER_AND_REMOTE_SUBMITTER = "\
+ ${if and{{! match_ip{$sender_host_address}{:@[]}}\
+ {<{$local_user_uid}{FIRST_USER_ACCOUNT_UID}}}\
+ {1}{0}\
+ }"
+
+# Redirects (or rejects) remote mail to system accounts, using the optional
+# CONFDIR/lowuid-aliases table with DEFAULT_SYSTEM_ACCOUNT_ALIAS as fallback.
+lowuid_aliases:
+ debug_print = "R: lowuid_aliases for $local_part@$domain (UID $local_user_uid)"
+ check_local_user
+ driver = redirect
+ allow_fail
+ domains = +local_domains
+ condition = COND_SYSTEM_USER_AND_REMOTE_SUBMITTER
+ data = ${if exists{CONFDIR/lowuid-aliases}\
+ {${lookup{$local_part}lsearch{CONFDIR/lowuid-aliases}\
+ {$value}{DEFAULT_SYSTEM_ACCOUNT_ALIAS}}}\
+ {DEFAULT_SYSTEM_ACCOUNT_ALIAS}}
+#####################################################
+### end router/850_exim4-config_lowuid
+#####################################################
+#####################################################
+### router/900_exim4-config_local_user
+#####################################################
+
+### router/900_exim4-config_local_user
+#################################
+
+# This router matches local user mailboxes. If the router fails, the error
+# message is "Unknown user".
+
+# root is excluded here; mail for root that survives aliasing is handled by
+# the mail4root router below.
+local_user:
+ debug_print = "R: local_user for $local_part@$domain"
+ driver = accept
+ domains = +local_domains
+ check_local_user
+ local_parts = ! root
+ transport = LOCAL_DELIVERY
+ cannot_route_message = Unknown user
+#####################################################
+### end router/900_exim4-config_local_user
+#####################################################
+#####################################################
+### router/mmm_mail4root
+#####################################################
+
+### router/mmm_mail4root
+#################################
+# deliver mail addressed to root to /var/mail/mail as user mail:mail
+# if it was not redirected in /etc/aliases or by other means
+# Exim cannot deliver as root since 4.24 (FIXED_NEVER_USERS)
+
+mail4root:
+ debug_print = "R: mail4root for $local_part@$domain"
+ driver = redirect
+ domains = +local_domains
+ # Redirect to a plain file, delivered by address_file as user/group mail
+ # (Exim refuses to deliver as root).
+ data = /var/mail/mail
+ file_transport = address_file
+ local_parts = root
+ user = mail
+ group = mail
+
+#####################################################
+### end router/mmm_mail4root
+#####################################################
+#####################################################
+### transport/00_exim4-config_header
+#####################################################
+
+######################################################################
+# TRANSPORTS CONFIGURATION #
+######################################################################
+# ORDER DOES NOT MATTER #
+# Only one appropriate transport is called for each delivery. #
+######################################################################
+
+# A transport is used only when referenced from a router that successfully
+# handles an address.
+
+begin transports
+
+#####################################################
+### end transport/00_exim4-config_header
+#####################################################
+#####################################################
+### transport/10_exim4-config_transport-macros
+#####################################################
+
+### transport/10_exim4-config_transport-macros
+#################################
+
+.ifdef HIDE_MAILNAME
+REMOTE_SMTP_HEADERS_REWRITE=*@+local_domains $1@DCreadhost frs : *@ETC_MAILNAME $1@DCreadhost frs
+REMOTE_SMTP_RETURN_PATH=${if match_domain{$sender_address_domain}{+local_domains}{${sender_address_local_part}@DCreadhost}{${if match_domain{$sender_address_domain}{ETC_MAILNAME}{${sender_address_local_part}@DCreadhost}fail}}}
+.endif
+
+.ifdef REMOTE_SMTP_HELO_FROM_DNS
+.ifdef REMOTE_SMTP_HELO_DATA
+REMOTE_SMTP_HELO_DATA==${lookup dnsdb {ptr=$sending_ip_address}{$value}{$primary_hostname}}
+.else
+REMOTE_SMTP_HELO_DATA=${lookup dnsdb {ptr=$sending_ip_address}{$value}{$primary_hostname}}
+.endif
+.endif
+#####################################################
+### end transport/10_exim4-config_transport-macros
+#####################################################
+#####################################################
+### transport/30_exim4-config_address_file
+#####################################################
+
+# This transport is used for handling deliveries directly to files that are
+# generated by aliasing or forwarding.
+#
+address_file:
+ debug_print = "T: address_file for $local_part@$domain"
+ driver = appendfile
+ delivery_date_add
+ envelope_to_add
+ return_path_add
+
+#####################################################
+### end transport/30_exim4-config_address_file
+#####################################################
+#####################################################
+### transport/30_exim4-config_address_pipe
+#####################################################
+
+# This transport is used for handling pipe deliveries generated by
+# .forward files. If the command fails and produces any output on standard
+# output or standard error streams, the output is returned to the sender
+# of the message as a delivery error.
+address_pipe:
+ debug_print = "T: address_pipe for $local_part@$domain"
+ driver = pipe
+ return_fail_output
+
+#####################################################
+### end transport/30_exim4-config_address_pipe
+#####################################################
+#####################################################
+### transport/30_exim4-config_address_reply
+#####################################################
+
+# This transport is used for handling autoreplies generated by the filtering
+# option of the userforward router.
+#
+address_reply:
+ debug_print = "T: autoreply for $local_part@$domain"
+ driver = autoreply
+
+#####################################################
+### end transport/30_exim4-config_address_reply
+#####################################################
+#####################################################
+### transport/30_exim4-config_mail_spool
+#####################################################
+
+### transport/30_exim4-config_mail_spool
+
+# This transport is used for local delivery to user mailboxes in traditional
+# BSD mailbox format.
+#
+mail_spool:
+ debug_print = "T: appendfile for $local_part@$domain"
+ driver = appendfile
+ file = /var/mail/$local_part
+ delivery_date_add
+ envelope_to_add
+ return_path_add
+ group = mail
+ mode = 0660
+ mode_fail_narrower = false
+
+#####################################################
+### end transport/30_exim4-config_mail_spool
+#####################################################
+#####################################################
+### transport/30_exim4-config_maildir_home
+#####################################################
+
+### transport/30_exim4-config_maildir_home
+#################################
+
+# Use this instead of mail_spool if you want to deliver to Maildir in
+# home-directory - change the definition of LOCAL_DELIVERY
+#
+maildir_home:
+ debug_print = "T: maildir_home for $local_part@$domain"
+ driver = appendfile
+ .ifdef MAILDIR_HOME_MAILDIR_LOCATION
+ directory = MAILDIR_HOME_MAILDIR_LOCATION
+ .else
+ directory = $home/Maildir
+ .endif
+ .ifdef MAILDIR_HOME_CREATE_DIRECTORY
+ create_directory
+ .endif
+ .ifdef MAILDIR_HOME_CREATE_FILE
+ create_file = MAILDIR_HOME_CREATE_FILE
+ .endif
+ delivery_date_add
+ envelope_to_add
+ return_path_add
+ maildir_format
+ .ifdef MAILDIR_HOME_DIRECTORY_MODE
+ directory_mode = MAILDIR_HOME_DIRECTORY_MODE
+ .else
+ directory_mode = 0700
+ .endif
+ .ifdef MAILDIR_HOME_MODE
+ mode = MAILDIR_HOME_MODE
+ .else
+ mode = 0600
+ .endif
+ mode_fail_narrower = false
+ # This transport always chdirs to $home before trying to deliver. If
+ # $home is not accessible, this chdir fails and prevents delivery.
+ # If you are in a setup where home directories might not be
+ # accessible, uncomment the current_directory line below.
+ # current_directory = /
+#####################################################
+### end transport/30_exim4-config_maildir_home
+#####################################################
+#####################################################
+### transport/30_exim4-config_maildrop_pipe
+#####################################################
+
+maildrop_pipe:
+ debug_print = "T: maildrop_pipe for $local_part@$domain"
+ driver = pipe
+ path = "/bin:/usr/bin:/usr/local/bin"
+ command = "/usr/bin/maildrop"
+ return_path_add
+ delivery_date_add
+ envelope_to_add
+
+#####################################################
+### end transport/30_exim4-config_maildrop_pipe
+#####################################################
+#####################################################
+### transport/30_exim4-config_procmail_pipe
+#####################################################
+
+procmail_pipe:
+ debug_print = "T: procmail_pipe for $local_part@$domain"
+ driver = pipe
+ path = "/bin:/usr/bin:/usr/local/bin"
+ command = "/usr/bin/procmail"
+ return_path_add
+ delivery_date_add
+ envelope_to_add
+
+#####################################################
+### end transport/30_exim4-config_procmail_pipe
+#####################################################
+#####################################################
+### transport/30_exim4-config_remote_smtp
+#####################################################
+
+### transport/30_exim4-config_remote_smtp
+#################################
+# This transport is used for delivering messages over SMTP connections.
+
+remote_smtp:
+ debug_print = "T: remote_smtp for $local_part@$domain"
+ driver = smtp
+.ifdef REMOTE_SMTP_HOSTS_AVOID_TLS
+ hosts_avoid_tls = REMOTE_SMTP_HOSTS_AVOID_TLS
+.endif
+.ifdef REMOTE_SMTP_HEADERS_REWRITE
+ headers_rewrite = REMOTE_SMTP_HEADERS_REWRITE
+.endif
+.ifdef REMOTE_SMTP_RETURN_PATH
+ return_path = REMOTE_SMTP_RETURN_PATH
+.endif
+.ifdef REMOTE_SMTP_HELO_DATA
+ helo_data=REMOTE_SMTP_HELO_DATA
+.endif
+.ifdef DKIM_DOMAIN
+dkim_domain = DKIM_DOMAIN
+.endif
+.ifdef DKIM_SELECTOR
+dkim_selector = DKIM_SELECTOR
+.endif
+.ifdef DKIM_PRIVATE_KEY
+dkim_private_key = DKIM_PRIVATE_KEY
+.endif
+.ifdef DKIM_CANON
+dkim_canon = DKIM_CANON
+.endif
+.ifdef DKIM_STRICT
+dkim_strict = DKIM_STRICT
+.endif
+.ifdef DKIM_SIGN_HEADERS
+dkim_sign_headers = DKIM_SIGN_HEADERS
+.endif
+.ifdef TLS_DH_MIN_BITS
+tls_dh_min_bits = TLS_DH_MIN_BITS
+.endif
+#####################################################
+### end transport/30_exim4-config_remote_smtp
+#####################################################
+#####################################################
+### transport/30_exim4-config_remote_smtp_smarthost
+#####################################################
+
+### transport/30_exim4-config_remote_smtp_smarthost
+#################################
+
+# This transport is used for delivering messages over SMTP connections
+# to a smarthost. The local host tries to authenticate.
+# This transport is used for smarthost and satellite configurations.
+
+remote_smtp_smarthost:
+ debug_print = "T: remote_smtp_smarthost for $local_part@$domain"
+ driver = smtp
+ hosts_try_auth = <; ${if exists{CONFDIR/passwd.client} \
+ {\
+ ${lookup{$host}nwildlsearch{CONFDIR/passwd.client}{$host_address}}\
+ }\
+ {} \
+ }
+.ifdef REMOTE_SMTP_SMARTHOST_HOSTS_AVOID_TLS
+ hosts_avoid_tls = REMOTE_SMTP_SMARTHOST_HOSTS_AVOID_TLS
+.endif
+.ifdef REMOTE_SMTP_HEADERS_REWRITE
+ headers_rewrite = REMOTE_SMTP_HEADERS_REWRITE
+.endif
+.ifdef REMOTE_SMTP_RETURN_PATH
+ return_path = REMOTE_SMTP_RETURN_PATH
+.endif
+.ifdef REMOTE_SMTP_HELO_DATA
+ helo_data=REMOTE_SMTP_HELO_DATA
+.endif
+.ifdef TLS_DH_MIN_BITS
+tls_dh_min_bits = TLS_DH_MIN_BITS
+.endif
+#####################################################
+### end transport/30_exim4-config_remote_smtp_smarthost
+#####################################################
+#####################################################
+### transport/35_exim4-config_address_directory
+#####################################################
+# This transport is used for handling file addresses generated by alias
+# or .forward files if the path ends in "/", which causes it to be treated
+# as a directory name rather than a file name.
+
+address_directory:
+ debug_print = "T: address_directory for $local_part@$domain"
+ driver = appendfile
+ delivery_date_add
+ envelope_to_add
+ return_path_add
+ check_string = ""
+ escape_string = ""
+ maildir_format
+
+#####################################################
+### end transport/35_exim4-config_address_directory
+#####################################################
+#####################################################
+### retry/00_exim4-config_header
+#####################################################
+
+my_mailboxes:
+ driver = appendfile
+ user = exim
+ maildir_format = true
+ directory = /box/vmail/$local_part/Maildir
+ create_directory
+ delivery_date_add
+ envelope_to_add
+ return_path_add
+ group = exim
+ mode = 0600
+ headers_rewrite = * "${sg{$sender_host_name}{\\\\.box}{}} <mail@${sg{$sender_host_name}{\\\\.box}{}}>" fw :\
+ * "Enigmabox <mail@box>" tcb
+ headers_remove = ${if eq{$sender_host_name}{}{From}{}}
+ headers_add = ${if eq{$sender_host_name}{}{From: mail@${sg{$sender_rcvhost}{\\].*}{]}}\n}{}}
+
+
+######################################################################
+# RETRY CONFIGURATION #
+######################################################################
+
+begin retry
+
+#####################################################
+### end retry/00_exim4-config_header
+#####################################################
+#####################################################
+### retry/30_exim4-config
+#####################################################
+
+### retry/30_exim4-config
+#################################
+
+# This single retry rule applies to all domains and all errors. It specifies
+# retries every 15 minutes for 2 hours, then increasing retry intervals,
+# starting at 1 hour and increasing each time by a factor of 1.5, up to 16
+# hours, then retries every 6 hours until 4 days have passed since the first
+# failed delivery.
+
+# Please note that these rules only limit the frequency of retries, the
+# effective retry-time depends on the frequency of queue-running, too.
+# See QUEUEINTERVAL in /etc/default/exim4.
+
+# Address or Domain Error Retries
+# ----------------- ----- -------
+
+#* * F,2h,15m; G,16h,1h,1.5; F,4d,6h
+* * F,1h,1m; F,1d,1h; F,30d,4h
+
+#####################################################
+### end retry/30_exim4-config
+#####################################################
+#####################################################
+### rewrite/00_exim4-config_header
+#####################################################
+
+######################################################################
+# REWRITE CONFIGURATION #
+######################################################################
+
+begin rewrite
+
+#####################################################
+### end rewrite/00_exim4-config_header
+#####################################################
+#####################################################
+### rewrite/31_exim4-config_rewriting
+#####################################################
+
+### rewrite/31_exim4-config_rewriting
+#################################
+
+# This rewriting rule is particularly useful for dialup users who
+# don't have their own domain, but could be useful for anyone.
+# It looks up the real address of all local users in a file
+.ifndef NO_EAA_REWRITE_REWRITE
+*@+local_domains "${lookup{${local_part}}lsearch{/etc/email-addresses}\
+ {$value}fail}" Ffrs
+# identical rewriting rule for /etc/mailname
+*@ETC_MAILNAME "${lookup{${local_part}}lsearch{/etc/email-addresses}\
+ {$value}fail}" Ffrs
+.endif
+
+
+
+#####################################################
+### end rewrite/31_exim4-config_rewriting
+#####################################################
+#####################################################
+### auth/00_exim4-config_header
+#####################################################
+
+######################################################################
+# AUTHENTICATION CONFIGURATION #
+######################################################################
+
+begin authenticators
+
+
+#####################################################
+### end auth/00_exim4-config_header
+#####################################################
+#####################################################
+### auth/30_exim4-config_examples
+#####################################################
+
+### auth/30_exim4-config_examples
+#################################
+
+# The examples below are for server side authentication, when the
+# local exim is SMTP server and clients authenticate to the local exim.
+
+# They allow two styles of plain-text authentication against an
+# CONFDIR/passwd file whose syntax is described in exim4_passwd(5).
+
+# Hosts that are allowed to use AUTH are defined by the
+# auth_advertise_hosts option in the main configuration. The default is
+# "*", which allows authentication to all hosts over all kinds of
+# connections if there is at least one authenticator defined here.
+# Authenticators which rely on unencrypted clear text passwords don't
+# advertise on unencrypted connections by default. Thus, it might be
+# wise to set up TLS to allow encrypted connections. If TLS cannot be
+# used for some reason, you can set AUTH_SERVER_ALLOW_NOTLS_PASSWORDS to
+# advertise unencrypted clear text password based authenticators on all
+# connections. As this is severely reducing security, using TLS is
+# preferred over allowing clear text password based authenticators on
+# unencrypted connections.
+
+# PLAIN authentication has no server prompts. The client sends its
+# credentials in one lump, containing an authorization ID (which we do not
+# use), an authentication ID, and a password. The latter two appear as
+# $auth2 and $auth3 in the configuration and should be checked against a
+# valid username and password. In a real configuration you would typically
+# use $auth2 as a lookup key, and compare $auth3 against the result of the
+# lookup, perhaps using the crypteq{}{} condition.
+
+# plain_server:
+# driver = plaintext
+# public_name = PLAIN
+# server_condition = "${if crypteq{$auth3}{${extract{1}{:}{${lookup{$auth2}lsearch{CONFDIR/passwd}{$value}{*:*}}}}}{1}{0}}"
+# server_set_id = $auth2
+# server_prompts = :
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+
+# LOGIN authentication has traditional prompts and responses. There is no
+# authorization ID in this mechanism, so unlike PLAIN the username and
+# password are $auth1 and $auth2. Apart from that you can use the same
+# server_condition setting for both authenticators.
+
+# login_server:
+# driver = plaintext
+# public_name = LOGIN
+# server_prompts = "Username:: : Password::"
+# server_condition = "${if crypteq{$auth2}{${extract{1}{:}{${lookup{$auth1}lsearch{CONFDIR/passwd}{$value}{*:*}}}}}{1}{0}}"
+# server_set_id = $auth1
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+#
+# cram_md5_server:
+# driver = cram_md5
+# public_name = CRAM-MD5
+# server_secret = ${extract{2}{:}{${lookup{$auth1}lsearch{CONFDIR/passwd}{$value}fail}}}
+# server_set_id = $auth1
+
+# Here is an example of CRAM-MD5 authentication against PostgreSQL:
+#
+# psqldb_auth_server:
+# driver = cram_md5
+# public_name = CRAM-MD5
+# server_secret = ${lookup pgsql{SELECT pw FROM users WHERE username = '${quote_pgsql:$auth1}'}{$value}fail}
+# server_set_id = $auth1
+
+# Authenticate against local passwords using sasl2-bin
+# Requires exim_uid to be a member of sasl group, see README.Debian.gz
+# plain_saslauthd_server:
+# driver = plaintext
+# public_name = PLAIN
+# server_condition = ${if saslauthd{{$auth2}{$auth3}}{1}{0}}
+# server_set_id = $auth2
+# server_prompts = :
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+#
+# login_saslauthd_server:
+# driver = plaintext
+# public_name = LOGIN
+# server_prompts = "Username:: : Password::"
+# # don't send system passwords over unencrypted connections
+# server_condition = ${if saslauthd{{$auth1}{$auth2}}{1}{0}}
+# server_set_id = $auth1
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+#
+# ntlm_sasl_server:
+# driver = cyrus_sasl
+# public_name = NTLM
+# server_realm = <short main hostname>
+# server_set_id = $auth1
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+#
+# digest_md5_sasl_server:
+# driver = cyrus_sasl
+# public_name = DIGEST-MD5
+# server_realm = <short main hostname>
+# server_set_id = $auth1
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+
+# Authenticate against cyrus-sasl
+# This is mainly untested, please report any problems to
+# pkg-exim4-users@lists.alioth.debian.org.
+# cram_md5_sasl_server:
+# driver = cyrus_sasl
+# public_name = CRAM-MD5
+# server_realm = <short main hostname>
+# server_set_id = $auth1
+#
+# plain_sasl_server:
+# driver = cyrus_sasl
+# public_name = PLAIN
+# server_realm = <short main hostname>
+# server_set_id = $auth1
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+#
+# login_sasl_server:
+# driver = cyrus_sasl
+# public_name = LOGIN
+# server_realm = <short main hostname>
+# server_set_id = $auth1
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+
+# Authenticate against courier authdaemon
+
+# This is now the (working!) example from
+# http://www.exim.org/eximwiki/FAQ/Policy_controls/Q0730
+# Possible pitfall: access rights on /var/run/courier/authdaemon/socket.
+# plain_courier_authdaemon:
+# driver = plaintext
+# public_name = PLAIN
+# server_condition = \
+# ${extract {ADDRESS} \
+# {${readsocket{/var/run/courier/authdaemon/socket} \
+# {AUTH ${strlen:exim\nlogin\n$auth2\n$auth3\n}\nexim\nlogin\n$auth2\n$auth3\n} }} \
+# {yes} \
+# fail}
+# server_set_id = $auth2
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+
+# login_courier_authdaemon:
+# driver = plaintext
+# public_name = LOGIN
+# server_prompts = Username:: : Password::
+# server_condition = \
+# ${extract {ADDRESS} \
+# {${readsocket{/var/run/courier/authdaemon/socket} \
+# {AUTH ${strlen:exim\nlogin\n$auth1\n$auth2\n}\nexim\nlogin\n$auth1\n$auth2\n} }} \
+# {yes} \
+# fail}
+# server_set_id = $auth1
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+
+# This one is a bad hack to support the broken version 4.xx of
+# Microsoft Outlook Express which violates the RFCs by demanding
+# "250-AUTH=" instead of "250-AUTH ".
+# If your list of offered authenticators is other than PLAIN and LOGIN,
+# you need to adapt the public_name line manually.
+# It has to be the last authenticator to work and has not been tested
+# well. Use at your own risk.
+# See the thread entry point from
+# http://www.exim.org/mail-archives/exim-users/Week-of-Mon-20050214/msg00213.html
+# for the related discussion on the exim-users mailing list.
+# Thanks to Fred Viles for this great work.
+
+# support_broken_outlook_express_4_server:
+# driver = plaintext
+# public_name = "\r\n250-AUTH=PLAIN LOGIN"
+# server_prompts = User Name : Password
+# server_condition = no
+# .ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+# server_advertise_condition = ${if eq{$tls_cipher}{}{}{*}}
+# .endif
+
+##############
+# See /usr/share/doc/exim4-base/README.Debian.gz
+##############
+
+# These examples below are the equivalent for client side authentication.
+# They get the passwords from CONFDIR/passwd.client, whose format is
+# defined in exim4_passwd_client(5)
+
+# Because AUTH PLAIN and AUTH LOGIN send the password in clear, we
+# only allow these mechanisms over encrypted connections by default.
+# You can set AUTH_CLIENT_ALLOW_NOTLS_PASSWORDS to allow unencrypted
+# clear text password authentication on all connections.
+
+cram_md5:
+ driver = cram_md5
+ public_name = CRAM-MD5
+ client_name = ${extract{1}{:}{${lookup{$host}nwildlsearch{CONFDIR/passwd.client}{$value}fail}}}
+ client_secret = ${extract{2}{:}{${lookup{$host}nwildlsearch{CONFDIR/passwd.client}{$value}fail}}}
+
+# this returns the matching line from passwd.client and doubles all ^
+PASSWDLINE=${sg{\
+ ${lookup{$host}nwildlsearch{CONFDIR/passwd.client}{$value}fail}\
+ }\
+ {\\N[\\^]\\N}\
+ {^^}\
+ }
+
+plain:
+ driver = plaintext
+ public_name = PLAIN
+.ifndef AUTH_CLIENT_ALLOW_NOTLS_PASSWORDS
+ client_send = "<; ${if !eq{$tls_cipher}{}\
+ {^${extract{1}{:}{PASSWDLINE}}\
+ ^${sg{PASSWDLINE}{\\N([^:]+:)(.*)\\N}{\\$2}}\
+ }fail}"
+.else
+ client_send = "<; ^${extract{1}{:}{PASSWDLINE}}\
+ ^${sg{PASSWDLINE}{\\N([^:]+:)(.*)\\N}{\\$2}}"
+.endif
+
+login:
+ driver = plaintext
+ public_name = LOGIN
+.ifndef AUTH_CLIENT_ALLOW_NOTLS_PASSWORDS
+ # Return empty string if the connection is encrypted (TLS in use) AND
+ # looking up $host in the passwd-file yields a non-empty string; fail otherwise.
+ client_send = "<; ${if and{\
+ {!eq{$tls_cipher}{}}\
+ {!eq{PASSWDLINE}{}}\
+ }\
+ {}fail}\
+ ; ${extract{1}{::}{PASSWDLINE}}\
+ ; ${sg{PASSWDLINE}{\\N([^:]+:)(.*)\\N}{\\$2}}"
+.else
+ # Return empty string if looking up $host in passwd-file yields a
+ # non-empty string; fail otherwise.
+ client_send = "<; ${if !eq{PASSWDLINE}{}\
+ {}fail}\
+ ; ${extract{1}{::}{PASSWDLINE}}\
+ ; ${sg{PASSWDLINE}{\\N([^:]+:)(.*)\\N}{\\$2}}"
+.endif
+#####################################################
+### end auth/30_exim4-config_examples
+#####################################################
--- /dev/null
+[PHP]
+
+zend.ze1_compatibility_mode = Off
+
+; Language Options
+
+engine = On
+;short_open_tag = Off
+precision = 12
+y2k_compliance = On
+output_buffering = Off
+;output_handler =
+zlib.output_compression = Off
+;zlib.output_compression_level = -1
+;zlib.output_handler =
+implicit_flush = Off
+unserialize_callback_func =
+serialize_precision = 100
+
+;open_basedir =
+disable_functions =
+disable_classes =
+
+; Colors for Syntax Highlighting mode. Anything that's acceptable in
+; <span style="color: ???????"> would work.
+;highlight.string = #DD0000
+;highlight.comment = #FF9900
+;highlight.keyword = #007700
+;highlight.bg = #FFFFFF
+;highlight.default = #0000BB
+;highlight.html = #000000
+
+;ignore_user_abort = On
+;realpath_cache_size = 16k
+;realpath_cache_ttl = 120
+
+; Miscellaneous
+
+expose_php = On
+
+; Resource Limits
+
+max_execution_time = 600 ; Maximum execution time of each script, in seconds.
+max_input_time = 600 ; Maximum amount of time each script may spend parsing request data.
+;max_input_nesting_level = 64
+memory_limit = 16M ; Maximum amount of memory a script may consume.
+
+; Error handling and logging
+
+; Error Level Constants:
+; E_ALL - All errors and warnings (includes E_STRICT as of PHP 6.0.0)
+; E_ERROR - fatal run-time errors
+; E_RECOVERABLE_ERROR - almost fatal run-time errors
+; E_WARNING - run-time warnings (non-fatal errors)
+; E_PARSE - compile-time parse errors
+; E_NOTICE - run-time notices (these are warnings which often result
+; from a bug in your code, but it's possible that it was
+; intentional (e.g., using an uninitialized variable and
+; relying on the fact it's automatically initialized to an
+; empty string)
+; E_STRICT - run-time notices, enable to have PHP suggest changes
+; to your code which will ensure the best interoperability
+; and forward compatibility of your code
+; E_CORE_ERROR - fatal errors that occur during PHP's initial startup
+; E_CORE_WARNING - warnings (non-fatal errors) that occur during PHP's
+; initial startup
+; E_COMPILE_ERROR - fatal compile-time errors
+; E_COMPILE_WARNING - compile-time warnings (non-fatal errors)
+; E_USER_ERROR - user-generated error message
+; E_USER_WARNING - user-generated warning message
+; E_USER_NOTICE - user-generated notice message
+; E_DEPRECATED - warn about code that will not work in future versions
+; of PHP
+; E_USER_DEPRECATED - user-generated deprecation warnings
+;
+; Common Values:
+; E_ALL & ~E_NOTICE (Show all errors, except for notices and coding standards warnings.)
+; E_ALL & ~E_NOTICE | E_STRICT (Show all errors, except for notices)
+; E_COMPILE_ERROR|E_RECOVERABLE_ERROR|E_ERROR|E_CORE_ERROR (Show only errors)
+; E_ALL | E_STRICT (Show all errors, warnings and notices including coding standards.)
+; Default Value: E_ALL & ~E_NOTICE
+error_reporting = E_ALL & ~E_NOTICE & ~E_STRICT
+
+display_errors = On
+display_startup_errors = Off
+log_errors = Off
+log_errors_max_len = 1024
+ignore_repeated_errors = Off
+ignore_repeated_source = Off
+report_memleaks = On
+;report_zend_debug = 0
+track_errors = Off
+;html_errors = Off
+;docref_root = "/phpmanual/"
+;docref_ext = .html
+;error_prepend_string = "<font color=#ff0000>"
+;error_append_string = "</font>"
+; Log errors to specified file.
+;error_log = /var/log/php_errors.log
+; Log errors to syslog.
+;error_log = syslog
+
+; Data Handling
+
+;arg_separator.output = "&"
+;arg_separator.input = ";&"
+variables_order = "EGPCS"
+request_order = "GP"
+register_globals = Off
+register_long_arrays = Off
+register_argc_argv = On
+auto_globals_jit = On
+post_max_size = 100M
+;magic_quotes_gpc = Off
+magic_quotes_runtime = Off
+magic_quotes_sybase = Off
+auto_prepend_file =
+auto_append_file =
+default_mimetype = "text/html"
+;default_charset = "iso-8859-1"
+always_populate_raw_post_data = -1
+
+; Paths and Directories
+
+; UNIX: "/path1:/path2"
+;include_path = ".:/php/includes"
+;doc_root = "/www"
+user_dir =
+extension_dir = "/usr/lib/php"
+enable_dl = On
+cgi.force_redirect = 1
+;cgi.nph = 1
+cgi.redirect_status_env = "yes";
+cgi.fix_pathinfo=1
+;fastcgi.impersonate = 1;
+;fastcgi.logging = 0
+;cgi.rfc2616_headers = 0
+
+; File Uploads
+
+file_uploads = On
+upload_tmp_dir = "/tmp"
+upload_max_filesize = 100M
+max_file_uploads = 20
+
+; Fopen wrappers
+
+allow_url_fopen = On
+allow_url_include = Off
+;from="john@doe.com"
+;user_agent="PHP"
+default_socket_timeout = 60
+;auto_detect_line_endings = Off
+
+; Dynamic Extensions
+
+;extension=ctype.so
+;extension=curl.so
+;extension=dom.so
+;extension=exif.so
+;extension=ftp.so
+;extension=gd.so
+;extension=gmp.so
+;extension=hash.so
+;extension=iconv.so
+;extension=json.so
+;extension=ldap.so
+;extension=mbstring.so
+;extension=mcrypt.so
+;extension=mysql.so
+;extension=openssl.so
+;extension=pcre.so
+;extension=pdo.so
+;extension=pdo-mysql.so
+;extension=pdo-pgsql.so
+;extension=pdo_sqlite.so
+;extension=pgsql.so
+;extension=session.so
+;extension=soap.so
+;extension=sockets.so
+;extension=sqlite.so
+;extension=sqlite3.so
+;extension=tokenizer.so
+;extension=xml.so
+;extension=xmlreader.so
+;extension=xmlwriter.so
+
+; Module Settings
+
+[APC]
+apc.enabled = 1
+apc.shm_segments = 1 ;The number of shared memory segments to allocate for the compiler cache.
+apc.shm_size = 4M ;The size of each shared memory segment.
+
+[Date]
+date.timezone = UTC
+;date.default_latitude = 31.7667
+;date.default_longitude = 35.2333
+;date.sunrise_zenith = 90.583333
+;date.sunset_zenith = 90.583333
+
+[filter]
+;filter.default = unsafe_raw
+;filter.default_flags =
+
+[iconv]
+;iconv.input_encoding = ISO-8859-1
+;iconv.internal_encoding = ISO-8859-1
+;iconv.output_encoding = ISO-8859-1
+
+[sqlite]
+;sqlite.assoc_case = 0
+
+[sqlite3]
+;sqlite3.extension_dir =
+
+[Pdo_mysql]
+pdo_mysql.cache_size = 2000
+pdo_mysql.default_socket=
+
+[MySQL]
+mysql.allow_local_infile = On
+mysql.allow_persistent = On
+mysql.cache_size = 2000
+mysql.max_persistent = -1
+mysql.max_links = -1
+mysql.default_port =
+mysql.default_socket =
+mysql.default_host =
+mysql.default_user =
+mysql.default_password =
+mysql.connect_timeout = 60
+mysql.trace_mode = Off
+
+[PostgresSQL]
+pgsql.allow_persistent = On
+pgsql.auto_reset_persistent = Off
+pgsql.max_persistent = -1
+pgsql.max_links = -1
+pgsql.ignore_notice = 0
+pgsql.log_notice = 0
+
+[Session]
+session.save_handler = files
+session.save_path = "/tmp"
+session.use_cookies = 1
+;session.cookie_secure =
+session.use_only_cookies = 1
+session.name = PHPSESSID
+session.auto_start = 0
+session.cookie_lifetime = 0
+session.cookie_path = /
+session.cookie_domain =
+session.cookie_httponly =
+session.serialize_handler = php
+session.gc_probability = 1
+session.gc_divisor = 100
+session.gc_maxlifetime = 1440
+session.bug_compat_42 = On
+session.bug_compat_warn = On
+session.referer_check =
+session.entropy_length = 0
+;session.entropy_file = /dev/urandom
+session.entropy_file =
+;session.entropy_length = 16
+session.cache_limiter = nocache
+session.cache_expire = 180
+session.use_trans_sid = 0
+session.hash_function = 0
+session.hash_bits_per_character = 4
+url_rewriter.tags = "a=href,area=href,frame=src,input=src,form=,fieldset="
+
+[mbstring]
+;mbstring.language = Japanese
+;mbstring.internal_encoding = EUC-JP
+;mbstring.http_input = auto
+;mbstring.http_output = SJIS
+;mbstring.encoding_translation = Off
+;mbstring.detect_order = auto
+;mbstring.substitute_character = none;
+;mbstring.func_overload = 0
+;mbstring.strict_detection = Off
+;mbstring.http_output_conv_mimetype=
+;mbstring.script_encoding=
+
+[gd]
+;gd.jpeg_ignore_warning = 0
+
+[exif]
+;exif.encode_unicode = ISO-8859-15
+;exif.decode_unicode_motorola = UCS-2BE
+;exif.decode_unicode_intel = UCS-2LE
+;exif.encode_jis =
+;exif.decode_jis_motorola = JIS
+;exif.decode_jis_intel = JIS
+
+[soap]
+soap.wsdl_cache_enabled=1
+soap.wsdl_cache_dir="/tmp"
+soap.wsdl_cache_ttl=86400
+soap.wsdl_cache_limit = 5
+
+[sysvshm]
+;sysvshm.init_mem = 10000
+
+[ldap]
+ldap.max_links = -1
+
+[mcrypt]
+;mcrypt.algorithms_dir=
+;mcrypt.modes_dir=
--- /dev/null
+mail@box:{SHA1}{{mailbox_password}}
--- /dev/null
+
+# Manage optional HTTP basic-auth protection for the lighttpd web interface.
+#
+# Reads the site settings JSON from $(g.site). When the setting
+# "if_webinterface_password" is the string "true", the lighttpd auth config
+# and htpasswd file are rendered from mustache templates; otherwise the auth
+# config is removed. Any repaired promise triggers a lighttpd restart.
+bundle agent app_security
+{
+ vars:
+ # Parsed site settings (64000 = max bytes read from the JSON file).
+ "site" data => readjson("$(g.site)", 64000);
+
+ classes:
+ # Defined when the site config explicitly enables password protection.
+ "webinterface_password" expression => strcmp("$(site[if_webinterface_password])", "true");
+
+ files:
+ # Ensure the auth config directory exists (trailing "/." = directory promise).
+ "/etc/lighttpd/auth/."
+ create => "true";
+
+ webinterface_password::
+ # Render the lighttpd auth include from its mustache template.
+ "/etc/lighttpd/auth/webinterface.conf"
+ create => "true",
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/webinterface.conf.mustache",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_lighttpd");
+
+ # Render the htpasswd file; perms restricted via the wwwdata body so
+ # only the web-server account can read the credentials.
+ "/etc/lighttpd/auth/webinterface.htpasswd"
+ create => "true",
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/webinterface.htpasswd.mustache",
+ edit_defaults => no_backup,
+ perms => wwwdata,
+ classes => if_repaired("restart_lighttpd");
+
+ !webinterface_password::
+ # Password protection disabled: remove the auth include.
+ # NOTE(review): webinterface.htpasswd is NOT removed in this branch,
+ # so stale credentials remain on disk — confirm this is intentional.
+ "/etc/lighttpd/auth/webinterface.conf"
+ delete => tidy,
+ classes => if_repaired("restart_lighttpd");
+
+ commands:
+ restart_lighttpd::
+ # Pick up any config/htpasswd change made above.
+ "/etc/init.d/lighttpd restart";
+
+ reports:
+ "checking security: done";
+}
+
+body perms wwwdata
+# @brief Mode 0600 owned by nobody:nogroup.
+# Presumably nobody:nogroup is the account lighttpd runs under, so only the
+# web server can read the htpasswd file — confirm against the lighttpd config.
+{
+ mode => "600";
+ owners => { "nobody" };
+ groups => { "nogroup" };
+}
+
--- /dev/null
+auth.backend = "plain"
+auth.backend.plain.userfile = "/etc/lighttpd/auth/webinterface.htpasswd"
+
+auth.require = ("" =>
+ (
+ "method" => "basic",
+{{#if_hostid}}
+ "realm" => "Enigmabox Webinterface (Hostid: {{hostid}})",
+{{/if_hostid}}
+{{^if_hostid}}
+ "realm" => "Enigmabox Webinterface",
+{{/if_hostid}}
+ "require" => "valid-user"
+ )
+)
--- /dev/null
+admin:{{webinterface_password}}
--- /dev/null
+
+bundle agent app_telephony
+# @brief Render the Asterisk configuration from templates.
+#
+# sip.conf and extensions.conf are mustache-rendered from the site JSON at
+# $(g.site) (defined outside this file); sip_notify.conf is copied from a
+# static template. Any repaired file triggers an Asterisk restart.
+{
+ files:
+ # SIP peers/codecs, rendered from the site JSON.
+ "/etc/asterisk/sip.conf"
+ create => "true",
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/sip.conf.mustache",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_asterisk");
+
+ # Dial plan, rendered from the site JSON.
+ "/etc/asterisk/extensions.conf"
+ create => "true",
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/extensions.conf.mustache",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_asterisk");
+
+ # Static file: no template_method, so the template content is used as-is.
+ "/etc/asterisk/sip_notify.conf"
+ create => "true",
+ edit_template => "$(this.promise_dirname)/templates/sip_notify.conf",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_asterisk");
+
+ commands:
+ restart_asterisk::
+ "/etc/init.d/asterisk restart";
+
+ reports:
+ "checking telephony: done";
+}
+
--- /dev/null
+; extensions.conf - the Asterisk dial plan
+
+[incoming]
+exten => _X.,1,Answer()
+exten => _X.,n,Playback(ascending-2tone)
+exten => _X.,n,Read(get,"silence/1",,,,1)
+exten => _X.,n,GotoIf($[ 1${get} = 18 ]?confcall:dial)
+exten => _X.,n(confcall),ConfBridge(8)
+exten => _X.,n(dial),Dial(SIP/100,30)
+exten => _X.,n,Hangup()
+
+[friends]
+{{#addresses}}
+exten => {{phone}},1,Dial(SIP/100@{{hostname}})
+{{/addresses}}
+
+;global addresses
+{{#global_addresses}}
+exten => 01{{phone}},1,Dial(SIP/100@{{hostname}}.eb)
+{{/global_addresses}}
+
+;echo test
+exten => 1,1,Answer()
+;exten => 1,n,Playback(tt-somethingwrong)
+exten => 1,n,Playback(tt-weasels)
+exten => 1,n,Playback(tt-monkeysintro)
+exten => 1,n,Playback(tt-monkeys)
+exten => 1,n,Echo()
+exten => 1,n,Playback(vm-goodbye)
+exten => 1,n,Hangup()
+
+;confcall
+exten => 8,1,ConfBridge(8)
+
--- /dev/null
+; SIP Configuration
+
+[general]
+bindaddr=::
+transport=tcp
+tcpenable=yes
+canreinvite=no
+context=incoming
+disallow=all
+allow=gsm
+allow=ulaw
+allow=alaw
+allow=h263
+;jbenable=yes
+;jbforce=yes
+
+; these settings work well when the network is unstable and laggy
+;jbmaxsize=100
+;jbresyncthreshold=500
+
+; slightly lower threshold
+;jbmaxsize=50
+;jbresyncthreshold=250
+
+; these settings work well when the network is OK
+;jbmaxsize=25
+;jbresyncthreshold=200
+
+
+
+; my own phone
+
+[100]
+type=peer
+qualify=yes
+secret=100
+host=dynamic
+context=friends
+transport=udp
+
+
+
+; my addresses
+
+[basic-options](!)
+ type=peer
+ qualify=5000
+ qualifyfreq=10
+ context=incoming
+ encryption=yes
+
+{{#addresses}}
+[{{hostname}}](basic-options)
+ host={{ipv6}}
+ callerid="{{display_name}} <{{hostname}}>"
+
+{{/addresses}}
+
+
+
+; global addresses
+
+[global-addresses](!)
+ type=peer
+ context=incoming
+ encryption=yes
+
+{{#global_addresses}}
+[{{hostname}}.eb](global-addresses)
+ host={{ipv6}}
+ callerid="{{hostname}}.eb <01{{phone}}>"
+
+{{/global_addresses}}
+
--- /dev/null
+; rfc3842
+; put empty "Content=>" at the end to have CRLF after last body line
+
+[clear-mwi]
+Event=>message-summary
+Content-type=>application/simple-message-summary
+Content=>Messages-Waiting: no
+Content=>Message-Account: sip:asterisk@127.0.0.1
+Content=>Voice-Message: 0/0 (0/0)
+Content=>
+
+; Aastra
+
+[aastra-check-cfg]
+Event=>check-sync
+
+[aastra-xml]
+Event=>aastra-xml
+
+; Digium
+
+[digium-check-cfg]
+Event=>check-sync
+
+; Linksys
+
+[linksys-cold-restart]
+Event=>reboot_now
+
+[linksys-warm-restart]
+Event=>restart_now
+
+; Polycom
+
+[polycom-check-cfg]
+Event=>check-sync
+
+; Sipura
+
+[sipura-check-cfg]
+Event=>resync
+
+[sipura-get-report]
+Event=>report
+
+; snom
+
+[snom-check-cfg]
+Event=>check-sync\;reboot=false
+
+[snom-reboot]
+Event=>check-sync\;reboot=true
+
+; Cisco
+
+[cisco-check-cfg]
+Event=>check-sync
+
+; Grandstream
+[gsreboot]
+Event=>check-sync
+
--- /dev/null
+
+bundle agent app_webfilter
+# @brief Render the Privoxy web-filter configuration.
+#
+# The UCI config and user.action are mustache-rendered from the site JSON at
+# $(g.site) (defined outside this file); match-all.action is copied from a
+# static template. Any repaired file triggers a Privoxy restart.
+{
+ files:
+ # OpenWrt UCI config for privoxy, rendered from the site JSON.
+ "/etc/config/privoxy"
+ create => "true",
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/etc-config-privoxy.mustache",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_privoxy");
+
+ # Static file: no template_method, so the template content is used as-is.
+ "/etc/privoxy/match-all.action"
+ create => "true",
+ edit_template => "$(this.promise_dirname)/templates/match-all.action",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_privoxy");
+
+ # Site-specific block rules (google/facebook/twitter/custom), rendered.
+ "/etc/privoxy/user.action"
+ create => "true",
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/user.action.mustache",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_privoxy");
+
+ commands:
+ restart_privoxy::
+ "/etc/init.d/privoxy restart";
+
+ reports:
+ "checking webfilter: done";
+}
+
--- /dev/null
+config privoxy 'privoxy'
+ option confdir '/etc/privoxy'
+ option logdir '/var/log'
+ option logfile 'privoxy'
+{{#if_webfilter_filter_ads}}
+ list filterfile 'default.filter'
+ list actionsfile 'default.action'
+{{/if_webfilter_filter_ads}}
+ list actionsfile 'user.action'
+ list actionsfile 'match-all.action'
+ list listen_address '0.0.0.0:8888'
+ option toggle '1'
+ option enable_remote_toggle '1'
+ option enable_remote_http_toggle '0'
+ option enable_edit_actions '1'
+ option enforce_blocks '0'
+ option buffer_limit '4096'
+ option forwarded_connect_retries '0'
+ option accept_intercepted_requests '1'
+ option allow_cgi_request_crunching '0'
+ option split_large_forms '0'
+ option keep_alive_timeout '300'
+ option socket_timeout '300'
+ list permit_access '192.168.100.0/24'
+ list permit_access '192.168.101.0/24'
+ option debug '0'
+
--- /dev/null
+#############################################################################
+# Id: match-all.action,v
+#
+# This file contains the actions that are applied to all requests and
+# may be overruled later on by other actions files. Less experienced
+# users should only edit this file through the actions file editor.
+#
+#############################################################################
+{ \
++change-x-forwarded-for{block} \
++client-header-tagger{css-requests} \
++client-header-tagger{image-requests} \
+#+deanimate-gifs{last} \
++filter{refresh-tags} \
++filter{img-reorder} \
++filter{banners-by-size} \
++filter{webbugs} \
++filter{jumping-windows} \
++filter{ie-exploits} \
++hide-from-header{block} \
++hide-referrer{conditional-block} \
++session-cookies-only \
++set-image-blocker{pattern} \
++hide-user-agent{Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.36} \
+}
+/ # Match all URLs
--- /dev/null
+######################################################################
+#
+# File : Source: /cvsroot/ijbswa/current/user.action,v
+#
+# Id: user.action,v
+#
+# Purpose : User-maintained actions file, see
+# http://www.privoxy.org/user-manual/actions-file.html
+#
+######################################################################
+
+# This is the place to add your personal exceptions and additions to
+# the general policies as defined in default.action. (Here they will be
+# safe from updates to default.action.) Later defined actions always
+# take precedence, so anything defined here should have the last word.
+
+# See http://www.privoxy.org/user-manual/actions-file.html, or the
+# comments in default.action, for an explanation of what an "action" is
+# and what each action does.
+
+# The examples included here either use bogus sites, or have the actual
+# rules commented out (with the '#' character). Useful aliases are
+# included in the top section as a convenience.
+
+#############################################################################
+# Aliases
+#############################################################################
+{{=<% %>=}}
+{{alias}}
+<%={{ }}=%>
+#############################################################################
+#
+# You can define a short form for a list of permissions - e.g., instead
+# of "-crunch-incoming-cookies -crunch-outgoing-cookies -filter -fast-redirects",
+# you can just write "shop". This is called an alias.
+#
+# Currently, an alias can contain any character except space, tab, '=', '{'
+# or '}'.
+# But please use only 'a'-'z', '0'-'9', '+', and '-'.
+#
+# Alias names are not case sensitive.
+#
+# Aliases beginning with '+' or '-' may be used for system action names
+# in future releases - so try to avoid alias names like this. (e.g.
+# "+crunch-all-cookies" below is not a good name)
+#
+# Aliases must be defined before they are used.
+#
+# These aliases just save typing later:
+#
++crunch-all-cookies = +crunch-incoming-cookies +crunch-outgoing-cookies
+-crunch-all-cookies = -crunch-incoming-cookies -crunch-outgoing-cookies
+ allow-all-cookies = -crunch-all-cookies -session-cookies-only -filter{content-cookies}
+ allow-popups = -filter{all-popups} -filter{unsolicited-popups}
++block-as-image = +block{Blocked image request.} +handle-as-image
+-block-as-image = -block
+
+# These aliases define combinations of actions
+# that are useful for certain types of sites:
+#
+fragile = -block -crunch-all-cookies -filter -fast-redirects -hide-referer -prevent-compression
+shop = -crunch-all-cookies allow-popups
+
+# Your favourite blend of filters:
+#
+myfilters = +filter{html-annoyances} +filter{js-annoyances} +filter{all-popups}\
+ +filter{webbugs} +filter{banners-by-size}
+
+# Allow ads for selected useful free sites:
+#
+allow-ads = -block -filter{banners-by-size} -filter{banners-by-link}
+#... etc. Customize to your heart's content.
+
+## end aliases ########################################################
+#######################################################################
+
+# Begin examples: #####################################################
+
+# Say you have accounts on some sites that you visit regularly, and you
+# don't want to have to log in manually each time. So you'd like to allow
+# persistent cookies for these sites. The allow-all-cookies alias defined
+# above does exactly that, i.e. it disables crunching of cookies in any
+# direction, and the processing of cookies to make them only temporary.
+#
+{ allow-all-cookies }
+#.sourceforge.net
+#sunsolve.sun.com
+#slashdot.org
+#.yahoo.com
+#.msdn.microsoft.com
+#.redhat.com
+
+# Say the site where you do your homebanking needs to open popup
+# windows, but you have chosen to kill popups unconditionally by default.
+# This will allow it for your-example-bank.com:
+#
+{ -filter{all-popups} }
+.banking.example.com
+
+# Some hosts and some file types you may not want to filter for
+# various reasons:
+#
+{ -filter }
+
+# Technical documentation is likely to contain strings that might
+# erroneously get altered by the JavaScript-oriented filters:
+#
+#.tldp.org
+#/(.*/)?selfhtml/
+
+# And this stupid host sends streaming video with a wrong MIME type,
+# so that Privoxy thinks it is getting HTML and starts filtering:
+#
+stupid-server.example.com/
+
+
+# Example of a simple "block" action. Say you've seen an ad on your
+# favourite page on example.com that you want to get rid of. You have
+# right-clicked the image, selected "copy image location" and pasted
+# the URL below while removing the leading http://, into a { +block{reason} }
+# section. Note that { +handle-as-image } need not be specified, since
+# all URLs ending in .gif will be tagged as images by the general rules
+# as set in default.action anyway:
+#
+{ +block{Nasty ads.} }
+www.example.com/nasty-ads/sponsor.gif
+
+# The URLs of dynamically generated banners, especially from large banner
+# farms, often don't use the well-known image file name extensions, which
+# makes it impossible for Privoxy to guess the file type just by looking
+# at the URL.
+# You can use the +block-as-image alias defined above for these cases.
+# Note that objects which match this rule but then turn out NOT to be an
+# image are typically rendered as a "broken image" icon by the browser.
+# Use cautiously.
+#
+{ +block-as-image }
+#.doubleclick.net
+#/Realmedia/ads/
+#ar.atwola.com/
+
+# Now you noticed that the default configuration breaks Forbes
+# Magazine, but you were too lazy to find out which action is the
+# culprit, and you were again too lazy to give feedback, so you just
+# used the fragile alias on the site, and -- whoa! -- it worked. The
+# 'fragile' aliases disables those actions that are most likely to break
+# a site. Also, good for testing purposes to see if it is Privoxy that
+# is causing the problem or not.
+#
+{ fragile }
+#.forbes.com
+
+# Here are some sites we wish to support, and we will allow their ads
+# through.
+#
+{ allow-ads }
+#.sourceforge.net
+#.slashdot.org
+#.osdn.net
+
+# user.action is generally the best place to define exceptions and
+# additions to the default policies of default.action. Some actions are
+# safe to have their default policies set here though. So let's set a
+# default policy to have a 'blank' image as opposed to the checkerboard
+# pattern for ALL sites. '/' of course matches all URLs.
+# patterns:
+#
+{ +set-image-blocker{blank} }
+#/
+
+# Enable the following section (not the regression-test directives)
+# to rewrite and redirect click-tracking URLs on news.google.com.
+# Disabling JavaScript should work as well and probably works more reliably.
+#
+# Redirected URL = http://news.google.com/news/url?ct2=us%2F0_0_s_1_1_a&sa=t&usg=AFQjCNHJWPc7ffoSXPSqBRz55jDA0KgxOQ&cid=8797762374160&url=http%3A%2F%2Fonline.wsj.com%2Farticle%2FSB10001424052970204485304576640791304008536.html&ei=YcqeTsymCIjxggf8uQE&rt=HOMEPAGE&vm=STANDARD&bvm=section&did=-6537064229385238098
+# Redirect Destination = http://online.wsj.com/article/SB10001424052970204485304576640791304008536.html
+# Ignore = Yes
+#
+#{+fast-redirects{check-decoded-url}}
+#news.google.com/news/url.*&url=http.*&
+
+# Enable the following section (not the regression-test directives)
+# to block various Facebook "like" and similar tracking URLs. At the
+# time this section was added it was reported to not break Facebook
+# itself but this may have changed by the time you read this. This URL
+# list is probably incomplete and if you don't have an account anyway,
+# you may prefer to block the whole domain.
+#
+# Blocked URL = http://www.facebook.com/plugins/likebox.php?href=http%3A%2F%2Ffacebook.com%2Farstechnica&width=300&colorscheme=light&show_faces=false&stream=false&header=false&height=62&border_color=%23FFFFFF
+# Ignore = Yes
+# Blocked URL = http://www.facebook.com/plugins/activity.php?site=arstechnica.com&width=300&height=370&header=false&colorscheme=light&recommendations=false&border_color=%23FFFFFF
+# Ignore = Yes
+# Blocked URL = http://www.facebook.com/plugins/fan.php?api_key=368513495882&connections=10&height=250&id=8304333127&locale=en_US&sdk=joey&stream=false&width=377
+# Ignore = Yes
+# Blocked URL = http://www.facebook.com/plugins/like.php?api_key=368513495882&channel_url=http%3A%2F%2Fstatic.ak.fbcdn.net%2Fconnect%2Fxd_proxy.php%3Fversion%3D3%23cb%3Df13997452c%26origin%3Dhttp%253A%252F%252Fonline.wsj.com%252Ff1b037e354%26relation%3Dparent.parent%26transport%3Dpostmessage&extended_social_context=false&href=http%3A%2F%2Fonline.wsj.com%2Farticle%2FSB10001424052970204485304576640791304008536.html&layout=button_count&locale=en_US&node_type=link&ref=wsj_share_FB&sdk=joey&send=false&show_faces=false&width=90
+# Ignore = Yes
+#
+{+block{Facebook "like" and similar tracking URLs.}}
+www.facebook.com/(extern|plugins)/(login_status|like(box)?|activity|fan)\.php
+
+{{#if_webfilter_block_google}}
+{+block{Block all google}}
+.*google.*
+{{/if_webfilter_block_google}}
+
+{{#if_webfilter_block_facebook}}
+{+block{Block all facebook}}
+.*facebook.*
+{{/if_webfilter_block_facebook}}
+
+{{#if_webfilter_block_twitter}}
+{+block{Block all twitter}}
+.*twitter.*
+{{/if_webfilter_block_twitter}}
+
+{{#if_webfilter_custom_rules}}
+{+block{Block custom rules}}
+{{#webfilter_custom_rules_text}}
+.*{{rule}}.*
+{{/webfilter_custom_rules_text}}
+{{/if_webfilter_custom_rules}}
--- /dev/null
+#!/opt/cfengine/bin/cf-agent --no-lock
+
+bundle common g
+# @brief Global variables shared by all policy files.
+# NOTE(review): other bundles reference $(g.site); it is not defined here,
+# so it must come from another input file — confirm.
+{
+ vars:
+ "workdir" string => "/opt/enigmabox/cfengine-promises";
+ # Read at most 33 bytes of the network profile name.
+ "network_profile" string => readfile("/etc/enigmabox/network-profile" , "33");
+}
+
+
+
+body common control
+# @brief Policy entry point: parse the library files and the system_base
+# policy, then run the "system_base" bundle.
+{
+ inputs => {
+ "$(g.workdir)/lib/files.cf",
+ "$(g.workdir)/lib/services.cf",
+ "$(g.workdir)/system_base/bundle.cf",
+ };
+
+ bundlesequence => {
+ "system_base",
+ };
+}
+
+body perms script
+# @brief rwxr-xr-x (0755) — for files that must be executable.
+{
+ mode => "755";
+}
+
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Bundles
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+bundle common bundles_common
+# @ignore
+# Input list consumed by the "body file control" below so that files.cf
+# (next to this library file) is parsed as well.
+{
+ vars:
+ "inputs" slist => { "$(this.promise_dirname)/files.cf" };
+}
+
+body file control
+# @ignore
+# Appends bundles_common.inputs to the set of parsed policy files
+# (CFEngine 3.6 "body file control" mechanism).
+{
+ inputs => { @(bundles_common.inputs) };
+}
+
+###################################################
+# agent bundles
+###################################################
+
+bundle agent cronjob(commands,user,hours,mins)
+# @brief Defines a cron job for `user`
+#
+# Adds a line to crontab, if necessary.
+#
+# @param commands The commands that should be run
+# @param user The owner of crontab
+# @param hours The hours at which the job should run
+# @param mins The minutes at which the job should run
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+#  "cron" usebundle => cronjob("/bin/ls","mark","*","5,10");
+# ```
+{
+ vars:
+ # Per-platform location of the crontab spool directory.
+ SuSE::
+ "crontab" string => "/var/spool/cron/tabs";
+ redhat|fedora::
+ "crontab" string => "/var/spool/cron";
+ freebsd::
+ "crontab" string => "/var/cron/tabs";
+ !(SuSE|redhat|fedora|freebsd)::
+ "crontab" string => "/var/spool/cron/crontabs";
+
+ files:
+
+ !windows::
+ "$(crontab)/$(user)"
+
+ comment => "A user's regular batch jobs are added to this file",
+ create => "true",
+ edit_line => append_if_no_line("$(mins) $(hours) * * * $(commands)"),
+ perms => mo("600","$(user)"),
+ classes => if_repaired("changed_crontab");
+
+ processes:
+
+ # Send SIGHUP to the cron daemon only when the crontab was modified.
+ changed_crontab::
+ "cron"
+ comment => "Most crons need to be huped after file changes",
+ signals => { "hup" };
+
+}
+
+bundle agent rm_rf(name)
+# @brief recursively remove `name` to any depth, including base
+# @depends rm_rf_depth
+# @param name the file or directory name
+#
+# This bundle will remove `name` to any depth, including `name` itself.
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+#  "bye" usebundle => rm_rf("/var/tmp/oldstuff");
+# ```
+{
+ methods:
+ # Delegate to rm_rf_depth with unlimited ("inf") recursion depth.
+ "rm" usebundle => rm_rf_depth($(name),"inf");
+
+}
+
+bundle agent rm_rf_depth(name,depth)
+# @brief recursively remove `name` to depth `depth`, including base
+# @depends recurse_with_base tidy all
+# @param name the file or directory name
+# @param depth how far to descend
+#
+# This bundle will remove `name` to depth `depth`, including `name` itself.
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+#  "bye" usebundle => rm_rf_depth("/var/tmp/oldstuff", "100");
+# ```
+{
+ classes:
+ "isdir" expression => isdir($(name));
+ files:
+ # Directories need a depth_search to delete contents before the base.
+ isdir::
+ "$(name)"
+ file_select => all,
+ depth_search => recurse_with_base($(depth)),
+ delete => tidy;
+
+ # Plain files (or symlinks) are deleted directly.
+ !isdir::
+ "$(name)" delete => tidy;
+}
+
+bundle agent fileinfo(f)
+# @brief provide access to file stat fields from the bundle caller and report
+# file stat info for file "f" if "verbose_mode" class is defined
+# @param f file or files to stat
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent example
+# {
+#   vars:
+#     "files" slist => { "/tmp/example1", "/tmp/example2" };
+#
+#   files:
+#     "$(files)"
+#       create => "true",
+#       classes => if_ok("verbose_mode"),
+#       comment => "verbose_mode is defined because the fileinfo bundle restricts the report of the file info to verbose mode";
+#
+#     "/tmp/example3"
+#       create => "true",
+#       classes => if_ok("verbose_mode"),
+#       comment => "verbose_mode is defined because the fileinfo bundle restricts the report of the file info to verbose mode";
+#
+#
+#   methods:
+#     "fileinfo" usebundle => fileinfo( @(files) );
+#     "fileinfo" usebundle => fileinfo( "/tmp/example3" );
+#
+#   reports:
+#     "$(this.bundle): $(files): $(fileinfo.fields) = '$(fileinfo.stat[$(files)][$(fileinfo.fields)])'";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][size])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][gid])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][uid])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][ino])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][nlink])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][ctime])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][atime])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][mtime])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][mode])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][modeoct])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][permstr])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][permoct])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][type])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][devno])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][dev_minor])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][dev_major])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][basename])";
+#     "$(this.bundle): $(fileinfo.stat[/tmp/example3][dirname])";
+# }
+# ```
+{
+ vars:
+ "fields" slist => splitstring("size,gid,uid,ino,nlink,ctime,atime,mtime,mode,modeoct,permstr,permoct,type,devno,dev_minor,dev_major,basename,dirname,linktarget,linktarget_shallow", ",", 999);
+
+ # One filestat() call per file/field combination, cached in stat[][].
+ "stat[$(f)][$(fields)]" string => filestat($(f), $(fields));
+
+ reports:
+ verbose_mode::
+ "$(this.bundle): file $(f) has $(fields) = $(stat[$(f)][$(fields)])";
+}
+
+bundle agent logrotate(log_files, max_size, rotate_levels)
+# @brief rotate specified "log_files" larger than "max_size". Keep
+# "rotate_levels" versions of the files before overwriting the oldest one
+# @depends rotate
+# @depends bigger_than
+# @param log_files single file or list of files to evaluate for rotation
+# @param max_size minimum size in bytes that the file will grow to before being rotated
+# (suffixed sizes like "1M"/"500k" appear in the examples below — confirm
+# against the bigger_than body)
+# @param rotate_levels number of rotations to keep before overwriting the oldest one
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent example
+# {
+#   vars:
+#     "logdirs" slist => { "/var/log/syslog", "/var/log/maillog"};
+#
+#   methods:
+#     "logrotate" usebundle => logrotate( @(logdirs), "1M", "2" );
+#     "logrotate" usebundle => logrotate( "/var/log/mylog", "1", "5" );
+#     "logrotate" usebundle => logrotate( "/var/log/alog", "500k", "7" );
+# }
+# ```
+{
+ files:
+ "$(log_files)"
+ comment => "Rotate file if above specified size",
+ rename => rotate("$(rotate_levels)"),
+ file_select => bigger_than("$(max_size)");
+}
+
+bundle agent probabilistic_usebundle(probability, bundlename)
+# @brief activate named bundle probabilistically
+# @param probability probability that the named bundle will be activated during
+# a given agent execution
+# @param bundlename the bundle to activate based on the probability
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent example
+# {
+#   methods:
+#     "Toss Coin"
+#       usebundle => probabilistic_usebundle("50", "heads"),
+#       comment => "Call bundle heads ~ 50% of the time";
+#
+#     "Trick Coin"
+#       usebundle => probabilistic_usebundle("75", "heads"),
+#       comment => "Call bundle heads ~ 75% of the time";
+# }
+# ```
+{
+ classes:
+ "fifty_fifty"
+ expression => strcmp("$(probability)", "50"),
+ comment => "We have to special case 50 because of the way dist classes
+ work you would always get 50 defined";
+ "not_fifty_fifty" expression => "!fifty_fifty";
+ # "remainder" is defined under vars: below; this guard delays the dist
+ # promises until a later evaluation pass, once the variable exists.
+ "have_remainder" expression => isvariable("remainder");
+
+ fifty_fifty.have_remainder::
+ "activate_bundle"
+ dist => { "$(probability)000", "$(remainder)"};
+
+ not_fifty_fifty.have_remainder::
+ "activate_bundle"
+ dist => { "$(probability)", "$(remainder)"};
+
+ vars:
+ # For the 50/50 case the weights are scaled by 1000 (+1) so the dist
+ # class name differs from the always-defined "50" class.
+ fifty_fifty::
+ "remainder"
+ string => format("%d", eval("((100 - $(probability)) * 1000) +1", "math", "infix"));
+
+ not_fifty_fifty::
+ "remainder"
+ string => format("%d", eval("100 - $(probability)", "math", "infix"));
+
+ methods:
+ fifty_fifty::
+ "Activate bundle probabilistically"
+ handle => "probabilistic_usebundle_methods_special_case_fifty_fifty_activate_bundle",
+ usebundle => $(bundlename),
+ ifvarclass => "activate_bundle_$(probability)000",
+ comment => "Activate $(bundlename) $(probability)%ish of the time";
+
+ not_fifty_fifty::
+ "Activate bundle probabilistically"
+ handle => "probabilistic_usebundle_methods_activate_bundle",
+ usebundle => $(bundlename),
+ ifvarclass => "activate_bundle_$(probability)",
+ comment => "Activate $(bundlename) $(probability)% of the time";
+
+
+ reports:
+ DEBUG.fifty_fifty::
+ "$(this.bundle) Special case for 50/50";
+
+ "$(this.bundle) activate_bundle_$(probability)000"
+ ifvarclass => "activate_bundle_$(probability)000";
+
+ "$(this.bundle) activate_bundle_$(probability)001"
+ ifvarclass => "activate_bundle_$(probability)001";
+}
+
+bundle agent prunedir(dir, max_days)
+# @brief delete plain files inside "dir" older than "max_days" (not recursively).
+# @depends tidy
+# @depends recurse
+# @depends filetype_older_than
+# @param dir directory to examine for files
+# @param max_days maximum age in days of a file's mtime before it is deleted
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent example
+# {
+#   vars:
+#     "dirs" slist => { "/tmp/logs", "/tmp/logs2" };
+#
+#   methods:
+#     "prunedir" usebundle => prunedir( @(dirs), "1" );
+# }
+# ```
+{
+ files:
+ # depth_search recurse("1") limits deletion to the directory's direct
+ # children; file_select restricts it to plain files older than max_days.
+ "$(dir)"
+ comment => "Delete plain files inside directory older than max_days",
+ delete => tidy,
+ file_select => filetype_older_than("plain", "$(max_days)"),
+ depth_search => recurse("1");
+}
+
+bundle agent tcdb_fix
+# @brief Optimize tcdb and repair tcdb corruption
+#
+# **Example**:
+# ```cf3
+#  methods:
+#    "Manage Tokyo Cabinet Corruption"
+#      usebundle => tcdb_fix,
+#      handle => "main_methods_tcdb_fix",
+#      comment => "Optimize/Repair or regenerate corrupt tcdb files";
+# ```
+# This bundle works around corruption issues with Tokyo Cabinet database files
+# in CFEngine 3.5. Find all tcdb files in $(sys.workdir) and run tchmgr
+# optimize on them. If any invalid record headers are found we remove the
+# affected database so that it can be re-created. This occurs hourly based on
+# `splayclass` and assumes a 5 minute agent execution interval.
+{
+ vars:
+
+ linux::
+ "db" slist => splitstring( execresult("/usr/bin/find $(sys.workdir) -name '*.tcdb' 2>/dev/null", "useshell"), "\n", "1000");
+
+
+ classes:
+
+ # NOTE: assumes that CFEngine is set to run every 5 minutes
+ "hourly_class" expression => splayclass("$(sys.host)$(sys.ipv4)", "hourly");
+
+ # NOTE(review): tchmgr path is hard-coded to /var/cfengine/bin while the
+ # find above uses $(sys.workdir) — confirm they agree on non-default installs.
+ hourly_class.linux::
+ "detected_invalid_record_$(db)" expression => returnszero("/var/cfengine/bin/tchmgr optimize $(db) 2>&1 | grep -q 'invalid record header'", "useshell");
+
+
+ commands:
+
+ # Remove only databases whose optimize pass reported an invalid record.
+ "$(paths.rm)"
+ args => "-f $(db)",
+ ifvarclass => canonify("detected_invalid_record_$(db)"),
+ classes => scoped_classes_generic("bundle", "absent_$(db)"),
+ handle => "fix_tcdb_commands_detected_invalid_record_rm_$(db)",
+ comment => "Invalid record headers indicate that the database corruption is beyond repair. It will be automatically re-created.";
+
+
+ reports:
+
+ "$(this.bundle) $(sys.fqhost): Detected invalid record header in $(db) - tried to repair"
+ ifvarclass => canonify("detected_invalid_record_$(db)");
+
+ "$(this.bundle) $(sys.fqhost): Repair failed, removed corrupt database: $(db)"
+ ifvarclass => canonify("absent_$(db)_repaired");
+}
+
+bundle agent url_ping(host, method, port, uri)
+# @brief ping HOST:PORT/URI using METHOD
+# @param host the host name
+# @param method the HTTP method (HEAD or GET)
+# @param port the port number, e.g. 80
+# @param uri the URI, e.g. /path/to/resource
+#
+# This bundle will send a simple HTTP request and read 20 bytes back,
+# then compare them to `200 OK.*` (ignoring leading spaces).
+#
+# If the data matches, the global class "url_ok_HOST" will be set, where
+# HOST is the canonified host name, i.e. `canonify($(host))`
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+#     "check" usebundle => url_ping("cfengine.com", "HEAD", "80", "/bill/was/here");
+# reports:
+#   url_ok_cfengine_com::
+#     "CFEngine's web site is up";
+#   url_not_ok_cfengine_com::
+#     "CFEngine's web site *may* be down. Or you're offline.";
+# ```
+{
+ vars:
+ # Only the first 20 bytes of the response are read — enough to cover
+ # the HTTP status line.
+ "url_check" string => readtcp($(host),
+ $(port),
+ "$(method) $(uri) HTTP/1.1$(const.r)$(const.n)Host:$(host)$(const.r)$(const.n)$(const.r)$(const.n)",
+ 20);
+
+ "chost" string => canonify($(host));
+
+ classes:
+ # Namespace scope makes these classes visible to the caller's reports.
+ "url_ok_$(chost)"
+ scope => "namespace",
+ expression => regcmp("[^\n]*200 OK.*\n.*",
+ $(url_check));
+
+ "url_not_ok_$(chost)"
+ scope => "namespace",
+ not => regcmp("[^\n]*200 OK.*\n.*",
+ $(url_check));
+
+ reports:
+ verbose_mode::
+ "$(this.bundle): $(method) $(host):$(port)/$(uri) got 200 OK"
+ ifvarclass => "url_ok_$(chost)";
+ "$(this.bundle): $(method) $(host):$(port)/$(uri) did *not* get 200 OK"
+ ifvarclass => "url_not_ok_$(chost)";
+}
+
+bundle agent git_init(repo_path)
+# @brief initializes a new git repository if it does not already exist
+# @depends git
+# @param repo_path absolute path of where to initialize a git repository
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent my_git_repositories
+# {
+#   vars:
+#     "basedir"  string => "/var/git";
+#     "repos"    slist  => { "myrepo", "myproject", "myPlugForMoreHaskell" };
+#
+#   files:
+#     "$(basedir)/$(repos)/."
+#       create => "true";
+#
+#   methods:
+#     "git_init" usebundle => git_init("$(basedir)/$(repos)");
+# }
+# ```
+{
+ classes:
+ # Only init when no .git directory exists yet (idempotence guard).
+ "ok_norepo" not => fileexists("$(repo_path)/.git");
+
+ methods:
+ ok_norepo::
+ "git_init" usebundle => git("$(repo_path)", "init", "");
+}
+
+bundle agent git_add(repo_path, file)
+# @brief adds files to the supplied repository's index
+# @depends git
+# @param repo_path absolute path to a git repository
+# @param file a file to stage in the index
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent add_files_to_git_index
+# {
+# vars:
+# "repo" string => "/var/git/myrepo";
+# "files" slist => { "fileA", "fileB", "fileC" };
+#
+# methods:
+# "git_add" usebundle => git_add("$(repo)", "$(files)");
+# }
+# ```
+{
+ classes:
+ # guard: only stage files inside an existing repository
+ "ok_repo" expression => fileexists("$(repo_path)/.git");
+
+ methods:
+ ok_repo::
+ "git_add" usebundle => git("$(repo_path)", "add", "$(file)");
+}
+
+bundle agent git_checkout(repo_path, branch)
+# @brief checks out an existing branch in the supplied git repository
+# @depends git
+# @param repo_path absolute path to a git repository
+# @param branch the name of an existing git branch to checkout
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent git_checkout_some_existing_branch
+# {
+# vars:
+# "repo" string => "/var/git/myrepo";
+# "branch" string => "dev/some-topic-branch";
+#
+# methods:
+# "git_checkout" usebundle => git_checkout("$(repo)", "$(branch)");
+# }
+# ```
+{
+ classes:
+ # guard: only operate on an existing repository
+ "ok_repo" expression => fileexists("$(repo_path)/.git");
+
+ methods:
+ ok_repo::
+ "git_checkout" usebundle => git("$(repo_path)", "checkout", "$(branch)");
+}
+
+bundle agent git_checkout_new_branch(repo_path, new_branch)
+# @brief checks out and creates a new branch in the supplied git repository
+# @depends git
+# @param repo_path absolute path to a git repository
+# @param new_branch the name of the git branch to create and checkout
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent git_checkout_new_branches
+# {
+# vars:
+# "repo[myrepo]" string => "/var/git/myrepo";
+# "branch[myrepo]" string => "dev/some-new-topic-branch";
+#
+# "repo[myproject]" string => "/var/git/myproject";
+# "branch[myproject]" string => "dev/another-new-topic-branch";
+#
+# "repo_names" slist => getindices("repo");
+#
+# methods:
+# "git_checkout_new_branch" usebundle => git_checkout_new_branch("$(repo[$(repo_names)])", "$(branch[$(repo_names)])");
+# }
+# ```
+{
+ classes:
+ # guard: only operate on an existing repository
+ "ok_repo" expression => fileexists("$(repo_path)/.git");
+
+ methods:
+ ok_repo::
+ # BUGFIX: was "$(branch)", which is not a parameter of this bundle
+ # and would never resolve; the parameter is named "new_branch".
+ "git_checkout" usebundle => git("$(repo_path)", "checkout -b", "$(new_branch)");
+}
+
+bundle agent git_commit(repo_path, message)
+# @brief executes a commit to the specified git repository
+# @depends git
+# @param repo_path absolute path to a git repository
+# @param message the message to associate to the commit
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent make_git_commit
+# {
+# vars:
+# "repo" string => "/var/git/myrepo";
+# "msg" string => "dituri added some bundles for common git operations";
+#
+# methods:
+# "git_commit" usebundle => git_commit("$(repo)", "$(msg)");
+# }
+# ```
+{
+ classes:
+ # guard: only commit inside an existing repository
+ "ok_repo" expression => fileexists("$(repo_path)/.git");
+
+ methods:
+ ok_repo::
+ # the message is double-quoted inside the args string so it is
+ # passed to git as a single -m argument
+ "git_commit" usebundle => git("$(repo_path)", "commit", '-m "$(message)"');
+}
+
+bundle agent git(repo_path, subcmd, args)
+# @brief generic interface to git
+# @param repo_path absolute path to a new or existing git repository
+# @param subcmd any valid git sub-command
+# @param args a single string of arguments to pass
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent git_rm_files_from_staging
+# {
+# vars:
+# "repo" string => "/var/git/myrepo";
+# "git_cmd" string => "reset --soft";
+# "files" slist => { "fileA", "fileB", "fileC" };
+#
+# methods:
+# "git_reset" usebundle => git("$(repo)", "$(git_cmd)", "HEAD -- $(files)");
+# }
+# ```
+{
+ commands:
+ # NOTE(review): $(paths.path[git]) is expected to be provided by the
+ # stdlib "paths" common bundle -- confirm it is part of this policy set
+ "$(paths.path[git])"
+ args => "$(subcmd) $(args)",
+ contain => in_dir("$(repo_path)"); # run from inside the repository
+}
+
+bundle agent cmerge(varlist)
+# @brief bundle to merge many data containers into one
+# @param varlist a list of variable names (**MUST** be a list)
+#
+# The result will be in `cmerge.all`. You can also use
+# `cmerge.all_str` for a string version of the merged containers.
+#
+# If you merge a key-value map into an array or vice versa, the map
+# always wins. So this example will result in a key-value map even
+# though `cmerge.all` starts as an array.
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent run
+# {
+# vars:
+# # the "mymerge" tag is user-defined
+# "a" data => parsejson('{ "a": "1" }'), meta => { "mymerge" };
+# "b" data => parsejson('{ "b": "2" }'), meta => { "mymerge" };
+# "c" data => parsejson('{ "c": "3" }'), meta => { "mymerge" };
+# "d" data => parsejson('{ "d": "4" }'), meta => { "mymerge" };
+#
+# # you can list them explicitly: "default:run.a" through "default:run.d"
+# "todo" slist => variablesmatching(".*", "mymerge");
+#
+# # you can use cmerge.all_str, but this is how you access the result
+# "merged_str" string => format("%S", "cmerge.all");
+#
+# methods:
+# "go" usebundle => cmerge(@(todo)); # a, b, c, d
+#
+# reports:
+# "merged = $(cmerge.all_str)";
+# }
+# ```
+{
+ vars:
+ # seed with an empty array; "free" policy permits redefinition below
+ "all" data => parsejson('[]'), policy => "free";
+ # $(varlist) makes this promise iterate once per list element,
+ # folding each named container into "all"
+ "all" data => mergedata(all, $(varlist)), policy => "free"; # iterates!
+ # string rendering of the merged container, for reporting
+ "all_str" string => format("%S", all), policy => "free";
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Commands bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+##-------------------------------------------------------
+## contain
+##-------------------------------------------------------
+
+body contain silent
+# @brief suppress command output
+{
+ no_output => "true"; # discard the command's stdout and stderr
+}
+
+##
+
+body contain in_dir(dir)
+# @brief run command after switching to directory "dir"
+# @param dir directory to change into
+#
+# **Example:**
+#
+# ```cf3
+# commands:
+# "/bin/pwd"
+# contain => in_dir("/tmp");
+# ```
+{
+ chdir => "$(dir)"; # working directory for the command
+}
+
+##
+
+body contain in_dir_shell(dir)
+# @brief run command after switching to directory "dir" with full shell
+# @param dir directory to change into
+#
+# **Example:**
+#
+# ```cf3
+# commands:
+# "/bin/pwd | /bin/cat"
+# contain => in_dir_shell("/tmp");
+# ```
+{
+ chdir => "$(dir)"; # working directory for the command
+ useshell => "true"; # canonical "useshell" but this is backwards-compatible
+}
+
+##
+
+body contain silent_in_dir(dir)
+# @brief run command after switching to directory and suppress output
+# @param dir directory to change into
+#
+# **Example:**
+#
+# ```cf3
+# "/bin/pwd"
+# contain => silent_in_dir("/tmp");
+# ```
+{
+ chdir => "$(dir)"; # working directory for the command
+ no_output => "true"; # discard the command's stdout and stderr
+}
+
+##
+
+body contain in_shell
+# @brief run command in shell
+#
+# Needed for pipes, redirection and other shell syntax in the promiser.
+#
+# **Example:**
+#
+# ```cf3
+# commands:
+# "/bin/pwd | /bin/cat"
+# contain => in_shell;
+# ```
+{
+ useshell => "true"; # canonical "useshell" but this is backwards-compatible
+}
+
+##
+
+body contain in_shell_bg
+# @brief deprecated
+# This body previously had an invalid background attribute that was caught by
+# parser strictness enhancements. Backgrounding is handled by the body action
+# background attribute.
+{
+ useshell => "true"; # canonical "useshell" but this is backwards-compatible
+}
+
+##
+
+body contain in_shell_and_silent
+# @brief run command in shell and suppress output
+#
+# **Example:**
+#
+# ```cf3
+# commands:
+# "/bin/pwd | /bin/cat"
+# contain => in_shell_and_silent,
+# comment => "Silently run command in shell";
+# ```
+{
+ useshell => "true"; # canonical "useshell" but this is backwards-compatible
+ no_output => "true"; # discard the command's stdout and stderr
+}
+
+##
+
+body contain in_dir_shell_and_silent(dir)
+# @brief run command in shell after switching to 'dir' and suppress output
+# @param dir directory to change into
+#
+# **Example:**
+#
+# ```cf3
+# commands:
+# "/bin/pwd | /bin/cat"
+# contain => in_dir_shell_and_silent("/tmp"),
+# comment => "Silently run command in shell";
+# ```
+
+{
+ useshell => "true"; # canonical "useshell" but this is backwards-compatible
+ no_output => "true"; # discard the command's stdout and stderr
+ chdir => "$(dir)"; # working directory for the command
+}
+
+##
+
+body contain setuid(owner)
+# @brief run command as specified user
+# @param owner username or uid to run command as
+#
+# **Example:**
+#
+# ```cf3
+# commands:
+# "/usr/bin/id"
+# contain => setuid("apache");
+# "/usr/bin/id"
+# contain => setuid("503");
+# ```
+{
+ exec_owner => "$(owner)"; # user identity for the spawned process
+}
+
+##
+
+body contain setuid_sh(owner)
+# @brief run command as specified user in shell
+# @param owner username or uid to run command as
+#
+# **Example:**
+#
+# ```cf3
+# commands:
+# "/usr/bin/id | /bin/cat"
+# contain => setuid_sh("apache");
+# "/usr/bin/id | /bin/cat"
+# contain => setuid_sh("503");
+# ```
+{
+ exec_owner => "$(owner)"; # user identity for the spawned process
+ useshell => "true"; # canonical "useshell" but this is backwards-compatible
+}
+
+##
+
+body contain setuidgid_sh(owner,group)
+# @brief run command as specified owner and group in shell
+# @param owner username or uid to run command as
+# @param group groupname or gid to run command as
+{
+ exec_owner => "$(owner)"; # user identity for the spawned process
+ exec_group => "$(group)"; # group identity for the spawned process
+ useshell => "true"; # canonical "useshell" but this is backwards-compatible
+}
+
+##
+
+body contain jail(owner,jail_root,dir)
+# @brief run command as specified user in specified directory of jail
+# @param owner username or uid to run command as
+# @param jail_root path that will be the root directory for the process
+# @param dir directory to change to before running command (must be within 'jail_root')
+#
+# NOTE(review): chroot typically requires the agent to run with root
+# privileges -- confirm for your platform.
+{
+ exec_owner => "$(owner)";
+ useshell => "true"; # canonical "useshell" but this is backwards-compatible
+ chdir => "$(dir)"; # working directory, interpreted inside the jail
+ chroot => "$(jail_root)"; # new filesystem root for the process
+}
+
+##
+
+body contain setuid_umask(owner, umask)
+# @brief run command as specified user with umask
+#
+#
+# | Valid Values | Umask | Octal (files) | Symbolic (files) | Octal (dirs) | Symbolic (dirs) |
+# |--------------|-------|-------|-------------|-------|-------------|
+# | `0` | `000` | `666` | `(rw-rw-rw-)` | `777` | `(rwxrwxrwx)` |
+# | `002` | `002` | `664` | `(rw-rw-r--)` | `775` | `(rwxrwxr-x)` |
+# | `22`, `022` | `022` | `644` | `(rw-r--r--)` | `755` | `(rwxr-xr-x)` |
+# | `27`, `027` | `027` | `640` | `(rw-r-----)` | `750` | `(rwxr-x---)` |
+# | `77`, `077` | `077` | `600` | `(rw-------)` | `700` | `(rwx------)` |
+# | `72`, `072` | `072` | `604` | `(rw----r--)` | `705` | `(rwx---r-x)` |
+#
+# @param owner username or uid to run command as
+# @param umask controls permissions of created files and directories
+#
+# **Example:**
+#
+# ```cf3
+# commands:
+# "/usr/bin/git pull"
+# contain => setuid_umask("git", "022");
+# ```
+{
+ exec_owner => "$(owner)"; # user identity for the spawned process
+ umask => "$(umask)"; # file-creation mask for the spawned process
+}
+
+# temporarily adding the "_dc_" prefix
+body contain _dc_setuid_gid_umask(uid, gid, umask)
+# @brief run command as specified user and group with umask
+#
+#
+# | Valid Values | Umask | Octal (files) | Symbolic (files) | Octal (dirs) | Symbolic (dirs) |
+# |--------------|-------|-------|-------------|-------|-------------|
+# | `0` | `000` | `666` | `(rw-rw-rw-)` | `777` | `(rwxrwxrwx)` |
+# | `002` | `002` | `664` | `(rw-rw-r--)` | `775` | `(rwxrwxr-x)` |
+# | `22`, `022` | `022` | `644` | `(rw-r--r--)` | `755` | `(rwxr-xr-x)` |
+# | `27`, `027` | `027` | `640` | `(rw-r-----)` | `750` | `(rwxr-x---)` |
+# | `77`, `077` | `077` | `600` | `(rw-------)` | `700` | `(rwx------)` |
+# | `72`, `072` | `072` | `604` | `(rw----r--)` | `705` | `(rwx---r-x)` |
+#
+# @param uid username or uid to run command as
+# @param gid group name or gid to run command as
+# @param umask controls permissions of created files and directories
+#
+# **Example:**
+#
+# ```cf3
+# commands:
+# "/usr/bin/git pull"
+# contain => _dc_setuid_gid_umask("git", "minions", "022");
+# ```
+{
+ exec_owner => "$(uid)";
+ # BUGFIX: was "$(uid)"; the group parameter is "$(gid)", otherwise
+ # the gid argument was silently ignored
+ exec_group => "$(gid)";
+ umask => "$(umask)";
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Common bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+####################################################
+## agent bodyparts
+####################################################
+
+##-------------------------------------------------------
+## action
+##-------------------------------------------------------
+
+body action if_elapsed(x)
+# @brief Evaluate the promise every `x` minutes
+# @param x The time in minutes between promise evaluations
+{
+ ifelapsed => "$(x)"; # minimum minutes between evaluations
+ expireafter => "$(x)"; # cancel a still-running repair after x minutes
+}
+
+##
+
+body action if_elapsed_day
+# @brief Evaluate the promise once every 24 hours
+{
+ ifelapsed => "1440"; # 60 x 24
+ # BUGFIX: was "1400", an apparent typo; use the same 24h period as
+ # ifelapsed so a running repair expires with the evaluation window
+ expireafter => "1440";
+}
+
+##
+
+body action measure_performance(x)
+# @brief Measure repairs of the promiser every `x` minutes
+#
+# Repair-attempts are cancelled after `x` minutes.
+#
+# @param x The time in minutes between promise evaluations.
+{
+ # label under which measurements are recorded for this promiser
+ measurement_class => "Detect changes in $(this.promiser)";
+ ifelapsed => "$(x)"; # minimum minutes between evaluations
+ expireafter => "$(x)"; # cancel a still-running repair after x minutes
+}
+
+##
+
+body action warn_only
+# @brief Warn once an hour if the promise needs to be repaired
+#
+# The promise does not get repaired.
+{
+ action_policy => "warn"; # report only, never change the system
+ ifelapsed => "60"; # at most one warning per hour
+}
+
+##
+
+body action bg(elapsed,expire)
+# @brief Evaluate the promise in the background every `elapsed` minutes, for at most `expire` minutes
+# @param elapsed The time in minutes between promise evaluations
+# @param expire The time in minutes after which a repair-attempt gets cancelled
+{
+ ifelapsed => "$(elapsed)";
+ expireafter => "$(expire)";
+ background => "true"; # do not block the agent on this promise
+}
+
+##
+
+body action ifwin_bg
+# @brief Evaluate the promise in the background when running on Windows
+{
+ windows:: # class guard: only applies on Windows hosts
+ background => "true";
+}
+
+##
+
+body action immediate
+# @brief Evaluate the promise at every `cf-agent` execution.
+{
+ ifelapsed => "0"; # no minimum interval between evaluations
+}
+
+##
+
+body action policy(p)
+# @brief Set the `action_policy` to `p`
+# @param p The action policy (e.g. "fix", "warn", "nop")
+{
+ action_policy => "$(p)";
+}
+
+##
+
+body action log_repaired(log,message)
+# @brief Log `message` to a file `log`=[/file|stdout]
+# @param log The log file for repaired messages
+# @param message The log message
+{
+ log_string => "$(sys.date), $(message)"; # prefix each entry with the date
+ log_repaired => "$(log)"; # destination: a file path or "stdout"
+}
+
+###
+
+body action log_verbose
+# @brief Sets the `log_level` attribute to "verbose"
+{
+ log_level => "verbose";
+}
+
+##
+
+body action sample_rate(x)
+# @brief Evaluate the promise every `x` minutes,
+# A repair-attempt is cancelled after 10 minutes
+# @param x The time in minutes between promise evaluation
+{
+ ifelapsed => "$(x)";
+ expireafter => "10"; # fixed 10-minute cap on repair attempts
+}
+
+##-------------------------------------------------------
+## classes
+##-------------------------------------------------------
+
+body classes if_repaired(x)
+# @brief Define class `x` if the promise has been repaired
+# @param x The name of the class
+{
+ promise_repaired => { "$(x)" };
+}
+
+##
+
+body classes if_else(yes,no)
+# @brief Define the classes `yes` or `no` depending on promise outcome
+# @param yes The name of the class that should be defined if the promise is kept or repaired
+# @param no The name of the class that should be defined if the promise could not be repaired
+{
+ promise_kept => { "$(yes)" };
+ promise_repaired => { "$(yes)" };
+ # all failure modes map to the same "no" class
+ repair_failed => { "$(no)" };
+ repair_denied => { "$(no)" };
+ repair_timeout => { "$(no)" };
+}
+
+##
+
+body classes cf2_if_else(yes,no)
+# @brief Define the classes `yes` or `no`, depending on promise outcome
+#
+# A version of `if_else` that matches CFEngine2 semantics. Neither class is set if the promise
+# does not require any repair.
+#
+# @param yes The name of the class that should be defined if the promise is repaired
+# @param no The name of the class that should be defined if the promise could not be repaired
+{
+ # unlike if_else, promise_kept defines no class here
+ promise_repaired => { "$(yes)" };
+ repair_failed => { "$(no)" };
+ repair_denied => { "$(no)" };
+ repair_timeout => { "$(no)" };
+}
+
+##
+
+body classes if_notkept(x)
+# @brief Define the class `x` if the promise is not kept and cannot be repaired.
+# @param x The name of the class that should be defined
+{
+ repair_failed => { "$(x)" };
+ repair_denied => { "$(x)" };
+ repair_timeout => { "$(x)" };
+}
+
+##
+
+body classes if_ok(x)
+# @brief Define the class `x` if the promise is kept or could be repaired
+# @param x The name of the class that should be defined
+{
+ promise_repaired => { "$(x)" };
+ promise_kept => { "$(x)" };
+}
+
+##
+
+body classes if_ok_cancel(x)
+# @brief Cancel the class `x` if the promise is kept or repaired
+# @param x The name of the class that should be cancelled
+{
+ cancel_repaired => { "$(x)" };
+ cancel_kept => { "$(x)" };
+}
+
+##
+
+body classes cmd_repair(code,cl)
+# @brief Define the class `cl` if an external command in a `commands`, `file` or `packages`
+# promise is executed with return code `code`
+# @param code The return codes that indicate a successful repair
+# @param cl The name of the class that should be defined
+#
+# **See also:** `repaired_returncodes`
+{
+ repaired_returncodes => { "$(code)" }; # these exit codes count as "repaired"
+ promise_repaired => { "$(cl)" };
+}
+
+body classes classes_generic(x)
+# @brief Define `x` prefixed/suffixed with promise outcome
+# @param x The unique part of the classes to be defined
+#
+# "$(x)_reached" is defined for every outcome, so it can be used to
+# detect that the promise was evaluated at all.
+{
+ promise_repaired => { "promise_repaired_$(x)", "$(x)_repaired", "$(x)_ok", "$(x)_reached" };
+ repair_failed => { "repair_failed_$(x)", "$(x)_failed", "$(x)_not_ok", "$(x)_not_kept", "$(x)_not_repaired", "$(x)_reached" };
+ repair_denied => { "repair_denied_$(x)", "$(x)_denied", "$(x)_not_ok", "$(x)_not_kept", "$(x)_not_repaired", "$(x)_reached" };
+ repair_timeout => { "repair_timeout_$(x)", "$(x)_timeout", "$(x)_not_ok", "$(x)_not_kept", "$(x)_not_repaired", "$(x)_reached" };
+ promise_kept => { "promise_kept_$(x)", "$(x)_kept", "$(x)_ok", "$(x)_not_repaired", "$(x)_reached" };
+}
+
+body classes scoped_classes_generic(scope, x)
+# @brief Define `x` prefixed/suffixed with promise outcome
+# **See also:** `scope`
+#
+# Same classes as `classes_generic`, but defined in the given scope
+# (e.g. "bundle" or "namespace").
+#
+# @param scope The scope in which the class should be defined
+# @param x The unique part of the classes to be defined
+{
+ scope => "$(scope)";
+ promise_repaired => { "promise_repaired_$(x)", "$(x)_repaired", "$(x)_ok", "$(x)_reached" };
+ repair_failed => { "repair_failed_$(x)", "$(x)_failed", "$(x)_not_ok", "$(x)_not_kept", "$(x)_not_repaired", "$(x)_reached" };
+ repair_denied => { "repair_denied_$(x)", "$(x)_denied", "$(x)_not_ok", "$(x)_not_kept", "$(x)_not_repaired", "$(x)_reached" };
+ repair_timeout => { "repair_timeout_$(x)", "$(x)_timeout", "$(x)_not_ok", "$(x)_not_kept", "$(x)_not_repaired", "$(x)_reached" };
+ promise_kept => { "promise_kept_$(x)", "$(x)_kept", "$(x)_ok", "$(x)_not_repaired", "$(x)_reached" };
+}
+
+##-------------------------------------------------------
+## Persistent classes
+##-------------------------------------------------------
+
+body classes state_repaired(x)
+# @brief Define `x` for 10 minutes if the promise was repaired
+# @param x The name of the class that should be defined
+{
+ promise_repaired => { "$(x)" };
+ persist_time => "10"; # class survives for 10 minutes across agent runs
+}
+
+##
+
+body classes enumerate(x)
+# @brief Define `x` for 15 minutes if the promise is either kept or repaired
+# This is used by commercial editions to count instances of jobs in a cluster
+# @param x The unique part of the class that should be defined
+# The class defined is prefixed with `mXC_`
+{
+ promise_repaired => { "mXC_$(x)" };
+ promise_kept => { "mXC_$(x)" };
+ persist_time => "15"; # class survives for 15 minutes across agent runs
+}
+
+##
+
+body classes always(x)
+# @brief Define class `x` no matter what the outcome of the promise is
+# @param x The name of the class to be defined
+{
+ promise_repaired => { "$(x)" };
+ promise_kept => { "$(x)" };
+ repair_failed => { "$(x)" };
+ repair_denied => { "$(x)" };
+ repair_timeout => { "$(x)" };
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Databases bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+##-------------------------------------------------------
+## database promises
+##-------------------------------------------------------
+
+body database_server local_mysql(username, password)
+# @brief Defines a MySQL server running on localhost
+# @param username The username for the server connection
+# @param password The password for the server connection
+#
+# **See also:** `db_server_owner`, `db_server_password`
+{
+ db_server_owner => "$(username)";
+ db_server_password => "$(password)";
+ db_server_host => "localhost";
+ db_server_type => "mysql";
+ db_server_connection_db => "mysql"; # administrative db used to connect
+}
+
+##
+
+body database_server local_postgresql(username, password)
+# @brief Defines a PostgreSQL server running on localhost
+# @param username The username for the server connection
+# @param password The password for the server connection
+#
+# **See also:** `db_server_owner`, `db_server_password`
+{
+ db_server_owner => "$(username)";
+ db_server_password => "$(password)";
+ db_server_host => "localhost";
+ db_server_type => "postgres";
+ db_server_connection_db => "postgres"; # administrative db used to connect
+}
--- /dev/null
+bundle agent feature
+# @brief Finds feature_set_X and feature_unset_X classes and sets/unsets X persistently
+#
+# Finds all classes named `feature_unset_X` and clear class X.
+#
+# Finds all classes named `feature_set_DURATION_X` and sets class X
+# persistently for DURATION. DURATION can be any digits followed by
+# `k`, `m`, or `g`.
+#
+# In inform mode (`-I`) it will report what it does.
+#
+# **Example:**
+# Set class `xyz` for 10 minutes, class `qpr` for 100 minutes, and
+# `ijk` for 90m minutes. Unset class `abc`.
+# `cf-agent -I -f ./feature.cf -b feature -Dfeature_set_10_xyz,feature_set_100_qpr,feature_set_90m_ijk,feature_unset_abc`
+{
+ classes:
+ # Each promise below iterates over the "on"/"off" slists.
+ # regextract captures DURATION and NAME into array "extract_<class>".
+ "parsed_$(on)" expression => regextract("feature_set_([0-9]+[kmgKMG]?)_(.*)",
+ $(on),
+ "extract_$(on)");
+
+ "parsed_$(off)" expression => regextract("feature_unset_(.*)",
+ $(off),
+ "extract_$(off)");
+
+ # set the extracted class name persistently for the extracted duration
+ "$(extract_$(on)[2])" expression => "parsed_$(on)",
+ persistence => "$(extract_$(on)[1])";
+
+ vars:
+ "on" slist => classesmatching("feature_set_.*");
+ "off" slist => classesmatching("feature_unset_.*");
+
+ # side effect only: the classes body cancels the extracted class
+ "_$(off)" string => "off", classes => feature_cancel("$(extract_$(off)[1])");
+
+ reports:
+ inform_mode::
+ "$(this.bundle): $(on) => SET class '$(extract_$(on)[2]) for '$(extract_$(on)[1])'"
+ ifvarclass => "parsed_$(on)";
+
+ "$(this.bundle): $(off) => UNSET class '$(extract_$(off)[1])'"
+ ifvarclass => "parsed_$(off)";
+
+ "$(this.bundle): have $(extract_$(on)[2])" ifvarclass => "$(extract_$(on)[2])";
+ "$(this.bundle): have no $(extract_$(on)[2])" ifvarclass => "!$(extract_$(on)[2])";
+
+ "$(this.bundle): have $(extract_$(off)[1])" ifvarclass => "$(extract_$(off)[1])";
+ "$(this.bundle): have no $(extract_$(off)[1])" ifvarclass => "!$(extract_$(off)[1])";
+}
+
+bundle agent feature_test
+# @brief Finds feature_set_X and feature_unset_X classes and reports X
+#
+# Note that this bundle is intended to be used exactly like `feature`
+# and just show what's defined or undefined.
+#
+# **Example:**
+# Check classes `xyz`, `qpr`, `ijk`, and `abc`.
+# `cf-agent -I -f ./feature.cf -b feature_test -Dfeature_set_10_xyz,feature_set_100_qpr,feature_set_90m_ijk,feature_unset_abc`
+{
+ classes:
+ # parse only -- unlike `feature`, nothing is set or cancelled here
+ "parsed_$(on)" expression => regextract("feature_set_([0-9]+[kmgKMG]?)_(.*)",
+ $(on),
+ "extract_$(on)");
+
+ "parsed_$(off)" expression => regextract("feature_unset_(.*)",
+ $(off),
+ "extract_$(off)");
+
+ vars:
+ "on" slist => classesmatching("feature_set_.*");
+ "off" slist => classesmatching("feature_unset_.*");
+
+ reports:
+ "$(this.bundle): have $(extract_$(on)[2])" ifvarclass => "$(extract_$(on)[2])";
+ "$(this.bundle): have no $(extract_$(on)[2])" ifvarclass => "!$(extract_$(on)[2])";
+
+ "$(this.bundle): have $(extract_$(off)[1])" ifvarclass => "$(extract_$(off)[1])";
+ "$(this.bundle): have no $(extract_$(off)[1])" ifvarclass => "!$(extract_$(off)[1])";
+}
+
+body classes feature_cancel(x)
+# @ignore
+# Used internally by bundle `feature`
+# @param x The class to cancel on any successful outcome
+{
+ cancel_kept => { "$(x)" };
+ cancel_repaired => { "$(x)" };
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Files bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+bundle common files_common
+# @ignore
+# Declares the inputs this library file depends on (common.cf).
+{
+ vars:
+ "inputs" slist => { "$(this.promise_dirname)/common.cf" };
+}
+
+body file control
+# @ignore
+# Pulls in the dependencies declared by files_common above.
+{
+ inputs => { @(files_common.inputs) };
+}
+
+###################################################
+# edit_line bundles
+###################################################
+
+bundle edit_line insert_lines(lines)
+# @brief Append `lines` if they don't exist in the file
+# @param lines The lines to be appended
+#
+# **See also:** [`insert_lines`][insert_lines] in
+# [`edit_line`][bundle edit_line]
+{
+ insert_lines:
+
+ "$(lines)"
+ comment => "Append lines if they don't exist";
+}
+
+##
+
+bundle edit_line insert_file(templatefile)
+# @brief Reads the lines from `templatefile` and inserts those into the
+# file being edited.
+# @param templatefile The name of the file from which to import lines.
+{
+ insert_lines:
+
+ # insert_type "file" makes the promiser a file name rather than literal text
+ "$(templatefile)"
+ comment => "Insert the template file into the file being edited",
+ insert_type => "file";
+}
+
+##
+
+bundle edit_line comment_lines_matching(regex,comment)
+# @brief Comment lines in the file that match an [anchored] regex
+# @param regex Anchored regex that the entire line needs to match
+# @param comment A string that is prepended to matching lines
+{
+ replace_patterns:
+
+ # the capture group keeps the original line; comment() prepends $(comment)
+ "^($(regex))$"
+
+ replace_with => comment("$(comment)"),
+ comment => "Search and replace string";
+}
+
+##
+
+bundle edit_line uncomment_lines_matching(regex,comment)
+# @brief Uncomment lines of the file where the regex matches
+# the entire text after the comment string
+# @param regex The regex that lines need to match after `comment`
+# @param comment The prefix of the line that is removed
+{
+ replace_patterns:
+
+ # \s? also swallows one optional space after the comment prefix
+ "^$(comment)\s?($(regex))$"
+
+ replace_with => uncomment,
+ comment => "Uncomment lines matching a regular expression";
+}
+
+##
+
+bundle edit_line comment_lines_containing(regex,comment)
+# @brief Comment lines of the file matching a regex
+# @param regex A regex that a part of the line needs to match
+# @param comment A string that is prepended to matching lines
+{
+ replace_patterns:
+
+ # negative lookahead (?!$(comment)) keeps already-commented lines
+ # from being commented a second time (convergence)
+ "^((?!$(comment)).*$(regex).*)$"
+
+ replace_with => comment("$(comment)"),
+ comment => "Comment out lines in a file";
+}
+
+##
+
+bundle edit_line uncomment_lines_containing(regex,comment)
+# @brief Uncomment lines of the file where the regex matches
+# parts of the text after the comment string
+# @param regex The regex that lines need to match after `comment`
+# @param comment The prefix of the line that is removed
+{
+ replace_patterns:
+
+ # unlike uncomment_lines_matching, the regex only has to occur
+ # somewhere in the line, not match all of it
+ "^$(comment)\s?(.*$(regex).*)$"
+
+ replace_with => uncomment,
+ comment => "Uncomment a line containing a fragment";
+}
+
+##
+
+bundle edit_line delete_lines_matching(regex)
+# @brief Delete lines matching a regular expression
+# @param regex The regular expression that the lines need to match
+#
+# **Note:** delete_lines promisers must match the entire line.
+{
+ delete_lines:
+
+ "$(regex)"
+
+ comment => "Delete lines matching regular expressions";
+}
+
+##
+
+bundle edit_line warn_lines_matching(regex)
+# @brief Warn about lines matching a regular expression
+# @param regex The regular expression that the lines need to match
+{
+ delete_lines:
+
+ # intentionally a delete_lines promise: action => warn_only means
+ # matching lines are only reported, never actually deleted
+ "$(regex)"
+
+ comment => "Warn about lines in a file",
+ action => warn_only;
+}
+
+##
+
+# temporarily adding the "_dc_" prefix
+bundle edit_line _dc_prepend_if_no_line(string)
+# @brief Prepend `string` if it doesn't exist in the file
+# @param string The string to be prepended
+#
+# **See also:** [`insert_lines`][insert_lines] in
+# [`edit_line`][bundle edit_line]
+{
+ insert_lines:
+ # location => start places the line at the top instead of the default end
+ "$(string)"
+ location => start,
+ comment => "Prepend a line to the file if it doesn't already exist";
+}
+
+bundle edit_line append_if_no_line(str)
+# @ignore
+# This duplicates the insert_lines bundle; kept for backward compatibility.
+{
+ insert_lines:
+
+ "$(str)"
+
+ comment => "Append a line to the file if it doesn't already exist";
+}
+
+##
+
<br />
+
+##
+
+bundle edit_line replace_line_end(start,end)
+# @brief Give lines starting with `start` the ending given in `end`
+#
+# Whitespaces will be left unmodified. For example,
+# `replace_line_end("ftp", "2121/tcp")` would replace
+#
+# `"ftp 21/tcp"`
+#
+# with
+#
+# `"ftp 2121/tcp"`
+#
+# @param start The string lines have to start with
+# @param end The string lines should end with
+{
+ field_edits:
+
+ "\s*$(start)\s.*"
+ comment => "Replace lines with $(this.start) and $(this.end)",
+ # field 2 (everything after the start token) is overwritten ("set")
+ edit_field => line("(^|\s)$(start)\s*", "2", "$(end)","set");
+}
+
+##
+
+bundle edit_line append_to_line_end(start,end)
+# @brief Append `end` to any lines beginning with `start`
+#
+# `end` will be appended to all lines starting with `start` and not
+# already ending with `end`. Whitespaces will be left unmodified.
+#
+# For example, `append_to_line_end("kernel", "vga=791")` would replace
+# `kernel /boot/vmlinuz root=/dev/sda7`
+#
+# with
+#
+# `kernel /boot/vmlinuz root=/dev/sda7 vga=791`
+#
+# **WARNING**: Be careful not to have multiple promises matching the same line, which would result in the line growing indefinitely.
+#
+# @param start pattern to match lines of interest
+# @param end string to append to matched lines
+#
+# **Example:**
+#
+# ```cf3
+# files:
+#   "/tmp/boot-options" edit_line => append_to_line_end("kernel", "vga=791");
+# ```
+#
+{
+ field_edits:
+
+ "\s*$(start)\s.*"
+ comment => "Append lines with $(this.start) and $(this.end)",
+ # same field split as replace_line_end, but "append" instead of "set"
+ edit_field => line("(^|\s)$(start)\s*", "2", "$(end)","append");
+}
+
+##
+
+bundle edit_line regex_replace(find,replace)
+# @brief Find exactly a regular expression and replace exactly the match with a string.
+# You can think of this like a PCRE powered sed.
+# @param find The regular expression
+# @param replace The replacement string
+{
+ replace_patterns:
+
+ "$(find)"
+ # value() replaces each occurrence verbatim with $(replace)
+ replace_with => value("$(replace)"),
+ comment => "Search and replace string";
+}
+
+##
+
+bundle edit_line resolvconf(search,list)
+# @brief Adds search domains and name servers to the system
+# resolver configuration.
+#
+# Use this bundle to modify `resolv.conf`. Existing entries for
+# `search` and `nameserver` are replaced.
+#
+# @param search The search domains with space
+# @param list An slist of nameserver addresses
+{
+ delete_lines:
+
+ # old entries are removed first, then replaced by the promised ones
+ "search.*" comment => "Reset search lines from resolver";
+ "nameserver.*" comment => "Reset nameservers in resolver";
+
+ insert_lines:
+
+ # one "nameserver" line is inserted per element of $(list)
+ "search $(search)" comment => "Add search domains to resolver";
+ "nameserver $(list)" comment => "Add name servers to resolver";
+}
+
+##
+
+bundle edit_line resolvconf_o(search,list,options)
+# @brief Adds search domains, name servers and options to the system
+# resolver configuration.
+#
+# Use this bundle to modify `resolv.conf`. Existing entries for
+# `search`, `nameserver` and `options` are replaced.
+#
+# @param search The search domains with space
+# @param list An slist of nameserver addresses
+# @param options is an slist of variables to modify the resolver
+
+{
+ delete_lines:
+
+ "search.*" comment => "Reset search lines from resolver";
+ "nameserver.*" comment => "Reset nameservers in resolver";
+ "options.*" comment => "Reset options in resolver";
+
+ insert_lines:
+
+ # list parameters expand to one inserted line per element
+ "search $(search)" comment => "Add search domains to resolver";
+ "nameserver $(list)" comment => "Add name servers to resolver";
+ "options $(options)" comment => "Add options to resolver";
+}
+
+##
+
+bundle edit_line manage_variable_values_ini(tab, sectionName)
+# @brief Sets the RHS of configuration items in the file of the form
+# `LHS=RHS`
+#
+# If the line is commented out with `#`, it gets uncommented first.
+# Adds a new line if none exists.
+# Removes any variable value pairs not defined for the ini section.
+#
+# @param tab An associative array containing `tab[sectionName][LHS]="RHS"`.
+# The value is not changed when the `RHS` is "dontchange"
+# @param sectionName The section in the file within which values should be
+# modified
+#
+# **See also:** `set_variable_values_ini()`
+{
+ vars:
+ "index" slist => getindices("$(tab)[$(sectionName)]");
+
+ # Be careful if the index string contains funny chars
+ "cindex[$(index)]" string => canonify("$(index)");
+
+ classes:
+ # one edit_* class per key; "dontchange" values are skipped entirely
+ "edit_$(cindex[$(index)])" not => strcmp("$($(tab)[$(sectionName)][$(index)])","dontchange"),
+ comment => "Create conditions to make changes";
+
+ field_edits:
+
+ # If the line is there, but commented out, first uncomment it
+ "#+\s*$(index)\s*=.*"
+ select_region => INI_section("$(sectionName)"),
+ edit_field => col("=","1","$(index)","set"),
+ ifvarclass => "edit_$(cindex[$(index)])";
+
+ # match a line starting like the key something
+ "$(index)\s*=.*"
+ edit_field => col("=","2","$($(tab)[$(sectionName)][$(index)])","set"),
+ select_region => INI_section("$(sectionName)"),
+ classes => if_ok("manage_variable_values_ini_not_$(cindex[$(index)])"),
+ ifvarclass => "edit_$(cindex[$(index)])";
+
+ delete_lines:
+ # unlike set_variable_values_ini, the whole section region is emptied
+ # so entries not present in $(tab) disappear
+ ".*"
+ select_region => INI_section("$(sectionName)"),
+ comment => "Remove all entries in the region so there are no extra entries";
+
+ insert_lines:
+ "[$(sectionName)]"
+ location => start,
+ comment => "Insert lines";
+
+ "$(index)=$($(tab)[$(sectionName)][$(index)])"
+ select_region => INI_section("$(sectionName)"),
+ ifvarclass => "!manage_variable_values_ini_not_$(cindex[$(index)]).edit_$(cindex[$(index)])";
+
+}
+
+##
+
+bundle edit_line set_variable_values_ini(tab, sectionName)
+# @brief Sets the RHS of configuration items in the file of the form
+# `LHS=RHS`
+#
+# If the line is commented out with `#`, it gets uncommented first.
+# Adds a new line if none exists.
+#
+# @param tab An associative array containing `tab[sectionName][LHS]="RHS"`.
+# The value is not changed when the `RHS` is "dontchange"
+# @param sectionName The section in the file within which values should be
+# modified
+#
+# **See also:** `manage_variable_values_ini()`
+{
+ vars:
+ "index" slist => getindices("$(tab)[$(sectionName)]");
+
+ # Be careful if the index string contains funny chars
+ "cindex[$(index)]" string => canonify("$(index)");
+
+ classes:
+ # "dontchange" values get no edit_* class and are left untouched
+ "edit_$(cindex[$(index)])" not => strcmp("$($(tab)[$(sectionName)][$(index)])","dontchange"),
+ comment => "Create conditions to make changes";
+
+ field_edits:
+
+ # If the line is there, but commented out, first uncomment it
+ "#+\s*$(index)\s*=.*"
+ select_region => INI_section("$(sectionName)"),
+ edit_field => col("=","1","$(index)","set"),
+ ifvarclass => "edit_$(cindex[$(index)])";
+
+ # match a line starting like the key something
+ "$(index)\s*=.*"
+ edit_field => col("=","2","$($(tab)[$(sectionName)][$(index)])","set"),
+ select_region => INI_section("$(sectionName)"),
+ classes => if_ok("set_variable_values_ini_not_$(cindex[$(index)])"),
+ ifvarclass => "edit_$(cindex[$(index)])";
+
+ insert_lines:
+ "[$(sectionName)]"
+ location => start,
+ comment => "Insert lines";
+
+ "$(index)=$($(tab)[$(sectionName)][$(index)])"
+ select_region => INI_section("$(sectionName)"),
+ ifvarclass => "!set_variable_values_ini_not_$(cindex[$(index)]).edit_$(cindex[$(index)])";
+
+}
+
+# temporarily adding the "_dc_" prefix
+bundle edit_line _dc_insert_ini_section(name, config)
+# @brief Inserts an INI section with content
+#
+# ```
+# # given an array "barray"
+# files:
+#   "myfile.ini" edit_line => _dc_insert_ini_section("foo", "barray");
+# ```
+#
+# Inserts a section in an INI file with the given configuration
+# key-values from the array `config`.
+#
+# @param name the name of the INI section
+# @param config The fully-qualified name of an associative array containing `v[LHS]="rhs"`
+{
+ vars:
+ "k" slist => getindices($(config));
+
+ insert_lines:
+ "[$(name)]"
+ location => start,
+ comment => "Prepend a line to the file if it doesn't already exist";
+
+ # one key=value line per index of the config array
+ "$(k)=$($(config)[$(k)])";
+}
+
+
+bundle edit_line set_quoted_values(v)
+# @brief Sets the RHS of variables in shell-like files of the form:
+#
+# ```
+# LHS="RHS"
+# ```
+#
+# Adds a new line if no LHS exists, and replaces RHS values if one does exist.
+# If the line is commented out with #, it gets uncommented first.
+#
+# @param v The fully-qualified name of an associative array containing `v[LHS]="rhs"`
+#
+# **Example:**
+#
+# ```cf3
+# vars:
+#   "stuff[lhs-1]" string => "rhs1";
+#   "stuff[lhs-2]" string => "rhs2";
+#
+# files:
+#   "myfile"
+#     edit_line => set_quoted_values(stuff)
+# ```
+#
+# **See also:** `set_variable_values()`
+{
+ meta:
+ # deprecated in favor of the generic set_line_based bundle
+ "tags"
+ slist =>
+ {
+ "deprecated=3.6.0",
+ "deprecation-reason=Generic reimplementation",
+ "replaced-by=set_line_based"
+ };
+
+ vars:
+ "index" slist => getindices("$(v)");
+ # Be careful if the index string contains funny chars
+
+ "cindex[$(index)]" string => canonify("$(index)");
+
+ field_edits:
+ # If the line is there, but commented out, first uncomment it
+ "#+\s*$(index)\s*=.*"
+ edit_field => col("=","1","$(index)","set");
+
+ # match a line starting like the key = something
+ "\s*$(index)\s*=.*"
+ edit_field => col("=","2",'"$($(v)[$(index)])"',"set"),
+ classes => if_ok("$(cindex[$(index)])_in_file"),
+ comment => "Match a line starting like key = something";
+
+ insert_lines:
+ # only insert a definition when the field_edits above found no line
+ '$(index)="$($(v)[$(index)])"'
+ comment => "Insert a variable definition",
+ ifvarclass => "!$(cindex[$(index)])_in_file";
+}
+
+##
+
+bundle edit_line set_variable_values(v)
+# @brief Sets the RHS of variables in files of the form:
+#
+# ```
+# LHS=RHS
+# ```
+#
+# Adds a new line if no LHS exists, and replaces RHS values if one does exist.
+# If the line is commented out with #, it gets uncommented first.
+#
+# @param v The fully-qualified name of an associative array containing `v[LHS]="rhs"`
+#
+# **Example:**
+#
+# ```cf3
+# vars:
+#   "stuff[lhs-1]" string => "rhs1";
+#   "stuff[lhs-2]" string => "rhs2";
+#
+# files:
+#   "myfile"
+#     edit_line => set_variable_values(stuff)
+# ```
+#
+# **See also:** `set_quoted_values()`
+{
+ meta:
+ # deprecated in favor of the generic set_line_based bundle
+ "tags"
+ slist =>
+ {
+ "deprecated=3.6.0",
+ "deprecation-reason=Generic reimplementation",
+ "replaced-by=set_line_based"
+ };
+
+ vars:
+
+ "index" slist => getindices("$(v)");
+
+ # Be careful if the index string contains funny chars
+
+ "cindex[$(index)]" string => canonify("$(index)");
+ "cv" string => canonify("$(v)");
+
+ field_edits:
+
+ # match a line starting like the key = something
+
+ "\s*$(index)\s*=.*"
+
+ edit_field => col("\s*$(index)\s*=","2","$($(v)[$(index)])","set"),
+ classes => if_ok("$(cv)_$(cindex[$(index)])_in_file"),
+ comment => "Match a line starting like key = something";
+
+ insert_lines:
+
+ # only insert a definition when the field_edits above found no line
+ "$(index)=$($(v)[$(index)])"
+
+ comment => "Insert a variable definition",
+ ifvarclass => "!$(cv)_$(cindex[$(index)])_in_file";
+}
+
+bundle edit_line set_config_values(v)
+# @brief Sets the RHS of configuration items in the file of the form:
+#
+# ```
+# LHS RHS
+# ```
+#
+# If the line is commented out with `#`, it gets uncommented first.
+#
+# Adds a new line if none exists.
+#
+# @param v The fully-qualified name of an associative array containing `v[LHS]="rhs"`
+{
+ meta:
+ # deprecated in favor of the generic set_line_based bundle
+ "tags"
+ slist =>
+ {
+ "deprecated=3.6.0",
+ "deprecation-reason=Generic reimplementation",
+ "replaced-by=set_line_based"
+ };
+
+ vars:
+ "index" slist => getindices("$(v)");
+
+ # Be careful if the index string contains funny chars
+ "cindex[$(index)]" string => canonify("$(index)");
+
+ # Escape the value (had a problem with special characters and regex's)
+ "ev[$(index)]" string => escape("$($(v)[$(index)])");
+
+ # Do we have more than one line commented out?
+ "index_comment_matches_$(cindex[$(index)])" int => countlinesmatching("^\s*#\s*($(index)\s+.*|$(index))$","$(edit.filename)");
+
+
+ classes:
+ # Check to see if this line exists
+ "line_exists_$(cindex[$(index)])" expression => regline("^\s*($(index)\s.*|$(index))$","$(edit.filename)");
+
+ # if there's more than one comment, just add new (don't know which to use)
+ "multiple_comments_$(cindex[$(index)])" expression => isgreaterthan("$(index_comment_matches_$(cindex[$(index)]))","1");
+
+
+ replace_patterns:
+ # If the line is commented out, uncomment and replace with
+ # the correct value
+ "^\s*#\s*($(index)\s+.*|$(index))$"
+ comment => "Uncommented the value $(index)",
+ replace_with => value("$(index) $($(v)[$(index)])"),
+ ifvarclass => "!line_exists_$(cindex[$(index)]).!replace_attempted_$(cindex[$(index)]).!multiple_comments_$(cindex[$(index)])",
+ classes => always("uncommented_$(cindex[$(index)])");
+
+ # If the line is there with the wrong value, replace with
+ # the correct value
+ "^\s*($(index)\s+(?!$(ev[$(index)])$).*|$(index))$"
+ comment => "Correct the value $(index)",
+ replace_with => value("$(index) $($(v)[$(index)])"),
+ classes => always("replace_attempted_$(cindex[$(index)])");
+
+ insert_lines:
+ # If the line doesn't exist, or there is more than one occurrence
+ # of the LHS commented out, insert a new line and try to place it
+ # after the commented LHS (keep new line with old comments)
+ "$(index) $($(v)[$(index)])"
+ comment => "Insert the value, marker exists $(index)",
+ location => after("^\s*#\s*($(index)\s+.*|$(index))$"),
+ ifvarclass => "replace_attempted_$(cindex[$(index)]).multiple_comments_$(cindex[$(index)])";
+
+ # If the line doesn't exist and there are no occurrences
+ # of the LHS commented out, insert a new line at the eof
+ "$(index) $($(v)[$(index)])"
+ comment => "Insert the value, marker doesn't exist $(index)",
+ ifvarclass => "replace_attempted_$(cindex[$(index)]).!multiple_comments_$(cindex[$(index)])";
+
+}
+
+bundle edit_line set_line_based(v, sep, bp, kp, cp)
+# @brief Sets the RHS of configuration items in the file of the form:
+#
+# ```
+# LHS$(sep)RHS
+# ```
+#
+# Example usage for `x=y` lines (e.g. rsyncd.conf):
+#
+# ```cf3
+#  "myfile"
+#  edit_line => set_line_based("test.config", "=", "\s*=\s*", ".*", "\s*#\s*");
+# ```
+#
+# Example usage for `x y` lines (e.g. sshd_config):
+#
+# ```cf3
+#  "myfile"
+#  edit_line => set_line_based("test.config", " ", "\s+", ".*", "\s*#\s*");
+# ```
+#
+# If the line is commented out with `$(cp)`, it gets uncommented first.
+#
+# Adds a new line if none exists or if more than one commented-out
+# possible matches exist.
+#
+# Originally `set_config_values` by Ed King.
+#
+# @param v The fully-qualified name of an associative array containing `v[LHS]="rhs"`
+# @param sep The separator to insert, e.g. ` ` for space-separated
+# @param bp The key-value separation regex, e.g. `\s+` for space-separated
+# @param kp The keys to select from v, use `.*` for all
+# @param cp The comment pattern from line-start, e.g. `\s*#\s*`
+{
+ meta:
+ # generic replacement for the deprecated set_* bundles listed below
+ "tags"
+ slist =>
+ {
+ "replaces=set_config_values",
+ "replaces=set_config_values_matching",
+ "replaces=set_variable_values",
+ "replaces=set_quoted_values",
+ "replaces=maintain_key_values",
+ };
+
+ vars:
+ "vkeys" slist => getindices("$(v)");
+ # only keys matching the selection regex $(kp) are managed
+ "i" slist => grep($(kp), vkeys);
+
+ # Be careful if the index string contains funny chars
+ "ci[$(i)]" string => canonify("$(i)");
+
+ # Escape the value (had a problem with special characters and regex's)
+ "ev[$(i)]" string => escape("$($(v)[$(i)])");
+
+ # Do we have more than one line commented out?
+ "comment_matches_$(ci[$(i)])"
+ int => countlinesmatching("^$(cp)($(i)$(bp).*|$(i))$",
+ $(edit.filename));
+
+
+ classes:
+ # Check to see if this line exists
+ "exists_$(ci[$(i)])"
+ expression => regline("^\s*($(i)$(bp).*|$(i))$",
+ $(edit.filename));
+
+ # if there's more than one comment, just add new (don't know which to use)
+ "multiple_comments_$(ci[$(i)])"
+ expression => isgreaterthan("$(comment_matches_$(ci[$(i)]))",
+ "1");
+
+
+ replace_patterns:
+ # If the line is commented out, uncomment and replace with
+ # the correct value
+ "^$(cp)($(i)$(bp).*|$(i))$"
+ comment => "Uncommented the value $(i)",
+ replace_with => value("$(i)$(sep)$($(v)[$(i)])"),
+ ifvarclass => "!exists_$(ci[$(i)]).!replace_attempted_$(ci[$(i)]).!multiple_comments_$(ci[$(i)])",
+ classes => always("uncommented_$(ci[$(i)])");
+
+ # If the line is there with the wrong value, replace with
+ # the correct value
+ "^\s*($(i)$(bp)(?!$(ev[$(i)])$).*|$(i))$"
+ comment => "Correct the value $(i)",
+ replace_with => value("$(i)$(sep)$($(v)[$(i)])"),
+ classes => always("replace_attempted_$(ci[$(i)])");
+
+ insert_lines:
+ # If the line doesn't exist, or there is more than one occurrence
+ # of the LHS commented out, insert a new line and try to place it
+ # after the commented LHS (keep new line with old comments)
+ "$(i)$(sep)$($(v)[$(i)])"
+ comment => "Insert the value, marker exists $(i)",
+ location => after("^$(cp)($(i)$(bp).*|$(i))$"),
+ ifvarclass => "replace_attempted_$(ci[$(i)]).multiple_comments_$(ci[$(i)])";
+
+ # If the line doesn't exist and there are no occurrences
+ # of the LHS commented out, insert a new line at the eof
+ "$(i)$(sep)$($(v)[$(i)])"
+ comment => "Insert the value, marker doesn't exist $(i)",
+ ifvarclass => "replace_attempted_$(ci[$(i)]).!multiple_comments_$(ci[$(i)])";
+}
+
+bundle edit_line set_config_values_matching(v,pat)
+# @brief Sets the RHS of configuration items in the file of the form
+#
+# ```
+# LHS RHS
+# ```
+#
+# If the line is commented out with `#`, it gets uncommented first.
+# Adds a new line if none exists.
+#
+# @param v the fully-qualified name of an associative array containing v[LHS]="rhs"
+# @param pat Only elements of `v` that match the regex `pat` are used
+{
+ meta:
+ # deprecated in favor of the generic set_line_based bundle
+ "tags"
+ slist =>
+ {
+ "deprecated=3.6.0",
+ "deprecation-reason=Generic reimplementation",
+ "replaced-by=set_line_based"
+ };
+
+ vars:
+ "allparams" slist => getindices("$(v)");
+ "index" slist => grep("$(pat)", "allparams");
+
+ # Be careful if the index string contains funny chars
+ "cindex[$(index)]" string => canonify("$(index)");
+
+ replace_patterns:
+ # If the line is there, maybe commented out, uncomment and replace with
+ # the correct value
+ "^\s*($(index)\s+(?!$($(v)[$(index)])).*|# ?$(index)\s+.*)$"
+ comment => "Correct the value",
+ replace_with => value("$(index) $($(v)[$(index)])"),
+ classes => always("replace_attempted_$(cindex[$(index)])");
+
+ insert_lines:
+ "$(index) $($(v)[$(index)])"
+ ifvarclass => "replace_attempted_$(cindex[$(index)])";
+
+}
+
+##
+
+bundle edit_line maintain_key_values(v,sep)
+# @ignore
+# @brief Sets the RHS of configuration items with a given separator
+#
+# Contributed by David Lee
+{
+ meta:
+ # deprecated in favor of the generic set_line_based bundle
+ "tags"
+ slist =>
+ {
+ "deprecated=3.6.0",
+ "deprecation-reason=Generic reimplementation",
+ "replaced-by=set_line_based"
+ };
+
+ vars:
+ "index" slist => getindices("$(v)");
+ # Be careful if the index string contains funny chars
+ "cindex[$(index)]" string => canonify("$(index)");
+ # Matching pattern for line (basically key-and-separator)
+ "keypat[$(index)]" string => "\s*$(index)\s*$(sep)\s*";
+
+ # Values may contain regexps. Escape them for replace_pattern matching.
+ "ve[$(index)]" string => escape("$($(v)[$(index)])");
+
+ classes:
+ "$(cindex[$(index)])_key_in_file"
+ comment => "Dynamic Class created if patterns matching",
+ expression => regline("^$(keypat[$(index)]).*", "$(edit.filename)");
+
+ replace_patterns:
+ # For convergence need to use negative lookahead on value:
+ # "key sep (?!value).*"
+ "^($(keypat[$(index)]))(?!$(ve[$(index)])$).*"
+ comment => "Replace definition of $(index)",
+ replace_with => value("$(match.1)$($(v)[$(index)])");
+
+ insert_lines:
+ "$(index)$(sep)$($(v)[$(index)])"
+ comment => "Insert definition of $(index)",
+ ifvarclass => "!$(cindex[$(index)])_key_in_file";
+}
+
+##
+
+bundle edit_line append_users_starting(v)
+# @brief For adding to `/etc/passwd` or `/etc/shadow`
+# @param v An array `v[username] string => "line..."`
+#
+# **Note:** To manage local users with CFEngine 3.6 and later,
+# consider making `users` promises instead of modifying system files.
+{
+ vars:
+
+ "index" slist => getindices("$(v)");
+
+ classes:
+
+ "add_$(index)" not => userexists("$(index)"),
+ comment => "Class created if user does not exist";
+
+ insert_lines:
+
+ # only the entries for users that don't already exist are appended
+ "$($(v)[$(index)])"
+
+ comment => "Append users into a password file format",
+ ifvarclass => "add_$(index)";
+}
+
+##
+
+bundle edit_line append_groups_starting(v)
+# @brief For adding groups to `/etc/group`
+# @param v An array `v[groupname] string => "line..."`
+#
+# **Note:** To manage local users with CFEngine 3.6 and later,
+# consider making `users` promises instead of modifying system files.
+{
+ vars:
+
+ "index" slist => getindices("$(v)");
+
+ classes:
+
+ "add_$(index)" not => groupexists("$(index)"),
+ comment => "Class created if group does not exist";
+
+ insert_lines:
+
+ # only the entries for groups that don't already exist are appended
+ "$($(v)[$(index)])"
+
+ comment => "Append users into a group file format",
+ ifvarclass => "add_$(index)";
+
+}
+
+##
+
+bundle edit_line set_colon_field(key,field,val)
+# @brief Set the value of field number `field` of the line whose
+# first field is `key` to the value `val`, in a colon-separated file.
+# @param key The value the first field has to match
+# @param field The field to be modified (1-based)
+# @param val The new value of `field`
+{
+ field_edits:
+
+ "$(key):.*"
+
+ comment => "Edit a colon-separated file, using the first field as a key",
+ edit_field => col(":","$(field)","$(val)","set");
+}
+
+##
+
+bundle edit_line set_user_field(user,field,val)
+# @brief Set the value of field number "field" in a `:-field`
+# formatted file like `/etc/passwd`
+# @param user The user to be modified
+# @param field The field that should be modified (1-based)
+# @param val The value for `field`
+#
+# **Note:** To manage local users with CFEngine 3.6 and later,
+# consider making `users` promises instead of modifying system files.
+{
+ field_edits:
+
+ "$(user):.*"
+
+ comment => "Edit a user attribute in the password file",
+ edit_field => col(":","$(field)","$(val)","set");
+}
+
+##
+
+bundle edit_line append_user_field(group,field,allusers)
+# @brief For adding users to a file like `/etc/group`
+# at field position `field`, comma separated subfields
+# @param group The group to be modified
+# @param field The field where users should be added
+# @param allusers The list of users to add to `field`
+#
+# **Note:** To manage local users with CFEngine 3.6 and later,
+# consider making `users` promises instead of modifying system files.
+{
+ vars:
+
+ "val" slist => { @(allusers) };
+
+ field_edits:
+
+ "$(group):.*"
+
+ comment => "Append users into a password file format",
+ # "alphanum" merges into the comma-separated subfield list, sorted, no dups
+ edit_field => col(":","$(field)","$(val)","alphanum");
+}
+
+##
+
+bundle edit_line expand_template(templatefile)
+# @brief Read in the named text file and expand `$(var)` inside the file
+# @param templatefile The name of the file
+{
+ insert_lines:
+
+ "$(templatefile)"
+
+ insert_type => "file",
+ comment => "Expand variables in the template file",
+ # expand_scalars substitutes $(var) references found in the inserted text
+ expand_scalars => "true";
+}
+
+bundle edit_line replace_or_add(pattern,line)
+# @brief Replace a pattern in a file with a single line.
+#
+# If the pattern is not found, add the line to the file.
+#
+# @param pattern The pattern that should be replaced
+# The pattern must match the whole line (it is automatically
+# anchored to the start and end of the line) to avoid
+# ambiguity.
+# @param line The line with which to replace matches of `pattern`
+{
+ vars:
+ "cline" string => canonify("$(line)");
+ # escaped form of $(line), used in the lookahead below
+ "eline" string => escape("$(line)");
+
+ replace_patterns:
+ # the (?!$(eline)$) lookahead skips lines already equal to $(line),
+ # keeping the promise convergent
+ "^(?!$(eline)$)$(pattern)$"
+ comment => "Replace a pattern here",
+ replace_with => value("$(line)"),
+ classes => always("replace_done_$(cline)");
+
+ insert_lines:
+ # runs after the replace pass; insert_lines adds $(line) only if absent
+ "$(line)"
+ ifvarclass => "replace_done_$(cline)";
+}
+
+# temporarily adding the "_dc_" prefix
+bundle edit_line _dc_converge(marker, lines)
+# @brief Converge `lines` marked with `marker`
+#
+# Any content marked with `marker` is removed, then `lines` are
+# inserted. Every `line` should contain `marker`.
+#
+# @param marker The marker (not a regular expression; will be escaped)
+# @param lines The lines to insert; all must contain `marker`
+{
+ vars:
+ # literal marker is escaped so regex metacharacters match verbatim
+ "regex" string => escape($(marker));
+
+ delete_lines:
+ # NOTE(review): delete_lines promisers match whole lines, so this only
+ # deletes lines consisting exactly of the marker — confirm intent
+ "$(regex)" comment => "Delete lines matching the marker";
+ insert_lines:
+ "$(lines)" comment => "Insert the given lines";
+}
+
+bundle edit_line fstab_option_editor(method, mount, option)
+# @brief Add or remove `/etc/fstab` options for a mount
+#
+# This bundle edits the options field of a mount. The `method` is a
+# `field_operation` which can be `append`, `prepend`, `set`, `delete`,
+# or `alphanum`. The option is OS-specific.
+#
+# @param method `field_operation` to apply
+# @param mount the mount point
+# @param option the option to add or remove
+#
+# **Example:**
+#
+# ```cf3
+#  files:
+#   "/etc/fstab" edit_line => fstab_option_editor("delete", "/", "acl");
+#   "/etc/fstab" edit_line => fstab_option_editor("append", "/", "acl");
+# ```
+{
+ field_edits:
+ # (?!#) skips commented fstab lines; second field must equal the mount point
+ "(?!#)\S+\s+$(mount)\s.+"
+ edit_field => fstab_options($(option), $(method));
+}
+
+###################################################
+# edit_xml bundles
+###################################################
+
+bundle edit_xml xml_insert_tree_nopath(treestring)
+# @brief Insert XML tree with no path
+#
+# This `edit_xml` bundle inserts the given XML tree. Use with an
+# empty XML document.
+#
+# @param treestring The XML tree, as a string
+#
+# **Example:**
+#
+# ```cf3
+#  files:
+#   "/newfile" edit_xml => xml_insert_tree_nopath('<x>y</x>');
+# ```
+{
+ insert_tree:
+ # no select_xpath: the tree becomes the document root content
+ '$(treestring)';
+}
+
+bundle edit_xml xml_insert_tree(treestring, xpath)
+# @brief Insert XML tree at the given XPath
+#
+# This `edit_xml` bundle inserts the given XML tree at a specific
+# XPath. Uses `insert_tree`.
+#
+# @param treestring The XML tree, as a string
+# @param xpath A valid XPath string
+#
+# **Example:**
+#
+# ```cf3
+#  files:
+#   "/file.xml" edit_xml => xml_insert_tree('<x>y</x>', '/a/b/c');
+# ```
+{
+ insert_tree:
+ '$(treestring)'
+ # Fix: without select_xpath the $(xpath) parameter was ignored and this
+ # bundle behaved identically to xml_insert_tree_nopath.
+ select_xpath => "$(xpath)";
+}
+
+bundle edit_xml xml_set_value(value, xpath)
+# @brief Sets or replaces a value in XML at the given XPath
+#
+# This `edit_xml` bundle sets or replaces the value at a specific
+# XPath with the given value. Uses `set_text`.
+#
+# @param value The new value
+# @param xpath A valid XPath string
+#
+# **Example:**
+#
+# ```cf3
+#  files:
+#   "/file.xml" edit_xml => xml_set_value('hello', '/a/b/c');
+# ```
+{
+ set_text:
+ # replaces the text content of the node selected by $(xpath)
+ "$(value)"
+ select_xpath => "$(xpath)";
+}
+
+bundle edit_xml xml_set_attribute(attr, value, xpath)
+# @brief Sets or replaces an attribute in XML at the given XPath
+#
+# This `edit_xml` bundle sets or replaces an attribute at a specific
+# XPath with the given value. Uses `set_attribute`.
+#
+# @param attr The attribute name
+# @param value The new attribute value
+# @param xpath A valid XPath string
+#
+# **Example:**
+#
+# ```cf3
+#  files:
+#   "/file.xml" edit_xml => xml_set_attribute('parameter', 'ha', '/a/b/c');
+# ```
+{
+ set_attribute:
+ # the promiser is the attribute name; its value and node are attributes
+ "$(attr)"
+ attribute_value => "$(value)",
+ select_xpath => "$(xpath)";
+
+}
+
+##-------------------------------------------------------
+## editing bodies
+##-------------------------------------------------------
+
+body edit_field fstab_options(newval, method)
+# @brief Edit the options field in a fstab format
+# @param newval the new option
+# @param method `field_operation` to apply
+#
+# This body edits the options field in the fstab file format. The
+# `method` is a `field_operation` which can be `append`, `prepend`,
+# `set`, `delete`, or `alphanum`. The `newval` option is OS-specific.
+#
+# **Example:**
+#
+# ```cf3
+# # from the `fstab_options_editor`
+#  field_edits:
+#   "(?!#)\S+\s+$(mount)\s.+"
+#    edit_field => fstab_options($(option), $(method));
+# ```
+{
+ # fstab columns are whitespace-separated; the mount options are column 4,
+ # themselves a comma-separated sub-list
+ field_separator => "\s+";
+ select_field => "4";
+ value_separator => ",";
+ field_value => "$(newval)";
+ field_operation => "$(method)";
+}
+
+body edit_field quoted_var(newval,method)
+# @brief Edit the quoted value of the matching line
+# @param newval The new value
+# @param method The method by which to edit the field
+{
+ # splitting on double quotes makes field 2 the text between the quotes
+ field_separator => "\"";
+ select_field => "2";
+ value_separator => " ";
+ field_value => "$(newval)";
+ field_operation => "$(method)";
+ # do not pad with extra separators if the field is missing
+ extend_fields => "false";
+ allow_blank_fields => "true";
+}
+
+##
+
+body edit_field col(split,col,newval,method)
+# @brief Edit tabular data with comma-separated sub-values
+# @param split The separator that defines columns
+# @param col The (1-based) index of the value to change
+# @param newval The new value
+# @param method The method by which to edit the field
+{
+ field_separator => "$(split)";
+ select_field => "$(col)";
+ value_separator => ",";
+ field_value => "$(newval)";
+ field_operation => "$(method)";
+ # create missing fields (and separators) up to the selected column
+ extend_fields => "true";
+ allow_blank_fields => "true";
+}
+
+##
+
+body edit_field line(split,col,newval,method)
+# @brief Edit tabular data with space-separated sub-values
+# @param split The separator that defines columns
+# @param col The (1-based) index of the value to change
+# @param newval The new value
+# @param method The method by which to edit the field
+{
+ field_separator => "$(split)";
+ select_field => "$(col)";
+ value_separator => " ";
+ field_value => "$(newval)";
+ field_operation => "$(method)";
+ extend_fields => "true";
+ allow_blank_fields => "true";
+}
+
+##
+
+body replace_with value(x)
+# @brief Replace matching lines
+# @param x The replacement string
+{
+ replace_value => "$(x)";
+ occurrences => "all";
+}
+
+##
+
+body select_region INI_section(x)
+# @brief Restrict the `edit_line` promise to the lines in section `[x]`
+# @param x The name of the section in an INI-like configuration file
+{
+ # The region starts at the "[x]" header and ends at the next "[...]"
+ # header, i.e. at the start of the following section.
+ select_start => "\[$(x)\]\s*";
+ select_end => "\[.*\]\s*";
+}
+
+##-------------------------------------------------------
+## edit_defaults
+##-------------------------------------------------------
+
+body edit_defaults std_defs
+# @brief Standard definitions for `edit_defaults`
+# Don't empty the file before editing starts and don't make a backup.
+{
+ empty_file_before_editing => "false";
+ edit_backup => "false";
+ #max_file_size => "300000";
+}
+
+##
+
+body edit_defaults empty
+# @brief Empty the file before editing
+#
+# No backup is made
+{
+ # The existing content is discarded, so the edit promises rebuild the
+ # file from scratch on every pass.
+ empty_file_before_editing => "true";
+ edit_backup => "false";
+ #max_file_size => "300000";
+}
+
+##
+
+body edit_defaults no_backup
+# @brief Don't make a backup of the file before editing
+{
+ edit_backup => "false";
+}
+
+##
+
+body edit_defaults backup_timestamp
+# @brief Make a timestamped backup of the file before editing
+{
+ empty_file_before_editing => "false";
+ edit_backup => "timestamp";
+ #max_file_size => "300000";
+}
+
+##-------------------------------------------------------
+## location
+##-------------------------------------------------------
+
+body location start
+# @brief Editing occurs before the matched line
+#
+# NOTE(review): no `select_line_matching` is set here, so insertions are
+# placed before the default location — effectively at the start of the
+# file/region. Confirm against the `location` body documentation.
+{
+ before_after => "before";
+}
+
+##
+
+body location after(str)
+# @brief Editing occurs after the line matching `str`
+# @param str Regular expression matching the file line location
+{
+ before_after => "after";
+ select_line_matching => "$(str)";
+}
+
+##
+
+body location before(str)
+# @brief Editing occurs before the line matching `str`
+# @param str Regular expression matching the file line location
+{
+ before_after => "before";
+ select_line_matching => "$(str)";
+}
+
+##-------------------------------------------------------
+## replace_with
+##-------------------------------------------------------
+
+##
+
+body replace_with comment(c)
+# @brief Comment all lines matching the pattern by prepending `c`
+# @param c The prefix that comments out lines
+#
+# The promiser regex must capture the original line in group 1, which is
+# referenced here as $(match.1).
+{
+ replace_value => "$(c) $(match.1)";
+ occurrences => "all";
+}
+
+##
+
+body replace_with uncomment
+# @brief Uncomment all lines matching the pattern by removing
+# anything outside the matching string
+{
+ replace_value => "$(match.1)";
+ occurrences => "all";
+}
+
+##-------------------------------------------------------
+## copy_from
+##-------------------------------------------------------
+
+body copy_from secure_cp(from,server)
+# @brief Download a file from a remote server over an encrypted channel
+#
+# Only copy the file if it is different from the local copy, and verify
+# that the copy is correct.
+#
+# @param from The location of the file on the remote server
+# @param server The hostname or IP of the server from which to download
+{
+ source => "$(from)";
+ servers => { "$(server)" };
+ compare => "digest";
+ encrypt => "true";
+ verify => "true";
+}
+
+##
+
+body copy_from remote_cp(from,server)
+# @brief Download a file from a remote server.
+#
+# @param from The location of the file on the remote server
+# @param server The hostname or IP of the server from which to download
+{
+ servers => { "$(server)" };
+ source => "$(from)";
+ compare => "mtime";
+}
+
+##
+
+body copy_from remote_dcp(from,server)
+# @brief Download a file from a remote server if it is different from the local copy.
+#
+# @param from The location of the file on the remote server
+# @param server The hostname or IP of the server from which to download
+{
+ servers => { "$(server)" };
+ source => "$(from)";
+ compare => "digest";
+}
+
+##
+
+body copy_from local_cp(from)
+# @brief Copy a local file.
+#
+# @param from The path to the source file.
+{
+ source => "$(from)";
+}
+
+##
+
+body copy_from local_dcp(from)
+# @brief Copy a local file if it is different from the existing copy.
+#
+# @param from The path to the source file.
+{
+ source => "$(from)";
+ compare => "digest";
+}
+
+##
+
+body copy_from perms_cp(from)
+# @brief Copy a local file and preserve file permissions on the local copy.
+#
+# @param from The path to the source file.
+{
+ source => "$(from)";
+ preserve => "true";
+}
+
+body copy_from backup_local_cp(from)
+# @brief Copy a local file and keep a backup of old versions.
+#
+# @param from The path to the source file.
+{
+ source => "$(from)";
+ copy_backup => "timestamp";
+}
+
+##
+
+body copy_from seed_cp(from)
+# @brief Copy a local file if the file does not already exist, i.e. seed the placement
+#
+# @param from The path to the source file.
+{
+ source => "$(from)";
+ compare => "exists";
+}
+
+##
+
+body copy_from sync_cp(from,server)
+# @brief Synchronize the local copy with the remote source, purging local
+# files that do not exist on the server and preserving source permissions
+#
+# @param from The location of the file on the remote server
+# @param server The hostname or IP of the server from which to download
+{
+ servers => { "$(server)" };
+ source => "$(from)";
+ # purge removes local files/directories that are absent from the source
+ # (effective when combined with a depth_search).
+ purge => "true";
+ preserve => "true";
+ type_check => "false";
+}
+
+##
+
+body copy_from no_backup_cp(from)
+# @brief Copy a local file and don't make any backup of the previous version
+#
+# @param from The path to the source file.
+{
+ source => "$(from)";
+ copy_backup => "false";
+}
+
+##
+
+body copy_from no_backup_dcp(from)
+# @brief Copy a local file if contents have changed, and don't make any backup
+# of the previous version
+#
+# @param from The path to the source file.
+{
+ source => "$(from)";
+ copy_backup => "false";
+ compare => "digest";
+}
+
+##
+
+body copy_from no_backup_rcp(from,server)
+# @brief Download a file if it's newer than the local copy, and don't make any
+# backup of the previous version
+#
+# @param from The location of the file on the remote server
+# @param server The hostname or IP of the server from which to download
+{
+ servers => { "$(server)" };
+ source => "$(from)";
+ compare => "mtime";
+ copy_backup => "false";
+}
+
+##-------------------------------------------------------
+## link_from
+##-------------------------------------------------------
+
+body link_from ln_s(x)
+# @brief Create a symbolic link to `x`
+# The link is created even if the source of the link does not exist.
+# @param x The source of the link
+{
+ link_type => "symlink";
+ source => "$(x)";
+ when_no_source => "force";
+}
+
+##
+
+body link_from linkchildren(tofile)
+# @brief Create a symbolic link to `tofile`
+# If the promiser is a directory, children are linked to the source, unless
+# entries with identical names already exist.
+# The link is created even if the source of the link does not exist.
+#
+# @param tofile The source of the link
+{
+ source => "$(tofile)";
+ link_type => "symlink";
+ when_no_source => "force";
+ link_children => "true";
+ when_linking_children => "if_no_such_file"; # "override_file";
+}
+
+##-------------------------------------------------------
+## perms
+##-------------------------------------------------------
+
+body perms m(mode)
+# @brief Set the file mode
+# @param mode The new mode
+{
+ mode => "$(mode)";
+}
+
+##
+
+body perms mo(mode,user)
+# @brief Set the file's mode and owners
+# @param mode The new mode
+# @param user The username of the new owner
+{
+ owners => { "$(user)" };
+ mode => "$(mode)";
+}
+
+##
+
+body perms mog(mode,user,group)
+# @brief Set the file's mode, owner and group
+# @param mode The new mode
+# @param user The username of the new owner
+# @param group The group name
+{
+ owners => { "$(user)" };
+ groups => { "$(group)" };
+ mode => "$(mode)";
+}
+
+##
+
+body perms og(u,g)
+# @brief Set the file's owner and group
+# @param u The username of the new owner
+# @param g The group name
+{
+ owners => { "$(u)" };
+ groups => { "$(g)" };
+}
+
+##
+
+body perms owner(user)
+# @brief Set the file's owner
+# @param user The username of the new owner
+{
+ owners => { "$(user)" };
+}
+
+body perms system_owned(mode)
+# @brief Set the file owner and group to the system default
+# @param mode the access permission in octal format
+#
+# **Example:**
+#
+# ```cf3
+# files:
+# "/etc/passwd" perms => system_owned("0644");
+# ```
+{
+ mode => "$(mode)";
+ owners => { "root" };
+
+ # The conventional "system" group differs per platform:
+ freebsd|openbsd|netbsd|darwin::
+ groups => { "wheel" };
+
+ linux::
+ groups => { "root" };
+
+ solaris::
+ groups => { "sys" };
+}
+
+##-------------------------------------------------------
+## ACLS (extended Unix perms)
+##-------------------------------------------------------
+
+body acl access_generic(acl)
+# @brief Set the `aces` of the access control as specified
+#
+# Default/inherited ACLs are left unchanged. This body is
+# applicable for both files and directories on all platforms.
+#
+# @param acl The aces to be set
+{
+ acl_method => "overwrite";
+ aces => { "@(acl)" };
+
+ # Pick the native ACL type for the platform:
+ windows::
+ acl_type => "ntfs";
+
+ !windows::
+ acl_type => "posix";
+}
+
+##
+
+body acl ntfs(acl)
+# @brief Set the `aces` on NTFS file systems, and overwrite
+# existing ACLs.
+#
+# This body requires CFEngine Enterprise.
+#
+# @param acl The aces to be set
+{
+ acl_type => "ntfs";
+ acl_method => "overwrite";
+ aces => { "@(acl)" };
+}
+
+##
+
+body acl strict
+# @brief Limit file access via ACLs to users with administrator privileges,
+# overwriting existing ACLs.
+#
+# **Note:** May need to take ownership of file/dir to be sure no-one else is
+# allowed access.
+{
+ acl_method => "overwrite";
+
+ windows::
+ aces => { "user:Administrator:rwx" };
+ !windows::
+ aces => { "user:root:rwx" };
+}
+
+##-------------------------------------------------------
+## depth_search
+##-------------------------------------------------------
+
+body depth_search recurse(d)
+# @brief Search files and directories recursively, up to the specified depth
+# Directories on different devices are included.
+#
+# @param d The maximum search depth
+{
+ depth => "$(d)";
+ xdev => "true";
+}
+
+##
+
+body depth_search recurse_ignore(d,list)
+# @brief Search files and directories recursively,
+# but don't recurse into the specified directories
+#
+# @param d The maximum search depth
+# @param list The list of directories to be excluded
+{
+ depth => "$(d)";
+ exclude_dirs => { @(list) };
+}
+
+##
+
+body depth_search include_base
+# @brief Search files and directories recursively,
+# starting from the base directory.
+{
+ include_basedir => "true";
+}
+
+body depth_search recurse_with_base(d)
+# @brief Search files and directories recursively up to the specified
+# depth, starting from the base directory and including directories on
+# other devices.
+#
+# @param d The maximum search depth
+{
+ depth => "$(d)";
+ xdev => "true";
+ include_basedir => "true";
+}
+
+##-------------------------------------------------------
+## delete
+##-------------------------------------------------------
+
+body delete tidy
+# @brief Delete the file and remove empty directories
+# and links to directories
+{
+ dirlinks => "delete";
+ rmdirs => "true";
+}
+
+##-------------------------------------------------------
+## rename
+##-------------------------------------------------------
+
+body rename disable
+# @brief Disable the file
+#
+# NOTE(review): `disable` renames the promiser out of the way (typically
+# adding a ".cfdisabled" suffix) rather than deleting it — confirm against
+# the `rename` body documentation.
+{
+ disable => "true";
+}
+
+##
+
+body rename rotate(level)
+# @brief Rotate and store up to `level` backups of the file
+# @param level The number of backups to store
+{
+ rotate => "$(level)";
+}
+
+##
+
+body rename to(file)
+# @brief Rename the file to `file`
+# @param file The new name of the file
+{
+ newname => "$(file)";
+}
+
+##-------------------------------------------------------
+## file_select
+##-------------------------------------------------------
+
+body file_select name_age(name,days)
+# @brief Select files that have a matching `name` and have not been modified for at least `days`
+# @param name A regex that matches the file name
+# @param days Number of days
+{
+ leaf_name => { "$(name)" };
+ mtime => irange(0,ago(0,0,"$(days)",0,0,0));
+ # "mtime.leaf_name" requires BOTH criteria to match.
+ file_result => "mtime.leaf_name";
+}
+
+##
+
+body file_select days_old(days)
+# @brief Select files that have not been modified for at least `days`
+# @param days Number of days
+{
+ mtime => irange(0,ago(0,0,"$(days)",0,0,0));
+ file_result => "mtime";
+}
+
+##
+
+body file_select size_range(from,to)
+# @brief Select files that have a size within the specified range
+# @param from The lower bound of the allowed file size
+# @param to The upper bound of the allowed file size
+{
+ search_size => irange("$(from)","$(to)");
+ file_result => "size";
+}
+
+##
+
+body file_select bigger_than(size)
+# @brief Select files that are above a given size
+# @param size The size in bytes; files larger than this are selected
+{
+ # "!size" negates the 0..size range, i.e. selects files bigger than size.
+ search_size => irange("0","$(size)");
+ file_result => "!size";
+}
+
+##
+
+body file_select exclude(name)
+# @brief Select all files except those that match `name`
+# @param name A regular expression
+{
+ leaf_name => { "$(name)"};
+ file_result => "!leaf_name";
+}
+
+##
+
+body file_select plain
+# @brief Select plain, regular files
+{
+ file_types => { "plain" };
+ file_result => "file_types";
+}
+
+body file_select dirs
+# @brief Select directories
+{
+ file_types => { "dir" };
+ file_result => "file_types";
+}
+
+##
+
+body file_select by_name(names)
+# @brief Select files that match `names`
+# @param names A list of regular expressions
+{
+ leaf_name => { @(names)};
+ file_result => "leaf_name";
+}
+
+##
+
+body file_select ex_list(names)
+# @brief Select all files except those that match `names`
+# @param names A list of regular expressions
+{
+ leaf_name => { @(names)};
+ file_result => "!leaf_name";
+}
+
+##
+
+body file_select all
+# @brief Select all file system entries
+{
+ leaf_name => { ".*" };
+ file_result => "leaf_name";
+}
+
+##
+
+body file_select older_than(years, months, days, hours, minutes, seconds)
+# @brief Select files older than the date-time specified
+# @param years Number of years
+# @param months Number of months
+# @param days Number of days
+# @param hours Number of hours
+# @param minutes Number of minutes
+# @param seconds Number of seconds
+#
+# Generic older_than selection body, aimed to have a common definition handy
+# for every case possible.
+{
+ mtime => irange(0,ago("$(years)","$(months)","$(days)","$(hours)","$(minutes)","$(seconds)"));
+ file_result => "mtime";
+}
+
+##
+
+body file_select filetype_older_than(filetype, days)
+# @brief Select files of specified type older than specified number of days
+#
+# @param filetype File type to select
+# @param days Number of days
+#
+# This body only takes a single filetype, see `filetypes_older_than()`
+# if you want to select more than one type of file.
+{
+ file_types => { "$(filetype)" };
+ mtime => irange(0,ago(0,0,"$(days)",0,0,0));
+ file_result => "file_types.mtime";
+}
+
+##
+
+body file_select filetypes_older_than(filetypes, days)
+# @brief Select files of specified types older than specified number of days
+#
+# This body only takes a list of filetypes
+#
+# @param filetypes A list of file types
+# @param days Number of days
+#
+# **See also:** `filetype_older_than()`
+{
+ file_types => { @(filetypes) };
+ mtime => irange(0,ago(0,0,"$(days)",0,0,0));
+ file_result => "file_types.mtime";
+}
+
+##-------------------------------------------------------
+## changes
+##-------------------------------------------------------
+
+body changes detect_all_change
+# @brief Detect all file changes using the best hash method
+#
+# This is fierce, and will cost disk cycles
+#
+{
+ hash => "best";
+ report_changes => "all";
+ # Update the stored hash after reporting, so a given change is only
+ # reported once.
+ update_hashes => "yes";
+}
+
+##
+
+body changes detect_all_change_using(hash)
+# @brief Detect all file changes using a given hash method
+#
+# Detect all changes using a configurable hashing algorithm
+# for times when you care about both content and file stats e.g. mtime
+#
+# @param hash supported hashing algorithm (md5, sha1, sha224, sha256, sha384, sha512, best)
+{
+ hash => "$(hash)";
+ report_changes => "all";
+ update_hashes => "yes";
+}
+
+##
+
+body changes detect_content
+# @brief Detect file content changes using md5
+#
+# This is a cheaper alternative
+{
+ hash => "md5";
+ report_changes => "content";
+ update_hashes => "yes";
+}
+
+##
+
+body changes detect_content_using(hash)
+# @brief Detect file content changes using a given hash algorithm.
+#
+# For times when you only care about content, not file stats e.g. mtime
+# @param hash - supported hashing algorithm (md5, sha1, sha224, sha256, sha384,
+# sha512, best)
+{
+ hash => "$(hash)";
+ report_changes => "content";
+ update_hashes => "yes";
+}
+
+##
+
+body changes noupdate
+# @brief Detect content changes in (small) files that should never change
+#
+# The stored hash is NOT updated, so a changed file keeps being reported
+# until it is restored.
+{
+ hash => "sha256";
+ report_changes => "content";
+ update_hashes => "no";
+}
+
+##
+
+body changes diff
+# @brief Detect file content changes using sha256
+# and report the diff to CFEngine Enterprise
+{
+ hash => "sha256";
+ report_changes => "content";
+ report_diffs => "true";
+ update_hashes => "yes";
+}
+
+##
+
+body changes all_changes
+# @brief Detect all file changes using sha256
+# and report the diff to CFEngine Enterprise
+{
+ hash => "sha256";
+ report_changes => "all";
+ report_diffs => "true";
+ update_hashes => "yes";
+}
+
+##
+
+body changes diff_noupdate
+# @brief Detect content changes in (small) files
+# and report the diff to CFEngine Enterprise
+{
+ hash => "sha256";
+ report_changes => "content";
+ report_diffs => "true";
+ update_hashes => "no";
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Guest environments bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+##-------------------------------------------------------
+## guest_environment promises
+##-------------------------------------------------------
+
+body environment_resources kvm(name, arch, cpu_count, mem_kb, disk_file)
+# @brief An `environment_resources` body for a KVM virtual machine.
+#
+# The `env_spec` attribute is set to a KVM XML specification.
+#
+# @param name The name of the virtual machine
+# @param arch The architecture
+# @param cpu_count The number of CPUs the virtual machine should have
+# @param mem_kb The amount of RAM in kilobyte
+# @param disk_file The file on the host system for the virtual machine's harddrive
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent manage_vm
+# {
+# guest_environments:
+# am_vm_host::
+# "db_server"
+# environment_host => atlas,
+# environment_type => "kvm",
+# environment_state => "create",
+# environment_resources => kvm("PSQL1", "x86_64", "4", "4096", "/var/lib/libvirt/images/psql1.iso");
+# }
+# ```
+{
+ env_spec =>
+ "<domain type='kvm'>
+ <name>$(name)</name>
+ <memory>$(mem_kb)</memory>
+ <currentMemory>$(mem_kb)</currentMemory>
+ <vcpu>$(cpu_count)</vcpu>
+ <os>
+ <type arch='$(arch)'>hvm</type>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm</emulator>
+ <disk type='file' device='disk'>
+ <source file='$(disk_file)'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <source network='default'/>
+ </interface>
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes'/>
+ </devices>
+</domain>";
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Monitor bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+####################################################
+## monitor bodyparts
+####################################################
+
+body match_value scan_log(line)
+# @brief Selects lines matching `line` in a growing file
+# @param line Regular expression for matching lines.
+#
+# **See also:** `select_line_matching`, `track_growing_file`
+{
+ select_line_matching => "$(line)";
+ track_growing_file => "true";
+}
+
+##
+
+body match_value scan_changing_file(line)
+# @brief Selects lines matching `line` in a changing file
+# @param line Regular expression for matching lines.
+#
+# **See also:** `select_line_matching`, `track_growing_file`
+{
+ select_line_matching => "$(line)";
+ track_growing_file => "false";
+}
+
+##
+
+body match_value single_value(regex)
+# @brief Extract lines matching `regex` as values
+# @param regex Regular expression matching lines and values
+#
+# The whole line regex is wrapped in parentheses so the entire match is
+# extracted as the value.
+#
+# **See also:** `select_line_matching`, `extraction_regex`
+{
+ select_line_matching => "$(regex)";
+ extraction_regex => "($(regex))";
+}
+
+##
+
+body match_value line_match_value(line_match, extract_regex)
+# @brief Find lines matching line_match and extract a value matching extract_regex
+# @param line_match Regular expression matching line where value is found
+# @param extract_regex Regular expression matching value to extract
+#
+# **See also:** `select_line_matching`, `extraction_regex`
+#
+# **Example:**
+#
+# ```cf3
+# bundle monitor example
+# {
+# vars:
+# "regex_vsz" string => "root\s+[0-9]+\s+[0-9]+\s+[0-9]+\s+[0-9.]+\s+[0-9.]+\s+([0-9]+).*";
+# measurements:
+# "/var/cfengine/state/cf_procs"
+# handle => "cf_serverd_vsz",
+# comment => "Tracking the memory consumption of a process can help us identify possible memory leaks",
+# stream_type => "file",
+# data_type => "int",
+# history_type => "weekly",
+# units => "kB",
+# match_value => line_match_value(".*cf-serverd.*", "$(regex_vsz)");
+# }
+# ```
+{
+ select_line_matching => "$(line_match)";
+ extraction_regex => "$(extract_regex)";
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Packages bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+bundle common packages_common
+# @ignore
+#
+# Holds the list of extra policy files this library needs (paths.cf, which
+# defines the `paths` bundle used by the knowledge bundles below).
+{
+ vars:
+ "inputs" slist => { "$(this.promise_dirname)/paths.cf" };
+}
+
+body file control
+# @ignore
+#
+# Automatically pull the above inputs into the policy.
+{
+ inputs => { @(packages_common.inputs) };
+}
+
+##--------------------------------------------------------------
+## Packages promises
+##--------------------------------------------------------------
+
+bundle common common_knowledge
+# @brief common packages knowledge bundle
+#
+# This common bundle defines general things about platforms.
+{
+ vars:
+ # Minimum interval (in minutes) between package-list refreshes: 240 = 4h.
+ "list_update_ifelapsed" string => "240";
+}
+
+bundle common debian_knowledge
+# @depends paths
+# @brief common Debian knowledge bundle
+#
+# This common bundle has useful information about Debian.
+{
+ vars:
+ "apt_prefix" string => "/usr/bin/env DEBIAN_FRONTEND=noninteractive LC_ALL=C PATH=/bin:/sbin/:/usr/bin:/usr/sbin";
+ "call_dpkg" string => "$(apt_prefix) $(paths.path[dpkg])";
+ "call_apt_get" string => "$(apt_prefix) $(paths.path[apt_get])";
+ "call_aptitude" string => "$(apt_prefix) $(paths.path[aptitude])";
+ "dpkg_options" string => "-o Dpkg::Options::=--force-confold -o Dpkg::Options::=--force-confdef";
+
+ "dpkg_compare_equal" string => "$(call_dpkg) --compare-versions $(v1) eq $(v2)";
+ "dpkg_compare_less" string => "$(call_dpkg) --compare-versions $(v1) lt $(v2)";
+
+ # Parse `dpkg -l`-style listings (".i name version ...").
+ "list_name_regex" string => "^.i\s+([^\s:]+).*";
+ "list_version_regex" string => "^.i\s+[^\s]+\s+([^\s]+).*";
+
+ # Parse `apt-get -s` "Inst ..." lines.
+ # NOTE(review): patch_version_regex is byte-identical to patch_name_regex,
+ # so it captures the package NAME, not the candidate version. It likely
+ # should extract the parenthesized version field instead — TODO confirm
+ # against real `apt-get -s upgrade` output before changing.
+ "patch_name_regex" string => "^Inst\s+(\S+)\s+.*";
+ "patch_version_regex" string => "^Inst\s+(\S+)\s+.*";
+}
+
+bundle common redhat_knowledge
+# @depends paths
+# @brief common Redhat knowledge bundle
+#
+# This common bundle has useful information about Redhat.
+# The regexes below are unanchored and parse pipe- or space-separated
+# output produced with the rpm query formats noted above each group.
+{
+ vars:
+ "call_yum" string => "$(paths.path[yum])";
+ "call_rpm" string => "$(paths.path[rpm])";
+
+ "yum_options" string => "--quiet";
+
+ # used with rpm format 'i | repos | %{name} | %{version}-%{release} | %{arch}\n'
+
+ "rpm_name_regex" string => "[^|]+\|[^|]+\|\s+([^\s|]+).*";
+ "rpm_version_regex" string => "[^|]+\|[^|]+\|[^|]+\|\s+([^\s|]+).*";
+ "rpm_arch_regex" string => "[^|]+\|[^|]+\|[^|]+\|[^|]+\|\s+([^\s]+).*";
+
+ # used with rpm format '%{name} %{version}-%{release} %{arch}\n'
+
+ "rpm2_name_regex" string => "^(\S+?)\s\S+?\s\S+$";
+ "rpm2_version_regex" string => "^\S+?\s(\S+?)\s\S+$";
+ "rpm2_arch_regex" string => "^\S+?\s\S+?\s(\S+)$";
+
+ # used with rpm format '%{name}.%{arch} %{version}-%{release}\n'
+
+ "rpm3_name_regex" string => "([^.]+).*";
+ "rpm3_version_regex" string => "[^\s]\s+([^\s]+).*";
+ "rpm3_arch_regex" string => "[^.]+\.([^\s]+).*";
+}
+
+bundle common darwin_knowledge
+# @depends paths
+# @brief common Darwin / Mac OS X knowledge bundle
+#
+# This common bundle has useful information about Darwin / Mac OS X.
+{
+ vars:
+ "call_brew" string => "$(paths.path[brew])";
+ "call_sudo" string => "$(paths.path[sudo])";
+
+ # used with brew list --versions format '%{name} %{version}\n'
+
+ "brew_name_regex" string => "([\S]+)\s[\S]+";
+ "brew_version_regex" string => "[\S]+\s([\S]+)";
+}
+
+bundle common npm_knowledge
+# @depends paths
+# @brief Node.js `npm' knowledge bundle
+#
+# This common bundle has useful information about the Node.js `npm' package manager.
+# Regexes parse `npm list` output lines of the form "... name@version".
+# (The '-' inside the character classes is literal here, not a range.)
+{
+ vars:
+ "call_npm" string => "$(paths.path[npm])";
+
+ "npm_list_name_regex" string => "^[^ /]+ ([\w\d-._~]+)@[\d.]+";
+ "npm_list_version_regex" string => "^[^ /]+ [\w\d-._~]+@([\d.]+)";
+ "npm_installed_regex" string => "^[^ /]+ ([\w\d-._~]+@[\d.]+)";
+}
+
+bundle common pip_knowledge
+# @depends paths
+# @brief Python `pip' knowledge bundle
+#
+# This common bundle has useful information about the Python `pip' package manager.
+# Regexes parse `pip list` output lines of the form "name (version)".
+{
+ vars:
+ "call_pip" string => "$(paths.path[pip])";
+
+ "pip_list_name_regex" string => "^([[:alnum:]-_]+)\s\([\d.]+\)";
+ "pip_list_version_regex" string => "^[[:alnum:]-_]+\s\(([\d.]+)\)";
+ "pip_installed_regex" string => "^([[:alnum:]-_]+\s\([\d.]+\))";
+}
+
+body package_method pip(flags)
+# @depends common_knowledge pip_knowledge
+# @brief Python `pip' package management
+#
+# `pip' is a package manager for Python
+# http://www.pip-installer.org/en/latest/
+#
+# Available commands : add, delete, (add)update, verify
+#
+# @param flags The command line parameter passed to `pip`
+#
+# Note: "update" command performs recursive upgrade (of dependencies) by
+# default. Set $flags to "--no-deps" to perform non-recursive upgrade.
+# http://www.pip-installer.org/en/latest/cookbook.html#non-recursive-upgrades
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "Django" package_method => pip(""), package_policy => "add";
+# "django-registration" package_method => pip(""), package_policy => "delete";
+# "requests" package_method => pip(""), package_policy => "verify";
+#
+# ```
+#
+# **Note:** "Django" with a capital 'D' in the example above.
+# Explicitly match the name of the package, capitalization does count!
+#
+# ```console
+# $ pip search django | egrep "^Django\s+-"
+# Django - A high-level Python Web framework [..output trimmed..]
+# ```
+{
+ package_changes => "individual";
+
+ package_noverify_regex => "";
+
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ package_list_name_regex => "$(pip_list_name_regex)";
+ package_list_version_regex => "$(pip_list_version_regex)";
+ package_installed_regex => "$(pip_installed_regex)";
+
+ package_name_convention => "$(name)";
+ package_delete_convention => "$(name)";
+
+ package_list_command => "$(paths.path[pip]) list $(flags)";
+ package_verify_command => "$(paths.path[pip]) show $(flags)";
+ package_add_command => "$(paths.path[pip]) install $(flags)";
+ package_delete_command => "$(paths.path[pip]) uninstall --yes $(flags)";
+ package_update_command => "$(paths.path[pip]) install --upgrade $(flags)";
+}
+
+body package_method npm(dir)
+# @depends common_knowledge npm_knowledge
+# @brief Node.js `npm' local-mode package management
+#
+# `npm' is a package manager for Node.js
+# https://npmjs.org/package/npm
+#
+# Available commands : add, delete, (add)update, verify
+#
+# For the difference between local and global install see here:
+# https://npmjs.org/doc/cli/npm-install.html
+#
+# @param dir The prefix path to ./node_modules/
+#
+# **Example:**
+#
+# ```cf3
+# vars:
+# "dirs" slist => { "/root/myproject", "/home/somedev/someproject" };
+#
+# packages:
+# "express" package_method => npm("$(dirs)"), package_policy => "add";
+# "redis" package_method => npm("$(dirs)"), package_policy => "delete";
+# "mongoose-amqp-plugin" package_method => npm("$(dirs)"), package_policy => "verify";
+# ```
+{
+ package_changes => "individual";
+
+ # Empty pattern: a missing package produces no matching line in the
+ # verify output, which is treated as "not installed".
+ package_noverify_regex => "";
+
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # Name/version/installed parsing comes from the npm_knowledge bundle.
+ package_list_name_regex => "$(npm_knowledge.npm_list_name_regex)";
+ package_list_version_regex => "$(npm_knowledge.npm_list_version_regex)";
+ package_installed_regex => "$(npm_knowledge.npm_installed_regex)";
+
+ package_name_convention => "$(name)";
+ package_delete_convention => "$(name)";
+
+ # --prefix confines npm to $(dir)/node_modules (local mode); listing
+ # doubles as the verify command.
+ package_list_command => "$(npm_knowledge.call_npm) list --prefix $(dir)";
+ package_verify_command => "$(npm_knowledge.call_npm) list --prefix $(dir)";
+ package_add_command => "$(npm_knowledge.call_npm) install --prefix $(dir)";
+ package_delete_command => "$(npm_knowledge.call_npm) remove --prefix $(dir)";
+ package_update_command => "$(npm_knowledge.call_npm) update --prefix $(dir)";
+}
+
+body package_method npm_g
+# @depends common_knowledge npm_knowledge
+# @brief Node.js `npm' global-mode package management
+#
+# `npm' is a package manager for Node.js
+# https://npmjs.org/package/npm
+#
+# Available commands : add, delete, (add)update, verify
+#
+# For the difference between global and local install see here:
+# https://npmjs.org/doc/cli/npm-install.html
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "express" package_method => npm_g, package_policy => "add";
+# "redis" package_method => npm_g, package_policy => "delete";
+# "mongoose-amqp-plugin" package_method => npm_g, package_policy => "verify";
+# ```
+{
+ package_changes => "individual";
+ package_name_convention => "$(name)";
+ package_delete_convention => "$(name)";
+
+ # A missing package yields no matching verify-output line, so an empty
+ # no-verify pattern means "no line found => not installed".
+ package_noverify_regex => "";
+
+ # Listing and verification both rely on `npm list --global`; parsing
+ # regexes come from the npm_knowledge bundle.
+ package_list_command => "$(npm_knowledge.call_npm) list --global";
+ package_verify_command => "$(npm_knowledge.call_npm) list --global";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+ package_list_name_regex => "$(npm_knowledge.npm_list_name_regex)";
+ package_list_version_regex => "$(npm_knowledge.npm_list_version_regex)";
+ package_installed_regex => "$(npm_knowledge.npm_installed_regex)";
+
+ # All mutating operations run npm in global (--global) mode.
+ package_add_command => "$(npm_knowledge.call_npm) install --global";
+ package_delete_command => "$(npm_knowledge.call_npm) remove --global";
+ package_update_command => "$(npm_knowledge.call_npm) update --global";
+}
+
+body package_method brew(user)
+# @depends common_knowledge darwin_knowledge
+# @brief Darwin/Mac OS X + Homebrew installation method
+#
+# Homebrew is a package manager for OS X -- http://brew.sh
+#
+# Available commands : add, delete, (add)update (with package_version).
+#
+# @param user The user under which to run the commands
+#
+# Homebrew expects a regular (non-root) user to install packages.
+# https://github.com/mxcl/homebrew/wiki/FAQ#why-does-homebrew-say-sudo-is-bad
+# As CFEngine doesn't give the possibility to run package_add_command
+# with a different user, this body uses sudo -u.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => brew("adminuser"), package_policy => "add";
+# "uppackage" package_method => brew("adminuser"), package_policy => "update", package_version => "3.5.2";
+# ```
+{
+
+ package_changes => "bulk";
+ # Every brew invocation is wrapped in `sudo -u $(user)` (see note above).
+ package_add_command => "$(darwin_knowledge.call_sudo) -u $(user) $(darwin_knowledge.call_brew) install";
+ package_delete_command => "$(darwin_knowledge.call_sudo) -u $(user) $(darwin_knowledge.call_brew) uninstall";
+ package_delete_convention => "$(name)";
+ package_name_convention => "$(name)";
+
+ # Homebrew can list only installed packages along with versions.
+ # For a complete list of packages, we could use `brew search`, but there's
+ # no easy way to determine the version or whether it's installed.
+ package_installed_regex => ".*";
+ package_list_command => "$(darwin_knowledge.call_sudo) -u $(user) $(darwin_knowledge.call_brew) list --versions";
+ package_list_name_regex => "$(darwin_knowledge.brew_name_regex)";
+ package_list_version_regex => "$(darwin_knowledge.brew_version_regex)";
+ package_list_update_command => "$(darwin_knowledge.call_sudo) -u $(user) $(darwin_knowledge.call_brew) update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # brew list [package] will print the installed files and return 1 if not found.
+ package_verify_command => "$(darwin_knowledge.call_sudo) -u $(user) $(darwin_knowledge.call_brew) list";
+ package_noverify_returncode => "1";
+
+ # remember to specify the package version
+ package_update_command => "$(darwin_knowledge.call_sudo) -u $(user) $(darwin_knowledge.call_brew) upgrade";
+
+}
+
+body package_method apt
+# @depends common_knowledge debian_knowledge
+# @brief APT installation package method
+#
+# This package method interacts with the APT package manager through `aptitude`.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => apt, package_policy => "add";
+# ```
+{
+ package_changes => "bulk";
+ # dpkg's own database lists installed packages (no network needed).
+ package_list_command => "$(debian_knowledge.call_dpkg) -l";
+ package_list_name_regex => "$(debian_knowledge.list_name_regex)";
+ package_list_version_regex => "$(debian_knowledge.list_version_regex)";
+ package_installed_regex => ".i.*"; # packages that have been uninstalled may be listed
+ package_name_convention => "$(name)";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # make correct version comparisons
+ package_version_less_command => "$(debian_knowledge.dpkg_compare_less)";
+ package_version_equal_command => "$(debian_knowledge.dpkg_compare_equal)";
+
+ # Attributes below are selected by class context: aptitude is used
+ # when present, otherwise we fall back to apt-get/dpkg.
+ have_aptitude::
+ package_add_command => "$(debian_knowledge.call_aptitude) $(debian_knowledge.dpkg_options) --assume-yes install";
+ package_list_update_command => "$(debian_knowledge.call_aptitude) update";
+ package_delete_command => "$(debian_knowledge.call_aptitude) $(debian_knowledge.dpkg_options) --assume-yes -q remove";
+ package_update_command => "$(debian_knowledge.call_aptitude) $(debian_knowledge.dpkg_options) --assume-yes install";
+ package_patch_command => "$(debian_knowledge.call_aptitude) $(debian_knowledge.dpkg_options) --assume-yes install";
+ package_verify_command => "$(debian_knowledge.call_aptitude) show";
+ package_noverify_regex => "(State: not installed|E: Unable to locate package .*)";
+
+ package_patch_list_command => "$(debian_knowledge.call_aptitude) --assume-yes --simulate --verbose full-upgrade";
+ package_patch_name_regex => "$(debian_knowledge.patch_name_regex)";
+ package_patch_version_regex => "$(debian_knowledge.patch_version_regex)";
+
+ !have_aptitude::
+ package_add_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_list_update_command => "$(debian_knowledge.call_apt_get) update";
+ package_delete_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes -q remove";
+ package_update_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_patch_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_verify_command => "$(debian_knowledge.call_dpkg) -s";
+ package_noverify_returncode => "1";
+
+ package_patch_list_command => "$(debian_knowledge.call_apt_get) --just-print dist-upgrade";
+ package_patch_name_regex => "$(debian_knowledge.patch_name_regex)";
+ package_patch_version_regex => "$(debian_knowledge.patch_version_regex)";
+
+}
+
+# Ignore aptitude because:
+# 1) aptitude will remove "unneeded" packages unexpectedly
+# 2) aptitude return codes are useless
+# 3) aptitude is a high level interface
+# 4) aptitude provides little benefit
+# 5) have_aptitude is a hard class and thus cannot be unset
+body package_method apt_get
+# @depends common_knowledge debian_knowledge
+# @brief APT installation package method
+#
+# This package method interacts with the APT package manager through `apt-get`.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => apt_get, package_policy => "add";
+# ```
+{
+ package_changes => "bulk";
+ package_list_command => "$(debian_knowledge.call_dpkg) -l";
+ package_list_name_regex => "$(debian_knowledge.list_name_regex)";
+ package_list_version_regex => "$(debian_knowledge.list_version_regex)";
+ package_installed_regex => ".i.*"; # packages that have been uninstalled may be listed
+ # apt-get accepts "name=version" to request a specific version.
+ package_name_convention => "$(name)=$(version)";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # Plain apt-get operations; --yes keeps runs non-interactive.
+ package_add_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_list_update_command => "$(debian_knowledge.call_apt_get) update";
+ package_delete_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes -q remove";
+ package_update_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_patch_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_verify_command => "$(debian_knowledge.call_dpkg) -s";
+ package_noverify_returncode => "1";
+
+ package_patch_list_command => "$(debian_knowledge.call_apt_get) --just-print dist-upgrade";
+ package_patch_name_regex => "$(debian_knowledge.patch_name_regex)";
+ package_patch_version_regex => "$(debian_knowledge.patch_version_regex)";
+
+ # make correct version comparisons
+ package_version_less_command => "$(debian_knowledge.dpkg_compare_less)";
+ package_version_equal_command => "$(debian_knowledge.dpkg_compare_equal)";
+
+}
+
+# This is a great use case for CFEngine body inheritance.
+
+body package_method apt_get_noupdate
+# @depends debian_knowledge
+# @brief APT installation package method without updates
+#
+# This package method interacts with the APT package manager through
+# `apt-get`. It will never run "apt-get update" but is otherwise
+# exactly like the `apt_get` package method and *may* use the network
+# to install packages, as APT may decide.
+#
+# It doesn't work to use a class.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => apt_get_noupdate, package_policy => "add";
+# ```
+{
+ package_changes => "bulk";
+ package_list_command => "$(debian_knowledge.call_dpkg) -l";
+ package_list_name_regex => "$(debian_knowledge.list_name_regex)";
+ package_list_version_regex => "$(debian_knowledge.list_version_regex)";
+ package_installed_regex => ".i.*"; # packages that have been uninstalled may be listed
+ # apt-get accepts "name=version" to request a specific version.
+ package_name_convention => "$(name)=$(version)";
+
+ # Deliberately no package_list_update_command / ifelapsed here:
+ # this method must never run "apt-get update".
+ package_add_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_delete_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes -q remove";
+ package_update_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_patch_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_verify_command => "$(debian_knowledge.call_dpkg) -s";
+ package_noverify_returncode => "1";
+
+ package_patch_list_command => "$(debian_knowledge.call_apt_get) --just-print dist-upgrade";
+ package_patch_name_regex => "$(debian_knowledge.patch_name_regex)";
+ package_patch_version_regex => "$(debian_knowledge.patch_version_regex)";
+
+ # make correct version comparisons
+ package_version_less_command => "$(debian_knowledge.dpkg_compare_less)";
+ package_version_equal_command => "$(debian_knowledge.dpkg_compare_equal)";
+
+}
+
+body package_method apt_get_release(release)
+# @depends common_knowledge debian_knowledge
+# @brief APT installation package method
+# @param release specific release to use
+#
+# This package method interacts with the APT package manager through `apt-get` but sets a specific target release.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => apt_get_release("xyz"), package_policy => "add";
+# ```
+{
+ package_changes => "bulk";
+ package_list_command => "$(debian_knowledge.call_dpkg) -l";
+ package_list_name_regex => "$(debian_knowledge.list_name_regex)";
+ package_list_version_regex => "$(debian_knowledge.list_version_regex)";
+ package_installed_regex => ".i.*"; # packages that have been uninstalled may be listed
+ package_name_convention => "$(name)";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # Target a specific release, such as backports
+ package_add_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes --target-release $(release) install";
+ package_list_update_command => "$(debian_knowledge.call_apt_get) update";
+ package_delete_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes -q remove";
+ package_update_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes --target-release $(release) install";
+ package_patch_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes --target-release $(release) install";
+ package_verify_command => "$(debian_knowledge.call_dpkg) -s";
+ package_noverify_returncode => "1";
+
+ package_patch_list_command => "$(debian_knowledge.call_apt_get) --just-print dist-upgrade";
+ package_patch_name_regex => "$(debian_knowledge.patch_name_regex)";
+ package_patch_version_regex => "$(debian_knowledge.patch_version_regex)";
+
+ # make correct version comparisons
+ package_version_less_command => "$(debian_knowledge.dpkg_compare_less)";
+ package_version_equal_command => "$(debian_knowledge.dpkg_compare_equal)";
+
+}
+
+##
+
+body package_method dpkg_version(repo)
+# @depends common_knowledge debian_knowledge
+# @brief dpkg installation package method
+# @param repo specific repo to use
+#
+# This package method interacts with `dpkg`.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => dpkg_version("xyz"), package_policy => "add";
+# ```
+{
+ package_changes => "individual";
+ package_list_command => "$(debian_knowledge.call_dpkg) -l";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(debian_knowledge.call_apt_get) update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ package_list_name_regex => "$(debian_knowledge.list_name_regex)";
+ package_list_version_regex => "$(debian_knowledge.list_version_regex)";
+
+ package_installed_regex => ".i.*"; # packages that have been uninstalled may be listed
+
+ # .deb files are fetched from this file repository rather than APT.
+ package_file_repositories => { "$(repo)" };
+
+ # Debian .deb file names embed the architecture; pick the convention
+ # matching the host's hardware class.
+ debian.x86_64::
+ package_name_convention => "$(name)_$(version)_amd64.deb";
+
+ debian.i686::
+ package_name_convention => "$(name)_$(version)_i386.deb";
+
+ # Patch listing uses aptitude when available, apt-get otherwise.
+ have_aptitude::
+ package_patch_list_command => "$(debian_knowledge.call_aptitude) --assume-yes --simulate --verbose full-upgrade";
+ package_patch_name_regex => "$(debian_knowledge.patch_name_regex)";
+ package_patch_version_regex => "$(debian_knowledge.patch_version_regex)";
+
+ !have_aptitude::
+ package_patch_list_command => "$(debian_knowledge.call_apt_get) --just-print dist-upgrade";
+ package_patch_name_regex => "$(debian_knowledge.patch_name_regex)";
+ package_patch_version_regex => "$(debian_knowledge.patch_version_regex)";
+
+ debian::
+ package_add_command => "$(debian_knowledge.call_dpkg) --install";
+ package_delete_command => "$(debian_knowledge.call_dpkg) --purge";
+ package_update_command => "$(debian_knowledge.call_dpkg) --install";
+ package_patch_command => "$(debian_knowledge.call_dpkg) --install";
+
+ # make correct version comparisons
+ package_version_less_command => "$(debian_knowledge.dpkg_compare_less)";
+ package_version_equal_command => "$(debian_knowledge.dpkg_compare_equal)";
+}
+
+##
+
+body package_method rpm_version(repo)
+# @depends common_knowledge redhat_knowledge
+# @brief RPM direct installation method
+# @param repo the specific repository for `package_file_repositories`
+#
+# This package method interacts with the RPM package manager for a specific repo.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => rpm_version("myrepo"), package_policy => "add";
+# ```
+{
+ package_changes => "individual";
+
+ # The queryformat fabricates an "i | repos | ..." prefix so installed
+ # entries match the package_installed_regex below.
+ package_list_command => "$(redhat_knowledge.call_rpm) -qa --queryformat \"i | repos | %{name} | %{version}-%{release} | %{arch}\n\"";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ package_list_name_regex => "$(redhat_knowledge.rpm_name_regex)";
+ package_list_version_regex => "$(redhat_knowledge.rpm_version_regex)";
+ package_list_arch_regex => "$(redhat_knowledge.rpm_arch_regex)";
+
+ package_installed_regex => "i.*";
+
+ # .rpm files are fetched from this file repository rather than yum.
+ package_file_repositories => { "$(repo)" };
+
+ package_name_convention => "$(name)-$(version).$(arch).rpm";
+
+ # rpm acts on the .rpm files directly; --nodeps on delete because
+ # dependency resolution is left to the policy writer.
+ package_add_command => "$(redhat_knowledge.call_rpm) -ivh ";
+ package_update_command => "$(redhat_knowledge.call_rpm) -Uvh ";
+ package_patch_command => "$(redhat_knowledge.call_rpm) -Uvh ";
+ package_delete_command => "$(redhat_knowledge.call_rpm) -e --nodeps";
+ package_verify_command => "$(redhat_knowledge.call_rpm) -V";
+ # rpm -V prints any non-blank line when verification fails.
+ package_noverify_regex => ".*[^\s].*";
+}
+
+##
+
+body package_method windows_feature
+# @brief Method for managing Windows features
+#
+# Uses PowerShell's ServerManager module (Add-/Remove-/Get-WindowsFeature)
+# to manage Windows Server features by name.
+{
+ package_changes => "individual";
+
+ package_name_convention => "$(name)";
+ package_delete_convention => "$(name)";
+
+ # Get-WindowsFeature listing prints one feature name per line; there is
+ # no version information, so the name doubles as the "version".
+ package_installed_regex => ".*";
+ package_list_name_regex => "(.*)";
+ package_list_version_regex => "(.*)"; # FIXME: the listing does not give version, so takes name for version too now
+
+ package_add_command => "$(sys.winsysdir)\\WindowsPowerShell\\v1.0\\powershell.exe -Command \"Import-Module ServerManager; Add-WindowsFeature -Name\"";
+ package_delete_command => "$(sys.winsysdir)\\WindowsPowerShell\\v1.0\\powershell.exe -Command \"Import-Module ServerManager; Remove-WindowsFeature -confirm:$false -Name\"";
+ package_list_command => "$(sys.winsysdir)\\WindowsPowerShell\\v1.0\\powershell.exe -Command \"Import-Module ServerManager; Get-WindowsFeature | where {$_.installed -eq $True} |foreach {$_.Name}\"";
+}
+
+##
+
+body package_method msi_implicit(repo)
+# @brief Windows MSI method
+# @param repo The package file repository
+#
+# Uses the whole file name as promiser, e.g. "7-Zip-4.50-x86_64.msi".
+# The name, version and arch is then deduced from the promiser.
+#
+# **See also:** `msi_explicit()`
+{
+ package_changes => "individual";
+ package_file_repositories => { "$(repo)" };
+
+ package_installed_regex => ".*";
+
+ package_name_convention => "$(name)-$(version)-$(arch).msi";
+ package_delete_convention => "$(firstrepo)$(name)-$(version)-$(arch).msi";
+
+ # Deduce name / version / arch from the "name-version-arch.msi" promiser.
+ package_name_regex => "^(\S+)-(\d+\.?)+";
+ package_version_regex => "^\S+-((\d+\.?)+)";
+ # The dot is escaped so "\.msi" matches the literal extension only,
+ # not any character followed by "msi".
+ package_arch_regex => "^\S+-[\d\.]+-(.*)\.msi";
+
+ # msiexec: /qn = fully quiet, /i = install, /x = uninstall.
+ package_add_command => "\"$(sys.winsysdir)\msiexec.exe\" /qn /i";
+ package_update_command => "\"$(sys.winsysdir)\msiexec.exe\" /qn /i";
+ package_delete_command => "\"$(sys.winsysdir)\msiexec.exe\" /qn /x";
+}
+
+##
+
+body package_method msi_explicit(repo)
+# @brief Windows MSI method
+# @param repo The package file repository
+#
+# Promise the software name itself, e.g. "7-Zip", and supply
+# `package_version` and `package_arch` explicitly on the promise.
+#
+# **See also:** `msi_implicit()`
+{
+ package_changes => "individual";
+ package_installed_regex => ".*";
+
+ # Installer files are fetched from the given repository and are
+ # expected to be named name-version-arch.msi.
+ package_file_repositories => { "$(repo)" };
+ package_name_convention => "$(name)-$(version)-$(arch).msi";
+ package_delete_convention => "$(firstrepo)$(name)-$(version)-$(arch).msi";
+
+ # msiexec: /qn = fully quiet, /i = install, /x = uninstall.
+ package_add_command => "\"$(sys.winsysdir)\msiexec.exe\" /qn /i";
+ package_update_command => "\"$(sys.winsysdir)\msiexec.exe\" /qn /i";
+ package_delete_command => "\"$(sys.winsysdir)\msiexec.exe\" /qn /x";
+}
+
+##
+
+body package_method yum
+# @depends common_knowledge redhat_knowledge
+# @brief Yum+RPM installation method
+#
+# This package method interacts with the Yum and RPM package managers.
+# It is a copy of `yum_rpm()`, which was contributed by Trond Hasle
+# Amundsen. The old `yum` package method has been removed.
+#
+# This is an efficient package method for RPM-based systems - uses `rpm`
+# instead of `yum` to list installed packages.
+#
+# It will use `rpm -e` to remove packages. Please note that if several packages
+# with the same name but varying versions or architectures are installed,
+# `rpm -e` will return an error and not delete any of them.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => yum, package_policy => "add";
+# ```
+{
+ package_changes => "bulk";
+ # rpm lists installed packages in '%{name}.%{arch} %{version}-%{release}'
+ # format, matching the rpm3_* regexes from redhat_knowledge.
+ package_list_command => "$(redhat_knowledge.call_rpm) -qa --qf '%{name}.%{arch} %{version}-%{release}\n'";
+ package_patch_list_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+
+ package_list_name_regex => "$(redhat_knowledge.rpm3_name_regex)";
+ package_list_version_regex => "$(redhat_knowledge.rpm3_version_regex)";
+ package_list_arch_regex => "$(redhat_knowledge.rpm3_arch_regex)";
+
+ package_installed_regex => ".*";
+ package_name_convention => "$(name)-$(version).$(arch)";
+
+ # just give the package name to rpm to delete, otherwise it gets "name.*" (from package_name_convention above)
+ package_delete_convention => "$(name)";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # Parse 'check-update' output: name.arch <whitespace> version.
+ package_patch_name_regex => "([^.]+).*";
+ package_patch_version_regex => "[^\s]\s+([^\s]+).*";
+ package_patch_arch_regex => "[^.]+\.([^\s]+).*";
+
+ package_add_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y install";
+ package_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y update";
+ package_patch_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y update";
+ package_delete_command => "$(redhat_knowledge.call_rpm) -e --nodeps";
+ package_verify_command => "$(redhat_knowledge.call_rpm) -V";
+}
+
+##
+
+body package_method yum_rpm
+# @depends common_knowledge redhat_knowledge
+# @brief Yum+RPM installation method
+#
+# This package method interacts with the Yum and RPM package managers.
+#
+# Contributed by Trond Hasle Amundsen
+#
+# This is an efficient package method for RPM-based systems - uses `rpm`
+# instead of `yum` to list installed packages.
+#
+# It will use `rpm -e` to remove packages. Please note that if several packages
+# with the same name but varying versions or architectures are installed,
+# `rpm -e` will return an error and not delete any of them.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => yum_rpm, package_policy => "add";
+# ```
+{
+ package_changes => "bulk";
+ # rpm lists installed packages in '%{name}.%{arch} %{version}-%{release}'
+ # format, matching the rpm3_* regexes from redhat_knowledge.
+ package_list_command => "$(redhat_knowledge.call_rpm) -qa --qf '%{name}.%{arch} %{version}-%{release}\n'";
+ package_patch_list_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+
+ package_list_name_regex => "$(redhat_knowledge.rpm3_name_regex)";
+ package_list_version_regex => "$(redhat_knowledge.rpm3_version_regex)";
+ package_list_arch_regex => "$(redhat_knowledge.rpm3_arch_regex)";
+
+ package_installed_regex => ".*";
+ package_name_convention => "$(name)-$(version).$(arch)";
+
+ # just give the package name to rpm to delete, otherwise it gets "name.*" (from package_name_convention above)
+ package_delete_convention => "$(name)";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # Parse 'check-update' output: name.arch <whitespace> version.
+ package_patch_name_regex => "([^.]+).*";
+ package_patch_version_regex => "[^\s]\s+([^\s]+).*";
+ package_patch_arch_regex => "[^.]+\.([^\s]+).*";
+
+ package_add_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y install";
+ package_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y update";
+ package_patch_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y update";
+ package_delete_command => "$(redhat_knowledge.call_rpm) -e --nodeps";
+ package_verify_command => "$(redhat_knowledge.call_rpm) -V";
+}
+
+# This is a great use case for CFEngine body inheritance.
+# It doesn't work to use a class.
+
+body package_method yum_rpm_noupdate
+# @depends common_knowledge redhat_knowledge
+# @brief Yum+RPM installation method without updates
+#
+# This package method interacts with the Yum and RPM package managers.
+#
+# Contributed by Trond Hasle Amundsen
+#
+# This is an efficient package method for RPM-based systems - uses `rpm`
+# instead of `yum` to list installed packages.
+#
+# It will never run "yum update" but is otherwise exactly like the
+# `yum_rpm()` package method and *may* use the network to install
+# packages, as Yum may decide.
+#
+# It will use `rpm -e` to remove packages. Please note that if several
+# packages with the same name but varying versions or architectures
+# are installed, `rpm -e` will return an error and not delete any of
+# them.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => yum_rpm_noupdate, package_policy => "add";
+# ```
+{
+ package_changes => "bulk";
+ package_list_command => "$(redhat_knowledge.call_rpm) -qa --qf '%{name}.%{arch} %{version}-%{release}\n'";
+ package_patch_list_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+
+ package_list_name_regex => "$(redhat_knowledge.rpm3_name_regex)";
+ package_list_version_regex => "$(redhat_knowledge.rpm3_version_regex)";
+ package_list_arch_regex => "$(redhat_knowledge.rpm3_arch_regex)";
+
+ package_installed_regex => ".*";
+ package_name_convention => "$(name)-$(version).$(arch)";
+
+ # just give the package name to rpm to delete, otherwise it gets "name.*" (from package_name_convention above)
+ package_delete_convention => "$(name)";
+
+ # Deliberately no package_list_update_command / ifelapsed here:
+ # this method must never run "yum check-update"/"yum update".
+ package_patch_name_regex => "([^.]+).*";
+ package_patch_version_regex => "[^\s]\s+([^\s]+).*";
+ package_patch_arch_regex => "[^.]+\.([^\s]+).*";
+
+ package_add_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y install";
+ package_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y update";
+ package_patch_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y update";
+ package_delete_command => "$(redhat_knowledge.call_rpm) -e --nodeps";
+ package_verify_command => "$(redhat_knowledge.call_rpm) -V";
+}
+
+##
+
+body package_method yum_rpm_enable_repo(repoid)
+# @depends common_knowledge redhat_knowledge
+# @brief Yum+RPM repo-specific installation method
+# @param repoid the repository name as in `yum --enablerepo=???`
+#
+# This package method interacts with the RPM package manager for a specific repo.
+#
+# Based on `yum_rpm()` with addition to enable a repository for the install.
+#
+# Sometimes repositories are configured but disabled by default. For example
+# this package_method could be used when installing a package that exists in
+# the EPEL, which normally you do not want to install packages from.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => yum_rpm_enable_repo("myrepo"), package_policy => "add";
+# ```
+{
+ package_changes => "bulk";
+ # Note: this variant lists in '%{name} %{version}-%{release} %{arch}'
+ # format and so uses the rpm2_* regexes (unlike yum_rpm's rpm3_*).
+ package_list_command => "$(redhat_knowledge.call_rpm) -qa --qf '%{name} %{version}-%{release} %{arch}\n'";
+ package_patch_list_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+
+ package_list_name_regex => "$(redhat_knowledge.rpm2_name_regex)";
+ package_list_version_regex => "$(redhat_knowledge.rpm2_version_regex)";
+ package_list_arch_regex => "$(redhat_knowledge.rpm2_arch_regex)";
+
+ package_installed_regex => ".*";
+ package_name_convention => "$(name)";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # Parse 'check-update' output: name.arch <whitespace> version.
+ package_patch_name_regex => "([^.]+).*";
+ package_patch_version_regex => "[^\s]\s+([^\s]+).*";
+ package_patch_arch_regex => "[^.]+\.([^\s]+).*";
+
+ package_add_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) --enablerepo=$(repoid) -y install";
+ package_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) --enablerepo=$(repoid) -y update";
+ # NOTE(review): the patch command does not pass --enablerepo=$(repoid),
+ # unlike add/update above — confirm whether that is intended.
+ package_patch_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y update";
+ # --allmatches: remove every installed version/arch of the name.
+ package_delete_command => "$(redhat_knowledge.call_rpm) -e --nodeps --allmatches";
+ package_verify_command => "$(redhat_knowledge.call_rpm) -V";
+}
+
+##
+
+body package_method yum_group
+# @depends common_knowledge redhat_knowledge
+# @brief Yum package-group installation method
+#
+# Makes use of the "groups of packages" feature of Yum possible. (`yum
+# groupinstall`, `yum groupremove`)
+#
+# Groups must be specified by their groupids, available through `yum
+# grouplist -v` (between parentheses). For example, below
+# `network-tools` is the groupid.
+#
+# ```console
+# $ yum grouplist -v|grep Networking|head -n 1
+# Networking Tools (network-tools)
+# ```
+#
+# **Example:**
+#
+# ```cf3
+# Policies examples:
+#
+# -Install "web-server" group:
+# ----------------------------
+#
+# packages:
+# "web-server"
+# package_policy => "add",
+# package_method => yum_group;
+#
+# -Remove "debugging" and "php" groups:
+# -------------------------------------
+#
+# vars:
+# "groups" slist => { "debugging", "php" };
+#
+# packages:
+# "$(groups)"
+# package_policy => "delete",
+# package_method => yum_group;
+# ```
+{
+ package_add_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) groupinstall -y";
+ package_changes => "bulk";
+ package_delete_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) groupremove -y";
+ package_delete_convention => "$(name)";
+ package_installed_regex => "^i.*";
+
+ # Generate a dpkg -l like listing, "i" means installed, "a" available, and a dummy version 1
+ package_list_command =>
+ "$(redhat_knowledge.call_yum) grouplist -v|awk '$0 ~ /^Done$/ {next} {sub(/.*\(/, \"\");sub(/\).*/, \"\")} /Available/ {h=\"a\";next} /Installed/ {h=\"i\";next} h==\"i\" || h==\"a\" {print h\" \"$0\" 1\"}'";
+
+ package_list_name_regex => "a|i ([^\s]+) 1";
+ package_list_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+ package_list_version_regex => "(1)";
+ package_name_convention => "$(name)";
+ package_name_regex => "(.*)";
+ package_noverify_returncode => "0";
+ package_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) groupupdate";
+
+ # grep -x to only get full line matching.
+ # Bug fix: the awk program must be closed with a quote BEFORE the pipe to
+ # grep, otherwise the shell sees an unterminated quoted string and
+ # "|grep -qx" becomes part of the awk script. The awk filter prints the
+ # groupids currently installed; grep -qx then succeeds only if the promised
+ # group is among them (cf. the package_list_command above, which quotes its
+ # awk program correctly).
+ package_verify_command => "$(redhat_knowledge.call_yum) grouplist -v|awk '$0 ~ /^Done$/ {next} {sub(/.*\(/, \"\");sub(/\).*/, \"\")} /Available/ {h=\"a\";next} /Installed/ {h=\"i\";next} h==\"i\"'|grep -qx";
+}
+
+##
+
+body package_method rpm_filebased(path)
+# @depends common_knowledge redhat_knowledge
+# @brief install packages from local filesystem-based RPM repository.
+# @param path the path to the local package repository
+#
+# Contributed by Aleksey Tsalolikhin. Written on 29-Feb-2012.
+# Based on `yum_rpm()` body by Trond Hasle Amundsen.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "epel-release"
+# package_policy => "add",
+# package_version => "5-4",
+# package_architectures => { "noarch" },
+# package_method => rpm_filebased("/repo/RPMs");
+# ```
+{
+ package_file_repositories => { "$(path)" };
+ # the above is an addition to Trond's yum_rpm body
+
+ package_add_command => "$(redhat_knowledge.call_rpm) -ihv ";
+ # The above is a change from Trond's yum_rpm body, this makes the commands rpm only.
+ # The reason I changed the install command from yum to rpm is yum will be default
+ # refuse to install the epel-release RPM as it does not have the EPEL GPG key,
+ # but rpm goes ahead and installs the epel-release RPM and the EPEL GPG key.
+
+ package_name_convention => "$(name)-$(version).$(arch).rpm";
+ # The above is a change from Trond's yum_rpm body. When package_file_repositories is in play,
+ # package_name_convention has to match the file name, not the package name, per the
+ # CFEngine 3 Reference Manual
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # The rest is unchanged from Trond's yum_rpm body
+ package_changes => "bulk";
+ package_list_command => "$(redhat_knowledge.call_rpm) -qa --qf '%{name} %{version}-%{release} %{arch}\n'";
+
+ # Parse the "name version-release arch" lines produced by the rpm -qa
+ # query format above, using the shared regexes from redhat_knowledge.
+ package_list_name_regex => "$(redhat_knowledge.rpm2_name_regex)";
+ package_list_version_regex => "$(redhat_knowledge.rpm2_version_regex)";
+ package_list_arch_regex => "$(redhat_knowledge.rpm2_arch_regex)";
+
+ package_installed_regex => ".*";
+
+ # --allmatches removes every installed package matching the name;
+ # rpm -V verifies the installed files against the rpm database.
+ package_delete_command => "$(redhat_knowledge.call_rpm) -e --allmatches";
+ package_verify_command => "$(redhat_knowledge.call_rpm) -V";
+}
+
+##
+
+body package_method ips
+# @depends paths
+# @depends common_knowledge
+# @brief Image Package System method, used by OpenSolaris based systems (Solaris 11, Illumos, etc)
+#
+# A note about Solaris 11.1 versioning format:
+#
+# ```
+# $ pkg list -v --no-refresh zsh
+# FMRI IFO
+# pkg://solaris/shell/zsh@4.3.17,5.11-0.175.1.0.0.24.0:20120904T174236Z i--
+# name--------- |<----->| |/________________________\|
+# version---------------- |\ /|
+# ```
+#
+# Notice that the publisher and timestamp aren't used. And that the package
+# version then must have the commas replaced by underscores.
+#
+# Thus,
+# 4.3.17,5.11-0.175.1.0.0.24.0
+# Becomes:
+# 4.3.17_5.11-0.175.1.0.0.24.0
+#
+# Therefore, a properly formatted package promise looks like this:
+#
+# ```cf3
+# "shell/zsh"
+# package_policy => "addupdate",
+# package_method => ips,
+# package_select => ">=",
+# package_version => "4.3.17_5.11-0.175.1.0.0.24.0";
+# ```
+{
+ package_changes => "bulk";
+ package_list_command => "$(paths.path[pkg]) list -v --no-refresh";
+ # Name = everything between the last publisher "/" and the "@" of the FMRI.
+ package_list_name_regex => "pkg://.+?(?<=/)([^\s]+)@.*$";
+ # Version = everything between "@" and the ":" that starts the timestamp.
+ package_list_version_regex => "[^\s]+@([^\s]+):.*";
+ package_installed_regex => ".*(i..)"; # all reported are installed
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(paths.path[pkg]) refresh --full";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # --accept agrees to any package licenses non-interactively.
+ package_add_command => "$(paths.path[pkg]) install --accept ";
+ package_delete_command => "$(paths.path[pkg]) uninstall";
+ package_update_command => "$(paths.path[pkg]) install --accept";
+ package_patch_command => "$(paths.path[pkg]) install --accept";
+ package_verify_command => "$(paths.path[pkg]) list -a -v --no-refresh";
+ # Not verified if the IFO flags show not-installed ("---") or pkg reports no match.
+ package_noverify_regex => "(.*---|pkg list: no packages matching .* installed)";
+}
+
+##
+
+body package_method smartos
+# @depends common_knowledge
+# @brief pkgin method for SmartOS (illumos-based distribution by Joyent)
+{
+ package_changes => "bulk";
+ # pkgin list prints one "name-version ..." line per installed package;
+ # the two regexes below split name and version on the last "-digit" boundary.
+ package_list_command => "/opt/local/bin/pkgin list";
+ package_list_name_regex => "(.*)\-[0-9]+.*";
+ package_list_version_regex => ".*\-([0-9][^\s]+).*";
+
+ package_installed_regex => ".*"; # all reported are installed
+
+ package_list_update_command => "/opt/local/bin/pkgin -y update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ package_add_command => "/opt/local/bin/pkgin -y install";
+
+ package_delete_command => "/opt/local/bin/pkgin -y remove";
+ # NOTE(review): "pkgin upgrade" acts on the whole system, not a single
+ # package, and lacks -y unlike the commands above — confirm this is intended.
+ package_update_command => "/opt/local/bin/pkgin upgrade";
+}
+
+body package_method opencsw
+# @depends common_knowledge
+# @brief OpenCSW (Solaris software packages) method
+{
+ package_changes => "bulk";
+ # pkgutil -c lists installed CSW packages; names carry the "CSW" prefix,
+ # which the name regex strips, and versions end at the first comma.
+ package_list_command => "/opt/csw/bin/pkgutil -c";
+ package_list_name_regex => "CSW(.*?)\s.*";
+ package_list_version_regex => ".*?\s+(.*),.*";
+
+ package_installed_regex => ".*"; # all reported are installed
+
+ # -U refreshes the catalog; frequency is throttled by ifelapsed below.
+ package_list_update_command => "/opt/csw/bin/pkgutil -U";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # -y answers yes to prompts; i=install, r=remove, u=upgrade.
+ package_add_command => "/opt/csw/bin/pkgutil -yi";
+
+ package_delete_command => "/opt/csw/bin/pkgutil -yr";
+ package_update_command => "/opt/csw/bin/pkgutil -yu";
+}
+
+body package_method solaris(pkgname, spoolfile, adminfile)
+# @depends paths
+# @brief Package method for old Solaris package system
+#
+# @param pkgname Not used
+# @param spoolfile The spool file, located in `/tmp`
+# @param adminfile The admin file, located in `/tmp`
+#
+# The older solaris package system is poorly designed, with too many different
+# names to track. See the example in tests/units/unit_package_solaris.cf
+# to see how to use this.
+{
+ package_changes => "individual";
+ # pkginfo -l emits one multi-line record per package; multiline_start marks
+ # where each record begins (its PKGINST: line) so the regexes below can pull
+ # name, version and arch out of a single record.
+ package_list_command => "$(paths.path[pkginfo]) -l";
+ package_multiline_start => "\s*PKGINST:\s+[^\s]+.*";
+ package_list_name_regex => "\s*PKGINST:\s+([^\s]+).*";
+ package_list_version_regex => "\s*VERSION:\s+([^\s]+).*";
+ package_list_arch_regex => "\s*ARCH:\s+([^\s]+)";
+ package_installed_regex => "\s*STATUS:\s*(completely|partially)\s+installed.*";
+ package_name_convention => "$(name)";
+ # Both the admin file and the spool file must already be staged under /tmp;
+ # -n makes pkgadd/pkgrm non-interactive (see create_solaris_admin_file).
+ package_add_command => "$(paths.path[pkgadd]) -n -a /tmp/$(adminfile) -d /tmp/$(spoolfile)";
+ package_delete_command => "$(paths.path[pkgrm]) -n -a /tmp/$(adminfile)";
+}
+
+##
+
+bundle edit_line create_solaris_admin_file
+# @brief The following bundle is part of a package setup for solaris
+#
+# See unit examples.
+{
+ insert_lines:
+
+ # One multi-line literal, inserted verbatim: the nocheck/quit settings
+ # disable every interactive prompt so pkgadd/pkgrm can run unattended.
+ "mail=
+instance=unique
+partial=nocheck
+runlevel=nocheck
+idepend=nocheck
+rdepend=nocheck
+space=nocheck
+setuid=nocheck
+conflict=nocheck
+action=nocheck
+networktimeout=60
+networkretries=3
+authentication=quit
+keystore=/var/sadm/security
+proxy=
+basedir=default"
+ comment => "Insert contents of Solaris admin file (automatically install packages)";
+}
+
+##
+
+body package_method freebsd
+# @depends common_knowledge
+# @brief FreeBSD pkg_add installation package method
+#
+# This package method interacts with FreeBSD pkg_add to install from remote
+# repositories.
+#
+# **Example:**
+# NOTE: Do not use this method on pkgng systems! It will appear to operate
+# normally but is highly likely to break your package system.
+#
+# This example installs "perl5" from a non-default repository:
+#
+# ```cf3
+# ----------------------------
+#
+# vars:
+# environment => { "PACKAGESITE=http://repo.example.com/private/8_STABLE/" };
+# packages:
+# "perl5"
+# package_policy => "add",
+# package_method => freebsd;
+#
+# ```
+{
+ package_changes => "individual";
+
+ # pkg_info lists installed packages as "name-version" lines.
+ package_list_command => "/usr/sbin/pkg_info";
+
+ # Remember to escape special characters like |
+
+ # Split "name-version" on the last hyphen: name is everything before it,
+ # version everything after.
+ package_list_name_regex => "([^\s]+)-.*";
+ package_list_version_regex => "[^\s]+-([^\s]+).*";
+
+ package_name_regex => "([^\s]+)-.*";
+ package_version_regex => "[^\s]+-([^\s]+).*";
+
+ package_installed_regex => ".*";
+
+ package_name_convention => "$(name)-$(version)";
+
+ # -r fetches from the remote repository (honours PACKAGESITE).
+ package_add_command => "/usr/sbin/pkg_add -r";
+ package_delete_command => "/usr/sbin/pkg_delete";
+}
+
+body package_method freebsd_portmaster
+# @depends common_knowledge
+# @brief FreeBSD portmaster package installation method
+#
+# This package method interacts with portmaster to build and install packages.
+#
+# Note that you must use the complete package name as it appears in
+# /usr/ports/*/name, such as 'perl5.14' rather than 'perl5'.
+# Repositories are hard-coded to /usr/ports; alternate locations are
+# unsupported at this time.
+# This method supports both pkg_* and pkgng systems.
+#
+# **Example:**
+#
+# ```cf3
+#
+# packages:
+# "perl5.14"
+# package_policy => "add",
+# package_method => freebsd_portmaster;
+#
+# ```
+{
+ package_changes => "individual";
+
+ package_list_command => "/usr/sbin/pkg_info";
+
+ package_list_name_regex => "([^\s]+)-.*";
+ package_list_version_regex => "[^\s]+-([^\s]+).*";
+
+ package_installed_regex => ".*";
+
+ package_name_convention => "$(name)";
+ package_delete_convention => "$(name)-$(version)";
+
+ # One entry per standard ports category directory.
+ package_file_repositories => {
+ "/usr/ports/accessibility/",
+ "/usr/ports/arabic/",
+ # Bug fix above: was "/usr/port/arabic/" (missing "s"), so packages in
+ # the arabic category could never be located.
+ "/usr/ports/archivers/",
+ "/usr/ports/astro/",
+ "/usr/ports/audio/",
+ "/usr/ports/benchmarks/",
+ "/usr/ports/biology/",
+ "/usr/ports/cad/",
+ "/usr/ports/chinese/",
+ "/usr/ports/comms/",
+ "/usr/ports/converters/",
+ "/usr/ports/databases/",
+ "/usr/ports/deskutils/",
+ "/usr/ports/devel/",
+ "/usr/ports/dns/",
+ "/usr/ports/editors/",
+ "/usr/ports/emulators/",
+ "/usr/ports/finance/",
+ "/usr/ports/french/",
+ "/usr/ports/ftp/",
+ "/usr/ports/games/",
+ "/usr/ports/german/",
+ "/usr/ports/graphics/",
+ "/usr/ports/hebrew/",
+ "/usr/ports/hungarian/",
+ "/usr/ports/irc/",
+ "/usr/ports/japanese/",
+ "/usr/ports/java/",
+ "/usr/ports/korean/",
+ "/usr/ports/lang/",
+ "/usr/ports/mail/",
+ "/usr/ports/math/",
+ "/usr/ports/mbone/",
+ "/usr/ports/misc/",
+ "/usr/ports/multimedia/",
+ "/usr/ports/net/",
+ "/usr/ports/net-im/",
+ "/usr/ports/net-mgmt/",
+ "/usr/ports/net-p2p/",
+ "/usr/ports/news/",
+ "/usr/ports/packages/",
+ "/usr/ports/palm/",
+ "/usr/ports/polish/",
+ "/usr/ports/ports-mgmt/",
+ "/usr/ports/portuguese/",
+ "/usr/ports/print/",
+ "/usr/ports/russian/",
+ "/usr/ports/science/",
+ "/usr/ports/security/",
+ "/usr/ports/shells/",
+ "/usr/ports/sysutils/",
+ "/usr/ports/textproc/",
+ "/usr/ports/ukrainian/",
+ "/usr/ports/vietnamese/",
+ "/usr/ports/www/",
+ "/usr/ports/x11/",
+ "/usr/ports/x11-clocks/",
+ "/usr/ports/x11-drivers/",
+ "/usr/ports/x11-fm/",
+ "/usr/ports/x11-fonts/",
+ "/usr/ports/x11-servers/",
+ "/usr/ports/x11-themes/",
+ "/usr/ports/x11-toolkits/",
+ "/usr/ports/x11-wm/",
+ };
+
+ # -D: no cleanup of distfiles, -G: skip 'make config', --no-confirm: unattended.
+ package_add_command => "/usr/local/sbin/portmaster -D -G --no-confirm";
+ package_update_command => "/usr/local/sbin/portmaster -D -G --no-confirm";
+ package_delete_command => "/usr/local/sbin/portmaster --no-confirm -e";
+}
+
+##
+
+body package_method alpinelinux
+# @brief Alpine Linux apk package installation method
+#
+# This package method interacts with apk to manage packages.
+#
+# **Example:**
+#
+# ```cf3
+#
+# packages:
+# "vim"
+# package_policy => "add",
+# package_method => alpinelinux;
+#
+# ```
+{
+ package_changes => "bulk";
+ # apk info -v prints one "name-version" line per installed package.
+ package_list_command => "/sbin/apk info -v";
+ package_list_name_regex => "([^\s]+)-.*";
+ package_list_version_regex => "[^\s]+-([^\s]+).*";
+ package_name_regex => ".*";
+ package_installed_regex => ".*";
+ package_name_convention => "$(name)";
+ package_add_command => "/sbin/apk add";
+ package_delete_command => "/sbin/apk del";
+}
+
+##
+
+body package_method emerge
+# @depends common_knowledge
+# @brief Gentoo emerge package installation method
+#
+# This package method interacts with emerge to build and install packages.
+#
+# **Example:**
+#
+# ```cf3
+#
+# packages:
+# "zsh"
+# package_policy => "add",
+# package_method => emerge;
+#
+# ```
+{
+ package_changes => "individual";
+ # List installed packages by reading the portage database directory names
+ # (/var/db/pkg/<category>/<name-version>); cut strips the "/var/db/pkg/" prefix.
+ package_list_command => "/bin/sh -c '/bin/ls -d /var/db/pkg/*/* | cut -c 13-'";
+ package_list_name_regex => ".*/([^\s]+)-\d.*";
+ package_list_version_regex => ".*/[^\s]+-(\d.*)";
+ package_installed_regex => ".*"; # all reported are installed
+ package_name_convention => "$(name)";
+ package_list_update_command => "/bin/true"; # I prefer manual syncing
+ #package_list_update_command => "/usr/bin/emerge --sync"; # if you like automatic
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ package_add_command => "/usr/bin/emerge -q --quiet-build";
+ package_delete_command => "/usr/bin/emerge --depclean";
+ package_update_command => "/usr/bin/emerge --update";
+ package_patch_command => "/usr/bin/emerge --update";
+ # Verification searches the package; not-verified when the search output
+ # matches the regex below.
+ package_verify_command => "/usr/bin/emerge -s";
+ package_noverify_regex => ".*(Not Installed|Applications found : 0).*";
+}
+
+##
+
+body package_method pacman
+# @depends common_knowledge
+# @brief Arch Linux pacman package management method
+{
+ package_changes => "bulk";
+
+ # pacman -Q prints "name version" for every installed package; the same
+ # query doubles as the verify command (noverify matched on its error text).
+ package_list_command => "/usr/bin/pacman -Q";
+ package_verify_command => "/usr/bin/pacman -Q";
+ package_noverify_regex => "error:\b.*\bwas not found";
+
+ # set it to "0" to avoid caching of list during upgrade
+ # NOTE(review): no package_list_update_command is defined, so this
+ # ifelapsed throttle has nothing to apply to — confirm whether a
+ # "pacman -Sy" style refresh was intended here.
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ package_list_name_regex => "(.*)\s+.*";
+ package_list_version_regex => ".*\s+(.*)";
+ package_installed_regex => ".*";
+
+ package_name_convention => "$(name)";
+ # --needed skips reinstallation of packages already up to date.
+ package_add_command => "/usr/bin/pacman -S --noconfirm --noprogressbar --needed";
+ package_delete_command => "/usr/bin/pacman -Rs --noconfirm";
+ package_update_command => "/usr/bin/pacman -S --noconfirm --noprogressbar --needed";
+}
+
+body package_method zypper
+# @depends paths
+# @depends common_knowledge redhat_knowledge
+# @brief SuSE installation method
+#
+# This package method interacts with the SuSE Zypper package manager
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => zypper, package_policy => "add";
+# ```
+{
+ package_changes => "bulk";
+
+ # Query rpm directly, but emit a zypper-style "i | repos | name | version | arch"
+ # table so the shared redhat_knowledge rpm_* regexes below can parse it.
+ package_list_command => "$(paths.path[rpm]) -qa --queryformat \"i | repos | %{name} | %{version}-%{release} | %{arch}\n\"";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(paths.path[zypper]) list-updates";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ package_patch_list_command => "$(paths.path[zypper]) patches";
+ package_installed_regex => "i.*";
+ package_list_name_regex => "$(redhat_knowledge.rpm_name_regex)";
+ package_list_version_regex => "$(redhat_knowledge.rpm_version_regex)";
+ package_list_arch_regex => "$(redhat_knowledge.rpm_arch_regex)";
+
+ # Patch table columns are pipe-separated; pull name and version from
+ # the 2nd and 3rd columns respectively.
+ package_patch_installed_regex => ".*Installed.*|.*Not Applicable.*";
+ package_patch_name_regex => "[^|]+\|\s+([^\s]+).*";
+ package_patch_version_regex => "[^|]+\|[^|]+\|\s+([^\s]+).*";
+
+ package_name_convention => "$(name)";
+ package_add_command => "$(paths.path[zypper]) --non-interactive install";
+ package_delete_command => "$(paths.path[zypper]) --non-interactive remove --force-resolution";
+ package_update_command => "$(paths.path[zypper]) --non-interactive update";
+ package_patch_command => "$(paths.path[zypper]) --non-interactive patch$"; # $ means no args
+ package_verify_command => "$(paths.path[zypper]) --non-interactive verify$";
+}
+
+body package_method generic
+# @depends paths
+# @depends common_knowledge debian_knowledge redhat_knowledge
+# @brief Generic installation package method
+#
+# This package method attempts to handle all platforms.
+#
+# The Redhat section is a verbatim insertion of `yum_rpm()`, which was
+# contributed by Trond Hasle Amundsen.
+#
+# **Example:**
+#
+# ```cf3
+# packages:
+# "mypackage" package_method => generic, package_policy => "add";
+# ```
+{
+ # Each section below is guarded by a hard class, so only the attribute set
+ # matching the running platform takes effect.
+ SuSE::
+ package_changes => "bulk";
+ package_list_command => "$(redhat_knowledge.call_rpm) -qa --queryformat \"i | repos | %{name} | %{version}-%{release} | %{arch}\n\"";
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(paths.path[zypper]) list-updates";
+ package_list_update_ifelapsed => "0";
+ package_patch_list_command => "$(paths.path[zypper]) patches";
+ package_installed_regex => "i.*";
+ package_list_name_regex => "$(redhat_knowledge.rpm_name_regex)";
+ package_list_version_regex => "$(redhat_knowledge.rpm_version_regex)";
+ package_list_arch_regex => "$(redhat_knowledge.rpm_arch_regex)";
+ package_patch_installed_regex => ".*Installed.*|.*Not Applicable.*";
+ package_patch_name_regex => "[^|]+\|\s+([^\s]+).*";
+ package_patch_version_regex => "[^|]+\|[^|]+\|\s+([^\s]+).*";
+ package_name_convention => "$(name)";
+ package_add_command => "$(paths.path[zypper]) --non-interactive install";
+ package_delete_command => "$(paths.path[zypper]) --non-interactive remove --force-resolution";
+ package_update_command => "$(paths.path[zypper]) --non-interactive update";
+ package_patch_command => "$(paths.path[zypper]) --non-interactive patch$"; # $ means no args
+ package_verify_command => "$(paths.path[zypper]) --non-interactive verify$";
+
+ redhat::
+ package_changes => "bulk";
+ package_list_command => "$(redhat_knowledge.call_rpm) -qa --qf '%{name}.%{arch} %{version}-%{release}\n'";
+ package_patch_list_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+
+ package_list_name_regex => "$(redhat_knowledge.rpm3_name_regex)";
+ package_list_version_regex => "$(redhat_knowledge.rpm3_version_regex)";
+ package_list_arch_regex => "$(redhat_knowledge.rpm3_arch_regex)";
+
+ package_installed_regex => ".*";
+ package_name_convention => "$(name)-$(version).$(arch)";
+
+ # just give the package name to rpm to delete, otherwise it gets "name.*" (from package_name_convention above)
+ package_delete_convention => "$(name)";
+
+ # set it to "0" to avoid caching of list during upgrade
+ package_list_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) check-update";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ package_patch_name_regex => "([^.]+).*";
+ package_patch_version_regex => "[^\s]\s+([^\s]+).*";
+ package_patch_arch_regex => "[^.]+\.([^\s]+).*";
+
+ package_add_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y install";
+ package_update_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y update";
+ package_patch_command => "$(redhat_knowledge.call_yum) $(redhat_knowledge.yum_options) -y update";
+ package_delete_command => "$(redhat_knowledge.call_rpm) -e --nodeps";
+ package_verify_command => "$(redhat_knowledge.call_rpm) -V";
+
+ debian::
+ package_changes => "bulk";
+ package_list_command => "$(debian_knowledge.call_dpkg) -l";
+ package_list_name_regex => "$(debian_knowledge.list_name_regex)";
+ package_list_version_regex => "$(debian_knowledge.list_version_regex)";
+ package_installed_regex => ".i.*"; # packages that have been uninstalled may be listed
+ package_name_convention => "$(name)";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ # make correct version comparisons
+ package_version_less_command => "$(debian_knowledge.dpkg_compare_less)";
+ package_version_equal_command => "$(debian_knowledge.dpkg_compare_equal)";
+
+ # On Debian, prefer aptitude when present, otherwise fall back to apt-get.
+ debian.have_aptitude::
+ package_add_command => "$(debian_knowledge.call_aptitude) $(debian_knowledge.dpkg_options) --assume-yes install";
+ package_list_update_command => "$(debian_knowledge.call_aptitude) update";
+ package_delete_command => "$(debian_knowledge.call_aptitude) $(debian_knowledge.dpkg_options) --assume-yes remove";
+ package_update_command => "$(debian_knowledge.call_aptitude) $(debian_knowledge.dpkg_options) --assume-yes install";
+ package_patch_command => "$(debian_knowledge.call_aptitude) $(debian_knowledge.dpkg_options) --assume-yes install";
+ package_verify_command => "$(debian_knowledge.call_aptitude) show";
+ package_noverify_regex => "(State: not installed|E: Unable to locate package .*)";
+
+ package_patch_list_command => "$(debian_knowledge.call_aptitude) --assume-yes --simulate --verbose full-upgrade";
+ package_patch_name_regex => "$(debian_knowledge.patch_name_regex)";
+ package_patch_version_regex => "$(debian_knowledge.patch_version_regex)";
+
+ debian.!have_aptitude::
+ package_add_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_list_update_command => "$(debian_knowledge.call_apt_get) update";
+ package_delete_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes remove";
+ package_update_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_patch_command => "$(debian_knowledge.call_apt_get) $(debian_knowledge.dpkg_options) --yes install";
+ package_verify_command => "$(debian_knowledge.call_dpkg) -s";
+ package_noverify_returncode => "1";
+
+ package_patch_list_command => "$(debian_knowledge.call_apt_get) --just-print dist-upgrade";
+ package_patch_name_regex => "$(debian_knowledge.patch_name_regex)";
+ package_patch_version_regex => "$(debian_knowledge.patch_version_regex)";
+
+ freebsd::
+ package_changes => "individual";
+ package_list_command => "/usr/sbin/pkg_info";
+ package_list_name_regex => "([^\s]+)-.*";
+ package_list_version_regex => "[^\s]+-([^\s]+).*";
+ package_name_regex => "([^\s]+)-.*";
+ package_version_regex => "[^\s]+-([^\s]+).*";
+ package_installed_regex => ".*";
+ package_name_convention => "$(name)-$(version)";
+ package_add_command => "/usr/sbin/pkg_add -r";
+ package_delete_command => "/usr/sbin/pkg_delete";
+
+ alpinelinux::
+ package_changes => "bulk";
+ package_list_command => "/sbin/apk info -v";
+ package_list_name_regex => "([^\s]+)-.*";
+ package_list_version_regex => "[^\s]+-([^\s]+).*";
+ package_name_regex => ".*";
+ package_installed_regex => ".*";
+ package_name_convention => "$(name)";
+ package_add_command => "/sbin/apk add";
+ package_delete_command => "/sbin/apk del";
+
+ gentoo::
+ package_changes => "individual";
+ package_list_command => "/bin/sh -c '/bin/ls -d /var/db/pkg/*/* | cut -c 13-'";
+ package_list_name_regex => ".*/([^\s]+)-\d.*";
+ package_list_version_regex => ".*/[^\s]+-(\d.*)";
+ package_installed_regex => ".*"; # all reported are installed
+ package_name_convention => "$(name)";
+ package_list_update_command => "/bin/true"; # I prefer manual syncing
+ #package_list_update_command => "/usr/bin/emerge --sync"; # if you like automatic
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+
+ package_add_command => "/usr/bin/emerge -q --quiet-build";
+ package_delete_command => "/usr/bin/emerge --depclean";
+ package_update_command => "/usr/bin/emerge --update";
+ package_patch_command => "/usr/bin/emerge --update";
+ package_verify_command => "/usr/bin/emerge -s";
+ package_noverify_regex => ".*(Not Installed|Applications found : 0).*";
+
+ archlinux::
+ package_changes => "bulk";
+ package_list_command => "/usr/bin/pacman -Q";
+ package_verify_command => "/usr/bin/pacman -Q";
+ package_noverify_regex => "error:\b.*\bwas not found";
+ package_list_name_regex => "(.*)\s+.*";
+ package_list_version_regex => ".*\s+(.*)";
+ package_installed_regex => ".*";
+ package_name_convention => "$(name)";
+ package_list_update_ifelapsed => "$(common_knowledge.list_update_ifelapsed)";
+ package_add_command => "/usr/bin/pacman -S --noconfirm --noprogressbar --needed";
+ package_delete_command => "/usr/bin/pacman -Rs --noconfirm";
+ package_update_command => "/usr/bin/pacman -S --noconfirm --noprogressbar --needed";
+}
+
+## Useful bundles ##
+
+bundle agent cfe_package_ensure_absent(package)
+# @depends cfe_package_ensure
+# @brief Ensure package is absent
+# @param package the packages to remove
+#
+# This package method will remove `package`, using
+# `cfe_package_ensure`.
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+# "nozip" usebundle => cfe_package_ensure_absent("zip");
+# ```
+{
+ methods:
+ # Thin wrapper: delegate to the generic bundle with policy "delete".
+ "ensure" usebundle => cfe_package_ensure($(package), "delete");
+}
+
+bundle agent cfe_package_ensure_present(package)
+# @depends cfe_package_ensure
+# @brief Ensure package is present
+# @param package the packages to install
+#
+# This package method will install `package`, using
+# `cfe_package_ensure`.
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+# "pleasezip" usebundle => cfe_package_ensure_present("zip");
+# ```
+{
+ methods:
+ # Thin wrapper: delegate to the generic bundle with policy "add".
+ "ensure" usebundle => cfe_package_ensure($(package), "add");
+}
+
+bundle agent cfe_package_ensure_upgrade(package)
+# @depends cfe_package_ensure
+# @brief Ensure package is present and updated
+# @param package the package to add/update
+#
+# This package method will add or update `package`, using
+# `cfe_package_ensure`.
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+# "upgradezip" usebundle => cfe_package_ensure_upgrade("zip");
+# ```
+{
+ methods:
+ # Thin wrapper: delegate to the generic bundle with policy "addupdate".
+ "ensure" usebundle => cfe_package_ensure($(package), "addupdate");
+}
+
+bundle agent cfe_package_ensure(package_name, desired)
+# @depends apt_get yum_rpm generic
+# @brief Ensure `package_name` has the `desired` state
+# @param package_name the packages to ensure
+# @param desired the desired `package_policy`, add or delete or ...
+#
+# This package method will add or delete `packages` with
+# `package_policy` set to `desired`.
+#
+# On Debian, it will use `apt_get`. On Red Hat, `yum_rpm`.
+# Otherwise, `generic`.
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+# "nozip" usebundle => cfe_package_ensure("zip", "delete");
+# "pleasezip" usebundle => cfe_package_ensure("zip", "add");
+# ```
+{
+
+ packages:
+
+ # Pick the package_method per platform; the three class guards below are
+ # mutually exclusive, so exactly one promise applies.
+ debian::
+
+ "$(package_name)"
+ package_policy => $(desired),
+ package_method => apt_get;
+
+
+ redhat::
+
+ "$(package_name)"
+ package_policy => $(desired),
+ package_method => yum_rpm;
+
+ !debian.!redhat::
+
+ "$(package_name)"
+ package_policy => $(desired),
+ package_method => generic;
+}
+
+bundle agent cfe_package_named_ensure_present(packageorfile, select, package_version, package_arch)
+# @depends cfe_package_ensure_named
+# @brief Ensure package is present
+# @param packageorfile the package or full filename to add
+# @param select the `package_select` method
+# @param package_version the `package_version` desired
+# @param package_arch a string determining the `package_architectures` desired
+#
+# This package method will add `packageorfile` as a package or file,
+# using `cfe_package_ensure_named`.
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+# "addfilezip"
+# usebundle => cfe_package_named_ensure_present("/mydir/zip",
+# "==",
+# "3.0-7",
+# ifelse("debian", "amd64",
+# "x86_64"));
+# ```
+{
+ methods:
+ # Bug fix: the original passed $(package), which is not a parameter of
+ # this bundle (it is named packageorfile), so the wrapped bundle received
+ # an unexpanded variable instead of the promised package/file name.
+ "ensure" usebundle => cfe_package_ensure_named($(packageorfile), "add", $(select), $(package_version), $(package_arch));
+}
+
+bundle agent cfe_package_named_ensure_upgrade(packageorfile, select, package_version, package_arch)
+# @depends cfe_package_ensure_named
+# @brief Ensure package is added or updated
+# @param packageorfile the package or full filename to add or update
+# @param select the `package_select` method
+# @param package_version the `package_version` desired
+# @param package_arch a string determining the `package_architectures` desired
+#
+# This package method will add or update `packageorfile` as a package
+# or file, using `cfe_package_ensure_named`.
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+# "upgradefilezip"
+# usebundle => cfe_package_named_ensure_upgrade("/mydir/zip",
+# "==",
+# "3.0-7",
+# ifelse("debian", "amd64",
+# "x86_64"));
+# "upgradezip"
+# usebundle => cfe_package_named_ensure_upgrade("zip",
+# "==",
+# "3.0-7",
+# ifelse("debian", "amd64",
+# "x86_64"));
+# ```
+{
+ methods:
+ # Thin wrapper: delegate with policy "addupdate", forwarding all selectors.
+ "ensure" usebundle => cfe_package_ensure_named($(packageorfile), "addupdate", $(select), $(package_version), $(package_arch));
+}
+
+bundle agent cfe_package_ensure_named(package_name, desired, select, package_version, package_arch)
+# @depends apt_get yum_rpm generic dpkg_version rpm_version
+# @brief Ensure `package_name` has the `desired` state
+# @param package_name the packages to ensure (can be files)
+# @param desired the desired `package_policy`, add or delete or addupdate
+# @param select the `package_select` method
+# @param package_version the desired `package_version`
+# @param package_arch the desired package architecture
+#
+# This package method will manage `packages` with `package_policy` set
+# to `desired`, using `select`, `package_version`, and `package_arch`.
+#
+# If `package_name` is **not** a file name: on Debian, it will use
+# `apt_get`. On Red Hat, `yum_rpm`. Otherwise, `generic`.
+#
+# If `package_name` **is** a file name, it will use `dpkg_version` or
+# `rpm_version` from the file's directory.
+#
+# **Example:**
+#
+# ```cf3
+# methods:
+# "ensure" usebundle => cfe_package_ensure_named("zsh", "add", "==", "1.2.3", "amd64");
+# "ensure" usebundle => cfe_package_ensure_named("/mydir/package.deb", "add", "==", "9.8.7", "amd64");
+# "ensure" usebundle => cfe_package_ensure_named("tcsh", "delete", ">=", "2.3.4", "x86_64");
+# ```
+{
+ classes:
+ # "filebased" is set when package_name is an existing path on disk,
+ # switching the promises below to the file-based package methods.
+ "filebased" expression => fileexists($(package_name));
+
+ vars:
+ filebased::
+ # Split the path: the basename is promised, the directory becomes the
+ # local repository handed to dpkg_version/rpm_version.
+ "package_basename" string => lastnode($(package_name), "/");
+ "dir" string => dirname($(package_name));
+
+ packages:
+
+ debian.!filebased::
+
+ "$(package_name)"
+ package_policy => $(desired),
+ package_select => $(select),
+ package_version => $(package_version),
+ package_architectures => { $(package_arch) },
+ package_method => apt_get;
+
+
+ debian.filebased::
+
+ "$(package_basename)"
+ package_policy => $(desired),
+ package_select => $(select),
+ package_version => $(package_version),
+ package_architectures => { $(package_arch) },
+ package_method => dpkg_version($(dir));
+
+ redhat.!filebased::
+
+ "$(package_name)"
+ package_policy => $(desired),
+ package_select => $(select),
+ package_version => $(package_version),
+ package_architectures => { $(package_arch) },
+ package_method => yum_rpm;
+
+ redhat.filebased::
+
+ "$(package_basename)"
+ package_policy => $(desired),
+ package_select => $(select),
+ package_version => $(package_version),
+ package_architectures => { $(package_arch) },
+ package_method => rpm_version($(dir));
+
+ # Fallback: non-file promises on other platforms go through "generic";
+ # note select/version/arch are intentionally not forwarded here.
+ !filebased.!debian.!redhat::
+
+ "$(package_name)"
+ package_policy => $(desired),
+ package_method => generic;
+
+ reports:
+ # File-based installs are only implemented for Debian and Red Hat above.
+ (inform_mode||verbose_mode).filebased.!debian.!redhat::
+ "$(this.bundle): sorry, can't do file-based installs on $(sys.os)";
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative, started by CFEngine, promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Paths bundle (used by other bodies)
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+bundle common paths
+# @brief Defines an array `path` with common paths to standard binaries,
+# and classes for defined and existing paths.
+#
+# If the current platform knows that binary XYZ should be present,
+# `_stdlib_has_path_XYZ` is defined. Furthermore, if XYZ is actually present
+# (i.e. the binary exists) in the expected location, `_stdlib_path_exists_XYZ` is
+# defined.
+#
+# **Example:**
+#
+# ```cf3
+# bundle agent repair_newlines(filename)
+# {
+# commands:
+# _stdlib_path_exists_sed::
+# "$(path[sed])"
+# args => "-i 's/^M//' $(filename)";
+# }
+# ```
+{
+ vars:
+
+ #
+ # Common full pathname of commands for OS
+ #
+
+ # Platform-independent defaults; more specific class contexts below may
+ # override individual entries
+ any::
+ "path[getfacl]" string => "/usr/bin/getfacl";
+ "path[git]" string => "/usr/bin/git";
+ "path[npm]" string => "/usr/bin/npm";
+ "path[pip]" string => "/usr/bin/pip";
+
+ linux::
+ "path[lsattr]" string => "/usr/bin/lsattr";
+
+ aix::
+
+ "path[awk]" string => "/usr/bin/awk";
+ "path[bc]" string => "/usr/bin/bc";
+ "path[cat]" string => "/bin/cat";
+ "path[cksum]" string => "/usr/bin/cksum";
+ "path[crontabs]" string => "/var/spool/cron/crontabs";
+ "path[cut]" string => "/usr/bin/cut";
+ "path[dc]" string => "/usr/bin/dc";
+ "path[df]" string => "/usr/bin/df";
+ "path[diff]" string => "/usr/bin/diff";
+ "path[dig]" string => "/usr/bin/dig";
+ "path[echo]" string => "/usr/bin/echo";
+ "path[egrep]" string => "/usr/bin/egrep";
+ "path[find]" string => "/usr/bin/find";
+ "path[grep]" string => "/usr/bin/grep";
+ "path[ls]" string => "/usr/bin/ls";
+ "path[netstat]" string => "/usr/bin/netstat";
+ "path[ping]" string => "/usr/bin/ping";
+ "path[perl]" string => "/usr/bin/perl";
+ "path[printf]" string => "/usr/bin/printf";
+ "path[sed]" string => "/usr/bin/sed";
+ "path[sort]" string => "/usr/bin/sort";
+ "path[tr]" string => "/usr/bin/tr";
+
+ archlinux::
+
+ "path[awk]" string => "/usr/bin/awk";
+ "path[bc]" string => "/usr/bin/bc";
+ "path[cat]" string => "/usr/bin/cat";
+ "path[cksum]" string => "/usr/bin/cksum";
+ "path[crontab]" string => "/usr/bin/crontab";
+ "path[cut]" string => "/usr/bin/cut";
+ "path[dc]" string => "/usr/bin/dc";
+ "path[df]" string => "/usr/bin/df";
+ "path[diff]" string => "/usr/bin/diff";
+ "path[dig]" string => "/usr/bin/dig";
+ "path[dmidecode]" string => "/usr/bin/dmidecode";
+ "path[echo]" string => "/usr/bin/echo";
+ "path[egrep]" string => "/usr/bin/egrep";
+ "path[find]" string => "/usr/bin/find";
+ "path[free]" string => "/usr/bin/free";
+ "path[grep]" string => "/usr/bin/grep";
+ "path[hostname]" string => "/usr/bin/hostname";
+ "path[init]" string => "/usr/bin/init";
+ "path[iptables]" string => "/usr/bin/iptables";
+ "path[iptables_save]" string => "/usr/bin/iptables-save";
+ "path[iptables_restore]" string => "/usr/bin/iptables-restore";
+ "path[ls]" string => "/usr/bin/ls";
+ "path[lsof]" string => "/usr/bin/lsof";
+ "path[netstat]" string => "/usr/bin/netstat";
+ "path[ping]" string => "/usr/bin/ping";
+ "path[perl]" string => "/usr/bin/perl";
+ "path[printf]" string => "/usr/bin/printf";
+ "path[sed]" string => "/usr/bin/sed";
+ "path[sort]" string => "/usr/bin/sort";
+ "path[test]" string => "/usr/bin/test";
+ "path[top]" string => "/usr/bin/top";
+ "path[tr]" string => "/usr/bin/tr";
+ # Arch-specific tools
+ "path[pacman]" string => "/usr/bin/pacman";
+ "path[yaourt]" string => "/usr/bin/yaourt";
+ "path[useradd]" string => "/usr/bin/useradd";
+ "path[groupadd]" string => "/usr/bin/groupadd";
+ "path[ip]" string => "/usr/bin/ip";
+ "path[ifconfig]" string => "/usr/bin/ifconfig";
+ "path[journalctl]" string => "/usr/bin/journalctl";
+ "path[systemctl]" string => "/usr/bin/systemctl";
+ "path[netctl]" string => "/usr/bin/netctl";
+
+ freebsd|netbsd::
+
+ "path[awk]" string => "/usr/bin/awk";
+ "path[bc]" string => "/usr/bin/bc";
+ "path[cat]" string => "/bin/cat";
+ "path[cksum]" string => "/usr/bin/cksum";
+ "path[crontabs]" string => "/var/cron/tabs";
+ "path[cut]" string => "/usr/bin/cut";
+ "path[dc]" string => "/usr/bin/dc";
+ "path[df]" string => "/bin/df";
+ "path[diff]" string => "/usr/bin/diff";
+ "path[dig]" string => "/usr/bin/dig";
+ "path[echo]" string => "/bin/echo";
+ "path[egrep]" string => "/usr/bin/egrep";
+ "path[find]" string => "/usr/bin/find";
+ "path[grep]" string => "/usr/bin/grep";
+ "path[ls]" string => "/bin/ls";
+ "path[netstat]" string => "/usr/bin/netstat";
+ "path[ping]" string => "/usr/bin/ping";
+ "path[perl]" string => "/usr/bin/perl";
+ "path[printf]" string => "/usr/bin/printf";
+ "path[sed]" string => "/usr/bin/sed";
+ "path[sort]" string => "/usr/bin/sort";
+ "path[tr]" string => "/usr/bin/tr";
+ "path[realpath]" string => "/bin/realpath";
+
+ openbsd::
+
+ "path[awk]" string => "/usr/bin/awk";
+ "path[bc]" string => "/usr/bin/bc";
+ "path[cat]" string => "/bin/cat";
+ "path[cksum]" string => "/bin/cksum";
+ "path[crontabs]" string => "/var/cron/tabs";
+ "path[cut]" string => "/usr/bin/cut";
+ "path[dc]" string => "/usr/bin/dc";
+ "path[df]" string => "/bin/df";
+ "path[diff]" string => "/usr/bin/diff";
+ "path[dig]" string => "/usr/sbin/dig";
+ "path[echo]" string => "/bin/echo";
+ "path[egrep]" string => "/usr/bin/egrep";
+ "path[find]" string => "/usr/bin/find";
+ "path[grep]" string => "/usr/bin/grep";
+ "path[ls]" string => "/bin/ls";
+ "path[netstat]" string => "/usr/bin/netstat";
+ "path[ping]" string => "/usr/bin/ping";
+ "path[perl]" string => "/usr/bin/perl";
+ "path[printf]" string => "/usr/bin/printf";
+ "path[sed]" string => "/usr/bin/sed";
+ "path[sort]" string => "/usr/bin/sort";
+ "path[tr]" string => "/usr/bin/tr";
+
+ solaris::
+
+ "path[awk]" string => "/usr/bin/awk";
+ "path[bc]" string => "/usr/bin/bc";
+ "path[cat]" string => "/usr/bin/cat";
+ "path[cksum]" string => "/usr/bin/cksum";
+ "path[crontab]" string => "/usr/bin/crontab";
+ "path[crontabs]" string => "/var/spool/cron/crontabs";
+ "path[curl]" string => "/usr/bin/curl";
+ "path[cut]" string => "/usr/bin/cut";
+ "path[dc]" string => "/usr/bin/dc";
+ "path[df]" string => "/usr/bin/df";
+ "path[diff]" string => "/usr/bin/diff";
+ "path[dig]" string => "/usr/sbin/dig";
+ "path[echo]" string => "/usr/bin/echo";
+ "path[egrep]" string => "/usr/bin/egrep";
+ "path[find]" string => "/usr/bin/find";
+ "path[grep]" string => "/usr/bin/grep";
+ "path[ls]" string => "/usr/bin/ls";
+ "path[netstat]" string => "/usr/bin/netstat";
+ "path[ping]" string => "/usr/bin/ping";
+ "path[perl]" string => "/usr/bin/perl";
+ "path[printf]" string => "/usr/bin/printf";
+ "path[sed]" string => "/usr/bin/sed";
+ "path[sort]" string => "/usr/bin/sort";
+ "path[tr]" string => "/usr/bin/tr";
+ "path[wget]" string => "/usr/bin/wget";
+ # Solaris-specific service, network and packaging tools
+ "path[svcs]" string => "/usr/bin/svcs";
+ "path[svcadm]" string => "/usr/sbin/svcadm";
+ "path[svccfg]" string => "/usr/sbin/svccfg";
+ "path[netadm]" string => "/usr/sbin/netadm";
+ "path[dladm]" string => "/usr/sbin/dladm";
+ "path[ipadm]" string => "/usr/sbin/ipadm";
+ "path[pkg]" string => "/usr/bin/pkg";
+ "path[pkginfo]" string => "/usr/bin/pkginfo";
+ "path[pkgadd]" string => "/usr/sbin/pkgadd";
+ "path[pkgrm]" string => "/usr/sbin/pkgrm";
+ "path[zoneadm]" string => "/usr/sbin/zoneadm";
+ "path[zonecfg]" string => "/usr/sbin/zonecfg";
+
+ redhat::
+
+ "path[awk]" string => "/bin/awk";
+ "path[bc]" string => "/usr/bin/bc";
+ "path[cat]" string => "/bin/cat";
+ "path[cksum]" string => "/usr/bin/cksum";
+ "path[createrepo]" string => "/usr/bin/createrepo";
+ "path[crontab]" string => "/usr/bin/crontab";
+ "path[crontabs]" string => "/var/spool/cron";
+ "path[curl]" string => "/usr/bin/curl";
+ "path[cut]" string => "/bin/cut";
+ "path[dc]" string => "/usr/bin/dc";
+ "path[df]" string => "/bin/df";
+ "path[diff]" string => "/usr/bin/diff";
+ "path[dig]" string => "/usr/bin/dig";
+ "path[domainname]" string => "/bin/domainname";
+ "path[echo]" string => "/bin/echo";
+ "path[egrep]" string => "/bin/egrep";
+ "path[find]" string => "/usr/bin/find";
+ "path[grep]" string => "/bin/grep";
+ "path[hostname]" string => "/bin/hostname";
+ "path[init]" string => "/sbin/init";
+ "path[iptables]" string => "/sbin/iptables";
+ "path[iptables_save]" string => "/sbin/iptables-save";
+ "path[ls]" string => "/bin/ls";
+ "path[lsof]" string => "/usr/sbin/lsof";
+ "path[netstat]" string => "/bin/netstat";
+ "path[nologin]" string => "/sbin/nologin";
+ "path[ping]" string => "/usr/bin/ping";
+ "path[perl]" string => "/usr/bin/perl";
+ "path[printf]" string => "/usr/bin/printf";
+ "path[sed]" string => "/bin/sed";
+ "path[sort]" string => "/bin/sort";
+ "path[test]" string => "/usr/bin/test";
+ "path[tr]" string => "/usr/bin/tr";
+ "path[wget]" string => "/usr/bin/wget";
+ "path[realpath]" string => "/usr/bin/realpath";
+
+ # Red Hat-specific service, user and packaging tools
+ "path[chkconfig]" string => "/sbin/chkconfig";
+ "path[groupadd]" string => "/usr/sbin/groupadd";
+ "path[groupdel]" string => "/usr/sbin/groupdel";
+ "path[ifconfig]" string => "/sbin/ifconfig";
+ "path[ip]" string => "/sbin/ip";
+ "path[rpm]" string => "/bin/rpm";
+ "path[service]" string => "/sbin/service";
+ "path[svc]" string => "/sbin/service";
+ "path[useradd]" string => "/usr/sbin/useradd";
+ "path[userdel]" string => "/usr/sbin/userdel";
+ "path[yum]" string => "/usr/bin/yum";
+
+ darwin::
+ "path[awk]" string => "/usr/bin/awk";
+ "path[bc]" string => "/usr/bin/bc";
+ "path[cat]" string => "/bin/cat";
+ "path[cksum]" string => "/usr/bin/cksum";
+ "path[createrepo]" string => "/usr/bin/createrepo";
+ "path[crontab]" string => "/usr/bin/crontab";
+ "path[crontabs]" string => "/usr/lib/cron/tabs";
+ "path[cut]" string => "/usr/bin/cut";
+ "path[dc]" string => "/usr/bin/dc";
+ "path[df]" string => "/bin/df";
+ "path[diff]" string => "/usr/bin/diff";
+ "path[dig]" string => "/usr/bin/dig";
+ "path[domainname]" string => "/bin/domainname";
+ "path[echo]" string => "/bin/echo";
+ "path[egrep]" string => "/usr/bin/egrep";
+ "path[find]" string => "/usr/bin/find";
+ "path[grep]" string => "/usr/bin/grep";
+ "path[hostname]" string => "/bin/hostname";
+ "path[ls]" string => "/bin/ls";
+ "path[lsof]" string => "/usr/sbin/lsof";
+ "path[netstat]" string => "/usr/sbin/netstat";
+ "path[ping]" string => "/sbin/ping";
+ "path[perl]" string => "/usr/bin/perl";
+ "path[printf]" string => "/usr/bin/printf";
+ "path[sed]" string => "/usr/bin/sed";
+ "path[sort]" string => "/usr/bin/sort";
+ "path[test]" string => "/bin/test";
+ "path[tr]" string => "/usr/bin/tr";
+
+ # macOS-specific tools
+ "path[brew]" string => "/usr/local/bin/brew";
+ "path[sudo]" string => "/usr/bin/sudo";
+
+ debian::
+
+ "path[awk]" string => "/usr/bin/awk";
+ "path[bc]" string => "/usr/bin/bc";
+ "path[cat]" string => "/bin/cat";
+ "path[chkconfig]" string => "/sbin/chkconfig";
+ "path[cksum]" string => "/usr/bin/cksum";
+ "path[createrepo]" string => "/usr/bin/createrepo";
+ "path[crontab]" string => "/usr/bin/crontab";
+ "path[crontabs]" string => "/var/spool/cron/crontabs";
+ "path[curl]" string => "/usr/bin/curl";
+ "path[cut]" string => "/usr/bin/cut";
+ "path[dc]" string => "/usr/bin/dc";
+ "path[df]" string => "/bin/df";
+ "path[diff]" string => "/usr/bin/diff";
+ "path[dig]" string => "/usr/bin/dig";
+ "path[dmidecode]" string => "/usr/sbin/dmidecode";
+ "path[domainname]" string => "/bin/domainname";
+ "path[echo]" string => "/bin/echo";
+ "path[egrep]" string => "/bin/egrep";
+ "path[find]" string => "/usr/bin/find";
+ "path[grep]" string => "/bin/grep";
+ "path[hostname]" string => "/bin/hostname";
+ "path[init]" string => "/sbin/init";
+ "path[iptables]" string => "/sbin/iptables";
+ "path[iptables_save]" string => "/sbin/iptables-save";
+ "path[ls]" string => "/bin/ls";
+ "path[lsof]" string => "/usr/bin/lsof";
+ "path[netstat]" string => "/bin/netstat";
+ "path[nologin]" string => "/usr/sbin/nologin";
+ "path[ping]" string => "/bin/ping";
+ "path[perl]" string => "/usr/bin/perl";
+ "path[printf]" string => "/usr/bin/printf";
+ "path[sed]" string => "/bin/sed";
+ "path[sort]" string => "/usr/bin/sort";
+ "path[test]" string => "/usr/bin/test";
+ "path[tr]" string => "/usr/bin/tr";
+ "path[wget]" string => "/usr/bin/wget";
+ "path[realpath]" string => "/usr/bin/realpath";
+
+ # Debian-specific packaging, service and user tools
+ "path[apt_cache]" string => "/usr/bin/apt-cache";
+ "path[apt_config]" string => "/usr/bin/apt-config";
+ "path[apt_get]" string => "/usr/bin/apt-get";
+ "path[apt_key]" string => "/usr/bin/apt-key";
+ "path[aptitude]" string => "/usr/bin/aptitude";
+ "path[dpkg]" string => "/usr/bin/dpkg";
+ "path[groupadd]" string => "/usr/sbin/groupadd";
+ "path[ifconfig]" string => "/sbin/ifconfig";
+ "path[ip]" string => "/sbin/ip";
+ "path[service]" string => "/usr/sbin/service";
+ "path[svc]" string => "/usr/sbin/service";
+ "path[update_alternatives]" string => "/usr/bin/update-alternatives";
+ "path[update_rc_d]" string => "/usr/sbin/update-rc.d";
+ "path[useradd]" string => "/usr/sbin/useradd";
+
+ archlinux||darwin::
+
+ "path[sysctl]" string => "/usr/bin/sysctl";
+
+ !(archlinux||darwin)::
+
+ "path[sysctl]" string => "/sbin/sysctl";
+
+ !(SuSE||SUSE||suse)::
+ "path[logger]" string => "/usr/bin/logger";
+
+ SuSE||SUSE||suse::
+
+ "path[awk]" string => "/usr/bin/awk";
+ "path[bc]" string => "/usr/bin/bc";
+ "path[cat]" string => "/bin/cat";
+ "path[cksum]" string => "/usr/bin/cksum";
+ "path[createrepo]" string => "/usr/bin/createrepo";
+ "path[crontab]" string => "/usr/bin/crontab";
+ "path[crontabs]" string => "/var/spool/cron/tabs";
+ "path[curl]" string => "/usr/bin/curl";
+ "path[cut]" string => "/usr/bin/cut";
+ "path[dc]" string => "/usr/bin/dc";
+ "path[df]" string => "/bin/df";
+ "path[diff]" string => "/usr/bin/diff";
+ "path[dig]" string => "/usr/bin/dig";
+ "path[dmidecode]" string => "/usr/sbin/dmidecode";
+ "path[domainname]" string => "/bin/domainname";
+ "path[echo]" string => "/bin/echo";
+ "path[egrep]" string => "/usr/bin/egrep";
+ "path[find]" string => "/usr/bin/find";
+ "path[free]" string => "/usr/bin/free";
+ "path[grep]" string => "/usr/bin/grep";
+ "path[hostname]" string => "/bin/hostname";
+ "path[init]" string => "/sbin/init";
+ "path[iptables]" string => "/usr/sbin/iptables";
+ "path[iptables_save]" string => "/usr/sbin/iptables-save";
+ "path[ls]" string => "/bin/ls";
+ "path[lsof]" string => "/usr/bin/lsof";
+ "path[netstat]" string => "/bin/netstat";
+ "path[nologin]" string => "/sbin/nologin";
+ "path[ping]" string => "/bin/ping";
+ "path[perl]" string => "/usr/bin/perl";
+ "path[printf]" string => "/usr/bin/printf";
+ "path[sed]" string => "/bin/sed";
+ "path[sort]" string => "/usr/bin/sort";
+ "path[test]" string => "/usr/bin/test";
+ "path[tr]" string => "/usr/bin/tr";
+ "path[logger]" string => "/bin/logger";
+ "path[wget]" string => "/usr/bin/wget";
+
+ # SUSE-specific service, user and packaging tools
+ "path[chkconfig]" string => "/sbin/chkconfig";
+ "path[groupadd]" string => "/usr/sbin/groupadd";
+ "path[groupdel]" string => "/usr/sbin/groupdel";
+ "path[groupmod]" string => "/usr/sbin/groupmod";
+ "path[ifconfig]" string => "/sbin/ifconfig";
+ "path[ip]" string => "/sbin/ip";
+ "path[rpm]" string => "/bin/rpm";
+ "path[service]" string => "/sbin/service";
+ "path[useradd]" string => "/usr/sbin/useradd";
+ "path[userdel]" string => "/usr/sbin/userdel";
+ "path[usermod]" string => "/usr/sbin/usermod";
+ "path[zypper]" string => "/usr/bin/zypper";
+
+ linux|solaris::
+
+ "path[shadow]" string => "/etc/shadow";
+
+ freebsd|openbsd|netbsd|darwin::
+
+ "path[shadow]" string => "/etc/master.passwd";
+
+ aix::
+
+ "path[shadow]" string => "/etc/security/passwd";
+
+ any::
+ # Flatten path[] so each index is also reachable as $(paths.<name>)
+ "all_paths" slist => getindices("path");
+ "$(all_paths)" string => "$(path[$(all_paths)])";
+
+ classes:
+ "_stdlib_has_path_$(all_paths)"
+ expression => isvariable("$(all_paths)"),
+ comment => "It's useful to know if a given path is defined";
+
+ "_stdlib_path_exists_$(all_paths)"
+ expression => fileexists("$(path[$(all_paths)])"),
+ comment => "It's useful to know if $(all_paths) exists on the filesystem as defined";
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative, started by CFEngine, promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Processes bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+##-------------------------------------------------------
+## process promises
+##-------------------------------------------------------
+
+body process_select exclude_procs(x)
+# @brief Select every process whose command/cmd field does NOT match `x`
+# @param x Regular expression; processes whose command field matches it
+# are left out of the selection
+{
+ process_result => "!command";
+ command => "$(x)";
+}
+
+##
+
+body process_select days_older_than(d)
+# @brief Select all processes that are older than `d` days
+# @param d Days that processes need to be old to be selected
+#
+# NOTE(review): the range irange(ago(0,0,$(d),0,0,0), now) covers the LAST
+# `d` days, so matching "stime" inside it would appear to select processes
+# *younger* than `d` days, contradicting the brief — confirm the intended
+# semantics against the CFEngine process_select reference before relying
+# on this body; callers may depend on the current behavior.
+{
+ stime_range => irange(ago(0,0,"$(d)",0,0,0),now);
+ process_result => "stime";
+}
+
+##
+
+body process_select by_owner(u)
+# @brief Select processes that run as user `u`
+# @param u The username to match
+#
+# The owner list carries both the username and its numeric uid (via
+# getuid), so the selection still works when the process table only
+# shows the uid instead of the name.
+{
+ process_result => "process_owner";
+ process_owner => { "$(u)", canonify(getuid("$(u)")) };
+}
+
+##
+
+body process_count any_count(cl)
+# @brief Raise class `cl` when at least one matching process exists
+# @param cl Class defined whenever the process count leaves the 0..0 range
+{
+ out_of_range_define => { "$(cl)" };
+ match_range => "0,0";
+}
+
+##
+
+body process_count check_range(name,lower,upper)
+# @brief Raise `$(name)_out_of_range` when the number of matching
+# processes falls outside the interval [`lower`, `upper`].
+# @param name Stem of the class that gets defined on violation
+# @param lower Minimum acceptable number of processes
+# @param upper Maximum acceptable number of processes
+{
+ out_of_range_define => { "$(name)_out_of_range" };
+ match_range => irange("$(lower)","$(upper)");
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative, started by CFEngine, promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Services bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+##-------------------------------------------------------
+## service promises
+##-------------------------------------------------------
+
+body service_method bootstart
+# @brief Arrange for the service — and every service it depends on —
+# to be started at boot time
+#
+# **See also:** `service_autostart_policy`, `service_dependence_chain`
+{
+ service_dependence_chain => "start_parent_services";
+ service_autostart_policy => "boot_time";
+ windows::
+ service_type => "windows";
+}
+
+##
+
+body service_method force_deps
+# @brief Start all dependencies when this service starts, and stop all
+# dependent services when this service stops.
+#
+# The service does not get automatically started.
+#
+# **See also:** `service_autostart_policy`, `service_dependence_chain`
+{
+ service_dependence_chain => "all_related";
+ windows::
+ service_type => "windows";
+}
+
+##
+
+bundle agent standard_services(service,state)
+# @brief Standard services bundle, used by CFEngine by default
+# @author CFEngine AS
+# @author Tero Kantonen <prkele@gmail.com>
+# @param service specific service to control
+# @param state desired state for that service
+#
+# This bundle is used by CFEngine if you don't specify a services
+# handler explicitly.
+#
+# It receives the service name and the desired service state, then
+# does what is needed to reach the desired state.
+#
+# **Example:**
+#
+# ```cf3
+# services:
+# "ntp" service_policy => "start";
+# "ssh" service_policy => "stop";
+# ```
+#
+# There's multiple ways you can add new services to this list.
+# Here's few examples:
+#
+# a) The zeroconf mode; If the new service matches these rules,
+# you don't need to add anything to the standard_services:
+#
+# 1. Your init script basename = `$(service)`
+# 2. Your init script argument = `$(state)`
+# 3. Your init script lives in `/etc/init.d/` (for non-*bsd),
+# or `/etc/rc.d/` (for *bsd)
+# 4. Your process regex pattern = `\b$(service)\b`
+# 5. You call the init as `/etc/init.d/<script> <arg>` (for non-*bsd),
+# or `/etc/rc.d/<script> <arg>` (for *bsd)
+#
+# b) If the 1st rule doesn't match, but rest does:
+#
+# Use the `baseinit[$(service)]` array to point towards your
+# init script's basename. For example:
+#
+# ```cf3
+# "baseinit[www]" string => "httpd";
+# ```
+#
+# This would fire up init script `/etc/init.d/httpd`, instead of
+# the default `/etc/init.d/www`. From `/etc/rc.d/` if you're on *bsd system.
+#
+# c) If the 4th rule doesn't match, but rest does:
+#
+# Use the `pattern[$(service)]` array to specify your own
+# regex match. It's advisable to use conservative regex so
+# there's less chance of getting a mismatch.
+#
+# ```cf3
+# "pattern[www]" string => ".*httpd.*";
+# ```
+#
+# Instead of matching the default '\bwww\b', this now matches
+# your given string.
+#
+# d) 5th rule doesn't match:
+#
+# In this case you can specify the init system used.
+# Currently supported: `sysvinitd`, `sysvservice`, `systemd`
+#
+# ```cf3
+# "init[www]" string => "sysvservice";
+# "init[www]" string => "sysvinitd";
+# "init[www]" string => "systemd";
+# ```
+#
+# ^^ The above is not a valid syntax as you can only use one `init[]`
+# per service, but it shows all the currently supported ones.
+#
+# ```cf3
+# "sysvservice" == /(usr/)?sbin/service
+# "sysvinitd" == /etc/init.d/ (non-*bsd) | /etc/rc.d/ (*bsd)
+# "systemd" == /bin/systemctl
+# ```
+#
+# e) 2nd and 3rd rule matches, but rest doesn't:
+#
+# Use a combination of the `pattern[]`, `baseinit[]` and `init[]`,
+# to fill your need.
+#
+# ```cf3
+# "baseinit[www]" string => "httpd";
+# "pattern[www]" string => ".*httpd.*";
+# "init[www]" string => "sysvservice";
+# ```
+#
+# f) As a fallback, if none of the above rules match, you can also
+# define exactly what you need for each `$(state)`.
+#
+# ```cf3
+# "startcommand[rhnsd]" string => "/sbin/service rhnsd start";
+# "restartcommand[rhnsd]" string => "/sbin/service rhnsd restart";
+# "reloadcommand[rhnsd]" string => "/sbin/service rhnsd reload";
+# "stopcommand[rhnsd]" string => "/sbin/service rhnsd stop";
+# "pattern[rhnsd]" string => "rhnsd";
+# ```
+#
+# ---
+#
+# If *any* of the `(re)?(start|load|stop)command` variables are set for
+# your service, they take _priority_ in case there's conflict of intent
+# with other data.
+#
+# Say you'd have the following service definition:
+#
+# ```cf3
+# "startcommand[qwerty]" string => "/sbin/service qwerty start";
+# "stopcommand[qwerty]" string => "/sbin/service qwerty stop";
+# "pattern[qwerty]" string => ".*qwerty.*";
+# "baseinit[qwerty]" string => "asdfgh"
+# "init[qwerty]" string => "systemd";
+# ```
+#
+# There's a conflict of intent now. As the `~command` definitions takes
+# priority, this kind of service config for `qwerty` would execute the
+# following commands:
+#
+# ```
+# start: "/sbin/service qwerty start"
+# stop: "/sbin/service qwerty stop"
+# restart: "/bin/systemctl asdfgh restart"
+# reload: "/bin/systemctl asdfgh reload"
+# ```
+{
+ vars:
+ "all_states" slist => { "start", "restart", "reload", "stop" };
+
+ "inits" slist => { "sysvinitd", "sysvservice", "systemd" },
+ comment => "Currently handled init systems";
+
+ "default[prefix][sysvservice]" string => "$(paths.path[service]) ",
+ comment => "Command for sysv service interactions";
+
+ "default[prefix][systemd]" string => "/bin/systemctl ",
+ comment => "Command for systemd interactions";
+
+ "default[prefix][sysvinitd]" string => ifelse("openbsd", "/etc/rc.d/",
+ "freebsd", "/etc/rc.d/",
+ "netbsd", "/etc/rc.d/",
+ "/etc/init.d/"),
+ comment => "Command prefix for sysv init script interactions";
+
+ "default[cmd][$(inits)]" string => "$(default[prefix][$(inits)])$(service) $(state)",
+ comment => "Default command to control the service";
+
+ "default[pattern]" string => "\b$(service)\b",
+ comment => "Set default pattern for proc matching";
+
+ "default[init]" string => "sysvinitd",
+ comment => "Set the default init system used if one isn't defined";
+
+ "stakeholders[cfengine3]" slist => { "cfengine_in" };
+ "stakeholders[acpid]" slist => { "cpu", "cpu0", "cpu1", "cpu2", "cpu3" };
+ "stakeholders[mongod]" slist => { "mongo_in" };
+ "stakeholders[postfix]" slist => { "smtp_in" };
+ "stakeholders[sendmail]" slist => { "smtp_in" };
+ "stakeholders[www]" slist => { "www_in", "wwws_in", "www_alt_in" };
+ "stakeholders[ssh]" slist => { "ssh_in" };
+ "stakeholders[mysql]" slist => { "mysql_in" };
+ "stakeholders[nfs]" slist => { "nfsd_in" };
+ "stakeholders[syslog]" slist => { "syslog" };
+ "stakeholders[rsyslog]" slist => { "syslog" };
+ "stakeholders[tomcat5]" slist => { "www_alt_in" };
+ "stakeholders[tomcat6]" slist => { "www_alt_in" };
+
+ linux::
+
+ "pattern[acpid]" string => ".*acpid.*";
+ "pattern[cfengine3]" string => ".*cf-execd.*";
+ "pattern[fancontrol]" string => ".*fancontrol.*";
+ "pattern[hddtemp]" string => ".*hddtemp.*";
+ "pattern[irqbalance]" string => ".*irqbalance.*";
+ "pattern[lm-sensor]" string => ".*psensor.*";
+ "pattern[mongod]" string => ".*mongod.*";
+ "pattern[openvpn]" string => ".*openvpn.*";
+ "pattern[postfix]" string => ".*postfix.*";
+ "pattern[rsync]" string => ".*rsync.*";
+ "pattern[rsyslog]" string => ".*rsyslogd.*";
+ "pattern[sendmail]" string => ".*sendmail.*";
+ "pattern[tomcat5]" string => ".*tomcat5.*";
+ "pattern[tomcat6]" string => ".*tomcat6.*";
+ "pattern[varnish]" string => ".*varnish.*";
+ "pattern[wpa_supplicant]" string => ".*wpa_supplicant.*";
+
+ SuSE|suse::
+
+ "baseinit[mysql]" string => "mysqld";
+ "pattern[mysql]" string => ".*mysqld.*";
+
+ "baseinit[www]" string => "apache2";
+ "pattern[www]" string => ".*apache2.*";
+
+ "baseinit[ssh]" string => "sshd";
+ # filter out "sshd: ..." children
+ "pattern[ssh]" string => ".*\Ssshd.*";
+
+ "pattern[ntpd]" string => ".*ntpd.*";
+
+ redhat::
+
+ "pattern[anacron]" string => ".*anacron.*";
+ "pattern[atd]" string => ".*sbin/atd.*";
+ "pattern[auditd]" string => ".*auditd$";
+ "pattern[autofs]" string => ".*automount.*";
+ "pattern[capi]" string => ".*capiinit.*";
+ "pattern[conman]" string => ".*conmand.*";
+ "pattern[cpuspeed]" string => ".*cpuspeed.*";
+ "pattern[crond]" string => ".*crond.*";
+ "pattern[dc_client]" string => ".*dc_client.*";
+ "pattern[dc_server]" string => ".*dc_server.*";
+ "pattern[dnsmasq]" string => ".*dnsmasq.*";
+ "pattern[dund]" string => ".*dund.*";
+ "pattern[gpm]" string => ".*gpm.*";
+ "pattern[haldaemon]" string => ".*hald.*";
+ "pattern[hidd]" string => ".*hidd.*";
+ "pattern[irda]" string => ".*irattach.*";
+ "pattern[iscsid]" string => ".*iscsid.*";
+ "pattern[isdn]" string => ".*isdnlog.*";
+ "pattern[lvm2-monitor]" string => ".*vgchange.*";
+ "pattern[mcstrans]" string => ".*mcstransd.*";
+ "pattern[mdmonitor]" string => ".*mdadm.*";
+ "pattern[mdmpd]" string => ".*mdmpd.*";
+ "pattern[messagebus]" string => ".*dbus-daemon.*";
+ "pattern[microcode_ctl]" string => ".*microcode_ctl.*";
+ "pattern[multipathd]" string => ".*multipathd.*";
+ "pattern[netplugd]" string => ".*netplugd.*";
+ "pattern[NetworkManager]" string => ".*NetworkManager.*";
+ "pattern[nfs]" string => ".*nfsd.*";
+ "pattern[nfslock]" string => ".*rpc.statd.*";
+ "pattern[nscd]" string => ".*nscd.*";
+ "pattern[ntpd]" string => ".*ntpd.*";
+ "pattern[oddjobd]" string => ".*oddjobd.*";
+ "pattern[pand]" string => ".*pand.*";
+ "pattern[pcscd]" string => ".*pcscd.*";
+ "pattern[portmap]" string => ".*portmap.*";
+ "pattern[postgresql]" string => ".*postmaster.*";
+ "pattern[rdisc]" string => ".*rdisc.*";
+ "pattern[readahead_early]" string => ".*readahead.*early.*";
+ "pattern[readahead_later]" string => ".*readahead.*later.*";
+ "pattern[restorecond]" string => ".*restorecond.*";
+ "pattern[rpcgssd]" string => ".*rpc.gssd.*";
+ "pattern[rpcidmapd]" string => ".*rpc.idmapd.*";
+ "pattern[rpcsvcgssd]" string => ".*rpc.svcgssd.*";
+ "pattern[saslauthd]" string => ".*saslauthd.*";
+ "pattern[smartd]" string => ".*smartd.*";
+ "pattern[svnserve]" string => ".*svnserve.*";
+ "pattern[syslog]" string => ".*syslogd.*";
+ "pattern[tcsd]" string => ".*tcsd.*";
+ "pattern[xfs]" string => ".*xfs.*";
+ "pattern[ypbind]" string => ".*ypbind.*";
+ "pattern[yum-updatesd]" string => ".*yum-updatesd.*";
+ "pattern[munin-node]" string => ".*munin-node.*";
+
+ "baseinit[bluetoothd]" string => "bluetooth";
+ "pattern[bluetoothd]" string => ".*hcid.*";
+
+ "baseinit[mysql]" string => "mysqld";
+ "pattern[mysql]" string => ".*mysqld.*";
+
+ "baseinit[www]" string => "httpd";
+ "pattern[www]" string => ".*httpd.*";
+
+ "baseinit[ssh]" string => "sshd";
+ # filter out "sshd: ..." children
+ "pattern[ssh]" string => ".*\Ssshd.*";
+
+ "init[rhnsd]" string => "sysvservice";
+ "pattern[rhnsd]" string => "rhnsd";
+
+ "baseinit[snmpd]" string => "snmpd";
+ "pattern[snmpd]" string => "/usr/sbin/snmpd";
+
+ debian|ubuntu::
+
+ "pattern[atd]" string => "atd.*";
+ "pattern[bluetoothd]" string => ".*bluetoothd.*";
+ "pattern[bootlogd]" string => ".*bootlogd.*";
+ "pattern[crond]" string => ".*cron.*";
+ "pattern[kerneloops]" string => ".*kerneloops.*";
+ "pattern[mysql]" string => ".*mysqld.*";
+ "pattern[NetworkManager]" string => ".*NetworkManager.*";
+ "pattern[ondemand]" string => ".*ondemand.*";
+ "pattern[plymouth]" string => ".*plymouthd.*";
+ "pattern[saned]" string => ".*saned.*";
+ "pattern[udev]" string => ".*udev.*";
+ "pattern[udevmonitor]" string => ".*udevadm.*monitor.*";
+ "pattern[snmpd]" string => "/usr/sbin/snmpd";
+ "pattern[pgbouncer]" string => ".*pgbouncer.*";
+ "pattern[supervisor]" string => ".*supervisord.*";
+ "pattern[munin-node]" string => ".*munin-node.*";
+ "pattern[carbon-cache]" string => ".*carbon-cache.*";
+ "pattern[cassandra]" string => ".*jsvc\.exec.*apache-cassandra\.jar.*";
+ # filter out "sshd: ..." children
+ "pattern[ssh]" string => ".*\Ssshd.*";
+
+ "baseinit[ntpd]" string => "ntp";
+ "pattern[ntpd]" string => ".*ntpd.*";
+
+ "baseinit[postgresql84]" string => "postgresql-8.4";
+ "pattern[postgresql84]" string => ".*postgresql.*";
+
+ "baseinit[postgresql91]" string => "postgresql-9.1";
+ "pattern[postgresql91]" string => ".*postgresql.*";
+
+ "baseinit[www]" string => "apache2";
+ "pattern[www]" string => ".*apache2.*";
+
+ "baseinit[nrpe]" string => "nagios-nrpe-server";
+ "pattern[nrpe]" string => ".*nrpe.*";
+
+ "baseinit[omsa-dataeng]" string => "dataeng";
+ "pattern[omsa-dataeng]" string => ".*dsm_sa_datamgr.*";
+
+ freebsd::
+
+ "pattern[ntpd]" string => ".*ntpd.*";
+
+ "baseinit[ssh]" string => "sshd";
+ "pattern[ssh]" string => "/usr/sbin/sshd.*";
+
+ "baseinit[syslog]" string => "syslogd";
+ "pattern[syslog]" string => "/usr/sbin/syslogd.*";
+
+ "baseinit[crond]" string => "cron";
+ "pattern[crond]" string => "/usr/sbin/cron.*";
+
+ "baseinit[snmpd]" string => "bsnmpd";
+ "pattern[snmpd]" string => "/usr/sbin/bsnmpd.*";
+
+ "pattern[newsyslog]" string => "/usr/sbin/newsyslog.*";
+
+ classes:
+ # Set classes for each possible state after $(all_states)
+ "$(all_states)" expression => strcmp($(all_states), $(state)),
+ comment => "Set a class named after the desired state";
+
+ "$(inits)_set" expression => strcmp("$(init[$(service)])","$(inits)"),
+ comment => "Check if init system is specified";
+ "no_inits_set" not => isvariable("init[$(service)]"),
+ comment => "Check if no init system is specified";
+
+ processes:
+
+ start::
+
+ "$(pattern[$(service)])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service appears in the process table",
+ restart_class => "start_$(service)",
+ ifvarclass => and(isvariable("pattern[$(service)]"));
+
+ "$(default[pattern])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service appears in the process table",
+ restart_class => "start_$(service)",
+ ifvarclass => not(isvariable("pattern[$(service)]"));
+
+ stop::
+
+ "$(pattern[$(service)])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(stopcommand[$(service)])",
+ signals => { "term", "kill"},
+ ifvarclass => and(isvariable("stopcommand[$(service)]"),
+ isvariable("pattern[$(service)]"));
+
+ "$(default[pattern])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(stopcommand[$(service)])",
+ signals => { "term", "kill"},
+ ifvarclass => and(isvariable("stopcommand[$(service)]"),
+ not(isvariable("pattern[$(service)]")));
+
+ "$(pattern[$(service)])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(default[prefix][$(default[init])])$(baseinit[$(service)]) $(state)",
+ signals => { "term", "kill"},
+ ifvarclass => and(not(isvariable("stopcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ isvariable("pattern[$(service)]"),
+ "no_inits_set");
+
+ "$(pattern[$(service)])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(default[prefix][$(inits)])$(baseinit[$(service)]) $(state)",
+ signals => { "term", "kill"},
+ ifvarclass => and(not(isvariable("stopcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ isvariable("pattern[$(service)]"),
+ canonify("$(inits)_set"));
+
+##
+ "$(default[pattern])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(default[prefix][$(default[init])])$(baseinit[$(service)]) $(state)",
+ signals => { "term", "kill"},
+ ifvarclass => and(not(isvariable("stopcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ not(isvariable("pattern[$(service)]")),
+ "no_inits_set");
+
+ "$(default[pattern])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(default[prefix][$(inits)])$(baseinit[$(service)]) $(state)",
+ signals => { "term", "kill"},
+ ifvarclass => and(not(isvariable("stopcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ not(isvariable("pattern[$(service)]")),
+ canonify("$(inits)_set"));
+
+##
+ "$(pattern[$(service)])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(default[cmd][$(default[init])])",
+ signals => { "term", "kill"},
+ ifvarclass => and(not(isvariable("stopcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ isvariable("pattern[$(service)]"),
+ "no_inits_set");
+
+ "$(pattern[$(service)])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(default[cmd][$(inits)])",
+ signals => { "term", "kill"},
+ ifvarclass => and(not(isvariable("stopcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ isvariable("pattern[$(service)]"),
+ canonify("$(inits)_set"));
+
+##
+ "$(default[pattern])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(default[cmd][$(default[init])])",
+ signals => { "term", "kill"},
+ ifvarclass => and(not(isvariable("stopcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ not(isvariable("pattern[$(service)]")),
+ "no_inits_set");
+
+ "$(default[pattern])" -> { "@(stakeholders[$(service)])" }
+
+ comment => "Verify that the service does not appear in the process",
+ process_stop => "$(default[cmd][$(inits)])",
+ signals => { "term", "kill"},
+ ifvarclass => and(not(isvariable("stopcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ not(isvariable("pattern[$(service)]")),
+ canonify("$(inits)_set"));
+
+ commands:
+
+ "$(startcommand[$(service)])" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute command to start the $(service) service",
+ ifvarclass => and(isvariable("startcommand[$(service)]"),
+ canonify("start_$(service)"));
+##
+ "$(default[prefix][$(default[init])])$(baseinit[$(service)]) $(state)" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (baseinit init) command to start the $(service) service",
+ ifvarclass => and(not(isvariable("startcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ canonify("start_$(service)"),
+ "no_inits_set");
+
+ "$(default[prefix][$(inits)])$(baseinit[$(service)]) $(state)" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (baseinit init) command to start the $(service) service",
+ ifvarclass => and(not(isvariable("startcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ canonify("start_$(service)"),
+ canonify("$(inits)_set"));
+##
+ "$(default[cmd][$(default[init])])" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (default) command to start the $(service) service",
+ ifvarclass => and(not(isvariable("startcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ canonify("start_$(service)"),
+ "no_inits_set");
+
+ "$(default[cmd][$(inits)])" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (default) command to start the $(service) service",
+ ifvarclass => and(not(isvariable("startcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ canonify("start_$(service)"),
+ canonify("$(inits)_set"));
+
+ restart::
+ "$(restartcommand[$(service)])" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute command to restart the $(service) service",
+ ifvarclass => and(isvariable("restartcommand[$(service)]"));
+##
+
+ "$(default[prefix][$(default[init])])$(baseinit[$(service)]) $(state)" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (baseinit init) command to restart the $(service) service",
+ ifvarclass => and(not(isvariable("restartcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ "no_inits_set");
+
+ "$(default[prefix][$(inits)])$(baseinit[$(service)]) $(state)" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (baseinit init) command to restart the $(service) service",
+ ifvarclass => and(not(isvariable("restartcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ canonify("$(inits)_set"));
+##
+ "$(default[cmd][$(default[init])])" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (default) command to restart the $(service) service",
+ ifvarclass => and(not(isvariable("restartcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ "no_inits_set");
+
+ "$(default[cmd][$(inits)])" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (default) command to restart the $(service) service",
+ ifvarclass => and(not(isvariable("restartcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ canonify("$(inits)_set"));
+
+ reload::
+ "$(reloadcommand[$(service)])" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute command to reload the $(service) service",
+ ifvarclass => and(isvariable("reloadcommand[$(service)]"));
+##
+ "$(default[prefix][$(default[init])])$(baseinit[$(service)]) $(state)" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (baseinit init) command to reload the $(service) service",
+ ifvarclass => and(not(isvariable("reloadcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ "no_inits_set");
+
+ "$(default[prefix][$(inits)])$(baseinit[$(service)]) $(state)" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (baseinit init) command to reload the $(service) service",
+ ifvarclass => and(not(isvariable("reloadcommand[$(service)]")),
+ isvariable("baseinit[$(service)]"),
+ canonify("$(inits)_set"));
+##
+ "$(default[cmd][$(default[init])])" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (default) command to reload the $(service) service",
+ ifvarclass => and(not(isvariable("reloadcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ "no_inits_set");
+
+ "$(default[cmd][$(inits)])" -> { "@(stakeholders[$(service)])" }
+ comment => "Execute (default) command to reload the $(service) service",
+ ifvarclass => and(not(isvariable("reloadcommand[$(service)]")),
+ not(isvariable("baseinit[$(service)]")),
+ canonify("$(inits)_set"));
+
+ reports:
+ inform_mode::
+ "$(this.bundle): Using init system $(inits)"
+ ifvarclass => and(canonify("$(inits)_set"));
+
+ "$(this.bundle): No init system is set, using $(default[init])"
+ ifvarclass => "no_inits_set";
+
+ "$(this.bundle): The service $(service) needs to be started"
+ ifvarclass => and(canonify("start_$(service)"));
+
+ "$(this.bundle): The service pattern is provided: $(pattern[$(service)])"
+ ifvarclass => and(isvariable("pattern[$(service)]"));
+
+ "$(this.bundle): The default service pattern was used: $(default[pattern])"
+ ifvarclass => not(isvariable("pattern[$(service)]"));
+
+ "$(this.bundle): The stopcommand is provided: $(stopcommand[$(service)])"
+ ifvarclass => and(isvariable("stopcommand[$(service)]"));
+
+ "$(this.bundle): The stopcommand is NOT provided, using default"
+ ifvarclass => not(isvariable("stopcommand[$(service)]"));
+
+ "$(this.bundle): The startcommand is provided: $(startcommand[$(service)])"
+ ifvarclass => and(isvariable("startcommand[$(service)]"));
+
+ "$(this.bundle): The startcommand is NOT provided, using default"
+ ifvarclass => not(isvariable("startcommand[$(service)]"));
+
+ "$(this.bundle): The restartcommand is provided: $(restartcommand[$(service)])"
+ ifvarclass => and(isvariable("restartcommand[$(service)]"));
+
+ "$(this.bundle): The restartcommand is NOT provided, using default"
+ ifvarclass => not(isvariable("restartcommand[$(service)]"));
+
+ "$(this.bundle): The reloadcommand is provided: $(reloadcommand[$(service)])"
+ ifvarclass => and(isvariable("reloadcommand[$(service)]"));
+
+ "$(this.bundle): The reloadcommand is NOT provided, using default"
+ ifvarclass => not(isvariable("reloadcommand[$(service)]"));
+
+ "$(this.bundle): The baseinit is provided: $(baseinit[$(service)])"
+ ifvarclass => and(isvariable("baseinit[$(service)]"));
+
+ "$(this.bundle): The baseinit is NOT provided, using default"
+ ifvarclass => not(isvariable("baseinit[$(service)]"));
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Main COPBL include file
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+bundle common stdlib_common
+# @brief Enumerate every part of the COPBL standard library as a list of
+# absolute file paths. The companion "body file control" in this file consumes
+# the list, so including this one file pulls in the whole library.
+# @ignore
+{
+ vars:
+ # All library parts live in the same directory as this policy file
+ # ($(this.promise_dirname)), so the paths resolve wherever the library
+ # is installed.
+ "inputs" slist => {
+ "$(this.promise_dirname)/paths.cf",
+ "$(this.promise_dirname)/common.cf",
+ "$(this.promise_dirname)/commands.cf",
+ "$(this.promise_dirname)/packages.cf",
+ "$(this.promise_dirname)/files.cf",
+ "$(this.promise_dirname)/services.cf",
+ "$(this.promise_dirname)/processes.cf",
+ "$(this.promise_dirname)/storage.cf",
+ "$(this.promise_dirname)/databases.cf",
+ "$(this.promise_dirname)/users.cf",
+ "$(this.promise_dirname)/monitor.cf",
+ "$(this.promise_dirname)/guest_environments.cf",
+ "$(this.promise_dirname)/bundles.cf",
+ };
+
+ reports:
+ verbose_mode::
+ # Only emitted when cf-agent runs in verbose mode; lists each input added.
+ "$(this.bundle): adding COPBL stdlib inputs='$(inputs)'";
+}
+
+body file control
+# @brief Append the stdlib_common.inputs file list to the agent's inputs,
+# so that every COPBL library file is parsed when this file is included.
+# @ignore
+{
+ inputs => { @(stdlib_common.inputs) };
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Storage bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+##-------------------------------------------------------
+## storage promises
+##-------------------------------------------------------
+
+body volume min_free_space(free)
+# @brief Warn if the storage doesn't have at least `free` free space.
+#
+# A warning is also generated if the storage is smaller than 10K or has
+# fewer than 2 file entries.
+#
+# @param free Absolute or percentage minimum disk space that should be
+# available before warning
+{
+ check_foreign => "false";
+ freespace => "$(free)";
+ sensible_size => "10000";
+ sensible_count => "2";
+}
+
+##
+
+body mount nfs(server,source)
+# @brief Mounts the storage at `source` on `server` via nfs protocol.
+#
+# Also modifies the file system table.
+#
+# @param server Hostname or IP of remote server
+# @param source Path of remote file system to mount
+#
+# **See also:** `nfs_p()`, `unmount()`
+{
+ mount_type => "nfs";
+ mount_source => "$(source)";
+ mount_server => "$(server)";
+ # Persist the mount by recording it in the file system table.
+ edit_fstab => "true";
+}
+
+##
+
+body mount nfs_p(server,source,perm)
+# @brief Mounts the storage via nfs, with `perm` passed as options to mount.
+#
+# Also modifies the file system table.
+#
+# @param server Hostname or IP of remote server
+# @param source Path of remote file system to mount
+# @param perm A list of options that is passed to the mount command
+#
+# **See also:** `nfs`, `unmount()`
+{
+ mount_type => "nfs";
+ mount_source => "$(source)";
+ mount_server => "$(server)";
+ # $(perm) may expand from a list parameter, yielding one option per element.
+ mount_options => {"$(perm)"};
+ # Persist the mount by recording it in the file system table.
+ edit_fstab => "true";
+}
+
+##
+
+body mount unmount
+# @brief Unmounts the nfs storage.
+#
+# Also modifies the file system table.
+#
+# **See also:** `nfs()`, `nfs_p()`
+{
+ # mount_type is still required so the matching fstab entry can be located.
+ mount_type => "nfs";
+ # Remove the entry from the file system table as well as unmounting.
+ edit_fstab => "true";
+ unmount => "true";
+}
--- /dev/null
+############################################################################
+# Copyright (C) CFEngine AS
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License LGPL as published by the
+# Free Software Foundation; version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# To the extent this program is licensed as part of the Enterprise
+# versions of CFEngine, the applicable Commercial Open Source License
+# (COSL) may apply to this file if you as a licensee so wish it. See
+# included file COSL.txt.
+###########################################################################
+#
+# CFEngine Community Open Promise-Body Library
+#
+# This initiative started by CFEngine promotes a
+# standardized set of names and promise specifications
+# for template functionality within CFEngine 3.
+#
+# The aim is to promote an industry standard for
+# naming of configuration patterns, leading to a
+# de facto middleware of standardized syntax.
+#
+# Names should be intuitive and parameters should be
+# minimal to assist readability and comprehensibility.
+
+# Contributions to this file are voluntarily given to
+# the cfengine community, and are moderated by CFEngine.
+# No liability or warranty for misuse is implied.
+#
+# If you add to this file, please try to make the
+# contributions "self-documenting". Comments made
+# after the bundle/body statement are retained in
+# the online docs
+#
+
+# For CFEngine Core: 3.6.0 to 3.6.x
+# Users bodies
+
+###################################################
+# If you find CFEngine useful, please consider #
+# purchasing a commercial version of the software.#
+###################################################
+
+body password plaintext_password(text)
+# @brief Sets the plaintext password for the user to `text`
+# @param text the plain text version of the password
+#
+# **Note:** Don't use this unless you really have no choice
+#
+# **See also:** `hashed_password()`
+{
+ format => "plaintext";
+ # Scalar rvalues must be quoted strings in CFEngine; a bare $(text)
+ # does not parse, and variable expansion happens inside the quotes.
+ data => "$(text)";
+}
+
+body password hashed_password(hash)
+# @brief Sets the hashed password for the user to `hash`
+# @param hash the hashed representation of the password
+#
+# The hashing method is up to the platform.
+#
+# **See also:** `plaintext_password()`
+{
+ format => "hash";
+ # Scalar rvalues must be quoted strings in CFEngine; a bare $(hash)
+ # does not parse, and variable expansion happens inside the quotes.
+ data => "$(hash)";
+}
--- /dev/null
+#!/opt/cfengine/bin/cf-agent --inform
+
+bundle common g
+# @brief Global settings shared by every bundle in this policy set.
+{
+ vars:
+ # Root directory of this promise tree; body common control builds its
+ # input paths from it.
+ "workdir" string => "/opt/enigmabox/cfengine-promises";
+ # Second readfile() argument is the maximum number of bytes to read.
+ # The profile name selects which network/radvd templates are rendered.
+ "network_profile" string => readfile("/etc/enigmabox/network-profile" , "33");
+ # Path of the site description consumed by the app bundles.
+ "site" string => "/box/.cf-site.json";
+}
+
+
+
+body common control
+# @brief Agent entry point: parse each per-app policy file and run its bundle.
+# NOTE: inputs and bundlesequence must be kept in lockstep — enabling a
+# commented-out entry (e.g. app_database) requires uncommenting it in BOTH
+# lists.
+{
+ inputs => {
+ "$(g.workdir)/lib/files.cf",
+ "$(g.workdir)/system_network/bundle.cf",
+ "$(g.workdir)/app_cjdns/bundle.cf",
+ "$(g.workdir)/app_telephony/bundle.cf",
+ "$(g.workdir)/app_email/bundle.cf",
+ "$(g.workdir)/app_webfilter/bundle.cf",
+ "$(g.workdir)/app_security/bundle.cf",
+# "$(g.workdir)/app_database/bundle.cf",
+ };
+
+ # Bundles run in this order on every agent pass.
+ bundlesequence => {
+ "system_network",
+ "app_cjdns",
+ "app_telephony",
+ "app_email",
+ "app_webfilter",
+ "app_security",
+# "app_database",
+ };
+}
+
+body perms script
+# @brief Executable permissions (rwxr-xr-x) for installed scripts.
+{
+ mode => "755";
+}
+
+body perms file
+# @brief Standard non-executable permissions (rw-r--r--) for config files.
+{
+ mode => "644";
+}
+
+body copy_from local_copy(from)
+# @brief Copy the local file `from`, comparing source and destination by
+# content hash and keeping no backup of the replaced destination.
+# @param from Absolute path of the local source file
+{
+ source => "$(from)";
+ compare => "hash";
+ # Option-valued attributes take quoted strings in CFEngine; a bare
+ # `false` word does not parse.
+ copy_backup => "false";
+}
+
--- /dev/null
+
+bundle agent system_base
+# @brief Baseline system state: keep cron running, render the core /etc
+# configuration files from templates shipped next to this policy, and
+# restart the affected service whenever its file was repaired.
+{
+ services:
+ # Ensure the cron daemon is running.
+ "cron"
+ service_policy => "start";
+
+ files:
+ # Files below are rendered from $(this.promise_dirname)/templates/.
+ # Where needed, if_repaired() raises a restart_* class that the
+ # commands: section consumes to restart the matching service.
+ "/etc/lighttpd/lighttpd.conf"
+ edit_template => "$(this.promise_dirname)/templates/lighttpd.conf",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_lighttpd");
+
+ # network and radvd templates are selected per network profile
+ # (g.network_profile, read in bundle common g).
+ "/etc/config/network"
+ edit_template => "$(this.promise_dirname)/templates/config-network.$(g.network_profile)",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_network");
+
+ "/etc/config/radvd"
+ edit_template => "$(this.promise_dirname)/templates/config-radvd.$(g.network_profile)",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_radvd");
+
+ "/etc/config/system"
+ edit_template => "$(this.promise_dirname)/templates/config-system",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_system");
+
+ # The remaining files need no service restart when repaired.
+ "/etc/sysupgrade.conf"
+ edit_template => "$(this.promise_dirname)/templates/sysupgrade.conf",
+ edit_defaults => no_backup;
+
+ "/etc/banner"
+ edit_template => "$(this.promise_dirname)/templates/banner",
+ edit_defaults => no_backup;
+
+ "/etc/inittab"
+ edit_template => "$(this.promise_dirname)/templates/inittab",
+ edit_defaults => no_backup;
+
+ "/etc/crontabs/root"
+ create => "true",
+ edit_template => "$(this.promise_dirname)/templates/crontab",
+ edit_defaults => no_backup;
+
+ # Trailing "/." makes this a directory promise.
+ "/usr/libexec/."
+ create => "true";
+
+ # Symlink the expected sftp-server path to the installed gesftpserver.
+ "/usr/libexec/sftp-server"
+ link_from => ln_s("/usr/bin/gesftpserver");
+
+ commands:
+ # Restart handlers, triggered by the classes raised above.
+ restart_lighttpd::
+ "/etc/init.d/lighttpd restart";
+
+ restart_network::
+ "/etc/init.d/network restart";
+
+ restart_radvd::
+ "/etc/init.d/radvd restart";
+
+ restart_system::
+ "/etc/init.d/system restart";
+}
+
--- /dev/null
+ _____ _ _
+| ___| (_) | |
+| |__ _ __ _ __ _ _ __ ___ __ _| |__ _____ __
+| __| '_ \| |/ _` | '_ ` _ \ / _` | '_ \ / _ \ \/ /
+| |__| | | | | (_| | | | | | | (_| | |_) | (_) > <
+\____/_| |_|_|\__, |_| |_| |_|\__,_|_.__/ \___/_/\_\
+ __/ |
+ |___/
+
+Type "hello" for a brief introduction
+
--- /dev/null
+
+config interface 'loopback'
+ option ifname 'lo'
+ option proto 'static'
+ option ipaddr '127.0.0.1'
+ option netmask '255.0.0.0'
+
+config interface 'eth0'
+ option ifname 'eth0'
+ option proto 'dhcp'
+
+config interface 'eth1'
+ option ifname 'eth1'
+ option proto 'static'
+ option ipaddr '192.168.100.1'
+ option netmask '255.255.255.0'
+ option ip6addr 'fdfc::1/64'
+
+config interface 'eth2'
+ option ifname 'eth2'
+ option proto 'static'
+ option ipaddr '192.168.101.1'
+ option netmask '255.255.255.0'
+ option ip6addr 'fdfc::2/64'
+
+config globals 'globals'
+ option ula_prefix 'fd9a:6a77:bce8::/48'
+
--- /dev/null
+
+config interface 'loopback'
+ option ifname 'lo'
+ option proto 'static'
+ option ipaddr '127.0.0.1'
+ option netmask '255.0.0.0'
+
+config interface 'eth0'
+ option ifname 'eth0'
+ option proto 'static'
+ option ipaddr '192.168.101.1'
+ option netmask '255.255.255.0'
+ option ip6addr 'fdfc::2/64'
+
+config interface 'eth1'
+ option ifname 'eth1'
+ option proto 'static'
+ option ipaddr '192.168.100.1'
+ option netmask '255.255.255.0'
+ option ip6addr 'fdfc::1/64'
+
+config interface 'eth2'
+ option ifname 'eth0'
+ option proto 'dhcp'
+
+config globals 'globals'
+ option ula_prefix 'fd9a:6a77:bce8::/48'
+
--- /dev/null
+
+config interface 'loopback'
+ option ifname 'lo'
+ option proto 'static'
+ option ipaddr '127.0.0.1'
+ option netmask '255.0.0.0'
+
+config interface 'eth0'
+ option ifname 'eth0'
+ option proto 'static'
+ option ipaddr '192.168.100.1'
+ option netmask '255.255.255.0'
+
+config globals 'globals'
+ option ula_prefix 'fd9a:6a77:bce8::/48'
+
--- /dev/null
+config interface
+ option interface 'eth1'
+ option AdvSendAdvert 1
+ option AdvRetransTimer 5000
+ option MinRtrAdvInterval 3
+ option MaxRtrAdvInterval 10
+
+config prefix
+ option interface 'eth1'
+ option prefix 'fdfc::1/64'
+ option AdvRouterAddr 1
+
+config interface
+ option interface 'eth2'
+ option AdvSendAdvert 1
+ option AdvRetransTimer 5000
+ option MinRtrAdvInterval 3
+ option MaxRtrAdvInterval 10
+
+config prefix
+ option interface 'eth2'
+ option prefix 'fdfc::2/64'
+ option AdvRouterAddr 1
+
--- /dev/null
+config interface
+ option interface 'eth1'
+ option AdvSendAdvert 1
+ option AdvRetransTimer 5000
+ option MinRtrAdvInterval 3
+ option MaxRtrAdvInterval 10
+
+config prefix
+ option interface 'eth1'
+ option prefix 'fdfc::1/64'
+ option AdvRouterAddr 1
+
+config interface
+ option interface 'eth0'
+ option AdvSendAdvert 1
+ option AdvRetransTimer 5000
+ option MinRtrAdvInterval 3
+ option MaxRtrAdvInterval 10
+
+config prefix
+ option interface 'eth0'
+ option prefix 'fdfc::2/64'
+ option AdvRouterAddr 1
+
--- /dev/null
+config interface
+ option interface 'eth0'
+ option AdvSendAdvert 1
+ option AdvRetransTimer 5000
+ option MinRtrAdvInterval 3
+ option MaxRtrAdvInterval 10
+
+config prefix
+ option interface 'eth0'
+ option prefix 'fdfc::1/64'
+ option AdvRouterAddr 1
+
--- /dev/null
+
+config system
+ option hostname box
+ option timezone UTC
+
+config timeserver ntp
+ list server 0.openwrt.pool.ntp.org
+ list server 1.openwrt.pool.ntp.org
+ list server 2.openwrt.pool.ntp.org
+ list server 3.openwrt.pool.ntp.org
+ option enable_server 0
+
--- /dev/null
+
+# cjdns networking
+* * * * * /usr/sbin/setup-cjdns-networking > /dev/null 2>&1
+
+# subscriber stuff
+0 */2 * * * /usr/sbin/subscriber-stuff sleep > /dev/null 2>&1
+
--- /dev/null
+::sysinit:/etc/init.d/rcS S boot
+::shutdown:/etc/init.d/rcS K shutdown
--- /dev/null
+# lighttpd configuration file
+#
+## modules to load
+# all other modules should only be loaded if really necessary
+# - saves some time
+# - saves memory
+server.modules = (
+ "mod_rewrite",
+# "mod_redirect",
+# "mod_alias",
+ "mod_auth",
+# "mod_status",
+ "mod_setenv",
+ "mod_fastcgi",
+ "mod_proxy",
+# "mod_simple_vhost",
+# "mod_cgi",
+# "mod_ssi",
+# "mod_usertrack",
+# "mod_expire",
+# "mod_webdav"
+)
+
+# force use of the "write" backend (closes: #2401)
+server.network-backend = "write"
+
+## a static document-root, for virtual-hosting take look at the
+## server.virtual-* options
+server.document-root = "/www/"
+
+## where to send error-messages to
+server.errorlog = "/var/log/lighttpd/error.log"
+
+## files to check for if .../ is requested
+index-file.names = ( "index.php", "index.html",
+ "index.htm", "default.htm",
+ "index.lighttpd.html" )
+
+## mimetype mapping
+mimetype.assign = (
+ ".pdf" => "application/pdf",
+ ".class" => "application/octet-stream",
+ ".pac" => "application/x-ns-proxy-autoconfig",
+ ".swf" => "application/x-shockwave-flash",
+ ".wav" => "audio/x-wav",
+ ".gif" => "image/gif",
+ ".jpg" => "image/jpeg",
+ ".jpeg" => "image/jpeg",
+ ".png" => "image/png",
+ ".svg" => "image/svg+xml",
+ ".css" => "text/css",
+ ".html" => "text/html",
+ ".htm" => "text/html",
+ ".js" => "text/javascript",
+ ".txt" => "text/plain",
+ ".dtd" => "text/xml",
+ ".xml" => "text/xml"
+ )
+
+## Use the "Content-Type" extended attribute to obtain mime type if possible
+#mimetypes.use-xattr = "enable"
+
+## send a different Server: header
+## be nice and keep it at lighttpd
+#server.tag = "lighttpd"
+
+$HTTP["url"] =~ "\.pdf$" {
+ server.range-requests = "disable"
+}
+
+##
+# which extensions should not be handle via static-file transfer
+#
+# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
+static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
+
+######### Options that are good to be but not necessary to be changed #######
+
+## bind to port (default: 80)
+#server.port = 81
+
+## bind to localhost (default: all interfaces)
+#server.bind = "localhost"
+
+## error-handler for status 404
+#server.error-handler-404 = "/error-handler.html"
+#server.error-handler-404 = "/error-handler.php"
+
+## to help the rc.scripts
+server.pid-file = "/var/run/lighttpd.pid"
+
+
+###### virtual hosts
+##
+## If you want name-based virtual hosting add the next three settings and load
+## mod_simple_vhost
+##
+## document-root =
+## virtual-server-root + virtual-server-default-host + virtual-server-docroot or
+## virtual-server-root + http-host + virtual-server-docroot
+##
+#simple-vhost.server-root = "/home/weigon/wwwroot/servers/"
+#simple-vhost.default-host = "grisu.home.kneschke.de"
+#simple-vhost.document-root = "/pages/"
+
+
+##
+## Format: <errorfile-prefix><status>.html
+## -> ..../status-404.html for 'File not found'
+#server.errorfile-prefix = "/www/error-"
+
+## virtual directory listings
+#server.dir-listing = "enable"
+
+## send unhandled HTTP-header headers to error-log
+#debug.dump-unknown-headers = "enable"
+
+### only root can use these options
+#
+# chroot() to directory (default: no chroot() )
+#server.chroot = "/"
+
+## change uid to <uid> (default: don't care)
+#server.username = "nobody"
+#
+server.upload-dirs = ( "/tmp" )
+
+## change uid to <uid> (default: don't care)
+#server.groupname = "nobody"
+
+#### compress module
+#compress.cache-dir = "/dev/null/"
+#compress.filetype = ("text/plain", "text/html")
+
+#### proxy module
+## read proxy.txt for more info
+#proxy.server = (
+# ".php" => (
+# "localhost" => (
+# "host" => "192.168.0.101",
+# "port" => 80
+# )
+# )
+#)
+
+#### fastcgi module
+## read fastcgi.txt for more info
+#fastcgi.server = (
+# ".php" => (
+# "localhost" => (
+# "socket" => "/tmp/php-fastcgi.socket",
+# "bin-path" => "/usr/local/bin/php"
+# )
+# )
+#)
+
+#### CGI module
+#cgi.assign = ( ".pl" => "/usr/bin/perl", ".cgi" => "/usr/bin/perl" )
+
+#### SSL engine
+#ssl.engine = "enable"
+#ssl.pemfile = "server.pem"
+
+#### status module
+#status.status-url = "/server-status"
+#status.config-url = "/server-config"
+
+#### auth module
+## read authentification.txt for more info
+#auth.backend = "plain"
+#auth.backend.plain.userfile = "lighttpd.user"
+#auth.backend.plain.groupfile = "lighttpd.group"
+#auth.require = (
+# "/server-status" => (
+# "method" => "digest",
+# "realm" => "download archiv",
+# "require" => "group=www|user=jan|host=192.168.2.10"
+# ),
+# "/server-info" => (
+# "method" => "digest",
+# "realm" => "download archiv",
+# "require" => "group=www|user=jan|host=192.168.2.10"
+# )
+#)
+
+#### url handling modules (rewrite, redirect, access)
+#url.rewrite = ( "^/$" => "/server-status" )
+#url.redirect = ( "^/wishlist/(.+)" => "http://www.123.org/$1" )
+
+#### both rewrite/redirect support back reference to regex conditional using %n
+#$HTTP["host"] =~ "^www\.(.*)" {
+# url.redirect = ( "^/(.*)" => "http://%1/$1" )
+#}
+
+#### expire module
+#expire.url = ( "/buggy/" => "access 2 hours", "/asdhas/" => "access plus 1 seconds 2 minutes")
+
+#### ssi
+#ssi.extension = ( ".shtml" )
+
+#### setenv
+#setenv.add-request-header = ( "TRAV_ENV" => "mysql://user@host/db" )
+#setenv.add-response-header = ( "X-Secret-Message" => "42" )
+
+#### variable usage:
+## variable name without "." is auto prefixed by "var." and becomes "var.bar"
+#bar = 1
+#var.mystring = "foo"
+
+## integer add
+#bar += 1
+## string concat, with integer cast as string, result: "www.foo1.com"
+#server.name = "www." + mystring + var.bar + ".com"
+## array merge
+#index-file.names = (foo + ".php") + index-file.names
+#index-file.names += (foo + ".php")
+
+#### include
+#include /etc/lighttpd/lighttpd-inc.conf
+## same as above if you run: "lighttpd -f /etc/lighttpd/lighttpd.conf"
+#include "lighttpd-inc.conf"
+
+#### include_shell
+#include_shell "echo var.a=1"
+## the above is same as:
+#var.a=1
+
+#### webdav
+#$HTTP["url"] =~ "^/webdav($|/)" {
+# webdav.activate = "enable"
+# webdav.is-readonly = "enable"
+# webdav.sqlite-db-name = "/var/run/lighttpd-webdav-lock.db"
+#}
+
+include_shell "cat /etc/lighttpd/sites.d/*.conf"
+
--- /dev/null
+## This file contains files and directories that should
+## be preserved during an upgrade.
+
+# /etc/example.conf
+# /etc/openvpn/
+
+/etc/enigmabox/addressbook.db
+/etc/enigmabox/opkg.conf
+/etc/enigmabox/opkg-testing.conf
+/box/settings.sqlite
+/box/teletext.db
+/box/server.json
+/box/ssl/
+/box/cjdroute.conf
+
--- /dev/null
+
+bundle agent system_network
+{
+ vars:
+ "json"
+ data => readjson("$(g.site)", 64000);
+
+ classes:
+ "missioncontrol"
+ expression => regcmp("true", "$(json[if_missioncontrol])");
+
+ files:
+ "/etc/hosts"
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/hosts.mustache",
+ edit_defaults => no_backup,
+ classes => if_repaired("restart_dnsmasq");
+
+ "/etc/enigmabox/display_names"
+ create => "true",
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/display_names.mustache",
+ edit_defaults => no_backup;
+
+ "/etc/dhcpd.conf"
+ edit_template => "$(this.promise_dirname)/templates/dhcpd.conf",
+ classes => if_repaired("restart_dhcpd");
+
+ "/etc/config/dhcp"
+ edit_template => "$(this.promise_dirname)/templates/config-dhcp",
+ classes => if_repaired("restart_dnsmasq"),
+ edit_defaults => no_backup;
+
+ "/usr/sbin/rebuild-iptables"
+ create => "true",
+ perms => script,
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/rebuild-iptables.mustache",
+ edit_defaults => no_backup,
+ classes => if_repaired("rebuild_iptables");
+
+ missioncontrol::
+ "/etc/enigmabox/autoupdates"
+ create => "true",
+ template_method => "mustache",
+ template_data => readjson("$(g.site)", 64000),
+ edit_template => "$(this.promise_dirname)/templates/autoupdates.mustache",
+ edit_defaults => no_backup;
+
+ commands:
+ restart_dnsmasq::
+ "/etc/init.d/dnsmasq restart";
+
+ restart_dhcpd::
+ "/etc/init.d/dhcpd restart";
+
+ rebuild_iptables::
+ "/usr/sbin/rebuild-iptables";
+
+ reports:
+ "checking network configuration: done";
+}
+
--- /dev/null
+{{autoupdates}}
--- /dev/null
+config dnsmasq
+ option domainneeded '1'
+ option boguspriv '1'
+ option filterwin2k '1'
+ option localise_queries '1'
+ option rebind_protection '1'
+ option rebind_localhost '1'
+# option local '/lan/'
+ option domain 'box'
+ option expandhosts '1'
+ option nonegcache '0'
+ option authoritative '1'
+ option readethers '1'
+ option leasefile '/tmp/dhcp.leases'
+ option resolvfile '/etc/resolv.conf.enigmabox'
+
--- /dev/null
+ddns-update-style none;
+default-lease-time 86400;
+max-lease-time 604800;
+authoritative;
+log-facility local7;
+option domain-name "box";
+option domain-name-servers 192.168.100.1;
+option tftp-server-name "http://box:8080";
+
+subnet 192.168.100.0 netmask 255.255.255.0 {
+ range 192.168.100.50 192.168.100.150;
+ option routers 192.168.100.1;
+}
+
+subnet 192.168.101.0 netmask 255.255.255.0 {
+ range 192.168.101.50 192.168.101.150;
+ option routers 192.168.101.1;
+}
+
--- /dev/null
+{{#addresses}}
+{{hostname}}|{{display_name}}
+{{/addresses}}
--- /dev/null
+127.0.0.1 localhost
+::1 localhost ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
+
+# enigmabox internal
+192.168.100.1 box enigma mail text box.enigmabox.net
+{{#missioncontrol}}
+{{ip}} {{hostname}}
+{{/missioncontrol}}
+
+# enigmabox hosts
+{{#if_cjdns_v6}}
+fc5d:524c:f21d:bbef:6742:1008:a105:b60e directory
+{{/if_cjdns_v6}}
+{{^if_cjdns_v6}}
+fcef:1264:ac19:6fac:5942:ed13:06bf:f4ae directory
+{{/if_cjdns_v6}}
+fcd9:08a6:d9c1:5f57:d221:1ba0:35c9:6ff1 schallundrauch.h
+
+# friends
+{{#addresses}}
+{{ipv6}} {{hostname}}
+{{/addresses}}
+
+# global addresses
+{{#global_addresses}}
+{{ipv6}} {{hostname}}.eb
+{{/global_addresses}}
+
--- /dev/null
+#!/bin/ash
+
+iptables="/usr/sbin/iptables"
+ip6tables="/usr/sbin/ip6tables"
+network_profile="$(cat /etc/enigmabox/network-profile)"
+
+# define interfaces
+[[ "$network_profile" == "alix" ]] && internal_interfaces="eth1 eth2"
+[[ "$network_profile" == "apu" ]] && internal_interfaces="eth0 eth1"
+[[ "$network_profile" == "raspi" ]] && internal_interfaces="eth0"
+
+
+
+################################################################################
+# init
+################################################################################
+
+# reset all
+$iptables -F
+$iptables -t nat -F
+
+# defaults
+$iptables -P INPUT DROP
+{{#if_missioncontrol}}$iptables -P OUTPUT DROP{{/if_missioncontrol}}
+{{^if_missioncontrol}}$iptables -P OUTPUT ACCEPT{{/if_missioncontrol}}
+$iptables -P FORWARD DROP
+
+################################################################################
+# define interfaces + address groups
+################################################################################
+
+# loopback
+$iptables -A INPUT -i lo -j ACCEPT
+$iptables -A OUTPUT -o lo -j ACCEPT
+
+$iptables -X internal-services
+$iptables -N internal-services
+$iptables -A internal-services -p tcp --dport 22 -j ACCEPT # ssh
+$iptables -A internal-services -p tcp --dport 25 -j ACCEPT # smtp
+$iptables -A internal-services -p udp --dport 53 -j ACCEPT # allow dns requests to tinyproxy
+$iptables -A internal-services -p udp --dport 67:68 -j ACCEPT # dhcp server
+$iptables -A internal-services -p tcp --dport 80 -j ACCEPT # webinterface
+$iptables -A internal-services -p tcp --dport 110 -j ACCEPT # pop3
+$iptables -A internal-services -p tcp --dport 143 -j ACCEPT # imap
+$iptables -A internal-services -p udp --dport 5060 -j ACCEPT # voip phone
+$iptables -A internal-services -p tcp --dport 8117 -j ACCEPT # renew notice
+$iptables -A internal-services -p tcp --dport 8080 -j ACCEPT # grandstream phone provisioning
+$iptables -A internal-services -p tcp --dport 8888 -j ACCEPT # tinyproxy
+$iptables -A internal-services -p icmp -m limit --limit 10/second -j ACCEPT # icmp
+
+$iptables -X internal-in
+$iptables -N internal-in
+for interface in $internal_interfaces; do
+ $iptables -A internal-in -i "$interface" -j internal-services
+done
+
+$iptables -X internal-out
+$iptables -N internal-out
+for interface in $internal_interfaces; do
+ $iptables -A internal-out -o "$interface" -j ACCEPT
+done
+
+$iptables -X peering-servers
+$iptables -N peering-servers
+{{#peerings}}
+$iptables -A peering-servers --dst {{ip}} -j ACCEPT
+{{/peerings}}
+
+{{#missioncontrol}}
+$iptables -A peering-servers --dst {{ip}} -j ACCEPT
+{{/missioncontrol}}
+
+################################################################################
+# general rules
+################################################################################
+
+# syncookies
+echo 1 > /proc/sys/net/ipv4/tcp_syncookies
+
+# allow related and established
+$iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+$iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+
+################################################################################
+# input
+################################################################################
+
+# allow internal interfaces
+$iptables -A INPUT -j internal-in
+
+{{#if_allow_peering}}
+# peering port
+$iptables -A INPUT -p udp --dport {{peering_port}} -j ACCEPT
+{{/if_allow_peering}}
+
+################################################################################
+# output
+################################################################################
+
+$iptables -A OUTPUT -o tun0 -j ACCEPT # that is needed for dnsmasq to make dns requests
+$iptables -A OUTPUT -j peering-servers
+$iptables -A OUTPUT -j internal-out # asterisk needs this
+
+################################################################################
+# forward
+################################################################################
+
+# NAT
+$iptables -A POSTROUTING -t nat -o tun0 -j MASQUERADE
+
+for interface in $internal_interfaces; do
+
+ # MTU fix
+ $iptables -A FORWARD -i "$interface" -p tcp -m tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu
+
+ # allow forwarding for internal_interfaces, but not for others
+ $iptables -A FORWARD -i "$interface" -o tun0 -j ACCEPT
+ $iptables -A FORWARD -i tun0 -o "$interface" -m state --state RELATED,ESTABLISHED -j ACCEPT
+
+done
+
+{{#if_display_expiration_notice}}
+# show renew notice if subscription has expired
+for interface in $internal_interfaces; do
+ $iptables -t nat -A PREROUTING -i "$interface" -p tcp --dport 80 -j REDIRECT --to-port 8117
+done
+{{/if_display_expiration_notice}}
+
+
+
+################################################################################
+# IPv6
+################################################################################
+
+################################################################################
+# init
+################################################################################
+
+# reset all
+$ip6tables -F
+$ip6tables -t nat -F
+
+# defaults
+$ip6tables -P INPUT DROP
+$ip6tables -P OUTPUT DROP
+$ip6tables -P FORWARD DROP
+
+################################################################################
+# define interfaces + address groups
+################################################################################
+
+# loopback
+$ip6tables -A INPUT -i lo -j ACCEPT
+$ip6tables -A OUTPUT -o lo -j ACCEPT
+
+$ip6tables -X internal-in
+$ip6tables -N internal-in
+for interface in $internal_interfaces; do
+ $ip6tables -A internal-in -i "$interface" -j ACCEPT
+done
+
+$ip6tables -X internal-out
+$ip6tables -N internal-out
+for interface in $internal_interfaces; do
+ $ip6tables -A internal-out -o "$interface" -j ACCEPT
+done
+
+$ip6tables -X friends-services
+$ip6tables -N friends-services
+$ip6tables -A friends-services -p tcp --dport 25 -j ACCEPT # smtp
+$ip6tables -A friends-services -p tcp --dport 5060 -j ACCEPT # asterisk
+$ip6tables -A friends-services -p udp --dport 5060 -j ACCEPT # asterisk
+$ip6tables -A friends-services -p udp -m udp --dport 10000:20000 -j ACCEPT # rtp
+$ip6tables -A friends-services -p ipv6-icmp -m limit --limit 10/second -j ACCEPT # icmp
+
+$ip6tables -X friends-in
+$ip6tables -N friends-in
+{{#addresses}}
+$ip6tables -A friends-in -i tun0 --src {{ipv6}} -j friends-services
+{{/addresses}}
+
+################################################################################
+# general rules
+################################################################################
+
+# allow related and established
+$ip6tables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+$ip6tables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+
+# allow ping
+$ip6tables -A INPUT -p ipv6-icmp -m limit --limit 10/second -j internal-in
+
+################################################################################
+# input
+################################################################################
+
+# allow friends
+$ip6tables -A INPUT -j friends-in
+
+{{#if_global_availability}}
+# allow ping
+$ip6tables -A INPUT -i tun0 -p ipv6-icmp -m limit --limit 10/second -j ACCEPT
+
+# allow calls from everyone
+$ip6tables -A INPUT -i tun0 -p tcp --dport 5060 -j ACCEPT
+$ip6tables -A INPUT -i tun0 -p udp --dport 5060 -j ACCEPT
+
+# RTP - the media stream
+# (related to the port range in /etc/asterisk/rtp.conf)
+$ip6tables -A INPUT -i tun0 -p udp -m udp --dport 10000:20000 -j ACCEPT
+
+# allow emails from everyone
+$ip6tables -A INPUT -i tun0 -p tcp --dport 25 -j ACCEPT
+{{/if_global_availability}}
+
+{{#if_teletext_enabled}}
+# teletext
+$ip6tables -A INPUT -i tun0 -p tcp --dport 3838 -j ACCEPT
+#$ip6tables -A INPUT -i tun0 -p tcp --sport 3838 -j ACCEPT - test that
+{{/if_teletext_enabled}}
+
+{{#if_webserver_enabled}}
+# hypesite
+$ip6tables -A INPUT -i tun0 -p tcp --dport 80 -j ACCEPT
+{{/if_webserver_enabled}}
+
+# allow webserver access from LAN
+$ip6tables -A INPUT -p tcp --dport 80 -j internal-in
+
+################################################################################
+# output
+################################################################################
+
+# allow OUTPUT for tun0
+$ip6tables -A OUTPUT -o tun0 -j ACCEPT
+
+# allow router advertisements
+#$ip6tables -A OUTPUT -p ipv6-icmp -j internal-out
+
+################################################################################
+# forward
+################################################################################
+
+# NAT
+$ip6tables -A POSTROUTING -t nat -o tun0 -j MASQUERADE
+
+for interface in $internal_interfaces; do
+
+ # allow hype access
+ $ip6tables -A FORWARD -i "$interface" -o tun0 -j ACCEPT
+ $ip6tables -A FORWARD -i tun0 -o "$interface" -m state --state RELATED,ESTABLISHED -j ACCEPT
+
+done
+
+# EOF
--- /dev/null
+#!/bin/sh /etc/rc.common
+# setup cjdns networking
+
+START=94
+
+start() {
+ /usr/sbin/setup-cjdns-networking
+}
+
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2013 Matt Martz
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+__version__ = '0.2.5'
+
+# Some global variables we use
+source = None
+shutdown_event = None
+
+import math
+import time
+import os
+import sys
+import threading
+import re
+import signal
+import socket
+
+# Used for bound_interface
+socket_socket = socket.socket
+
+try:
+ import xml.etree.cElementTree as ET
+except ImportError:
+ try:
+ import xml.etree.ElementTree as ET
+ except ImportError:
+ from xml.dom import minidom as DOM
+ ET = None
+
+# Begin import game to handle Python 2 and Python 3
+try:
+ from urllib2 import urlopen, Request, HTTPError, URLError
+except ImportError:
+ from urllib.request import urlopen, Request, HTTPError, URLError
+
+try:
+ from Queue import Queue
+except ImportError:
+ from queue import Queue
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
+
+try:
+ from urlparse import parse_qs
+except ImportError:
+ try:
+ from urllib.parse import parse_qs
+ except ImportError:
+ from cgi import parse_qs
+
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+try:
+ from argparse import ArgumentParser as ArgParser
+except ImportError:
+ from optparse import OptionParser as ArgParser
+
+try:
+ import builtins
+except ImportError:
+ def print_(*args, **kwargs):
+ """The new-style print function taken from
+ https://pypi.python.org/pypi/six/
+
+ """
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ fp.write(data)
+
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+else:
+ print_ = getattr(builtins, 'print')
+ del builtins
+
+
+def bound_socket(*args, **kwargs):
+ """Bind socket to a specified source IP address"""
+
+ global source
+ sock = socket_socket(*args, **kwargs)
+ sock.bind((source, 0))
+ return sock
+
+
+def distance(origin, destination):
+ """Determine distance between 2 sets of [lat,lon] in km"""
+
+ lat1, lon1 = origin
+ lat2, lon2 = destination
+ radius = 6371 # km
+
+ dlat = math.radians(lat2 - lat1)
+ dlon = math.radians(lon2 - lon1)
+ a = (math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1))
+ * math.cos(math.radians(lat2)) * math.sin(dlon / 2)
+ * math.sin(dlon / 2))
+ c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
+ d = radius * c
+
+ return d
+
+
+class FileGetter(threading.Thread):
+ """Thread class for retrieving a URL"""
+
+ def __init__(self, url, start):
+ self.url = url
+ self.result = None
+ self.starttime = start
+ threading.Thread.__init__(self)
+
+ def run(self):
+ self.result = [0]
+ try:
+ if (time.time() - self.starttime) <= 10:
+ f = urlopen(self.url)
+ while 1 and not shutdown_event.isSet():
+ self.result.append(len(f.read(10240)))
+ if self.result[-1] == 0:
+ break
+ f.close()
+ except IOError:
+ pass
+
+
+def downloadSpeed(files, quiet=False):
+ """Function to launch FileGetter threads and calculate download speeds"""
+
+ start = time.time()
+
+ def producer(q, files):
+ for file in files:
+ thread = FileGetter(file, start)
+ thread.start()
+ q.put(thread, True)
+ if not quiet and not shutdown_event.isSet():
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+ finished = []
+
+ def consumer(q, total_files):
+ while len(finished) < total_files:
+ thread = q.get(True)
+ while thread.isAlive():
+ thread.join(timeout=0.1)
+ finished.append(sum(thread.result))
+ del thread
+
+ q = Queue(6)
+ prod_thread = threading.Thread(target=producer, args=(q, files))
+ cons_thread = threading.Thread(target=consumer, args=(q, len(files)))
+ start = time.time()
+ prod_thread.start()
+ cons_thread.start()
+ while prod_thread.isAlive():
+ prod_thread.join(timeout=0.1)
+ while cons_thread.isAlive():
+ cons_thread.join(timeout=0.1)
+ return (sum(finished) / (time.time() - start))
+
+
+class FilePutter(threading.Thread):
+ """Thread class for putting a URL"""
+
+ def __init__(self, url, start, size):
+ self.url = url
+ chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ data = chars * (int(round(int(size) / 36.0)))
+ self.data = ('content1=%s' % data[0:int(size) - 9]).encode()
+ del data
+ self.result = None
+ self.starttime = start
+ threading.Thread.__init__(self)
+
+ def run(self):
+ try:
+ if ((time.time() - self.starttime) <= 10 and
+ not shutdown_event.isSet()):
+ f = urlopen(self.url, self.data)
+ f.read(11)
+ f.close()
+ self.result = len(self.data)
+ else:
+ self.result = 0
+ except IOError:
+ self.result = 0
+
+
+def uploadSpeed(url, sizes, quiet=False):
+ """Function to launch FilePutter threads and calculate upload speeds"""
+
+ start = time.time()
+
+ def producer(q, sizes):
+ for size in sizes:
+ thread = FilePutter(url, start, size)
+ thread.start()
+ q.put(thread, True)
+ if not quiet and not shutdown_event.isSet():
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+ finished = []
+
+ def consumer(q, total_sizes):
+ while len(finished) < total_sizes:
+ thread = q.get(True)
+ while thread.isAlive():
+ thread.join(timeout=0.1)
+ finished.append(thread.result)
+ del thread
+
+ q = Queue(6)
+ prod_thread = threading.Thread(target=producer, args=(q, sizes))
+ cons_thread = threading.Thread(target=consumer, args=(q, len(sizes)))
+ start = time.time()
+ prod_thread.start()
+ cons_thread.start()
+ while prod_thread.isAlive():
+ prod_thread.join(timeout=0.1)
+ while cons_thread.isAlive():
+ cons_thread.join(timeout=0.1)
+ return (sum(finished) / (time.time() - start))
+
+
+def getAttributesByTagName(dom, tagName):
+ """Retrieve an attribute from an XML document and return it in a
+ consistent format
+
+ Only used with xml.dom.minidom, which is likely only to be used
+ with python versions older than 2.5
+ """
+ elem = dom.getElementsByTagName(tagName)[0]
+ return dict(list(elem.attributes.items()))
+
+
+def getConfig():
+ """Download the speedtest.net configuration and return only the data
+ we are interested in
+ """
+
+ uh = urlopen('http://www.speedtest.net/speedtest-config.php')
+ configxml = []
+ while 1:
+ configxml.append(uh.read(10240))
+ if len(configxml[-1]) == 0:
+ break
+ if int(uh.code) != 200:
+ return None
+ uh.close()
+ try:
+ root = ET.fromstring(''.encode().join(configxml))
+ config = {
+ 'client': root.find('client').attrib,
+ 'times': root.find('times').attrib,
+ 'download': root.find('download').attrib,
+ 'upload': root.find('upload').attrib}
+ except AttributeError:
+ root = DOM.parseString(''.join(configxml))
+ config = {
+ 'client': getAttributesByTagName(root, 'client'),
+ 'times': getAttributesByTagName(root, 'times'),
+ 'download': getAttributesByTagName(root, 'download'),
+ 'upload': getAttributesByTagName(root, 'upload')}
+ del root
+ del configxml
+ return config
+
+
+def closestServers(client, all=False):
+ """Determine the 5 closest speedtest.net servers based on geographic
+ distance
+ """
+
+ uh = urlopen('http://www.speedtest.net/speedtest-servers.php')
+ serversxml = []
+ while 1:
+ serversxml.append(uh.read(10240))
+ if len(serversxml[-1]) == 0:
+ break
+ if int(uh.code) != 200:
+ return None
+ uh.close()
+ try:
+ root = ET.fromstring(''.encode().join(serversxml))
+ elements = root.getiterator('server')
+ except AttributeError:
+ root = DOM.parseString(''.join(serversxml))
+ elements = root.getElementsByTagName('server')
+ servers = {}
+ for server in elements:
+ try:
+ attrib = server.attrib
+ except AttributeError:
+ attrib = dict(list(server.attributes.items()))
+ d = distance([float(client['lat']), float(client['lon'])],
+ [float(attrib.get('lat')), float(attrib.get('lon'))])
+ attrib['d'] = d
+ if d not in servers:
+ servers[d] = [attrib]
+ else:
+ servers[d].append(attrib)
+ del root
+ del serversxml
+ del elements
+
+ closest = []
+ for d in sorted(servers.keys()):
+ for s in servers[d]:
+ closest.append(s)
+ if len(closest) == 5 and not all:
+ break
+ else:
+ continue
+ break
+
+ del servers
+ return closest
+
+
+def getBestServer(servers):
+ """Perform a speedtest.net "ping" to determine which speedtest.net
+ server has the lowest latency
+ """
+
+ results = {}
+ for server in servers:
+ cum = []
+ url = os.path.dirname(server['url'])
+ for i in range(0, 3):
+ try:
+ uh = urlopen('%s/latency.txt' % url)
+ except (HTTPError, URLError):
+ cum.append(3600)
+ continue
+ start = time.time()
+ text = uh.read(9)
+ total = time.time() - start
+ if int(uh.code) == 200 and text == 'test=test'.encode():
+ cum.append(total)
+ else:
+ cum.append(3600)
+ uh.close()
+ avg = round((sum(cum) / 3) * 1000000, 3)
+ results[avg] = server
+
+ fastest = sorted(results.keys())[0]
+ best = results[fastest]
+ best['latency'] = fastest
+
+ return best
+
+
+def ctrl_c(signum, frame):
+ """Catch Ctrl-C key sequence and set a shutdown_event for our threaded
+ operations
+ """
+
+ global shutdown_event
+ shutdown_event.set()
+ raise SystemExit('\nCancelling...')
+
+
+def version():
+ """Print the version"""
+
+ raise SystemExit(__version__)
+
+
+def speedtest():
+ """Run the full speedtest.net test"""
+
+ global shutdown_event, source
+ shutdown_event = threading.Event()
+
+ signal.signal(signal.SIGINT, ctrl_c)
+
+ description = (
+ 'Command line interface for testing internet bandwidth using '
+ 'speedtest.net.\n'
+ '------------------------------------------------------------'
+ '--------------\n'
+ 'https://github.com/sivel/speedtest-cli')
+
+ parser = ArgParser(description=description)
+ # Give optparse.OptionParser an `add_argument` method for
+ # compatibility with argparse.ArgumentParser
+ try:
+ parser.add_argument = parser.add_option
+ except AttributeError:
+ pass
+ parser.add_argument('--share', action='store_true',
+ help='Generate and provide a URL to the speedtest.net '
+ 'share results image')
+ parser.add_argument('--simple', action='store_true',
+ help='Suppress verbose output, only show basic '
+ 'information')
+ parser.add_argument('--list', action='store_true',
+ help='Display a list of speedtest.net servers '
+ 'sorted by distance')
+ parser.add_argument('--server', help='Specify a server ID to test against')
+ parser.add_argument('--mini', help='URL of the Speedtest Mini server')
+ parser.add_argument('--source', help='Source IP address to bind to')
+ parser.add_argument('--version', action='store_true',
+ help='Show the version number and exit')
+
+ options = parser.parse_args()
+ if isinstance(options, tuple):
+ args = options[0]
+ else:
+ args = options
+ del options
+
+ # Print the version and exit
+ if args.version:
+ version()
+
+ # If specified bind to a specific IP address
+ if args.source:
+ source = args.source
+ socket.socket = bound_socket
+
+ if not args.simple:
+ print_('Retrieving speedtest.net configuration...')
+ try:
+ config = getConfig()
+ except URLError:
+ print_('Cannot retrieve speedtest configuration')
+ sys.exit(1)
+
+ if not args.simple:
+ print_('Retrieving speedtest.net server list...')
+ if args.list or args.server:
+ servers = closestServers(config['client'], True)
+ if args.list:
+ serverList = []
+ for server in servers:
+ line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
+ '[%(d)0.2f km]' % server)
+ serverList.append(line)
+ # Python 2.7 and newer seem to be ok with the resultant encoding
+ # from parsing the XML, but older versions have some issues.
+ # This block should detect whether we need to encode or not
+ try:
+ unicode()
+ print_('\n'.join(serverList).encode('utf-8', 'ignore'))
+ except NameError:
+ print_('\n'.join(serverList))
+ except IOError:
+ pass
+ sys.exit(0)
+ else:
+ servers = closestServers(config['client'])
+
+ if not args.simple:
+ print_('Testing from %(isp)s (%(ip)s)...' % config['client'])
+
+ if args.server:
+ try:
+ best = getBestServer(filter(lambda x: x['id'] == args.server,
+ servers))
+ except IndexError:
+ print_('Invalid server ID')
+ sys.exit(1)
+ elif args.mini:
+ name, ext = os.path.splitext(args.mini)
+ if ext:
+ url = os.path.dirname(args.mini)
+ else:
+ url = args.mini
+ urlparts = urlparse(url)
+ try:
+ f = urlopen(args.mini)
+ except:
+ print_('Invalid Speedtest Mini URL')
+ sys.exit(1)
+ else:
+ text = f.read()
+ f.close()
+ extension = re.findall('upload_extension: "([^"]+)"', text.decode())
+ if not urlparts or not extension:
+ print_('Please provide the full URL of your Speedtest Mini server')
+ sys.exit(1)
+ servers = [{
+ 'sponsor': 'Speedtest Mini',
+ 'name': urlparts[1],
+ 'd': 0,
+ 'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
+ 'latency': 0,
+ 'id': 0
+ }]
+ try:
+ best = getBestServer(servers)
+ except:
+ best = servers[0]
+ else:
+ if not args.simple:
+ print_('Selecting best server based on ping...')
+ best = getBestServer(servers)
+
+ if not args.simple:
+ # Python 2.7 and newer seem to be ok with the resultant encoding
+ # from parsing the XML, but older versions have some issues.
+ # This block should detect whether we need to encode or not
+ try:
+ unicode()
+ print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
+ '%(latency)s ms' % best).encode('utf-8', 'ignore'))
+ except NameError:
+ print_('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
+ '%(latency)s ms' % best)
+ else:
+ print_('Ping: %(latency)s ms' % best)
+
+ sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
+ urls = []
+ for size in sizes:
+ for i in range(0, 4):
+ urls.append('%s/random%sx%s.jpg' %
+ (os.path.dirname(best['url']), size, size))
+ if not args.simple:
+ print_('Testing download speed', end='')
+ dlspeed = downloadSpeed(urls, args.simple)
+ if not args.simple:
+ print_()
+ print_('Download: %0.2f Mbit/s' % ((dlspeed / 1000 / 1000) * 8))
+
+ sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
+ sizes = []
+ for size in sizesizes:
+ for i in range(0, 25):
+ sizes.append(size)
+ if not args.simple:
+ print_('Testing upload speed', end='')
+ ulspeed = uploadSpeed(best['url'], sizes, args.simple)
+ if not args.simple:
+ print_()
+ print_('Upload: %0.2f Mbit/s' % ((ulspeed / 1000 / 1000) * 8))
+
+ if args.share and args.mini:
+ print_('Cannot generate a speedtest.net share results image while '
+ 'testing against a Speedtest Mini server')
+ elif args.share:
+ dlspeedk = int(round((dlspeed / 1000) * 8, 0))
+ ping = int(round(best['latency'], 0))
+ ulspeedk = int(round((ulspeed / 1000) * 8, 0))
+
+ # Build the request to send results back to speedtest.net
+ # We use a list instead of a dict because the API expects parameters
+ # in a certain order
+ apiData = [
+ 'download=%s' % dlspeedk,
+ 'ping=%s' % ping,
+ 'upload=%s' % ulspeedk,
+ 'promo=',
+ 'startmode=%s' % 'pingselect',
+ 'recommendedserverid=%s' % best['id'],
+ 'accuracy=%s' % 1,
+ 'serverid=%s' % best['id'],
+ 'hash=%s' % md5(('%s-%s-%s-%s' %
+ (ping, ulspeedk, dlspeedk, '297aae72'))
+ .encode()).hexdigest()]
+
+ req = Request('http://www.speedtest.net/api/api.php',
+ data='&'.join(apiData).encode())
+ req.add_header('Referer', 'http://c.speedtest.net/flash/speedtest.swf')
+ f = urlopen(req)
+ response = f.read()
+ code = f.code
+ f.close()
+
+ if int(code) != 200:
+ print_('Could not submit results to speedtest.net')
+ sys.exit(1)
+
+ qsargs = parse_qs(response.decode())
+ resultid = qsargs.get('resultid')
+ if not resultid or len(resultid) != 1:
+ print_('Could not submit results to speedtest.net')
+ sys.exit(1)
+
+ print_('Share results: http://www.speedtest.net/result/%s.png' %
+ resultid[0])
+
+
+def main():
+ try:
+ speedtest()
+ except KeyboardInterrupt:
+ print_('\nCancelling...')
+
+
+# Run the test when executed as a script; importing the module stays
+# side-effect free so the helper functions above can be reused.
+if __name__ == '__main__':
+    main()
+
+# vim:ts=4:sw=4:expandtab
--- /dev/null
+# OpenWrt package: CFEngine 3 configuration-management system,
+# fetched from the upstream git repository by tag.
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=cfengine
+PKG_VERSION:=3.6.0erc-build7
+PKG_RELEASE:=1
+
+# Source is cloned via git; tarball name and subdir are derived from
+# name/version, and PKG_SOURCE_VERSION reuses PKG_VERSION as the git ref.
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
+PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
+PKG_SOURCE_URL:=https://github.com/cfengine/core.git
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_VERSION:=$(PKG_VERSION)
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
+
+# NOTE(review): this assigns a make variable CC whose value contains the
+# literal quote characters; nothing in this Makefile references $(CC)
+# afterwards, so it looks like a stray leftover — confirm and remove.
+CC="$(TARGET_CC)"
+
+include $(INCLUDE_DIR)/package.mk
+
+# menuconfig metadata for the cfengine package (Administration section).
+define Package/cfengine
+ SECTION:=admin
+ CATEGORY:=Administration
+ TITLE:=cfengine3
+ DEPENDS:=+libpcre +libopenssl +libxml2 +tokyocabinet +libpam
+endef
+
+define Package/cfengine/description
+ CFEngine is a popular open source configuration management system,
+ written by Mark Burgess. Its primary function is to provide automated
+ configuration and maintenance of large-scale computer systems, including
+ the unified management of servers, desktops, embedded networked devices,
+ mobile smartphones, and tablet computers.
+endef
+
+# Configure via upstream autogen.sh (autoreconf + configure) inside the
+# build directory, with tokyocabinet enabled and libxml2/postgresql off.
+# NOTE(review): --host is normally a full GNU triplet such as
+# $(GNU_TARGET_NAME), not the bare CPU name in $(ARCH) — confirm this
+# actually cross-configures correctly on all targets.
+define Build/Configure
+ cd $(PKG_BUILD_DIR) && CFLAGS="$(TARGET_CFLAGS)" ./autogen.sh --host=$(ARCH) --target=$(GNU_TARGET_NAME) --with-tokyocabinet --without-libxml2 --without-postgresql $(TARGET_CONFIGURE_OPTS)
+endef
+
+# Install the cfengine daemons/tools, the shared libpromises library,
+# the init script and the pre-generated host keys into package root $(1).
+define Package/cfengine/install
+	$(INSTALL_DIR) $(1)/opt/cfengine/bin
+	$(INSTALL_DIR) $(1)/opt/cfengine/lib
+
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/cf-agent/cf-agent $(1)/opt/cfengine/bin/cf-agent
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/cf-execd/cf-execd $(1)/opt/cfengine/bin/cf-execd
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/cf-key/cf-key $(1)/opt/cfengine/bin/cf-key
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/cf-monitord/cf-monitord $(1)/opt/cfengine/bin/cf-monitord
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/cf-promises/cf-promises $(1)/opt/cfengine/bin/cf-promises
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/cf-runagent/cf-runagent $(1)/opt/cfengine/bin/cf-runagent
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/cf-serverd/cf-serverd $(1)/opt/cfengine/bin/cf-serverd
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/ext/rpmvercmp $(1)/opt/cfengine/bin/cf-rpmvercmp
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/libpromises/libpromises.la $(1)/opt/cfengine/lib/cf-libpromises.la
+	# Fix: the shared object built by libtool lives under .libs/; the old
+	# recipe installed the .la text file under the .so name, so anything
+	# loading libpromises.so.1.0.0 got a libtool archive, not a library.
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/libpromises/.libs/libpromises.so.1.0.0 $(1)/opt/cfengine/lib/libpromises.so.1.0.0
+
+	$(INSTALL_DIR) $(1)/etc/init.d
+	$(INSTALL_BIN) ./files/cfengine.init $(1)/etc/init.d/cfengine
+
+	# These keys are never used for server communication; shipping them
+	# just avoids key generation on first boot (speedup only).
+	$(INSTALL_DIR) $(1)/opt/cfengine/ppkeys
+	$(INSTALL_DATA) ./files/localhost.priv $(1)/opt/cfengine/ppkeys/localhost.priv
+	$(INSTALL_DATA) ./files/localhost.pub $(1)/opt/cfengine/ppkeys/localhost.pub
+endef
+
+$(eval $(call BuildPackage,cfengine))
--- /dev/null
+#!/bin/sh /etc/rc.common
+# cfengine
+
+START=85
+
+start() {
+ echo start
+ mkdir -p /var/cfengine/bin
+ cp /opt/cfengine/bin/cf-promises /var/cfengine/bin/cf-promises
+ cp -r /opt/cfengine/ppkeys /var/cfengine/
+ chmod 700 /var/cfengine/ppkeys
+ /opt/cfengine/bootstrap.cf
+ echo started.
+
+ if [[ $(cat /etc/network-profile) == 'alix' ]]; then
+
+ # fuckin ethernets on apu board hang - bootstrap again and wait for initialisation
+ while [[ $(grep eth1 /etc/config/network | wc -l) -eq 0 ]]; do
+ /opt/cfengine/bootstrap.cf
+ sleep 1
+ done
+
+ while [[ $(ifconfig eth1 | wc -l) -eq 0 ]]; do
+ sleep 1
+ done
+
+ else
+
+ while [[ $(ifconfig eth0 | wc -l) -eq 0 ]]; do
+ sleep 1
+ done
+
+ fi
+
+}
+
--- /dev/null
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: DES-EDE3-CBC,E77597809E19918F
+
+IkO7dEsrOT6WT+7HSe9F9r29eZSADOIU1eODnSQcK12DHPxDuOCGhDHr9+8Ymh/G
+jUKKDb0o8rnlmO8CZSXC9mmltXQZeSrhyt+X1TpRmJTsRyt4CyRc4sHANtCMUpuB
+Zs9o3baAs6Pvz3aK0Gl8XSqwO/3l2Isz1UvCfisLRC5FY51YKi2inZPuJdq0X562
+IlVzrg69WB51Jx8m9lKUcfmdm3sRpmHZv5mxX0oKvYN2K7T1T2B+kabtdS0ZSdMv
+6HJlWwqw29sOjw1VlhF8KxIXrPiwLaBnokN8gEIWYszCC+9OEgoKmR85KncaEZCz
+AX5xN/oKiHkL9RLQrDksVgKDOP2RWTae1ajK1za4sAYmDeMk5eeoIzT7onJSdPSW
+HpXNrz9S1UEN9CQwdUnwLcXg8VuuT7ECw0q97AWuBuWYR6v0T/ym65G13e4Y+hv/
+0BbIDKZW0lCrVI9nePQMBdYoYcQbbeku2dOiNrBxUxpbQfYzTwtoBQ6aFQiu+kEl
+1gJgAknHu9cqk8CfkEZEnpuBuMqArbK5hXX+HvNMg2iGh1zLgwKIONBnQxH7nYc0
+8qQumQACqhvjUzeYtezSb0Y6c0mIT7L09/j4ko576YAprsli43vTe5FGZygl/XH3
+/sKkstBGcSmgV8FXhyrCOVfiNYeJ7cSfmGLGGwTzNwuRP4UyCsOr5vqeX1n6WxIw
+R6b78C+jqOJ7BVbGcVdlq/tn1IjD90RKps09oOrk6zxpYLjXvMtI8mJMaMAt3L00
+RaLnmpNU8A8/eiXdD752Aekd2oHwgrBNu9tXYm030c6rUOdyBrlB5pX0Qe6rN/OY
+oWlucZ0WA+K3vTolMa+nQLj3ZVJcNN3m9NPTAPAS8Wkyufz1SPX//8zG9osRGu+i
+kBZe6L9xXkvhfC6lpwj+HLSD677VEaPUzrrxMimgpCtU5Vhi36qdM6Ygw/aseJSl
+XaWNY9lj8u9AvcAn+9wIXCKx12Nnp9IxY2fJ8G8SPE7pUeP5KpTWhqMCe3fbQLfq
+xCE6Q7kmdCzIFGbOVcygsyD2TWdieevKiO/0vKt8z0VX2hH0A4FBDUVuRsM1I++l
+RC4zBe84/+M4nfVRVNPk+582/RnvhA1lC3z1pAASb1y1sG8z8UYeMD7VTZJbaK9k
+AlL9ds4mSCp3DvH3rzjCcNKSBPx4N36JudAETOwdz0X5KaNU/Qc+IUIy2nHA7Fis
+9QrN2XV1fCMPGLNM/IDSWd0kb6pVWuWLEH+ylTeEuKpAuhG5Mug3Jxm9QqpIfeew
+LTuF1CLhOvrdhRQUKnxpZ67cBXnpqffyh9w0zzHbWMiZ6L2PQ1jmo4rAuSUhqfX7
+z/d1sA93QePx58bHXUTg/1u/rBmyiCkSQNzM1yJdMzVrbAELNWRMQikWC8vIh1BG
+NdAQF4oY7rnoFrrLADMn4bZdK2u+FtSIzSRWyRPR74wIGfW2LPWpd3IDfHxQ6z9S
+9TZPev6Q9N0YMc8c93Ybve6WT79j7ih1aB6MLKSwRE2hf6DC01HJF4JDE+Itbx2u
+Uk+bui0DIpmCXeak/83QOx9/JdQdL7fn5hj67+tkplPqWapdWqH8iQ==
+-----END RSA PRIVATE KEY-----
--- /dev/null
+-----BEGIN RSA PUBLIC KEY-----
+MIIBCAKCAQEA2SbIk5nnlTa53mREKdUlFXi2RmEBLF/gQ8quzL67AOGDWJCdkGbq
+bQjFqNdz5eeYerMevJY/YwbpfSf6ZqGSLIRlLVxiMaOiId5CR7a0xjeN6yDxZZcn
+s5xzu5RtysDAmvzek62b9n7cP8lcaWy/jU7A2pYncOAdHFyB/LS9BWkwye/SKNvW
+MWdoTm9TBJRqrCrCG8/B2FFYmx26iTT0Rh5WJRdVcRLhyiBaEOHZ3jq8LSEL1Kb1
+H9dPk2PV3NFb3gylg7H1RsYhV8QwdyYFNjGyTzAro1GbNCaAj+tPW4DsrM55Ff5N
+SwAYHjLZCaBZaAHNk1clJnNJo80aZf2k/QIBIw==
+-----END RSA PUBLIC KEY-----
--- /dev/null
+--- a/configure.ac
++++ b/configure.ac
+@@ -667,7 +667,7 @@ CF3_WITH_LIBRARY(pam, [
+ ])
+ dnl These platforms must have pam, others can have it, but not required.
+ AS_CASE([$target_os],
+- [*gnu*|*solaris*|*aix*|*hpux*|*hp-ux*], [
++ [*solaris*|*aix*|*hpux*|*hp-ux*], [
+ AS_IF([test "x$ac_cv_lib_pam_pam_start" != "xyes"],
+ [AC_MSG_ERROR(Cannot find PAM library)]
+ )
--- /dev/null
+# OpenWrt package: cjdns (master snapshot), pinned to a specific commit.
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=cjdns-master
+# Version string embeds the snapshot date plus the pinned commit hash.
+PKG_VERSION:=2014-10-25-e2b673698e471dbc82b4e9dbc04cb9e16f1f06a6
+PKG_RELEASE:=4
+
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
+PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
+PKG_SOURCE_URL:=https://github.com/cjdelisle/cjdns.git
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_VERSION:=e2b673698e471dbc82b4e9dbc04cb9e16f1f06a6
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
+# Built with stack-smashing protection; -lssp is linked in Build/Compile.
+PKG_BUILD_DEPENDS:=libssp
+
+include $(INCLUDE_DIR)/package.mk
+
+# menuconfig metadata for the cjdns-master package.
+define Package/cjdns-master
+ SECTION:=net
+ CATEGORY:=Network
+ SUBMENU:=Routing and Redirection
+ TITLE:=Experimental self configuring routing protocol.
+ DEPENDS:=+kmod-tun +kmod-ipv6 +libnl +libpthread +librt +@SSP_SUPPORT +libssp
+endef
+
+define Package/cjdns-master/description
+ Cjdns is a networking protocol, a system of digital rules for
+ message exchange between computers. The philosophy behind cjdns
+ is that networks should be easy to set up, protocols should
+ scale up smoothly and security should be ubiquitous.
+endef
+
+# cjdns has no autoconf-style configure step; override with a no-op.
+define Build/Configure
+endef
+
+## NEON
+# Alternative Build/Compile for ARMv7/NEON boards, kept for reference;
+# enable by swapping it with the generic recipe below. (The lines are
+# backslash-continued, so the whole region is one make comment.)
+#define Build/Compile
+# CROSS="true" \
+# CFLAGS="-O2 -march=armv7-a -mtune=cortex-a8 -mfpu=neon -ftree-vectorize -ffast-math -mfloat-abi=hard -marm -Wno-error=maybe-uninitialized" \
+# NO_PIE="true" \
+# Seccomp_NO="true" \
+# CC="$(TARGET_CC)" \
+# ARCH="$(ARCH)" \
+# LDFLAGS="$(CFLAGS) -lssp" \
+# $(PKG_BUILD_DIR)/do
+#endef
+
+# regular
+# Invoke cjdns' own "do" build script, passing the cross toolchain and
+# flags through the environment; seccomp is disabled and -lssp linked in.
+define Build/Compile
+ CROSS="true" \
+ CFLAGS="$(TARGET_CFLAGS)" \
+ NO_PIE="true" \
+ Seccomp_NO="true" \
+ CC="$(TARGET_CC)" \
+ ARCH="$(ARCH)" \
+ LDFLAGS="$(TARGET_LDFLAGS) -lssp" \
+ $(PKG_BUILD_DIR)/do
+endef
+
+# Install cjdroute (twice: once under a .master suffix so it can coexist
+# with other cjdroute builds), the admin helper binaries, the Python
+# admin tooling, and the init script.
+# NOTE(review): the admin library is copied into a hard-coded
+# /usr/lib/python2.7 path, tying the package to Python 2.7 — confirm.
+define Package/cjdns-master/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/cjdroute $(1)/usr/sbin/cjdroute
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/cjdroute $(1)/usr/sbin/cjdroute.master
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/privatetopublic $(1)/usr/sbin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/publictoip6 $(1)/usr/sbin/
+
+ $(INSTALL_DIR) $(1)/usr/lib/python2.7
+ cp -rv $(PKG_BUILD_DIR)/contrib/python/cjdnsadmin $(1)/usr/lib/python2.7/
+
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/cexec $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/cjdnsa $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/cjdnslog $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/dumptable $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/findnodes $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/ip6topk $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/peerStats $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/pingAll.py $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/pktoip6 $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/sessionStats $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/python/trashroutes $(1)/usr/bin/
+
+ $(INSTALL_DIR) $(1)/etc/init.d
+ $(INSTALL_BIN) ./files/cjdns.init $(1)/etc/init.d/cjdns
+endef
+
+$(eval $(call BuildPackage,cjdns-master))
--- /dev/null
+#!/bin/sh /etc/rc.common
+# cjdns
+
+START=93
+STOP=07
+EXTRA_COMMANDS="status"
+
+start() {
+ ( /usr/sbin/cjdroute < /box/cjdroute.conf &> /dev/null ) &
+}
+
+stop() {
+ killall cjdroute
+}
+
+status() {
+ if [[ $(ps | grep cjdroute | wc -l) -gt 1 ]]; then
+ echo running
+ else
+ echo stopped
+ fi
+}
+
+restart() {
+ stop
+ start
+}
+
--- /dev/null
+diff --git a/node_build/dependencies/libuv/gyp_uv.py b/node_build/dependencies/libuv/gyp_uv.py
+index 4ba6916..d9f871b 100755
+--- a/node_build/dependencies/libuv/gyp_uv.py
++++ b/node_build/dependencies/libuv/gyp_uv.py
+@@ -20,7 +20,7 @@ except ImportError:
+
+
+ def host_arch():
+- machine = platform.machine()
++ machine = os.environ.get('ARCH', platform.machine())
+ if machine == 'i386': return 'ia32'
+ if machine == 'x86_64': return 'x64'
+ if machine.startswith('arm'): return 'arm'
--- /dev/null
+diff --git a/node_build/make.js b/node_build/make.js
+index 522c185..9bd81d5 100644
+--- a/node_build/make.js
++++ b/node_build/make.js
+@@ -297,6 +297,8 @@ Builder.configure({
+ 'BUILDTYPE=Release',
+ 'CC=' + builder.config.gcc,
+ 'CXX=' + builder.config.gcc,
++ 'LINK=' + builder.config.gcc,
++ 'LDFLAGS=' + builder.config.ldflags,
+ 'V=1'
+ ];
+
--- /dev/null
+# OpenWrt package: cjdns "v6" build, pinned to a specific commit.
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=cjdns-v6
+# Version string embeds the snapshot date plus the pinned commit hash.
+PKG_VERSION:=2015-02-02-1d51ddc5d036244c5eb231d635e649bb5d325445
+PKG_RELEASE:=3
+
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
+PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
+PKG_SOURCE_URL:=https://github.com/cjdelisle/cjdns.git
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_VERSION:=1d51ddc5d036244c5eb231d635e649bb5d325445
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
+# Built with stack-smashing protection; -lssp is linked in Build/Compile.
+PKG_BUILD_DEPENDS:=libssp
+
+include $(INCLUDE_DIR)/package.mk
+
+# menuconfig metadata; mirrors the cjdns-master package.
+define Package/cjdns-v6
+ SECTION:=net
+ CATEGORY:=Network
+ SUBMENU:=Routing and Redirection
+ TITLE:=Experimental self configuring routing protocol.
+ DEPENDS:=+kmod-tun +kmod-ipv6 +libnl +libpthread +librt +@SSP_SUPPORT +libssp
+endef
+
+define Package/cjdns-v6/description
+ Cjdns is a networking protocol, a system of digital rules for
+ message exchange between computers. The philosophy behind cjdns
+ is that networks should be easy to set up, protocols should
+ scale up smoothly and security should be ubiquitous.
+endef
+
+# cjdns has no autoconf-style configure step; override with a no-op.
+define Build/Configure
+endef
+
+## NEON
+# Alternative Build/Compile for ARMv7/NEON boards, kept for reference;
+# enable by swapping it with the generic recipe below.
+#define Build/Compile
+# CROSS="true" \
+# CFLAGS="-O2 -march=armv7-a -mtune=cortex-a8 -mfpu=neon -ftree-vectorize -ffast-math -mfloat-abi=hard -marm -Wno-error=maybe-uninitialized" \
+# NO_PIE="true" \
+# Seccomp_NO="true" \
+# CC="$(TARGET_CC)" \
+# ARCH="$(ARCH)" \
+# LDFLAGS="$(CFLAGS) -lssp" \
+# $(PKG_BUILD_DIR)/do
+#endef
+
+# regular
+# Cross-build via cjdns' "do" script. Differences from cjdns-master:
+# -Wno-error=maybe-uninitialized is appended to CFLAGS, and LINK is set
+# to the cross compiler (presumably so the gyp-driven link step does not
+# fall back to the host gcc — confirm against the build script).
+define Build/Compile
+ CROSS="true" \
+ CFLAGS="$(TARGET_CFLAGS) -Wno-error=maybe-uninitialized" \
+ NO_PIE="true" \
+ Seccomp_NO="true" \
+ CC="$(TARGET_CC)" \
+ ARCH="$(ARCH)" \
+ LDFLAGS="$(TARGET_LDFLAGS) -lssp" \
+ LINK="$(TARGET_CC)" \
+ $(PKG_BUILD_DIR)/do
+endef
+
+# Only the cjdroute binary is shipped, installed under a .v6 suffix so it
+# can coexist with the cjdroute from the cjdns-master package.
+define Package/cjdns-v6/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/cjdroute $(1)/usr/sbin/cjdroute.v6
+endef
+
+$(eval $(call BuildPackage,cjdns-v6))
--- /dev/null
+diff --git a/node_build/dependencies/libuv/gyp_uv.py b/node_build/dependencies/libuv/gyp_uv.py
+index 4ba6916..d9f871b 100755
+--- a/node_build/dependencies/libuv/gyp_uv.py
++++ b/node_build/dependencies/libuv/gyp_uv.py
+@@ -20,7 +20,7 @@ except ImportError:
+
+
+ def host_arch():
+- machine = platform.machine()
++ machine = os.environ.get('ARCH', platform.machine())
+ if machine == 'i386': return 'ia32'
+ if machine == 'x86_64': return 'x64'
+ if machine.startswith('arm'): return 'arm'
--- /dev/null
+# OpenWrt package: ownCloud web application. The upstream tarball ships
+# ready-to-serve PHP; nothing is compiled, files are only copied.
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=owncloud
+PKG_VERSION:=8.0.0
+PKG_RELEASE:=2
+
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
+PKG_SOURCE_URL:=https://download.owncloud.org/community/
+PKG_MD5SUM:=d2c1366be0756c24e1f5cfc02f80269f
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
+# NOTE(review): stray assignment — $(CC) is never referenced in this
+# Makefile (nothing is compiled); confirm and consider removing.
+CC="$(TARGET_CC)"
+
+include $(INCLUDE_DIR)/package.mk
+
+# menuconfig metadata (Enigmabox category, Hypesite apps submenu).
+define Package/owncloud
+ SECTION:=admin
+ CATEGORY:=Enigmabox
+ TITLE:=ownCloud - self hosted cloud
+ DEPENDS:=hypesites
+ SUBMENU:=Hypesite apps
+endef
+
+define Package/owncloud/description
+ Access your data from all your devices, on an open platform you can extend and modify.
+endef
+
+# No configure step: the tarball is a ready-made PHP application.
+define Build/Configure
+endef
+
+# Nothing to compile either; install only copies files.
+define Build/Compile
+endef
+
+# Copy the application into the lighttpd web root and drop the vhost
+# snippet into the hypesites config directory.
+define Package/owncloud/install
+ $(INSTALL_DIR) $(1)/www/htdocs/owncloud
+ # NOTE(review): copies from $(BUILD_DIR)/owncloud rather than
+ # $(PKG_BUILD_DIR) — presumably the upstream tarball unpacks into a
+ # plain "owncloud/" directory; confirm against Build/Prepare.
+ $(CP) -rv $(BUILD_DIR)/owncloud/* $(1)/www/htdocs/owncloud/
+
+ # hypesite config
+ $(INSTALL_DIR) $(1)/etc/lighttpd/hypesites.d
+ $(INSTALL_DATA) ./files/owncloud.conf $(1)/etc/lighttpd/hypesites.d/owncloud.conf
+endef
+
+$(eval $(call BuildPackage,owncloud))
--- /dev/null
+
+# Deny web access to the ownCloud data directory.
+# TODO: this deny rule does not work yet — verify the pattern against the
+# URLs lighttpd actually sees for requests under /owncloud/data/.
+$HTTP["url"] =~ "^/owncloud/data/" {
+ url.access-deny = ("")
+}
+
+$HTTP["url"] =~ "^/owncloud/" {
+
+ # max post request: 1M (server.max-request-size is given in kilobytes)
+ server.max-request-size = 1024
+
+ # docroot
+ server.document-root = "/www/htdocs"
+
+ # directory listing is explicitly disabled
+ dir-listing.activate = "disable"
+
+ # Run PHP via plain CGI.
+ cgi.assign = ( ".php" => "/usr/bin/php-cgi" )
+ #TODO: manage /etc/lighttpd/lighttpd.conf with cfengine
+
+# # fastcgi alternative: disabled — produced a white page in the files app.
+# fastcgi.server = (
+# ".php" =>
+# ((
+# "host" => "127.0.0.1",
+# "port" => 1026,
+# "check-local" => "disable",
+# "docroot" => "/www/htdocs"
+# ))
+# )
+
+}
+