Merge remote-tracking branch 'upstream/staging' into deps-reorg

This commit is contained in:
John Ericson 2017-09-28 12:32:57 -04:00
commit f037625f87
789 changed files with 15725 additions and 8096 deletions


@ -785,7 +785,20 @@ example of such a situation is when `py.test` is used.
#### Common issues
- Non-working tests can often be deselected. In the case of `py.test`: `py.test -k 'not function_name and not other_function'`.
- Non-working tests can often be deselected. By default `buildPythonPackage` runs `python setup.py test`.
Most Python modules follow the standard test protocol, so the pytest runner can be used instead.
`py.test` supports a `-k` parameter to ignore test methods or classes:
```nix
buildPythonPackage {
# ...
# assumes the tests are located in tests
checkInputs = [ pytest ];
checkPhase = ''
py.test -k 'not function_name and not other_function' tests
'';
}
```
- Unicode issues can typically be fixed by including `glibcLocales` in `buildInputs` and exporting `LC_ALL=en_US.utf-8`.
- Tests that attempt to access `$HOME` can be fixed by using the following work-around before running tests (e.g. `preCheck`): `export HOME=$(mktemp -d)`
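A minimal sketch combining the locale and `$HOME` workarounds above (package metadata and `src` are omitted, and `glibcLocales` is assumed to be in scope):
```nix
buildPythonPackage {
  # ...
  buildInputs = [ glibcLocales ];
  preCheck = ''
    # avoid Unicode errors in tests
    export LC_ALL=en_US.utf-8
    # give the test suite a writable, throw-away $HOME
    export HOME=$(mktemp -d)
  '';
}
```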


@ -477,32 +477,18 @@ it. Place the resulting <filename>package.nix</filename> file into
<varlistentry>
<term>Using the FOSS Radeon or nouveau (nvidia) drivers</term>
<listitem><itemizedlist><listitem><para>
Both the open source radeon drivers as well as the nouveau drivers (nvidia)
need a newer libc++ than is provided by the default runtime, which leads to a
crash on launch. Use <programlisting>environment.systemPackages =
[(pkgs.steam.override { newStdcpp = true; })];</programlisting> in your config
if you get an error like
<programlisting>
libGL error: unable to load driver: radeonsi_dri.so
libGL error: driver pointer missing
libGL error: failed to load driver: radeonsi
libGL error: unable to load driver: swrast_dri.so
libGL error: failed to load driver: swrast</programlisting>
or
<programlisting>
libGL error: unable to load driver: nouveau_dri.so
libGL error: driver pointer missing
libGL error: failed to load driver: nouveau
libGL error: unable to load driver: swrast_dri.so
libGL error: failed to load driver: swrast</programlisting></para></listitem>
<listitem><para>
Steam ships statically linked with a version of libcrypto that
conflicts with the one dynamically loaded by radeonsi_dri.so.
If you get the error
<programlisting>steam.sh: line 713: 7842 Segmentation fault (core dumped)</programlisting>
have a look at <link xlink:href="https://github.com/NixOS/nixpkgs/pull/20269">this pull request</link>.
</para></listitem>
<listitem><itemizedlist>
<listitem><para>The <literal>newStdcpp</literal> parameter
was removed since NixOS 17.09 and should not be needed anymore.
</para></listitem>
<listitem><para>
Steam ships statically linked with a version of libcrypto that
conflicts with the one dynamically loaded by radeonsi_dri.so.
If you get the error
<programlisting>steam.sh: line 713: 7842 Segmentation fault (core dumped)</programlisting>
have a look at <link xlink:href="https://github.com/NixOS/nixpkgs/pull/20269">this pull request</link>.
</para></listitem>
</itemizedlist></listitem></varlistentry>


@ -1,4 +1,3 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-stdenv">
@ -188,11 +187,13 @@ genericBuild
<varlistentry>
<term><varname>NIX_DEBUG</varname></term>
<listitem><para>If set, <literal>stdenv</literal> will print some
debug information during the build. In particular, the
<command>gcc</command> and <command>ld</command> wrapper scripts
will print out the complete command line passed to the wrapped
tools.</para></listitem>
<listitem><para>
A natural number indicating how much information to log.
If set to 1 or higher, <literal>stdenv</literal> will print moderate debug information during the build.
In particular, the <command>gcc</command> and <command>ld</command> wrapper scripts will print out the complete command line passed to the wrapped tools.
If set to 6 or higher, the <literal>stdenv</literal> setup script will be run with <literal>set -x</literal> tracing.
If set to 7 or higher, the <command>gcc</command> and <command>ld</command> wrapper scripts will also be run with <literal>set -x</literal> tracing.
</para></listitem>
</varlistentry>
</variablelist>
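As an illustration, a hedged sketch of enabling the debug levels described above for a single package via `overrideAttrs` (`hello` only stands in for any package built with `stdenv.mkDerivation`):
```nix
# mkDerivation attributes become environment variables of the build,
# so setting NIX_DEBUG on a package activates the wrapper/setup tracing above.
hello.overrideAttrs (oldAttrs: {
  NIX_DEBUG = 1;  # 6 adds `set -x` for the setup script, 7 also traces the gcc/ld wrappers
})
```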


@ -281,6 +281,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
url = https://fedoraproject.org/wiki/Licensing/GPL_Classpath_Exception;
};
hpnd = spdx {
spdxId = "HPND";
fullName = "Historic Permission Notice and Disclaimer";
};
# Intel's license, seems free
iasl = {
fullName = "iASL";
@ -292,9 +297,10 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Independent JPEG Group License";
};
inria = {
fullName = "INRIA Non-Commercial License Agreement";
inria-compcert = {
fullName = "INRIA Non-Commercial License Agreement for the CompCert verified compiler";
url = "http://compcert.inria.fr/doc/LICENSE";
free = false;
};
ipa = spdx {


@ -107,6 +107,7 @@
choochootrain = "Hurshal Patel <hurshal@imap.cc>";
chris-martin = "Chris Martin <ch.martin@gmail.com>";
chrisjefferson = "Christopher Jefferson <chris@bubblescope.net>";
chrisrosset = "Christopher Rosset <chris@rosset.org.uk>";
christopherpoole = "Christopher Mark Poole <mail@christopherpoole.net>";
ciil = "Simon Lackerbauer <simon@lackerbauer.com>";
ckampka = "Christian Kampka <christian@kampka.net>";
@ -186,17 +187,20 @@
ellis = "Ellis Whitehead <nixos@ellisw.net>";
eperuffo = "Emanuele Peruffo <info@emanueleperuffo.com>";
epitrochoid = "Mabry Cervin <mpcervin@uncg.edu>";
eqyiel = "Ruben Maher <r@rkm.id.au>";
ericbmerritt = "Eric Merritt <eric@afiniate.com>";
ericsagnes = "Eric Sagnes <eric.sagnes@gmail.com>";
erikryb = "Erik Rybakken <erik.rybakken@math.ntnu.no>";
ertes = "Ertugrul Söylemez <esz@posteo.de>";
ethercrow = "Dmitry Ivanov <ethercrow@gmail.com>";
etu = "Elis Hirwing <elis@hirwing.se>";
exi = "Reno Reckling <nixos@reckling.org>";
exlevan = "Alexey Levan <exlevan@gmail.com>";
expipiplus1 = "Joe Hermaszewski <nix@monoid.al>";
fadenb = "Tristan Helmich <tristan.helmich+nixos@gmail.com>";
fare = "Francois-Rene Rideau <fahree@gmail.com>";
falsifian = "James Cook <james.cook@utoronto.ca>";
fare = "Francois-Rene Rideau <fahree@gmail.com>";
fgaz = "Francesco Gazzetta <francygazz@gmail.com>";
florianjacob = "Florian Jacob <projects+nixos@florianjacob.de>";
flosse = "Markus Kohlhase <mail@markus-kohlhase.de>";
fluffynukeit = "Daniel Austin <dan@fluffynukeit.com>";
@ -238,6 +242,7 @@
guillaumekoenig = "Guillaume Koenig <guillaume.edward.koenig@gmail.com>";
guyonvarch = "Joris Guyonvarch <joris@guyonvarch.me>";
hakuch = "Jesse Haber-Kucharsky <hakuch@gmail.com>";
hamhut1066 = "Hamish Hutchings <github@hamhut1066.com>";
havvy = "Ryan Scheel <ryan.havvy@gmail.com>";
hbunke = "Hendrik Bunke <bunke.hendrik@gmail.com>";
hce = "Hans-Christian Esperer <hc@hcesperer.org>";
@ -287,12 +292,12 @@
jonafato = "Jon Banafato <jon@jonafato.com>";
jpierre03 = "Jean-Pierre PRUNARET <nix@prunetwork.fr>";
jpotier = "Martin Potier <jpo.contributes.to.nixos@marvid.fr>";
jyp = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jraygauthier = "Raymond Gauthier <jraygauthier@gmail.com>";
jtojnar = "Jan Tojnar <jtojnar@gmail.com>";
juliendehos = "Julien Dehos <dehos@lisic.univ-littoral.fr>";
jwiegley = "John Wiegley <johnw@newartisans.com>";
jwilberding = "Jordan Wilberding <jwilberding@afiniate.com>";
jyp = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jzellner = "Jeff Zellner <jeffz@eml.cc>";
kaiha = "Kai Harries <kai.harries@gmail.com>";
kamilchm = "Kamil Chmielewski <kamil.chm@gmail.com>";
@ -332,6 +337,7 @@
lovek323 = "Jason O'Conal <jason@oconal.id.au>";
lowfatcomputing = "Andreas Wagner <andreas.wagner@lowfatcomputing.org>";
lsix = "Lancelot SIX <lsix@lancelotsix.com>";
ltavard = "Laure Tavard <laure.tavard@univ-grenoble-alpes.fr>";
lucas8 = "Luc Chabassier <luc.linux@mailoo.org>";
ludo = "Ludovic Courtès <ludo@gnu.org>";
lufia = "Kyohei Kadota <lufia@lufia.org>";
@ -494,6 +500,7 @@
renzo = "Renzo Carbonara <renzocarbonara@gmail.com>";
retrry = "Tadas Barzdžius <retrry@gmail.com>";
rht = "rht <rhtbot@protonmail.com>";
richardipsum = "Richard Ipsum <richardipsum@fastmail.co.uk>";
rick68 = "Wei-Ming Yang <rick68@gmail.com>";
rickynils = "Rickard Nilsson <rickynils@gmail.com>";
ris = "Robert Scott <code@humanleg.org.uk>";
@ -503,6 +510,7 @@
robberer = "Longrin Wischnewski <robberer@freakmail.de>";
robbinch = "Robbin C. <robbinch33@gmail.com>";
roberth = "Robert Hensing <nixpkgs@roberthensing.nl>";
robertodr = "Roberto Di Remigio <roberto.diremigio@gmail.com>";
robgssp = "Rob Glossop <robgssp@gmail.com>";
roblabla = "Robin Lambertz <robinlambertz+dev@gmail.com>";
roconnor = "Russell O'Connor <roconnor@theorem.ca>";
@ -576,10 +584,9 @@
taku0 = "Takuo Yonezawa <mxxouy6x3m_github@tatapa.org>";
tari = "Peter Marheine <peter@taricorp.net>";
tavyc = "Octavian Cerna <octavian.cerna@gmail.com>";
ltavard = "Laure Tavard <laure.tavard@univ-grenoble-alpes.fr>";
teh = "Tom Hunger <tehunger@gmail.com>";
teto = "Matthieu Coudron <mcoudron@hotmail.com>";
telotortium = "Robert Irelan <rirelan@gmail.com>";
teto = "Matthieu Coudron <mcoudron@hotmail.com>";
thall = "Niclas Thall <niclas.thall@gmail.com>";
thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>";
@ -608,6 +615,7 @@
#urkud = "Yury G. Kudryashov <urkud+nix@ya.ru>"; inactive since 2012
uwap = "uwap <me@uwap.name>";
vaibhavsagar = "Vaibhav Sagar <vaibhavsagar@gmail.com>";
valeriangalliat = "Valérian Galliat <val@codejam.info>";
vandenoever = "Jos van den Oever <jos@vandenoever.info>";
vanschelven = "Klaas van Schelven <klaas@vanschelven.com>";
vanzef = "Ivan Solyankin <vanzef@gmail.com>";
@ -624,7 +632,6 @@
vlstill = "Vladimír Štill <xstill@fi.muni.cz>";
vmandela = "Venkateswara Rao Mandela <venkat.mandela@gmail.com>";
vmchale = "Vanessa McHale <tmchale@wisc.edu>";
valeriangalliat = "Valérian Galliat <val@codejam.info>";
volhovm = "Mikhail Volkhov <volhovm.cs@gmail.com>";
volth = "Jaroslavas Pocepko <jaroslavas@volth.com>";
vozz = "Oliver Hunt <oliver.huntuk@gmail.com>";
@ -646,6 +653,7 @@
xvapx = "Marti Serra <marti.serra.coscollano@gmail.com>";
xwvvvvwx = "David Terry <davidterry@posteo.de>";
yarr = "Dmitry V. <savraz@gmail.com>";
yegortimoshenko = "Yegor Timoshenko <yegortimoshenko@gmail.com>";
yochai = "Yochai <yochai@titat.info>";
yorickvp = "Yorick van Pelt <yorickvanpelt@gmail.com>";
yuriaisaka = "Yuri Aisaka <yuri.aisaka+nix@gmail.com>";
@ -662,4 +670,5 @@
zoomulator = "Kim Simmons <zoomulator@gmail.com>";
zraexy = "David Mell <zraexy@gmail.com>";
zx2c4 = "Jason A. Donenfeld <Jason@zx2c4.com>";
zzamboni = "Diego Zamboni <diego@zzamboni.org>";
}


@ -9,17 +9,15 @@ GNOME_FTP=ftp.gnome.org/pub/GNOME/sources
NO_GNOME_MAJOR="ghex gtkhtml gdm"
usage() {
echo "Usage: $0 gnome_dir <show project>|<update project>|<update-all> [major.minor]" >&2
echo "gnome_dir is for example pkgs/desktops/gnome-3/3.18" >&2
echo "Usage: $0 <show project>|<update project>|<update-all> [major.minor]" >&2
exit 0
}
if [ "$#" -lt 2 ]; then
if [ "$#" -lt 1 ]; then
usage
fi
GNOME_TOP=$1
shift
GNOME_TOP=pkgs/desktops/gnome-3
action=$1


@ -13,10 +13,8 @@ from pyquery import PyQuery as pq
maintainers_json = subprocess.check_output([
'nix-instantiate',
'lib/maintainers.nix',
'--eval',
'--json'])
'nix-instantiate', '-E', 'import ./lib/maintainers.nix {}', '--eval', '--json'
])
maintainers = json.loads(maintainers_json)
MAINTAINERS = {v: k for k, v in maintainers.iteritems()}


@ -9,6 +9,7 @@
<para>This section lists the release notes for each stable version of NixOS
and current unstable revision.</para>
<xi:include href="rl-1803.xml" />
<xi:include href="rl-1709.xml" />
<xi:include href="rl-1703.xml" />
<xi:include href="rl-1609.xml" />


@ -45,6 +45,33 @@ has the following highlights: </para>
even though <literal>HDMI-0</literal> is the first head in the list.
</para>
</listitem>
<listitem>
<para>
The handling of SSL in the nginx module has been cleaned up, renaming
the misnomed <literal>enableSSL</literal> to <literal>onlySSL</literal>
which reflects its original intention. This is not to be used with the
already existing <literal>forceSSL</literal>, which creates a second
non-SSL virtual host redirecting to the SSL virtual host. This
combination only happened to work earlier due to specific implementation
details. If you had specified both, please remove the
<literal>enableSSL</literal> option to keep the previous behaviour.
</para>
<para>
Another <literal>addSSL</literal> option has been introduced to configure
both a non-SSL virtual host and an SSL virtual host.
</para>
<para>
Options to configure <literal>resolver</literal>s and
<literal>upstream</literal>s have been introduced. See the respective
option descriptions for further details.
</para>
<para>
The <literal>port</literal> option has been replaced by a more generic
<literal>listen</literal> option which makes it possible to specify
multiple addresses, ports and SSL configs dependent on the new SSL
handling mentioned above.
</para>
</listitem>
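A hedged configuration sketch of the new behaviour (the host name, certificate paths and web root are placeholders):
```nix
services.nginx.virtualHosts."example.org" = {
  addSSL = true;        # serve the site over both HTTP and HTTPS
  # forceSSL = true;    # alternatively: redirect all HTTP traffic to HTTPS
  sslCertificate = "/var/lib/acme/example.org/fullchain.pem";
  sslCertificateKey = "/var/lib/acme/example.org/key.pem";
  root = "/var/www/example.org";
};
```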
</itemizedlist>
<para>The following new services were added since the last release:</para>
@ -62,10 +89,17 @@ following incompatible changes:</para>
<itemizedlist>
<listitem>
<para>
<literal>aiccu</literal> package was removed. This is due to SixXS
The <literal>aiccu</literal> package was removed. This is due to SixXS
<link xlink:href="https://www.sixxs.net/main/"> sunsetting</link> its IPv6 tunnel.
</para>
</listitem>
<listitem>
<para>
The <literal>fanctl</literal> package and <literal>fan</literal> module
have been removed because the developers have not upstreamed their
iproute2 patches and the package lags behind recent iproute2 versions.
</para>
</listitem>
<listitem>
<para>
Top-level <literal>idea</literal> package collection was renamed.
@ -202,6 +236,112 @@ rmdir /var/lib/ipfs/.ipfs
<command>gpgv</command>, etc.
</para>
</listitem>
<listitem>
<para>
<literal>services.mysql</literal> now has declarative
configuration of databases and users with the <literal>ensureDatabases</literal> and
<literal>ensureUsers</literal> options.
</para>
<para>
These options will never delete existing databases and users,
especially not when the value of the options are changed.
</para>
<para>
The MySQL users will be identified using
<link xlink:href="https://mariadb.com/kb/en/library/authentication-plugin-unix-socket/">
Unix socket authentication</link>. This authenticates only the
Unix user of the same name, and does so without requiring a password.
</para>
<para>
If you have previously created a MySQL <literal>root</literal>
user <emphasis>with a password</emphasis>, you will need to add
a <literal>root</literal> user for Unix socket authentication
before using the new options. This can be done by running the
following SQL script:
<programlisting language="sql">
CREATE USER 'root'@'%' IDENTIFIED BY '';
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
FLUSH PRIVILEGES;
-- Optionally, delete the password-authenticated user:
-- DROP USER 'root'@'localhost';
</programlisting>
</para>
</listitem>
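A hedged sketch of the new declarative options (database and user names are placeholders; the permission syntax mirrors the module's own use of `ensurePermissions` elsewhere in this commit):
```nix
services.mysql = {
  enable = true;
  ensureDatabases = [ "exampledb" ];
  ensureUsers = [
    { name = "exampleuser";   # authenticated via Unix socket authentication
      ensurePermissions = { "exampledb.*" = "ALL PRIVILEGES"; };
    }
  ];
};
```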
<listitem>
<para>
The <literal>sha256</literal> argument value of the
<literal>dockerTools.pullImage</literal> expression must be
updated, since the mechanism used to download the image has
changed. Skopeo is now used to pull the image instead of the
Docker daemon.
</para>
</listitem>
<listitem>
<para>
Templated systemd services, e.g. <literal>container@name</literal>, are
now handled correctly when switching to a new configuration, resulting
in them being reloaded.
</para>
</listitem>
<listitem>
<para>
<literal>services.mysqlBackup</literal> now works by default
without any user setup, including for users other than
<literal>mysql</literal>.
</para>
<para>
By default, the <literal>mysql</literal> user is no longer the
user which performs the backup. Instead a system account
<literal>mysqlbackup</literal> is used.
</para>
<para>
The <literal>mysqlBackup</literal> service is also now using
systemd timers instead of <literal>cron</literal>.
</para>
<para>
Therefore, the <literal>services.mysqlBackup.period</literal>
option no longer exists, and has been replaced with
<literal>services.mysqlBackup.calendar</literal>, which is in
the format of <link
xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.time.html#Calendar%20Events">systemd.time(7)</link>.
</para>
<para>
If you expect to be sent an e-mail when the backup fails,
consider using a script which monitors the systemd journal for
errors. Regretfully, at present there is no built-in
functionality for this.
</para>
<para>
You can check that backups still work by running
<command>systemctl start mysql-backup</command> then
<command>systemctl status mysql-backup</command>.
</para>
</listitem>
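A hedged sketch of the updated backup service (the database name is a placeholder; `calendar` uses the systemd.time(7) format and defaults to `01:15:00`):
```nix
services.mysqlBackup = {
  enable = true;
  databases = [ "exampledb" ];
  calendar = "01:15:00";   # run the systemd timer nightly at 01:15
};
```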
<listitem>
<para>Steam: the <literal>newStdcpp</literal> parameter
was removed and should not be needed anymore.</para>
</listitem>
<listitem>
<para>
Redis has been updated to version 4 which mandates a cluster
mass-restart, due to changes in the network handling, in order
to ensure compatibility with networks NATing traffic.
</para>
</listitem>
</itemizedlist>
<para>Other notable improvements:</para>
@ -257,11 +397,55 @@ rmdir /var/lib/ipfs/.ipfs
</listitem>
<listitem>
<para>
<literal>sha256</literal> argument value of
<literal>dockerTools.pullImage</literal> expression must be
updated since the mechanism to download the image has been
changed. Skopeo is now used to pull the image instead of the
Docker daemon.
Definitions for <filename>/etc/hosts</filename> can now be specified
declaratively with <literal>networking.hosts</literal>.
</para>
</listitem>
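A hedged sketch, assuming `networking.hosts` maps addresses to lists of host names (the entries shown are placeholders):
```nix
networking.hosts = {
  "192.168.1.10" = [ "server.example.org" "server" ];
  "2001:db8::1"  = [ "server6.example.org" ];
};
```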
<listitem>
<para>
Two new options have been added to the installer loader, and the
default has changed. The kernel log verbosity of the default entries
has been lowered to the upstream default, in order not to spam the
console when e.g. joining a network.
</para>
<para>
A new <literal>debug</literal> option has therefore been added, which
restores the previous verbose log level so that detailed output remains
easily accessible for debugging.
</para>
<para>
Additionally, a <literal>copytoram</literal> option has been added,
which makes it possible to remove the install medium after booting.
This allows, for example, tethering from a phone after booting from it.
</para>
<para>
<literal>services.gitlab-runner.configOptions</literal> has been added
to specify the configuration of gitlab-runners declaratively.
</para>
<para>
<literal>services.jenkins.plugins</literal> has been added
to install plugins easily; the plugin set can be generated with jenkinsPlugins2nix.
</para>
<para>
<literal>services.postfix.config</literal> has been added
to specify the main.cf with NixOS options (a short sketch follows
this list item). Additionally, other options have been added to the
postfix module, and it has been improved further.
</para>
<para>
The GitLab package and module have been updated to the latest 9.5 release.
</para>
<para>
The <literal>systemd-boot</literal> boot loader now lists the NixOS
version, kernel version and build date of all bootable generations.
</para>
</listitem>
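As referenced above, a hedged sketch of `services.postfix.config`, assuming it accepts main.cf key/value pairs (the setting shown is a placeholder):
```nix
services.postfix = {
  enable = true;
  config = {
    smtp_tls_security_level = "may";   # rendered into main.cf
  };
};
```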
<listitem>
<para>
The dnscrypt-proxy service now defaults to using a random upstream resolver,
selected from the list of public non-logging resolvers with DNSSEC support.
Existing configurations can be migrated to this mode of operation by
omitting the <option>services.dnscrypt-proxy.resolverName</option> option
or setting it to <literal>"random"</literal>.
</para>
</listitem>
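A hedged sketch of the default behaviour described above:
```nix
services.dnscrypt-proxy = {
  enable = true;
  # resolverName = "random";   # the new default; omit it or set it explicitly
};
```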


@ -29,8 +29,7 @@ following incompatible changes:</para>
<itemizedlist>
<listitem>
<para>
</para>
<para></para>
</listitem>
</itemizedlist>


@ -77,7 +77,6 @@ let
excludedOptions = [
"boot.systemd.services"
"systemd.services"
"environment.gnome3.packageSet"
"kde.extraPackages"
];
excludeOptions = list:


@ -9,9 +9,7 @@ let
cfg = config.networking;
dnsmasqResolve = config.services.dnsmasq.enable &&
config.services.dnsmasq.resolveLocalQueries;
bindResolve = config.services.bind.enable &&
config.services.bind.resolveLocalQueries;
hasLocalResolver = bindResolve || dnsmasqResolve;
hasLocalResolver = config.services.bind.enable || dnsmasqResolve;
resolvconfOptions = cfg.resolvconfOptions
++ optional cfg.dnsSingleRequest "single-request"


@ -40,6 +40,12 @@ in
{
config = mkIf enabled {
assertions = [
{
assertion = config.services.xserver.displayManager.gdm.wayland == false;
message = "NVidia drivers don't support wayland";
}
];
services.xserver.drivers = singleton
{ name = "nvidia"; modules = [ nvidia_x11.bin ]; libPath = [ nvidia_x11 ]; };
@ -62,11 +68,16 @@ in
boot.extraModulePackages = [ nvidia_x11.bin ];
# nvidia-uvm is required by CUDA applications.
boot.kernelModules = [ "nvidia-uvm" ];
boot.kernelModules = [ "nvidia-uvm" ] ++
lib.optionals config.services.xserver.enable [ "nvidia" "nvidia_modeset" "nvidia_drm" ];
# Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
services.udev.extraRules =
''
KERNEL=="nvidia", RUN+="${pkgs.stdenv.shell} -c 'mknod -m 666 /dev/nvidiactl c $(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
KERNEL=="nvidia_modeset", RUN+="${pkgs.stdenv.shell} -c 'mknod -m 666 /dev/nvidia-modeset c $(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="nvidia", RUN+="${pkgs.stdenv.shell} -c 'mknod -m 666 /dev/nvidia%n c $(grep nvidia-frontend /proc/devices | cut -d \ -f 1) %n'"
KERNEL=="nvidia_uvm", RUN+="${pkgs.stdenv.shell} -c 'mknod -m 666 /dev/nvidia-uvm c $(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
'';


@ -46,17 +46,24 @@ let
# A variant to boot with 'nomodeset'
LABEL boot-nomodeset
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (with nomodeset)
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (nomodeset)
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
INITRD /boot/initrd
# A variant to boot with 'copytoram'
LABEL boot-copytoram
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (with copytoram)
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (copytoram)
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram
INITRD /boot/initrd
# A variant to boot with verbose logging to the console
LABEL boot-nomodeset
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (debug)
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} loglevel=7
INITRD /boot/initrd
'';
isolinuxMemtest86Entry = ''
@ -74,25 +81,43 @@ let
cp -v ${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi
mkdir -p $out/loader/entries
echo "title NixOS Live CD" > $out/loader/entries/nixos-livecd.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf
cat << EOF > $out/loader/entries/nixos-iso.conf
title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
linux /boot/bzImage
initrd /boot/initrd
options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
EOF
# A variant to boot with 'nomodeset'
echo "title NixOS Live CD (with nomodeset)" > $out/loader/entries/nixos-livecd-nomodeset.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd-nomodeset.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd-nomodeset.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset" >> $out/loader/entries/nixos-livecd-nomodeset.conf
cat << EOF > $out/loader/entries/nixos-iso-nomodeset.conf
title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
version nomodeset
linux /boot/bzImage
initrd /boot/initrd
options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
EOF
# A variant to boot with 'copytoram'
echo "title NixOS Live CD (with copytoram)" > $out/loader/entries/nixos-livecd-copytoram.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd-copytoram.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd-copytoram.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram" >> $out/loader/entries/nixos-livecd-copytoram.conf
cat << EOF > $out/loader/entries/nixos-iso-copytoram.conf
title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
version copytoram
linux /boot/bzImage
initrd /boot/initrd
options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram
EOF
echo "default nixos-livecd" > $out/loader/loader.conf
echo "timeout ${builtins.toString config.boot.loader.timeout}" >> $out/loader/loader.conf
# A variant to boot with verbose logging to the console
cat << EOF > $out/loader/entries/nixos-iso-debug.conf
title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (debug)
linux /boot/bzImage
initrd /boot/initrd
options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} loglevel=7
EOF
cat << EOF > $out/loader/loader.conf
default nixos-iso
timeout ${builtins.toString config.boot.loader.timeout}
EOF
'';
efiImg = pkgs.runCommand "efi-image_eltorito" { buildInputs = [ pkgs.mtools pkgs.libfaketime ]; }
@ -336,6 +361,9 @@ in
{ source = config.isoImage.splashImage;
target = "/isolinux/background.png";
}
{ source = pkgs.writeText "version" config.system.nixosVersion;
target = "/version.txt";
}
] ++ optionals config.isoImage.makeEfiBootable [
{ source = efiImg;
target = "/boot/efi.img";


@ -583,9 +583,15 @@ $bootLoaderConfig
# List packages installed in system profile. To search by name, run:
# \$ nix-env -qaP | grep wget
# environment.systemPackages = with pkgs; [
# wget
# wget vim
# ];
# Some programs need SUID wrappers, can be configured further or are
# started in user sessions.
# programs.bash.enableCompletion = true;
# programs.mtr.enable = true;
# programs.gnupg.agent = { enable = true; enableSSHSupport = true; };
# List services that you want to enable:
# Enable the OpenSSH daemon.


@ -102,7 +102,7 @@ fi
extraBuildFlags+=(--option "build-users-group" "$buildUsersGroup")
# Inherit binary caches from the host
# TODO: will this still work with Nix 1.12 now that it has no perl? Probably not...
# TODO: will this still work with Nix 1.12 now that it has no perl? Probably not...
binary_caches="$(@perl@/bin/perl -I @nix@/lib/perl5/site_perl/*/* -e 'use Nix::Config; Nix::Config::readConfig; print $Nix::Config::config{"binary-caches"};')"
extraBuildFlags+=(--option "binary-caches" "$binary_caches")
@ -113,8 +113,33 @@ if [[ -z "$closure" ]]; then
fi
unset NIXOS_CONFIG
# TODO: do I need to set NIX_SUBSTITUTERS here or is the --option binary-caches above enough?
# These get created in nixos-prepare-root as well, but we want to make sure they're here in case we're
# running with --chroot. TODO: --chroot should just be split into a separate tool.
mkdir -m 0755 -p "$mountPoint/dev" "$mountPoint/proc" "$mountPoint/sys"
# Set up some bind mounts we'll want regardless of chroot or not
mount --rbind /dev "$mountPoint/dev"
mount --rbind /proc "$mountPoint/proc"
mount --rbind /sys "$mountPoint/sys"
# If we asked for a chroot, that means we're not actually installing anything (yeah I was confused too)
# and we just want to run a command in the context of a $mountPoint that we're assuming has already been
# set up by a previous nixos-install invocation. In that case we set up some remaining bind mounts and
# exec the requested command, skipping the rest of the installation procedure.
if [ -n "$runChroot" ]; then
mount -t tmpfs -o "mode=0755" none $mountPoint/run
rm -rf $mountPoint/var/run
ln -s /run $mountPoint/var/run
for f in /etc/resolv.conf /etc/hosts; do rm -f $mountPoint/$f; [ -f "$f" ] && cp -Lf $f $mountPoint/etc/; done
for f in /etc/passwd /etc/group; do touch $mountPoint/$f; [ -f "$f" ] && mount --rbind -o ro $f $mountPoint/$f; done
if ! [ -L $mountPoint/nix/var/nix/profiles/system ]; then
echo "$0: installation not finished; cannot chroot into installation directory"
exit 1
fi
ln -s /nix/var/nix/profiles/system $mountPoint/run/current-system
exec chroot $mountPoint "${chrootCommand[@]}"
fi
# A place to drop temporary closures
trap "rm -rf $tmpdir" EXIT
@ -153,9 +178,7 @@ nix-store --export $channel_root > $channel_closure
# nixos-prepare-root doesn't currently do anything with file ownership, so we set it up here instead
chown @root_uid@:@nixbld_gid@ $mountPoint/nix/store
mount --rbind /dev $mountPoint/dev
mount --rbind /proc $mountPoint/proc
mount --rbind /sys $mountPoint/sys
# Grub needs an mtab.
ln -sfn /proc/mounts $mountPoint/etc/mtab


@ -426,7 +426,7 @@
teamspeak = 124;
influxdb = 125;
nsd = 126;
#gitolite = 127; # unused
gitolite = 127;
znc = 128;
polipo = 129;
mopidy = 130;


@ -92,6 +92,7 @@
./programs/mosh.nix
./programs/mtr.nix
./programs/nano.nix
./programs/npm.nix
./programs/oblogout.nix
./programs/qt5ct.nix
./programs/screen.nix
@ -156,7 +157,9 @@
./services/backup/tarsnap.nix
./services/backup/znapzend.nix
./services/cluster/fleet.nix
./services/cluster/kubernetes.nix
./services/cluster/kubernetes/default.nix
./services/cluster/kubernetes/dns.nix
./services/cluster/kubernetes/dashboard.nix
./services/cluster/panamax.nix
./services/computing/boinc/client.nix
./services/computing/torque/server.nix
@ -352,6 +355,7 @@
./services/monitoring/collectd.nix
./services/monitoring/das_watchdog.nix
./services/monitoring/dd-agent/dd-agent.nix
./services/monitoring/fusion-inventory.nix
./services/monitoring/grafana.nix
./services/monitoring/graphite.nix
./services/monitoring/hdaps.nix
@ -423,12 +427,12 @@
./services/networking/ddclient.nix
./services/networking/dhcpcd.nix
./services/networking/dhcpd.nix
./services/networking/dnscache.nix
./services/networking/dnschain.nix
./services/networking/dnscrypt-proxy.nix
./services/networking/dnscrypt-wrapper.nix
./services/networking/dnsmasq.nix
./services/networking/ejabberd.nix
./services/networking/fan.nix
./services/networking/fakeroute.nix
./services/networking/ferm.nix
./services/networking/firefox/sync-server.nix
@ -524,6 +528,7 @@
./services/networking/tcpcrypt.nix
./services/networking/teamspeak3.nix
./services/networking/tinc.nix
./services/networking/tinydns.nix
./services/networking/tftpd.nix
./services/networking/tox-bootstrapd.nix
./services/networking/toxvpn.nix


@ -20,6 +20,7 @@
# Some networking tools.
pkgs.fuse
pkgs.fuse3
pkgs.sshfs-fuse
pkgs.socat
pkgs.screen


@ -77,7 +77,6 @@ with lib;
# Show all debug messages from the kernel but don't log refused packets
# because we have the firewall enabled. This makes installs from the
# console less cumbersome if the machine has a public IP.
boot.consoleLogLevel = mkDefault 7;
networking.firewall.logRefusedConnections = mkDefault false;
environment.systemPackages = [ pkgs.vim ];


@ -0,0 +1,44 @@
{ config, lib, ... }:
with lib;
let
cfg = config.programs.npm;
in
{
###### interface
options = {
programs.npm = {
enable = mkEnableOption "<command>npm</command> global config";
npmrc = lib.mkOption {
type = lib.types.lines;
description = ''
The system-wide npm configuration.
See <link xlink:href="https://docs.npmjs.com/misc/config"/>.
'';
default = ''
prefix = ''${HOME}/.npm
'';
example = ''
prefix = ''${HOME}/.npm
https-proxy=proxy.example.com
init-license=MIT
init-author-url=http://npmjs.org
color=true
'';
};
};
};
###### implementation
config = lib.mkIf cfg.enable {
environment.etc."npmrc".text = cfg.npmrc;
environment.variables.NPM_CONFIG_GLOBALCONFIG = "/etc/npmrc";
};
}
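A hedged usage sketch for the module added above (the proxy host is a placeholder):
```nix
{
  programs.npm = {
    enable = true;
    npmrc = ''
      prefix = ''${HOME}/.npm
      https-proxy=proxy.example.com
    '';
  };
}
```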


@ -21,7 +21,7 @@ in
enable = mkOption {
default = false;
description = ''
Whether to configure xnosh as an interactive shell.
Whether to configure xonsh as an interactive shell.
'';
type = types.bool;
};


@ -17,19 +17,27 @@ with lib;
};
config = mkIf config.security.lockKernelModules {
boot.kernelModules = concatMap (x:
if x.device != null
then
if x.fsType == "vfat"
then [ "vfat" "nls-cp437" "nls-iso8859-1" ]
else [ x.fsType ]
else []) config.system.build.fileSystems;
systemd.services.disable-kernel-module-loading = rec {
description = "Disable kernel module loading";
wantedBy = [ config.systemd.defaultUnit ];
after = [ "systemd-udev-settle.service" "firewall.service" "systemd-modules-load.service" ] ++ wantedBy;
script = "echo -n 1 > /proc/sys/kernel/modules_disabled";
after = [ "systemd-udev-settle.service" "firewall.service" "systemd-modules-load.service" ] ++ wantedBy;
unitConfig.ConditionPathIsReadWrite = "/proc/sys/kernel";
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "/bin/sh -c 'echo -n 1 >/proc/sys/kernel/modules_disabled'";
};
};
};


@ -155,7 +155,10 @@ in
###### implementation
config = {
security.wrappers.fusermount.source = "${pkgs.fuse}/bin/fusermount";
security.wrappers = {
fusermount.source = "${pkgs.fuse}/bin/fusermount";
fusermount3.source = "${pkgs.fuse3}/bin/fusermount3";
};
boot.specialFileSystems.${parentWrapperDir} = {
fsType = "tmpfs";


@ -105,7 +105,8 @@ in {
RABBITMQ_MNESIA_BASE = "${cfg.dataDir}/mnesia";
RABBITMQ_NODE_IP_ADDRESS = cfg.listenAddress;
RABBITMQ_NODE_PORT = toString cfg.port;
RABBITMQ_SERVER_START_ARGS = "-rabbit error_logger tty -rabbit sasl_error_logger false";
RABBITMQ_LOGS = "-";
RABBITMQ_SASL_LOGS = "-";
RABBITMQ_PID_FILE = "${cfg.dataDir}/pid";
SYS_PREFIX = "";
RABBITMQ_ENABLED_PLUGINS_FILE = pkgs.writeText "enabled_plugins" ''
@ -128,7 +129,7 @@ in {
preStart = ''
${optionalString (cfg.cookie != "") ''
echo -n ${cfg.cookie} > ${cfg.dataDir}/.erlang.cookie
chmod 400 ${cfg.dataDir}/.erlang.cookie
chmod 600 ${cfg.dataDir}/.erlang.cookie
''}
'';
};


@ -6,10 +6,28 @@ let
inherit (pkgs) mysql gzip;
cfg = config.services.mysqlBackup ;
location = cfg.location ;
mysqlBackupCron = db : ''
${cfg.period} ${cfg.user} ${mysql}/bin/mysqldump ${if cfg.singleTransaction then "--single-transaction" else ""} ${db} | ${gzip}/bin/gzip -c > ${location}/${db}.gz
cfg = config.services.mysqlBackup;
defaultUser = "mysqlbackup";
backupScript = ''
set -o pipefail
failed=""
${concatMapStringsSep "\n" backupDatabaseScript cfg.databases}
if [ -n "$failed" ]; then
echo "Backup of database(s) failed:$failed"
exit 1
fi
'';
backupDatabaseScript = db: ''
dest="${cfg.location}/${db}.gz"
if ${mysql}/bin/mysqldump ${if cfg.singleTransaction then "--single-transaction" else ""} ${db} | ${gzip}/bin/gzip -c > $dest.tmp; then
mv $dest.tmp $dest
echo "Backed up to $dest"
else
echo "Failed to back up to $dest"
rm -f $dest.tmp
failed="$failed ${db}"
fi
'';
in
@ -26,17 +44,16 @@ in
'';
};
period = mkOption {
default = "15 01 * * *";
calendar = mkOption {
type = types.str;
default = "01:15:00";
description = ''
This option defines (in the format used by cron) when the
databases should be dumped.
The default is to update at 01:15 (at night) every day.
Configures when to run the backup service systemd unit (DayOfWeek Year-Month-Day Hour:Minute:Second).
'';
};
user = mkOption {
default = "mysql";
default = defaultUser;
description = ''
User to be used to perform backup.
'';
@ -66,16 +83,49 @@ in
};
config = mkIf config.services.mysqlBackup.enable {
config = mkIf cfg.enable {
users.extraUsers = optionalAttrs (cfg.user == defaultUser) (singleton
{ name = defaultUser;
isSystemUser = true;
createHome = false;
home = cfg.location;
group = "nogroup";
});
services.cron.systemCronJobs = map mysqlBackupCron config.services.mysqlBackup.databases;
system.activationScripts.mysqlBackup = stringAfter [ "stdio" "users" ]
''
mkdir -m 0700 -p ${config.services.mysqlBackup.location}
chown ${config.services.mysqlBackup.user} ${config.services.mysqlBackup.location}
'';
services.mysql.ensureUsers = [{
name = cfg.user;
ensurePermissions = with lib;
let
privs = "SELECT, SHOW VIEW, TRIGGER, LOCK TABLES";
grant = db: nameValuePair "${db}.*" privs;
in
listToAttrs (map grant cfg.databases);
}];
systemd = {
timers."mysql-backup" = {
description = "Mysql backup timer";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = cfg.calendar;
AccuracySec = "5m";
Unit = "mysql-backup.service";
};
};
services."mysql-backup" = {
description = "Mysql backup service";
enable = true;
serviceConfig = {
User = cfg.user;
PermissionsStartOnly = true;
};
preStart = ''
mkdir -m 0700 -p ${cfg.location}
chown -R ${cfg.user} ${cfg.location}
'';
script = backupScript;
};
};
};
}


@ -0,0 +1,160 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.kubernetes.addons.dashboard;
name = "gcr.io/google_containers/kubernetes-dashboard-amd64";
version = "v1.6.3";
image = pkgs.dockerTools.pullImage {
imageName = name;
imageTag = version;
sha256 = "0b5v7xa3s91yi9yfsw2b8wijiprnicbb02f5kqa579h4yndb3gfz";
};
in {
options.services.kubernetes.addons.dashboard = {
enable = mkEnableOption "kubernetes dashboard addon";
enableRBAC = mkOption {
description = "Whether to enable role based access control is enabled for kubernetes dashboard";
type = types.bool;
default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
};
};
config = mkIf cfg.enable {
services.kubernetes.kubelet.seedDockerImages = [image];
services.kubernetes.addonManager.addons = {
kubernetes-dashboard-deployment = {
kind = "Deployment";
apiVersion = "apps/v1beta1";
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
k8s-app = "kubernetes-dashboard";
version = version;
"kubernetes.io/cluster-service" = "true";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
spec = {
replicas = 1;
revisionHistoryLimit = 10;
selector.matchLabels."k8s-app" = "kubernetes-dashboard";
template = {
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
k8s-app = "kubernetes-dashboard";
version = version;
"kubernetes.io/cluster-service" = "true";
};
annotations = {
"scheduler.alpha.kubernetes.io/critical-pod" = "";
#"scheduler.alpha.kubernetes.io/tolerations" = ''[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'';
};
};
spec = {
containers = [{
name = "kubernetes-dashboard";
image = "${name}:${version}";
ports = [{
containerPort = 9090;
protocol = "TCP";
}];
resources = {
limits = {
cpu = "100m";
memory = "50Mi";
};
requests = {
cpu = "100m";
memory = "50Mi";
};
};
livenessProbe = {
httpGet = {
path = "/";
port = 9090;
};
initialDelaySeconds = 30;
timeoutSeconds = 30;
};
}];
serviceAccountName = "kubernetes-dashboard";
tolerations = [{
key = "node-role.kubernetes.io/master";
effect = "NoSchedule";
}];
};
};
};
};
kubernetes-dashboard-svc = {
apiVersion = "v1";
kind = "Service";
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
k8s-app = "kubernetes-dashboard";
"kubernetes.io/cluster-service" = "true";
"kubernetes.io/name" = "KubeDashboard";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
spec = {
ports = [{
port = 80;
targetPort = 9090;
}];
selector.k8s-app = "kubernetes-dashboard";
};
};
kubernetes-dashboard-sa = {
apiVersion = "v1";
kind = "ServiceAccount";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
};
} // (optionalAttrs cfg.enableRBAC {
kubernetes-dashboard-crb = {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "ClusterRoleBinding";
metadata = {
name = "kubernetes-dashboard";
labels = {
k8s-app = "kubernetes-dashboard";
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "ClusterRole";
name = "cluster-admin";
};
subjects = [{
kind = "ServiceAccount";
name = "kubernetes-dashboard";
namespace = "kube-system";
}];
};
});
};
}
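A hedged usage sketch of the dashboard addon defined above:
```nix
{
  services.kubernetes.addons.dashboard.enable = true;
  # enableRBAC defaults to true when the apiserver's authorizationMode contains "RBAC"
}
```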


@ -0,0 +1,311 @@
{ config, pkgs, lib, ... }:
with lib;
let
version = "1.14.4";
k8s-dns-kube-dns = pkgs.dockerTools.pullImage {
imageName = "gcr.io/google_containers/k8s-dns-kube-dns-amd64";
imageTag = version;
sha256 = "0g64jc2076ng28xl4w3w9svf7hc6s9h8rq9mhvvwpfy2p6lgj6gy";
};
k8s-dns-dnsmasq-nanny = pkgs.dockerTools.pullImage {
imageName = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64";
imageTag = version;
sha256 = "0sdpsbj1vismihy7ass1cn96nwmav6sf3r5h6i4k2dxha0y0jsh5";
};
k8s-dns-sidecar = pkgs.dockerTools.pullImage {
imageName = "gcr.io/google_containers/k8s-dns-sidecar-amd64";
imageTag = version;
sha256 = "01zpi189hpy2z62awl38fap908s8rrhc3v5gb6m90y2pycl4ad6q";
};
cfg = config.services.kubernetes.addons.dns;
in {
options.services.kubernetes.addons.dns = {
enable = mkEnableOption "kubernetes dns addon";
clusterIp = mkOption {
description = "Dns addon clusterIP";
# this default is also what kubernetes uses
default = (
concatStringsSep "." (
take 3 (splitString "." config.services.kubernetes.apiserver.serviceClusterIpRange
))
) + ".254";
type = types.str;
};
clusterDomain = mkOption {
description = "Dns cluster domain";
default = "cluster.local";
type = types.str;
};
};
config = mkIf cfg.enable {
services.kubernetes.kubelet.seedDockerImages = [
k8s-dns-kube-dns
k8s-dns-dnsmasq-nanny
k8s-dns-sidecar
];
services.kubernetes.addonManager.addons = {
kubedns-deployment = {
apiVersion = "apps/v1beta1";
kind = "Deployment";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
};
name = "kube-dns";
namespace = "kube-system";
};
spec = {
selector.matchLabels."k8s-app" = "kube-dns";
strategy = {
rollingUpdate = {
maxSurge = "10%";
maxUnavailable = 0;
};
};
template = {
metadata = {
annotations."scheduler.alpha.kubernetes.io/critical-pod" = "";
labels.k8s-app = "kube-dns";
};
spec = {
containers = [
{
name = "kubedns";
args = [
"--domain=${cfg.clusterDomain}"
"--dns-port=10053"
"--config-dir=/kube-dns-config"
"--v=2"
];
env = [
{
name = "PROMETHEUS_PORT";
value = "10055";
}
];
image = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:${version}";
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/healthcheck/kubedns";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
ports = [
{
containerPort = 10053;
name = "dns-local";
protocol = "UDP";
}
{
containerPort = 10053;
name = "dns-tcp-local";
protocol = "TCP";
}
{
containerPort = 10055;
name = "metrics";
protocol = "TCP";
}
];
readinessProbe = {
httpGet = {
path = "/readiness";
port = 8081;
scheme = "HTTP";
};
initialDelaySeconds = 3;
timeoutSeconds = 5;
};
resources = {
limits.memory = "170Mi";
requests = {
cpu = "100m";
memory = "70Mi";
};
};
volumeMounts = [
{
mountPath = "/kube-dns-config";
name = "kube-dns-config";
}
];
}
{
args = [
"-v=2"
"-logtostderr"
"-configDir=/etc/k8s/dns/dnsmasq-nanny"
"-restartDnsmasq=true"
"--"
"-k"
"--cache-size=1000"
"--log-facility=-"
"--server=/${cfg.clusterDomain}/127.0.0.1#10053"
"--server=/in-addr.arpa/127.0.0.1#10053"
"--server=/ip6.arpa/127.0.0.1#10053"
];
image = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:${version}";
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/healthcheck/dnsmasq";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
name = "dnsmasq";
ports = [
{
containerPort = 53;
name = "dns";
protocol = "UDP";
}
{
containerPort = 53;
name = "dns-tcp";
protocol = "TCP";
}
];
resources = {
requests = {
cpu = "150m";
memory = "20Mi";
};
};
volumeMounts = [
{
mountPath = "/etc/k8s/dns/dnsmasq-nanny";
name = "kube-dns-config";
}
];
}
{
name = "sidecar";
image = "gcr.io/google_containers/k8s-dns-sidecar-amd64:${version}";
args = [
"--v=2"
"--logtostderr"
"--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
];
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/metrics";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
ports = [
{
containerPort = 10054;
name = "metrics";
protocol = "TCP";
}
];
resources = {
requests = {
cpu = "10m";
memory = "20Mi";
};
};
}
];
dnsPolicy = "Default";
serviceAccountName = "kube-dns";
tolerations = [
{
key = "CriticalAddonsOnly";
operator = "Exists";
}
];
volumes = [
{
configMap = {
name = "kube-dns";
optional = true;
};
name = "kube-dns-config";
}
];
};
};
};
};
kubedns-svc = {
apiVersion = "v1";
kind = "Service";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
"kubernetes.io/name" = "KubeDNS";
};
name = "kube-dns";
namespace = "kube-system";
};
spec = {
clusterIP = cfg.clusterIp;
ports = [
{name = "dns"; port = 53; protocol = "UDP";}
{name = "dns-tcp"; port = 53; protocol = "TCP";}
];
selector.k8s-app = "kube-dns";
};
};
kubedns-sa = {
apiVersion = "v1";
kind = "ServiceAccount";
metadata = {
name = "kube-dns";
namespace = "kube-system";
labels = {
"kubernetes.io/cluster-service" = "true";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
};
};
kubedns-cm = {
apiVersion = "v1";
kind = "ConfigMap";
metadata = {
name = "kube-dns";
namespace = "kube-system";
labels = {
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
};
};
};
services.kubernetes.kubelet.clusterDns = mkDefault cfg.clusterIp;
};
}
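A hedged usage sketch of the DNS addon above (the values shown are the module defaults):
```nix
{
  services.kubernetes.addons.dns = {
    enable = true;
    clusterDomain = "cluster.local";
    # clusterIp defaults to the ".254" address of the apiserver's serviceClusterIpRange
  };
}
```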


@ -170,11 +170,16 @@ in
mkdir -m 0770 -p ${cfg.dataDir}
if [ "$(id -u)" = 0 ]; then chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}; fi
'';
postStart = mkBefore ''
until ${pkgs.curl.bin}/bin/curl -s -o /dev/null ${if configOptions.http.https-enabled then "-k https" else "http"}://127.0.0.1${toString configOptions.http.bind-address}/ping; do
sleep 1;
done
'';
postStart =
let
scheme = if configOptions.http.https-enabled then "-k https" else "http";
bindAddr = (ba: if hasPrefix ":" ba then "127.0.0.1${ba}" else "${ba}")(toString configOptions.http.bind-address);
in
mkBefore ''
until ${pkgs.curl.bin}/bin/curl -s -o /dev/null ${scheme}://${bindAddr}/ping; do
sleep 1;
done
'';
};
users.extraUsers = optional (cfg.user == "influxdb") {


@ -34,6 +34,8 @@ with lib;
services.dbus.packages = [ pkgs.at_spi2_core ];
systemd.packages = [ pkgs.at_spi2_core ];
};
}


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,11 +30,11 @@ in
config = mkIf config.services.gnome3.evolution-data-server.enable {
environment.systemPackages = [ gnome3.evolution_data_server ];
environment.systemPackages = [ pkgs.gnome3.evolution_data_server ];
services.dbus.packages = [ gnome3.evolution_data_server ];
services.dbus.packages = [ pkgs.gnome3.evolution_data_server ];
systemd.packages = [ gnome3.evolution_data_server ];
systemd.packages = [ pkgs.gnome3.evolution_data_server ];
};


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-disks.enable {
environment.systemPackages = [ gnome3.gnome-disk-utility ];
environment.systemPackages = [ pkgs.gnome3.gnome-disk-utility ];
services.dbus.packages = [ gnome3.gnome-disk-utility ];
services.dbus.packages = [ pkgs.gnome3.gnome-disk-utility ];
};


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-documents.enable {
environment.systemPackages = [ gnome3.gnome-documents ];
environment.systemPackages = [ pkgs.gnome3.gnome-documents ];
services.dbus.packages = [ gnome3.gnome-documents ];
services.dbus.packages = [ pkgs.gnome3.gnome-documents ];
services.gnome3.gnome-online-accounts.enable = true;


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -34,9 +31,9 @@ in
config = mkIf config.services.gnome3.gnome-keyring.enable {
environment.systemPackages = [ gnome3.gnome_keyring ];
environment.systemPackages = [ pkgs.gnome3.gnome_keyring ];
services.dbus.packages = [ gnome3.gnome_keyring gnome3.gcr ];
services.dbus.packages = [ pkgs.gnome3.gnome_keyring pkgs.gnome3.gcr ];
};


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-online-accounts.enable {
environment.systemPackages = [ gnome3.gnome_online_accounts ];
environment.systemPackages = [ pkgs.gnome3.gnome_online_accounts ];
services.dbus.packages = [ gnome3.gnome_online_accounts ];
services.dbus.packages = [ pkgs.gnome3.gnome_online_accounts ];
};


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-online-miners.enable {
environment.systemPackages = [ gnome3.gnome-online-miners ];
environment.systemPackages = [ pkgs.gnome3.gnome-online-miners ];
services.dbus.packages = [ gnome3.gnome-online-miners ];
services.dbus.packages = [ pkgs.gnome3.gnome-online-miners ];
};


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,11 +30,11 @@ in
config = mkIf config.services.gnome3.gnome-terminal-server.enable {
environment.systemPackages = [ gnome3.gnome_terminal ];
environment.systemPackages = [ pkgs.gnome3.gnome_terminal ];
services.dbus.packages = [ gnome3.gnome_terminal ];
services.dbus.packages = [ pkgs.gnome3.gnome_terminal ];
systemd.packages = [ gnome3.gnome_terminal ];
systemd.packages = [ pkgs.gnome3.gnome_terminal ];
};


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-user-share.enable {
environment.systemPackages = [ gnome3.gnome-user-share ];
environment.systemPackages = [ pkgs.gnome3.gnome-user-share ];
services.xserver.displayManager.sessionCommands = with gnome3; ''
services.xserver.displayManager.sessionCommands = with pkgs.gnome3; ''
# Don't let gnome-control-center depend upon gnome-user-share
export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${gnome-user-share}/share/gsettings-schemas/${gnome-user-share.name}
'';


@ -1,11 +1,8 @@
# GPaste daemon.
{ config, lib, ... }:
{ config, lib, pkgs, ... }:
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
options = {
@ -22,9 +19,9 @@ in
###### implementation
config = mkIf config.services.gnome3.gpaste.enable {
environment.systemPackages = [ gnome3.gpaste ];
services.dbus.packages = [ gnome3.gpaste ];
services.xserver.desktopManager.gnome3.sessionPath = [ gnome3.gpaste ];
systemd.packages = [ gnome3.gpaste ];
environment.systemPackages = [ pkgs.gnome3.gpaste ];
services.dbus.packages = [ pkgs.gnome3.gpaste ];
services.xserver.desktopManager.gnome3.sessionPath = [ pkgs.gnome3.gpaste ];
systemd.packages = [ pkgs.gnome3.gpaste ];
};
}


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,11 +30,11 @@ in
config = mkIf config.services.gnome3.gvfs.enable {
environment.systemPackages = [ gnome3.gvfs ];
environment.systemPackages = [ pkgs.gnome3.gvfs ];
services.dbus.packages = [ gnome3.gvfs ];
services.dbus.packages = [ pkgs.gnome3.gvfs ];
systemd.packages = [ gnome3.gvfs ];
systemd.packages = [ pkgs.gnome3.gvfs ];
services.udev.packages = [ pkgs.libmtp.bin ];


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -32,9 +29,9 @@ in
config = mkIf config.services.gnome3.seahorse.enable {
environment.systemPackages = [ gnome3.seahorse ];
environment.systemPackages = [ pkgs.gnome3.seahorse ];
services.dbus.packages = [ gnome3.seahorse ];
services.dbus.packages = [ pkgs.gnome3.seahorse ];
};


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -32,9 +29,9 @@ in
config = mkIf config.services.gnome3.sushi.enable {
environment.systemPackages = [ gnome3.sushi ];
environment.systemPackages = [ pkgs.gnome3.sushi ];
services.dbus.packages = [ gnome3.sushi ];
services.dbus.packages = [ pkgs.gnome3.sushi ];
};


@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,11 +30,11 @@ in
config = mkIf config.services.gnome3.tracker.enable {
environment.systemPackages = [ gnome3.tracker ];
environment.systemPackages = [ pkgs.gnome3.tracker ];
services.dbus.packages = [ gnome3.tracker ];
services.dbus.packages = [ pkgs.gnome3.tracker ];
systemd.packages = [ gnome3.tracker ];
systemd.packages = [ pkgs.gnome3.tracker ];
};


@ -42,7 +42,7 @@ in
Then you can use this sieve filter:
require ["fileinto", "reject", "envelope"];
if header :contains "X-Spam-Flag" "YES" {
fileinto "spam";
}
@ -67,11 +67,11 @@ in
initPreConf = mkOption {
type = types.str;
description = "The SpamAssassin init.pre config.";
default =
''
default =
''
#
# to update this list, run this command in the rules directory:
# grep 'loadplugin.*Mail::SpamAssassin::Plugin::.*' -o -h * | sort | uniq
# grep 'loadplugin.*Mail::SpamAssassin::Plugin::.*' -o -h * | sort | uniq
#
#loadplugin Mail::SpamAssassin::Plugin::AccessDB
@ -122,7 +122,11 @@ in
config = mkIf cfg.enable {
# Allow users to run 'spamc'.
environment.systemPackages = [ pkgs.spamassassin ];
environment = {
etc = singleton { source = spamdEnv; target = "spamassassin"; };
systemPackages = [ pkgs.spamassassin ];
};
users.extraUsers = singleton {
name = "spamd";
@ -138,7 +142,7 @@ in
systemd.services.sa-update = {
script = ''
set +e
set +e
${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/ --siteconfigpath=${spamdEnv}/" spamd
v=$?
@ -153,7 +157,7 @@ in
'';
};
systemd.timers.sa-update = {
systemd.timers.sa-update = {
description = "sa-update-service";
partOf = [ "sa-update.service" ];
wantedBy = [ "timers.target" ];
@ -177,15 +181,10 @@ in
# 0 and 1 no error, exitcode > 1 means error:
# https://spamassassin.apache.org/full/3.1.x/doc/sa-update.html#exit_codes
preStart = ''
# this abstraction requires no centralized config at all
if [ -d /etc/spamassassin ]; then
echo "This spamassassin does not support global '/etc/spamassassin' folder for configuration as this would be impure. Merge your configs into 'services.spamassassin' and remove the '/etc/spamassassin' folder to make this service work. Also see 'https://github.com/NixOS/nixpkgs/pull/26470'.";
exit 1
fi
echo "Recreating '/var/lib/spamasassin' with creating '3.004001' (or similar) and 'sa-update-keys'"
mkdir -p /var/lib/spamassassin
chown spamd:spamd /var/lib/spamassassin -R
set +e
set +e
${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/ --siteconfigpath=${spamdEnv}/" spamd
v=$?
set -e


@ -142,9 +142,9 @@ let
GITLAB_UPLOADS_PATH = "${cfg.statePath}/uploads";
GITLAB_LOG_PATH = "${cfg.statePath}/log";
GITLAB_SHELL_PATH = "${cfg.packages.gitlab-shell}";
GITLAB_SHELL_CONFIG_PATH = "${cfg.statePath}/home/config.yml";
GITLAB_SHELL_CONFIG_PATH = "${cfg.statePath}/shell/config.yml";
GITLAB_SHELL_SECRET_PATH = "${cfg.statePath}/config/gitlab_shell_secret";
GITLAB_SHELL_HOOKS_PATH = "${cfg.statePath}/home/hooks";
GITLAB_SHELL_HOOKS_PATH = "${cfg.statePath}/shell/hooks";
GITLAB_REDIS_CONFIG_FILE = pkgs.writeText "gitlab-redis.yml" redisYml;
prometheus_multiproc_dir = "/run/gitlab";
RAILS_ENV = "production";
@ -555,6 +555,7 @@ in {
openssh
nodejs
procps
gnupg
];
preStart = ''
mkdir -p ${cfg.backupPath}
@ -567,7 +568,7 @@ in {
mkdir -p ${cfg.statePath}/tmp/pids
mkdir -p ${cfg.statePath}/tmp/sockets
rm -rf ${cfg.statePath}/config ${cfg.statePath}/home/hooks
rm -rf ${cfg.statePath}/config ${cfg.statePath}/shell/hooks
mkdir -p ${cfg.statePath}/config
tr -dc A-Za-z0-9 < /dev/urandom | head -c 32 > ${cfg.statePath}/config/gitlab_shell_secret


@ -41,6 +41,15 @@ in
'';
};
enableGitAnnex = mkOption {
type = types.bool;
default = false;
description = ''
Enable git-annex support. Uses the <literal>extraGitoliteRc</literal> option
to apply the necessary configuration.
'';
};
commonHooks = mkOption {
type = types.listOf types.path;
default = [];
@ -49,6 +58,37 @@ in
'';
};
extraGitoliteRc = mkOption {
type = types.lines;
default = "";
example = literalExample ''
$RC{UMASK} = 0027;
$RC{SITE_INFO} = 'This is our private repository host';
push( @{$RC{ENABLE}}, 'Kindergarten' ); # enable the command/feature
@{$RC{ENABLE}} = grep { $_ ne 'desc' } @{$RC{ENABLE}}; # disable the command/feature
'';
description = ''
Extra configuration to append to the default <literal>~/.gitolite.rc</literal>.
This should be Perl code that modifies the <literal>%RC</literal>
configuration variable. The default <literal>~/.gitolite.rc</literal>
content is generated by invoking <literal>gitolite print-default-rc</literal>,
and extra configuration from this option is appended to it. The result
is placed to Nix store, and the <literal>~/.gitolite.rc</literal> file
becomes a symlink to it.
If you already have a customized (or otherwise changed)
<literal>~/.gitolite.rc</literal> file, NixOS will refuse to replace
it with a symlink, and the `gitolite-init` initialization service
will fail. In this situation, in order to use this option, you
will need to take any customizations you may have in
<literal>~/.gitolite.rc</literal>, convert them to appropriate Perl
statements, add them to this option, and remove the file.
See also the <literal>enableGitAnnex</literal> option.
'';
};
user = mkOption {
type = types.str;
default = "gitolite";
@ -56,17 +96,59 @@ in
Gitolite user account. This is the username of the gitolite endpoint.
'';
};
group = mkOption {
type = types.str;
default = "gitolite";
description = ''
Primary group of the Gitolite user account.
'';
};
};
};
config = mkIf cfg.enable {
config = mkIf cfg.enable (
let
manageGitoliteRc = cfg.extraGitoliteRc != "";
rcDir = pkgs.runCommand "gitolite-rc" { } rcDirScript;
rcDirScript =
''
mkdir "$out"
export HOME=temp-home
mkdir -p "$HOME/.gitolite/logs" # gitolite can't run without it
'${pkgs.gitolite}'/bin/gitolite print-default-rc >>"$out/gitolite.rc.default"
cat <<END >>"$out/gitolite.rc"
# This file is managed by NixOS.
# Use services.gitolite options to control it.
END
cat "$out/gitolite.rc.default" >>"$out/gitolite.rc"
'' +
optionalString (cfg.extraGitoliteRc != "") ''
echo -n ${escapeShellArg ''
# Added by NixOS:
${removeSuffix "\n" cfg.extraGitoliteRc}
# per perl rules, this should be the last line in such a file:
1;
''} >>"$out/gitolite.rc"
'';
in {
services.gitolite.extraGitoliteRc = optionalString cfg.enableGitAnnex ''
# Enable git-annex support:
push( @{$RC{ENABLE}}, 'git-annex-shell ua');
'';
users.extraUsers.${cfg.user} = {
description = "Gitolite user";
home = cfg.dataDir;
createHome = true;
uid = config.ids.uids.gitolite;
group = cfg.group;
useDefaultShell = true;
};
users.extraGroups."${cfg.group}".gid = config.ids.gids.gitolite;
systemd.services."gitolite-init" = {
description = "Gitolite initialization";
@ -77,21 +159,62 @@ in
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.gitolite pkgs.git pkgs.perl pkgs.bash config.programs.ssh.package ];
script = ''
cd ${cfg.dataDir}
mkdir -p .gitolite/logs
if [ ! -d repositories ]; then
gitolite setup -pk ${pubkeyFile}
fi
if [ -n "${hooks}" ]; then
cp ${hooks} .gitolite/hooks/common/
chmod +x .gitolite/hooks/common/*
fi
gitolite setup # Upgrade if needed
'';
path = [ pkgs.gitolite pkgs.git pkgs.perl pkgs.bash pkgs.diffutils config.programs.ssh.package ];
script =
let
rcSetupScriptIfCustomFile =
if manageGitoliteRc then ''
cat <<END
<3>ERROR: NixOS can't apply declarative configuration
<3>to your .gitolite.rc file, because it seems to be
<3>already customized manually.
<3>See the services.gitolite.extraGitoliteRc option
<3>in "man configuration.nix" for more information.
END
# Not sure if the line below addresses the issue directly or just
# adds a delay, but without it our error message often doesn't
# show up in `systemctl status gitolite-init`.
journalctl --flush
exit 1
'' else ''
:
'';
rcSetupScriptIfDefaultFileOrStoreSymlink =
if manageGitoliteRc then ''
ln -sf "${rcDir}/gitolite.rc" "$GITOLITE_RC"
'' else ''
[[ -L "$GITOLITE_RC" ]] && rm -f "$GITOLITE_RC"
'';
in
''
cd ${cfg.dataDir}
mkdir -p .gitolite/logs
GITOLITE_RC=.gitolite.rc
GITOLITE_RC_DEFAULT=${rcDir}/gitolite.rc.default
if ( [[ ! -e "$GITOLITE_RC" ]] && [[ ! -L "$GITOLITE_RC" ]] ) ||
( [[ -f "$GITOLITE_RC" ]] && diff -q "$GITOLITE_RC" "$GITOLITE_RC_DEFAULT" >/dev/null ) ||
( [[ -L "$GITOLITE_RC" ]] && [[ "$(readlink "$GITOLITE_RC")" =~ ^/nix/store/ ]] )
then
'' + rcSetupScriptIfDefaultFileOrStoreSymlink +
''
else
'' + rcSetupScriptIfCustomFile +
''
fi
if [ ! -d repositories ]; then
gitolite setup -pk ${pubkeyFile}
fi
if [ -n "${hooks}" ]; then
cp ${hooks} .gitolite/hooks/common/
chmod +x .gitolite/hooks/common/*
fi
gitolite setup # Upgrade if needed
'';
};
environment.systemPackages = [ pkgs.gitolite pkgs.git ];
};
environment.systemPackages = [ pkgs.gitolite pkgs.git ]
++ optional cfg.enableGitAnnex pkgs.gitAndTools.git-annex;
});
}
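As a usage sketch for the new gitolite options above (`enableGitAnnex`, `extraGitoliteRc`, `group`): the admin key and rc snippet are placeholders, and `adminPubkey` is the module's pre-existing option rather than part of this change.

```nix
{
  services.gitolite = {
    enable = true;
    # Placeholder admin key; adminPubkey is the module's existing option.
    adminPubkey = "ssh-ed25519 AAAAC3Nza... admin@example";
    group = "gitolite";        # primary group of the gitolite user
    enableGitAnnex = true;     # appends the git-annex-shell rule via extraGitoliteRc
    extraGitoliteRc = ''
      $RC{UMASK} = 0027;
      $RC{SITE_INFO} = 'Example private repository host';
    '';
  };
}
```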

View file

@ -428,7 +428,7 @@ in
fi
'';
nix.nrBuildUsers = mkDefault (lib.max 10 cfg.maxJobs);
nix.nrBuildUsers = mkDefault (lib.max 32 cfg.maxJobs);
users.extraUsers = nixbldUsers;

View file

@ -0,0 +1,66 @@
# Fusion Inventory daemon.
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.fusionInventory;
configFile = pkgs.writeText "fusion_inventory.conf" ''
server = ${concatStringsSep ", " cfg.servers}
logger = stderr
${cfg.extraConfig}
'';
in {
###### interface
options = {
services.fusionInventory = {
enable = mkEnableOption "Fusion Inventory Agent";
servers = mkOption {
type = types.listOf types.str;
description = ''
The URLs of the OCS/GLPI servers to connect to.
'';
};
extraConfig = mkOption {
default = "";
type = types.lines;
description = ''
Configuration that is injected verbatim into the configuration file.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
users.extraUsers = singleton {
name = "fusion-inventory";
description = "FusionInventory user";
};
systemd.services."fusion-inventory" = {
description = "Fusion Inventory Agent";
wantedBy = [ "multi-user.target" ];
environment = {
OPTIONS = "--no-category=software";
};
serviceConfig = {
ExecStart = "${pkgs.fusionInventory}/bin/fusioninventory-agent --conf-file=${configFile} --daemon --no-fork";
};
};
};
}
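A minimal sketch of enabling the FusionInventory module added here; the server URL and the `tag` setting are placeholders, not values this change prescribes.

```nix
{
  services.fusionInventory = {
    enable = true;
    # Hypothetical OCS/GLPI endpoint; point this at your inventory server.
    servers = [ "https://glpi.example.org/plugins/fusioninventory/" ];
    # Appended verbatim to fusion_inventory.conf; `tag` is only an illustrative agent setting.
    extraConfig = ''
      tag = nixos-host
    '';
  };
}
```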

View file

@ -17,20 +17,22 @@ in
};
config = mkOption {
default = "";
description = "monit.conf content";
description = "monitrc content";
};
};
};
config = mkIf config.services.monit.enable {
environment.systemPackages = [ pkgs.monit ];
environment.etc = [
{
source = pkgs.writeTextFile {
name = "monit.conf";
name = "monitrc";
text = config.services.monit.config;
};
target = "monit.conf";
target = "monitrc";
mode = "0400";
}
];
@ -40,9 +42,9 @@ in
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${pkgs.monit}/bin/monit -I -c /etc/monit.conf";
ExecStop = "${pkgs.monit}/bin/monit -c /etc/monit.conf quit";
ExecReload = "${pkgs.monit}/bin/monit -c /etc/monit.conf reload";
ExecStart = "${pkgs.monit}/bin/monit -I -c /etc/monitrc";
ExecStop = "${pkgs.monit}/bin/monit -c /etc/monitrc quit";
ExecReload = "${pkgs.monit}/bin/monit -c /etc/monitrc reload";
KillMode = "process";
Restart = "always";
};

View file

@ -33,7 +33,7 @@ in {
default = [];
example = ''[ "systemd" ]'';
description = ''
Collectors to enable, additionally to the defaults.
Collectors to enable. Only collectors explicitly listed here will be enabled.
'';
};

View file

@ -5,6 +5,22 @@ with lib;
let
inherit (pkgs) glusterfs rsync;
tlsCmd = if (cfg.tlsSettings != null) then
''
mkdir -p /var/lib/glusterd
touch /var/lib/glusterd/secure-access
''
else
''
rm -f /var/lib/glusterd/secure-access
'';
restartTriggers = if (cfg.tlsSettings != null) then [
config.environment.etc."ssl/glusterfs.pem".source
config.environment.etc."ssl/glusterfs.key".source
config.environment.etc."ssl/glusterfs.ca".source
] else [];
cfg = config.services.glusterfs;
in
@ -30,6 +46,41 @@ in
description = "Extra flags passed to the GlusterFS daemon";
default = [];
};
tlsSettings = mkOption {
description = ''
Make the server communicate via TLS.
This means it will only connect to other gluster
servers having certificates signed by the same CA.
Enabling this will create a file <filename>/var/lib/glusterd/secure-access</filename>.
Disabling will delete this file again.
See also: https://gluster.readthedocs.io/en/latest/Administrator%20Guide/SSL/
'';
default = null;
type = types.nullOr (types.submodule {
options = {
tlsKeyPath = mkOption {
default = null;
type = types.str;
description = "Path to the private key used for TLS.";
};
tlsPem = mkOption {
default = null;
type = types.path;
description = "Path to the certificate used for TLS.";
};
caCert = mkOption {
default = null;
type = types.path;
description = "Path certificate authority used to sign the cluster certificates.";
};
};
});
};
};
};
@ -40,7 +91,14 @@ in
services.rpcbind.enable = true;
environment.etc = mkIf (cfg.tlsSettings != null) {
"ssl/glusterfs.pem".source = cfg.tlsSettings.tlsPem;
"ssl/glusterfs.key".source = cfg.tlsSettings.tlsKeyPath;
"ssl/glusterfs.ca".source = cfg.tlsSettings.caCert;
};
systemd.services.glusterd = {
inherit restartTriggers;
description = "GlusterFS, a clustered file-system server";
@ -57,6 +115,8 @@ in
+ ''
mkdir -p /var/lib/glusterd/hooks/
${rsync}/bin/rsync -a ${glusterfs}/var/lib/glusterd/hooks/ /var/lib/glusterd/hooks/
${tlsCmd}
''
# `glusterfind` needs dirs that upstream installs at `make install` phase
# https://github.com/gluster/glusterfs/blob/v3.10.2/tools/glusterfind/Makefile.am#L16-L17
@ -75,6 +135,7 @@ in
};
systemd.services.glustereventsd = {
inherit restartTriggers;
description = "Gluster Events Notifier";

View file

@ -27,6 +27,14 @@ in
'';
};
extraNfsdConfig = mkOption {
type = types.str;
default = "";
description = ''
Extra configuration options for the [nfsd] section of /etc/nfs.conf.
'';
};
exports = mkOption {
type = types.lines;
default = "";
@ -107,6 +115,7 @@ in
[nfsd]
threads=${toString cfg.nproc}
${optionalString (cfg.hostName != null) "host=${cfg.hostName}"}
${cfg.extraNfsdConfig}
[mountd]
${optionalString (cfg.mountdPort != null) "port=${toString cfg.mountdPort}"}
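A hedged example of the new `extraNfsdConfig` option; the `udp` and `vers3` keys are illustrative nfs.conf settings, not something this change prescribes.

```nix
{
  services.nfs.server = {
    enable = true;
    # Appended verbatim to the [nfsd] section of /etc/nfs.conf (illustrative values).
    extraNfsdConfig = ''
      udp=y
      vers3=y
    '';
  };
}
```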

View file

@ -151,15 +151,6 @@ in
";
};
resolveLocalQueries = mkOption {
type = types.bool;
default = true;
description = ''
Whether bind should resolve local queries (i.e. add 127.0.0.1 to
/etc/resolv.conf, overriding networking.nameserver).
'';
};
};
};

View file

@ -183,6 +183,7 @@ in
ExecReload = "${cfg.package.bin}/bin/consul reload";
PermissionsStartOnly = true;
User = if cfg.dropPrivileges then "consul" else null;
Restart = "on-failure";
TimeoutStartSec = "0";
} // (optionalAttrs (cfg.leaveOnStop) {
ExecStop = "${cfg.package.bin}/bin/consul leave";

View file

@ -0,0 +1,86 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.dnscache;
dnscache-root = pkgs.runCommand "dnscache-root" {} ''
mkdir -p $out/{servers,ip}
${concatMapStrings (ip: ''
echo > "$out/ip/"${lib.escapeShellArg ip}
'') cfg.clientIps}
${concatStrings (mapAttrsToList (host: ips: ''
${concatMapStrings (ip: ''
echo ${lib.escapeShellArg ip} > "$out/servers/"${lib.escapeShellArg host}
'') ips}
'') cfg.domainServers)}
# djbdns contains an outdated list of root servers;
# if one was not provided in config, provide a current list
if [ ! -e servers/@ ]; then
awk '/^.?.ROOT-SERVERS.NET/ { print $4 }' ${pkgs.dns-root-data}/root.hints > $out/servers/@
fi
'';
in {
###### interface
options = {
services.dnscache = {
enable = mkOption {
default = false;
type = types.bool;
description = "Whether to run the dnscache caching dns server";
};
ip = mkOption {
default = "0.0.0.0";
type = types.str;
description = "IP address on which to listen for connections";
};
clientIps = mkOption {
default = [ "127.0.0.1" ];
type = types.listOf types.str;
description = "client IP addresses (or prefixes) from which to accept connections";
example = ["192.168" "172.23.75.82"];
};
domainServers = mkOption {
default = { };
type = types.attrsOf (types.listOf types.str);
description = "table of {hostname: server} pairs to use as authoritative servers for hosts (and subhosts)";
example = {
"example.com" = ["8.8.8.8" "8.8.4.4"];
};
};
};
};
###### implementation
config = mkIf config.services.dnscache.enable {
environment.systemPackages = [ pkgs.djbdns ];
users.extraUsers.dnscache = {};
systemd.services.dnscache = {
description = "djbdns dnscache server";
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ bash daemontools djbdns ];
preStart = ''
rm -rf /var/lib/dnscache
dnscache-conf dnscache dnscache /var/lib/dnscache ${config.services.dnscache.ip}
rm -rf /var/lib/dnscache/root
ln -sf ${dnscache-root} /var/lib/dnscache/root
'';
script = ''
cd /var/lib/dnscache/
exec ./run
'';
};
};
}
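A small sketch of the new dnscache module; the client prefix and server addresses are placeholders.

```nix
{
  services.dnscache = {
    enable = true;
    clientIps = [ "127.0.0.1" "192.168" ];   # accept queries from localhost and 192.168.0.0/16
    domainServers = {
      # Send example.com (and its subdomains) to these hypothetical authoritative servers.
      "example.com" = [ "203.0.113.1" "203.0.113.2" ];
    };
  };
}
```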

View file

@ -42,7 +42,7 @@ in
default = true;
description = ''
Whether dnsmasq should resolve local queries (i.e. add 127.0.0.1 to
/etc/resolv.conf overriding networking.nameservers).
/etc/resolv.conf).
'';
};

View file

@ -1,60 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.networking.fan;
modprobe = "${pkgs.kmod}/bin/modprobe";
in
{
###### interface
options = {
networking.fan = {
enable = mkEnableOption "FAN Networking";
};
};
###### implementation
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.fanctl ];
systemd.services.fan = {
description = "FAN Networking";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
before = [ "docker.service" ];
restartIfChanged = false;
preStart = ''
if [ ! -f /proc/sys/net/fan/version ]; then
${modprobe} ipip
if [ ! -f /proc/sys/net/fan/version ]; then
echo "The Fan Networking patches have not been applied to this kernel!" 1>&2
exit 1
fi
fi
mkdir -p /var/lib/fan-networking
'';
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "${pkgs.fanctl}/bin/fanctl up -a";
ExecStop = "${pkgs.fanctl}/bin/fanctl down -a";
};
};
};
}

View file

@ -9,7 +9,7 @@ let
confFile = pkgs.writeText "radicale.conf" cfg.config;
# This enables us to default to version 2 while still not breaking configurations of people with version 1
defaultPackage = if versionAtLeast "17.09" config.system.stateVersion then {
defaultPackage = if versionAtLeast config.system.stateVersion "17.09" then {
pkg = pkgs.radicale2;
text = "pkgs.radicale2";
} else {
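For context on the one-line fix above: `lib.versionAtLeast a b` is true when `a` is at least `b`, so the state version must come first and the threshold second. A quick sketch of the semantics, not part of the module itself:

```nix
# lib.versionAtLeast a b  is equivalent to  builtins.compareVersions a b >= 0, i.e. a >= b
lib.versionAtLeast config.system.stateVersion "17.09"   # intended: stateVersion >= 17.09
lib.versionAtLeast "17.09" config.system.stateVersion   # buggy order: true for any stateVersion <= 17.09
```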

View file

@ -141,7 +141,6 @@ in
${optionalString (data.ed25519PrivateKeyFile != null) "Ed25519PrivateKeyFile = ${data.ed25519PrivateKeyFile}"}
${optionalString (data.listenAddress != null) "ListenAddress = ${data.listenAddress}"}
${optionalString (data.bindToAddress != null) "BindToAddress = ${data.bindToAddress}"}
Device = /dev/net/tun
Interface = tinc.${network}
${data.extraConfig}
'';
@ -164,10 +163,17 @@ in
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = [ data.package ];
restartTriggers =
let
drvlist = [ config.environment.etc."tinc/${network}/tinc.conf".source ]
++ mapAttrsToList (host: _: config.environment.etc."tinc/${network}/hosts/${host}".source) data.hosts;
in # drvlist might be too long to be used directly
[ (builtins.hashString "sha256" (concatMapStrings (d: d.outPath) drvlist)) ];
serviceConfig = {
Type = "simple";
Restart = "always";
RestartSec = "3";
ExecStart = "${data.package}/bin/tincd -D -U tinc.${network} -n ${network} ${optionalString (data.chroot) "-R"} --pidfile /run/tinc.${network}.pid -d ${toString data.debugLevel}";
};
preStart = ''
mkdir -p /etc/tinc/${network}/hosts
@ -187,9 +193,6 @@ in
[ -f "/etc/tinc/${network}/rsa_key.priv" ] || tincd -n ${network} -K 4096
fi
'';
script = ''
tincd -D -U tinc.${network} -n ${network} ${optionalString (data.chroot) "-R"} --pidfile /run/tinc.${network}.pid -d ${toString data.debugLevel}
'';
})
);

View file

@ -0,0 +1,54 @@
{ config, lib, pkgs, ... }:
with lib;
{
###### interface
options = {
services.tinydns = {
enable = mkOption {
default = false;
type = types.bool;
description = "Whether to run the tinydns dns server";
};
data = mkOption {
type = types.lines;
default = "";
description = "The DNS data to serve, in the format described by tinydns-data(8)";
};
ip = mkOption {
default = "0.0.0.0";
type = types.str;
description = "IP address on which to listen for connections";
};
};
};
###### implementation
config = mkIf config.services.tinydns.enable {
environment.systemPackages = [ pkgs.djbdns ];
users.extraUsers.tinydns = {};
systemd.services.tinydns = {
description = "djbdns tinydns server";
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ daemontools djbdns ];
preStart = ''
rm -rf /var/lib/tinydns
tinydns-conf tinydns tinydns /var/lib/tinydns ${config.services.tinydns.ip}
cd /var/lib/tinydns/root/
ln -sf ${pkgs.writeText "tinydns-data" config.services.tinydns.data} data
tinydns-data
'';
script = ''
cd /var/lib/tinydns
exec ./run
'';
};
};
}
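A usage sketch for the new tinydns module; the records below are purely illustrative tinydns-data(8) lines.

```nix
{
  services.tinydns = {
    enable = true;
    ip = "127.0.0.1";
    data = ''
      .example.com:192.0.2.1:a:259200
      =www.example.com:192.0.2.10:86400
    '';
  };
}
```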

View file

@ -95,6 +95,14 @@ let
type = with types; listOf (submodule peerOpts);
};
allowedIPsAsRoutes = mkOption {
example = false;
default = true;
type = types.bool;
description = ''
Whether to add the peers' allowed IPs as routes on this interface.
'';
};
};
};
@ -217,11 +225,11 @@ let
"${ipCommand} link set up dev ${name}"
(map (peer:
(optionals (values.allowedIPsAsRoutes != false) (map (peer:
(map (allowedIP:
"${ipCommand} route replace ${allowedIP} dev ${name} table ${values.table}"
) peer.allowedIPs)
) values.peers)
) values.peers))
values.postSetup
]);
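A sketch of the new `allowedIPsAsRoutes` flag; the keys, addresses, and `privateKeyFile` path are placeholders, and the surrounding interface options are the module's existing ones rather than part of this change.

```nix
{
  networking.wireguard.interfaces.wg0 = {
    ips = [ "10.100.0.2/24" ];
    privateKeyFile = "/var/lib/wireguard/wg0.key";   # hypothetical path
    # Skip automatic route creation and manage routes for the peers manually.
    allowedIPsAsRoutes = false;
    peers = [{
      publicKey = "base64-placeholder-public-key=";
      allowedIPs = [ "10.100.0.0/24" ];
      endpoint = "vpn.example.org:51820";
    }];
  };
}
```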

View file

@ -148,6 +148,7 @@ in {
wants = [ "network.target" ];
requires = lib.concatMap deviceUnit ifaces;
wantedBy = [ "multi-user.target" ];
stopIfChanged = false;
path = [ pkgs.wpa_supplicant ];

View file

@ -212,6 +212,14 @@ in
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Whether to open ports in the firewall for ZNC.
'';
};
zncConf = mkOption {
default = "";
example = "See: http://wiki.znc.in/Configuration";
@ -276,14 +284,6 @@ in
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Whether to open ports in the firewall for ZNC.
'';
};
passBlock = mkOption {
example = defaultPassBlock;
type = types.string;
@ -359,7 +359,7 @@ in
config = mkIf cfg.enable {
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.port ];
allowedTCPPorts = [ cfg.confOptions.port ];
};
systemd.services.znc = {

View file

@ -33,6 +33,8 @@ in {
};
config = mkIf cfg.enable {
boot.kernelModules = [ "dummy" ];
networking.interfaces.dummy0 = {
ipAddress = "169.254.169.254";
prefixLength = 32;

View file

@ -60,6 +60,7 @@ in {
ConditionPathExists=/dev/tty0
[Service]
ExecStart=
ExecStart=${pkgs.kmscon}/bin/kmscon "--vt=%I" ${cfg.extraOptions} --seats=seat0 --no-switchvt --configdir ${configDir} --login -- ${pkgs.shadow}/bin/login -p
UtmpIdentifier=%I
TTYPath=/dev/%I

View file

@ -83,11 +83,11 @@ let
# Unpack Mediawiki and put the config file in its root directory.
mediawikiRoot = pkgs.stdenv.mkDerivation rec {
name= "mediawiki-1.27.3";
name= "mediawiki-1.29.1";
src = pkgs.fetchurl {
url = "http://download.wikimedia.org/mediawiki/1.27/${name}.tar.gz";
sha256 = "08x8mvc0y1gwq8rg0zm98wc6hc5j8imb6dcpx6s7392j5dc71m0i";
url = "http://download.wikimedia.org/mediawiki/1.29/${name}.tar.gz";
sha256 = "03mpazbxvb011s2nmlw5p6dc43yjgl5yrsilmj1imyykm57bwb3m";
};
skins = config.skins;

View file

@ -4,7 +4,6 @@ with lib;
let
cfg = config.services.xserver.desktopManager.gnome3;
gnome3 = config.environment.gnome3.packageSet;
# Remove packages of ys from xs, based on their names
removePackagesByName = xs: ys:
@ -28,7 +27,7 @@ let
nixos-gsettings-desktop-schemas = pkgs.runCommand "nixos-gsettings-desktop-schemas" {}
''
mkdir -p $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
cp -rf ${gnome3.gsettings_desktop_schemas}/share/gsettings-schemas/gsettings-desktop-schemas*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
cp -rf ${pkgs.gnome3.gsettings_desktop_schemas}/share/gsettings-schemas/gsettings-desktop-schemas*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
${concatMapStrings (pkg: "cp -rf ${pkg}/share/gsettings-schemas/*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas\n") cfg.extraGSettingsOverridePackages}
@ -61,7 +60,7 @@ in {
example = literalExample "[ pkgs.gnome3.gpaste ]";
description = "Additional list of packages to be added to the session search path.
Useful for gnome shell extensions or gsettings-conditionated autostart.";
apply = list: list ++ [ gnome3.gnome_shell gnome3.gnome-shell-extensions ];
apply = list: list ++ [ pkgs.gnome3.gnome_shell pkgs.gnome3.gnome-shell-extensions ];
};
extraGSettingsOverrides = mkOption {
@ -79,13 +78,6 @@ in {
debug = mkEnableOption "gnome-session debug messages";
};
environment.gnome3.packageSet = mkOption {
default = null;
example = literalExample "pkgs.gnome3_22";
description = "Which GNOME 3 package set to use.";
apply = p: if p == null then pkgs.gnome3 else p;
};
environment.gnome3.excludePackages = mkOption {
default = [];
example = literalExample "[ pkgs.gnome3.totem ]";
@ -169,26 +161,26 @@ in {
# Update user dirs as described in http://freedesktop.org/wiki/Software/xdg-user-dirs/
${pkgs.xdg-user-dirs}/bin/xdg-user-dirs-update
${gnome3.gnome_session}/bin/gnome-session ${optionalString cfg.debug "--debug"} &
${pkgs.gnome3.gnome_session}/bin/gnome-session ${optionalString cfg.debug "--debug"} &
waitPID=$!
'';
};
services.xserver.updateDbusEnvironment = true;
environment.variables.GIO_EXTRA_MODULES = [ "${lib.getLib gnome3.dconf}/lib/gio/modules"
"${gnome3.glib_networking.out}/lib/gio/modules"
"${gnome3.gvfs}/lib/gio/modules" ];
environment.systemPackages = gnome3.corePackages ++ cfg.sessionPath
++ (removePackagesByName gnome3.optionalPackages config.environment.gnome3.excludePackages);
environment.variables.GIO_EXTRA_MODULES = [ "${lib.getLib pkgs.gnome3.dconf}/lib/gio/modules"
"${pkgs.gnome3.glib_networking.out}/lib/gio/modules"
"${pkgs.gnome3.gvfs}/lib/gio/modules" ];
environment.systemPackages = pkgs.gnome3.corePackages ++ cfg.sessionPath
++ (removePackagesByName pkgs.gnome3.optionalPackages config.environment.gnome3.excludePackages);
# Use the correct gnome3 packageSet
networking.networkmanager.basePackages =
{ inherit (pkgs) networkmanager modemmanager wpa_supplicant;
inherit (gnome3) networkmanager_openvpn networkmanager_vpnc
networkmanager_openconnect networkmanager_fortisslvpn
networkmanager_pptp networkmanager_iodine
networkmanager_l2tp; };
inherit (pkgs.gnome3) networkmanager_openvpn networkmanager_vpnc
networkmanager_openconnect networkmanager_fortisslvpn
networkmanager_pptp networkmanager_iodine
networkmanager_l2tp; };
# Needed for themes and backgrounds
environment.pathsToLink = [ "/share" ];

View file

@ -5,8 +5,7 @@ with lib;
let
cfg = config.services.xserver.displayManager;
gnome3 = config.environment.gnome3.packageSet;
gdm = gnome3.gdm;
gdm = pkgs.gnome3.gdm;
in
@ -65,6 +64,14 @@ in
};
};
wayland = mkOption {
default = true;
description = ''
Allow GDM to run on Wayland instead of the X server.
'';
type = types.bool;
};
};
};
@ -95,6 +102,7 @@ in
# GDM needs different xserverArgs, presumably because it uses Wayland by default.
services.xserver.tty = null;
services.xserver.display = null;
services.xserver.verbose = null;
services.xserver.displayManager.job =
{
@ -103,7 +111,7 @@ in
(filter (arg: arg != "-terminate") cfg.xserverArgs);
GDM_SESSIONS_DIR = "${cfg.session.desktops}";
# Find the mouse
XCURSOR_PATH = "~/.icons:${gnome3.adwaita-icon-theme}/share/icons";
XCURSOR_PATH = "~/.icons:${pkgs.gnome3.adwaita-icon-theme}/share/icons";
};
execCmd = "exec ${gdm}/bin/gdm";
};
@ -127,7 +135,7 @@ in
StandardError = "inherit";
};
systemd.services.display-manager.path = [ gnome3.gnome_session ];
systemd.services.display-manager.path = [ pkgs.gnome3.gnome_session ];
services.dbus.packages = [ gdm ];
@ -140,6 +148,7 @@ in
# presented and there's a little delay.
environment.etc."gdm/custom.conf".text = ''
[daemon]
WaylandEnable=${if cfg.gdm.wayland then "true" else "false"}
${optionalString cfg.gdm.autoLogin.enable (
if cfg.gdm.autoLogin.delay > 0 then ''
TimedLoginEnable=true
@ -186,7 +195,7 @@ in
auth required pam_env.so envfile=${config.system.build.pamEnvironment}
auth required pam_succeed_if.so uid >= 1000 quiet
auth optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
auth optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
auth ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
${optionalString config.security.pam.enableEcryptfs
"auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
@ -206,7 +215,7 @@ in
"session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
session required pam_loginuid.so
session optional ${pkgs.systemd}/lib/security/pam_systemd.so
session optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
session optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
'';
gdm-password.text = ''
@ -214,7 +223,7 @@ in
auth required pam_env.so envfile=${config.system.build.pamEnvironment}
auth required pam_succeed_if.so uid >= 1000 quiet
auth optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
auth optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
auth ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
${optionalString config.security.pam.enableEcryptfs
"auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
@ -233,7 +242,7 @@ in
"session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
session required pam_loginuid.so
session optional ${pkgs.systemd}/lib/security/pam_systemd.so
session optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
session optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
'';
gdm-autologin.text = ''

View file

@ -480,6 +480,15 @@ in
'';
};
verbose = mkOption {
type = types.nullOr types.int;
default = 3;
example = 7;
description = ''
Controls verbosity of X logging.
'';
};
useGlamor = mkOption {
type = types.bool;
default = false;
@ -631,10 +640,11 @@ in
[ "-config ${configFile}"
"-xkbdir" "${cfg.xkbDir}"
# Log at the default verbosity level to stderr rather than /var/log/X.*.log.
"-verbose" "3" "-logfile" "/dev/null"
"-logfile" "/dev/null"
] ++ optional (cfg.display != null) ":${toString cfg.display}"
++ optional (cfg.tty != null) "vt${toString cfg.tty}"
++ optional (cfg.dpi != null) "-dpi ${toString cfg.dpi}"
++ optional (cfg.verbose != null) "-verbose ${toString cfg.verbose}"
++ optional (!cfg.enableTCP) "-nolisten tcp"
++ optional (cfg.autoRepeatDelay != null) "-ardelay ${toString cfg.autoRepeatDelay}"
++ optional (cfg.autoRepeatInterval != null) "-arinterval ${toString cfg.autoRepeatInterval}"
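The new `verbose` option can be raised for debugging or silenced per host; a sketch:

```nix
{
  # Default verbosity is 3; bump it for debugging, or set it to null to drop -verbose entirely.
  services.xserver.verbose = 7;
}
```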

View file

@ -235,6 +235,16 @@ in
'';
};
boot.initrd.luks.forceLuksSupportInInitrd = mkOption {
type = types.bool;
default = false;
internal = true;
description = ''
Whether to force LUKS support into the initrd even when no
LUKS devices are configured.
'';
};
boot.initrd.luks.devices = mkOption {
default = { };
example = { "luksroot".device = "/dev/disk/by-uuid/430e9eff-d852-4f68-aa3b-2fa3599ebe08"; };
@ -417,7 +427,7 @@ in
};
};
config = mkIf (luks.devices != {}) {
config = mkIf (luks.devices != {} || luks.forceLuksSupportInInitrd) {
# actually, sbp2 driver is the one enabling the DMA attack, but this needs to be tested
boot.blacklistedKernelModules = optionals luks.mitigateDMAAttacks

View file

@ -639,11 +639,7 @@ in
Rules for creating and cleaning up temporary files
automatically. See
<citerefentry><refentrytitle>tmpfiles.d</refentrytitle><manvolnum>5</manvolnum></citerefentry>
for the exact format. You should not use this option to create
files required by systemd services, since there is no
guarantee that <command>systemd-tmpfiles</command> runs when
the system is reconfigured using
<command>nixos-rebuild</command>.
for the exact format.
'';
};
@ -879,7 +875,12 @@ in
systemd.services.systemd-remount-fs.restartIfChanged = false;
systemd.services.systemd-update-utmp.restartIfChanged = false;
systemd.services.systemd-user-sessions.restartIfChanged = false; # Restart kills all active sessions.
systemd.services.systemd-logind.restartTriggers = [ config.environment.etc."systemd/logind.conf".source ];
# Restarting systemd-logind breaks X11
# - upstream commit: https://cgit.freedesktop.org/xorg/xserver/commit/?id=dc48bd653c7e101
# - systemd announcement: https://github.com/systemd/systemd/blob/22043e4317ecd2bc7834b48a6d364de76bb26d91/NEWS#L103-L112
# - this might be addressed in the future by xorg
#systemd.services.systemd-logind.restartTriggers = [ config.environment.etc."systemd/logind.conf".source ];
systemd.services.systemd-logind.restartIfChanged = false;
systemd.services.systemd-logind.stopIfChanged = false;
systemd.services.systemd-journald.restartTriggers = [ config.environment.etc."systemd/journald.conf".source ];
systemd.services.systemd-journald.stopIfChanged = false;

View file

@ -56,11 +56,19 @@ in
};
config = mkIf anyEncrypted {
assertions = map (dev: {
assertion = dev.label != null;
message = ''
The filesystem for ${dev.mountPoint} has encrypted.enable set to true, but no encrypted.label set
'';
}) encDevs;
boot.initrd = {
luks = {
devices =
map (dev: { name = dev.encrypted.label; device = dev.encrypted.blkDev; } ) keylessEncDevs;
cryptoModules = [ "aes" "sha256" "sha1" "xts" ];
forceLuksSupportInInitrd = true;
};
postMountCommands =
concatMapStrings (dev: "cryptsetup luksOpen --key-file ${dev.encrypted.keyFile} ${dev.encrypted.blkDev} ${dev.encrypted.label};\n") keyedEncDevs;
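As a sketch of what the new assertion checks: every filesystem with `encrypted.enable = true` must also set `encrypted.label`. The device paths and UUID below are placeholders.

```nix
{
  fileSystems."/" = {
    device = "/dev/mapper/cryptroot";
    fsType = "ext4";
    encrypted = {
      enable = true;
      label = "cryptroot";   # required by the new assertion
      blkDev = "/dev/disk/by-uuid/00000000-0000-0000-0000-000000000000";  # placeholder UUID
    };
  };
}
```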

View file

@ -217,7 +217,7 @@ in
# Add the mount helpers to the system path so that `mount' can find them.
system.fsPackages = [ pkgs.dosfstools ];
environment.systemPackages = [ pkgs.fuse ] ++ config.system.fsPackages;
environment.systemPackages = with pkgs; [ fuse3 fuse ] ++ config.system.fsPackages;
environment.etc.fstab.text =
let

View file

@ -85,8 +85,14 @@ in
enable = mkDefault false;
};
systemd.services.auth-rpcgss-module =
{
unitConfig.ConditionPathExists = [ "" "/etc/krb5.keytab" ];
};
systemd.services.rpc-gssd =
{ restartTriggers = [ nfsConfFile ];
unitConfig.ConditionPathExists = [ "" "/etc/krb5.keytab" ];
};
systemd.services.rpc-statd =

View file

@ -140,6 +140,17 @@ in
this once.
'';
};
requestEncryptionCredentials = mkOption {
type = types.bool;
default = config.boot.zfs.enableUnstable;
description = ''
Request encryption keys or passwords for all encrypted datasets on import.
Dataset encryption is only supported in zfsUnstable at the moment.
'';
};
};
services.zfs.autoSnapshot = {
@ -263,6 +274,10 @@ in
assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
}
{
assertion = cfgZfs.requestEncryptionCredentials -> cfgZfs.enableUnstable;
message = "This feature is only available for zfs unstable. Set the NixOS option boot.zfs.enableUnstable.";
}
];
boot = {
@ -306,6 +321,9 @@ in
done
echo
if [[ -n "$msg" ]]; then echo "$msg"; fi
${lib.optionalString cfgZfs.requestEncryptionCredentials ''
zfs load-key -a
''}
'') rootPools));
};
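A sketch of the new encryption-credentials option; as the assertion above states, it currently requires the unstable ZFS package.

```nix
{
  boot.zfs.enableUnstable = true;               # required for native ZFS encryption
  boot.zfs.requestEncryptionCredentials = true; # runs `zfs load-key -a` during pool import
}
```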

View file

@ -98,22 +98,10 @@ in
'') config.i18n.consoleColors}
'';
/* XXX: systemd-vconsole-setup needs a "main" terminal. By default
* /dev/tty0 is used which wouldn't work when the service is restarted
* from X11. We set this to /dev/tty1; not ideal because it may also be
* owned by X11 or something else.
*
* See #22470.
*/
systemd.services."systemd-vconsole-setup" =
{ wantedBy = [ "sysinit.target" ];
before = [ "display-manager.service" ];
{ before = [ "display-manager.service" ];
after = [ "systemd-udev-settle.service" ];
restartTriggers = [ vconsoleConf kbdEnv ];
serviceConfig.ExecStart = [
""
"${pkgs.systemd}/lib/systemd/systemd-vconsole-setup /dev/tty1"
];
};
}

View file

@ -9,6 +9,12 @@ let
interfaces = attrValues cfg.interfaces;
hasVirtuals = any (i: i.virtual) interfaces;
slaves = concatMap (i: i.interfaces) (attrValues cfg.bonds)
++ concatMap (i: i.interfaces) (attrValues cfg.bridges)
++ concatMap (i: i.interfaces) (attrValues cfg.vswitches)
++ concatMap (i: [i.interface]) (attrValues cfg.macvlans)
++ concatMap (i: [i.interface]) (attrValues cfg.vlans);
# We must escape interfaces due to the systemd interpretation
subsystemDevice = interface:
"sys-subsystem-net-devices-${escapeSystemdPath interface}.device";
@ -105,7 +111,7 @@ let
''
# Set the static DNS configuration, if given.
${pkgs.openresolv}/sbin/resolvconf -m 1 -a static <<EOF
${optionalString (cfg.domain != null) ''
${optionalString (cfg.nameservers != [] && cfg.domain != null) ''
domain ${cfg.domain}
''}
${optionalString (cfg.search != []) ("search " + concatStringsSep " " cfg.search)}
@ -116,24 +122,32 @@ let
# Set the default gateway.
${optionalString (cfg.defaultGateway != null && cfg.defaultGateway.address != "") ''
# FIXME: get rid of "|| true" (necessary to make it idempotent).
ip route add default ${optionalString (cfg.defaultGateway.metric != null)
${optionalString (cfg.defaultGateway.interface != null) ''
ip route replace ${cfg.defaultGateway.address} dev ${cfg.defaultGateway.interface} ${optionalString (cfg.defaultGateway.metric != null)
"metric ${toString cfg.defaultGateway.metric}"
} proto static
''}
ip route replace default ${optionalString (cfg.defaultGateway.metric != null)
"metric ${toString cfg.defaultGateway.metric}"
} via "${cfg.defaultGateway.address}" ${
optionalString (cfg.defaultGatewayWindowSize != null)
"window ${toString cfg.defaultGatewayWindowSize}"} ${
optionalString (cfg.defaultGateway.interface != null)
"dev ${cfg.defaultGateway.interface}"} proto static || true
"dev ${cfg.defaultGateway.interface}"} proto static
''}
${optionalString (cfg.defaultGateway6 != null && cfg.defaultGateway6.address != "") ''
# FIXME: get rid of "|| true" (necessary to make it idempotent).
ip -6 route add ::/0 ${optionalString (cfg.defaultGateway6.metric != null)
${optionalString (cfg.defaultGateway6.interface != null) ''
ip -6 route replace ${cfg.defaultGateway6.address} dev ${cfg.defaultGateway6.interface} ${optionalString (cfg.defaultGateway6.metric != null)
"metric ${toString cfg.defaultGateway6.metric}"
} proto static
''}
ip -6 route replace default ${optionalString (cfg.defaultGateway6.metric != null)
"metric ${toString cfg.defaultGateway6.metric}"
} via "${cfg.defaultGateway6.address}" ${
optionalString (cfg.defaultGatewayWindowSize != null)
"window ${toString cfg.defaultGatewayWindowSize}"} ${
optionalString (cfg.defaultGateway6.interface != null)
"dev ${cfg.defaultGateway6.interface}"} proto static || true
"dev ${cfg.defaultGateway6.interface}"} proto static
''}
'';
};
@ -152,7 +166,11 @@ let
in
nameValuePair "network-addresses-${i.name}"
{ description = "Address configuration of ${i.name}";
wantedBy = [ "network-setup.service" ];
wantedBy = [
"network-setup.service"
"network-link-${i.name}.service"
"network.target"
];
# propagate stop and reload from network-setup
partOf = [ "network-setup.service" ];
# order before network-setup because the routes that are configured
@ -206,7 +224,7 @@ let
after = [ "dev-net-tun.device" "network-pre.target" ];
wantedBy = [ "network-setup.service" (subsystemDevice i.name) ];
partOf = [ "network-setup.service" ];
before = [ "network-setup.service" (subsystemDevice i.name) ];
before = [ "network-setup.service" ];
path = [ pkgs.iproute ];
serviceConfig = {
Type = "oneshot";
@ -232,7 +250,7 @@ let
partOf = [ "network-setup.service" ] ++ optional v.rstp "mstpd.service";
after = [ "network-pre.target" ] ++ deps ++ optional v.rstp "mstpd.service"
++ concatMap (i: [ "network-addresses-${i}.service" "network-link-${i}.service" ]) v.interfaces;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute ];
@ -331,7 +349,7 @@ let
partOf = [ "network-setup.service" ];
after = [ "network-pre.target" ] ++ deps
++ concatMap (i: [ "network-addresses-${i}.service" "network-link-${i}.service" ]) v.interfaces;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute pkgs.gawk ];
@ -369,7 +387,7 @@ let
bindsTo = deps;
partOf = [ "network-setup.service" ];
after = [ "network-pre.target" ] ++ deps;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute ];
@ -394,7 +412,7 @@ let
bindsTo = deps;
partOf = [ "network-setup.service" ];
after = [ "network-pre.target" ] ++ deps;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute ];
@ -422,7 +440,7 @@ let
bindsTo = deps;
partOf = [ "network-setup.service" ];
after = [ "network-pre.target" ] ++ deps;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute ];
@ -465,5 +483,8 @@ in
config = mkMerge [
bondWarnings
(mkIf (!cfg.useNetworkd) normalConfig)
{ # Ensure slave interfaces are brought up
networking.interfaces = genAttrs slaves (i: {});
}
];
}
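The gateway-setup changes above first install an explicit route to the gateway via its interface and then replace the default route; a configuration exercising this might look like the following (addresses are placeholders, and the interface options shown are the module's existing ones).

```nix
{
  networking = {
    interfaces.eth0.ipAddress = "192.0.2.10";
    interfaces.eth0.prefixLength = 24;
    defaultGateway = {
      address = "192.0.2.1";
      interface = "eth0";   # triggers the extra `ip route replace 192.0.2.1 dev eth0` step
      metric = 10;
    };
  };
}
```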

View file

@ -271,7 +271,7 @@ in rec {
tests.kernel-latest = callTest tests/kernel-latest.nix {};
tests.kernel-lts = callTest tests/kernel-lts.nix {};
tests.keystone = callTest tests/keystone.nix {};
tests.kubernetes = hydraJob (import tests/kubernetes.nix { system = "x86_64-linux"; });
tests.kubernetes = hydraJob (import tests/kubernetes/default.nix { system = "x86_64-linux"; });
tests.latestKernel.login = callTest tests/login.nix { latestKernel = true; };
tests.ldap = callTest tests/ldap.nix {};
#tests.lightdm = callTest tests/lightdm.nix {};
@ -283,6 +283,7 @@ in rec {
tests.mumble = callTest tests/mumble.nix {};
tests.munin = callTest tests/munin.nix {};
tests.mysql = callTest tests/mysql.nix {};
tests.mysqlBackup = callTest tests/mysql-backup.nix {};
tests.mysqlReplication = callTest tests/mysql-replication.nix {};
tests.nat.firewall = callTest tests/nat.nix { withFirewall = true; };
tests.nat.firewall-conntrack = callTest tests/nat.nix { withFirewall = true; withConntrackHelpers = true; };

View file

@ -10,6 +10,17 @@ import ./make-test.nix ({ pkgs, ...} : {
{ users.users.alice = { isNormalUser = true; extraGroups = [ "proc" ]; };
users.users.sybil = { isNormalUser = true; group = "wheel"; };
imports = [ ../modules/profiles/hardened.nix ];
virtualisation.emptyDiskImages = [ 4096 ];
boot.initrd.postDeviceCommands = ''
${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb
'';
fileSystems = lib.mkVMOverride {
"/efi" = {
device = "/dev/disk/by-label/EFISYS";
fsType = "vfat";
options = [ "noauto" ];
};
};
};
testScript =
@ -42,5 +53,13 @@ import ./make-test.nix ({ pkgs, ...} : {
subtest "kcore", sub {
$machine->fail("cat /proc/kcore");
};
# Test deferred mount
subtest "mount", sub {
$machine->fail("mountpoint -q /efi"); # was deferred
$machine->execute("mkdir -p /efi");
$machine->succeed("mount /dev/disk/by-label/EFISYS /efi");
$machine->succeed("mountpoint -q /efi"); # now mounted
};
'';
})

View file

@ -1,409 +0,0 @@
{ system ? builtins.currentSystem }:
with import ../lib/testing.nix { inherit system; };
with import ../lib/qemu-flags.nix;
with pkgs.lib;
let
redisPod = pkgs.writeText "redis-master-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "redis";
metadata.labels.name = "redis";
spec.containers = [{
name = "redis";
image = "redis";
args = ["--bind" "0.0.0.0"];
imagePullPolicy = "Never";
ports = [{
name = "redis-server";
containerPort = 6379;
}];
}];
});
redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
kind = "Service";
apiVersion = "v1";
metadata.name = "redis";
spec = {
ports = [{port = 6379; targetPort = 6379;}];
selector = {name = "redis";};
};
});
redisImage = pkgs.dockerTools.buildImage {
name = "redis";
tag = "latest";
contents = pkgs.redis;
config.Entrypoint = "/bin/redis-server";
};
testSimplePod = ''
$kubernetes->execute("docker load < ${redisImage}");
$kubernetes->waitUntilSucceeds("kubectl create -f ${redisPod}");
$kubernetes->succeed("kubectl create -f ${redisService}");
$kubernetes->waitUntilSucceeds("kubectl get pod redis | grep Running");
$kubernetes->succeed("nc -z \$\(dig \@10.10.0.1 redis.default.svc.cluster.local +short\) 6379");
'';
in {
# This test runs kubernetes on a single node
trivial = makeTest {
name = "kubernetes-trivial";
nodes = {
kubernetes =
{ config, pkgs, lib, nodes, ... }:
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 2048;
programs.bash.enableCompletion = true;
environment.systemPackages = with pkgs; [ netcat bind ];
services.kubernetes.roles = ["master" "node"];
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0";
networking.bridges.cbr0.interfaces = [];
networking.interfaces.cbr0 = {};
};
};
testScript = ''
startAll;
$kubernetes->waitUntilSucceeds("kubectl get nodes | grep kubernetes | grep Ready");
${testSimplePod}
'';
};
cluster = let
runWithOpenSSL = file: cmd: pkgs.runCommand file {
buildInputs = [ pkgs.openssl ];
} cmd;
ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
ca_pem = runWithOpenSSL "ca.pem" ''
openssl req \
-x509 -new -nodes -key ${ca_key} \
-days 10000 -out $out -subj "/CN=etcd-ca"
'';
etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
etcd_csr = runWithOpenSSL "etcd.csr" ''
openssl req \
-new -key ${etcd_key} \
-out $out -subj "/CN=etcd" \
-config ${openssl_cnf}
'';
etcd_cert = runWithOpenSSL "etcd.pem" ''
openssl x509 \
-req -in ${etcd_csr} \
-CA ${ca_pem} -CAkey ${ca_key} \
-CAcreateserial -out $out \
-days 365 -extensions v3_req \
-extfile ${openssl_cnf}
'';
etcd_client_key = runWithOpenSSL "etcd-client-key.pem"
"openssl genrsa -out $out 2048";
etcd_client_csr = runWithOpenSSL "etcd-client-key.pem" ''
openssl req \
-new -key ${etcd_client_key} \
-out $out -subj "/CN=etcd-client" \
-config ${client_openssl_cnf}
'';
etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
openssl x509 \
-req -in ${etcd_client_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 365 -extensions v3_req \
-extfile ${client_openssl_cnf}
'';
apiserver_key = runWithOpenSSL "apiserver-key.pem" "openssl genrsa -out $out 2048";
apiserver_csr = runWithOpenSSL "apiserver.csr" ''
openssl req \
-new -key ${apiserver_key} \
-out $out -subj "/CN=kube-apiserver" \
-config ${apiserver_cnf}
'';
apiserver_cert = runWithOpenSSL "apiserver.pem" ''
openssl x509 \
-req -in ${apiserver_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 365 -extensions v3_req \
-extfile ${apiserver_cnf}
'';
worker_key = runWithOpenSSL "worker-key.pem" "openssl genrsa -out $out 2048";
worker_csr = runWithOpenSSL "worker.csr" ''
openssl req \
-new -key ${worker_key} \
-out $out -subj "/CN=kube-worker" \
-config ${worker_cnf}
'';
worker_cert = runWithOpenSSL "worker.pem" ''
openssl x509 \
-req -in ${worker_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 365 -extensions v3_req \
-extfile ${worker_cnf}
'';
openssl_cnf = pkgs.writeText "openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = etcd1
DNS.2 = etcd2
DNS.3 = etcd3
IP.1 = 127.0.0.1
'';
client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
'';
apiserver_cnf = pkgs.writeText "apiserver-openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
IP.1 = 10.10.10.1
'';
worker_cnf = pkgs.writeText "worker-openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubeWorker1
DNS.2 = kubeWorker2
'';
etcdNodeConfig = {
virtualisation.memorySize = 128;
services = {
etcd = {
enable = true;
keyFile = etcd_key;
certFile = etcd_cert;
trustedCaFile = ca_pem;
peerClientCertAuth = true;
listenClientUrls = ["https://0.0.0.0:2379"];
listenPeerUrls = ["https://0.0.0.0:2380"];
};
};
environment.variables = {
ETCDCTL_CERT_FILE = "${etcd_client_cert}";
ETCDCTL_KEY_FILE = "${etcd_client_key}";
ETCDCTL_CA_FILE = "${ca_pem}";
ETCDCTL_PEERS = "https://127.0.0.1:2379";
};
networking.firewall.allowedTCPPorts = [ 2379 2380 ];
};
kubeConfig = {
virtualisation.diskSize = 2048;
programs.bash.enableCompletion = true;
services.flannel = {
enable = true;
network = "10.10.0.0/16";
iface = "eth1";
etcd = {
endpoints = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
keyFile = etcd_client_key;
certFile = etcd_client_cert;
caFile = ca_pem;
};
};
# vxlan
networking.firewall.allowedUDPPorts = [ 8472 ];
systemd.services.docker.after = ["flannel.service"];
systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/subnet.env";
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --bip $FLANNEL_SUBNET";
services.kubernetes.verbose = true;
services.kubernetes.etcd = {
servers = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
keyFile = etcd_client_key;
certFile = etcd_client_cert;
caFile = ca_pem;
};
environment.systemPackages = [ pkgs.bind pkgs.tcpdump pkgs.utillinux ];
};
kubeMasterConfig = {pkgs, ...}: {
require = [kubeConfig];
# kube apiserver
networking.firewall.allowedTCPPorts = [ 443 ];
virtualisation.memorySize = 512;
services.kubernetes = {
roles = ["master"];
scheduler.leaderElect = true;
controllerManager.leaderElect = true;
apiserver = {
publicAddress = "0.0.0.0";
advertiseAddress = "192.168.1.8";
tlsKeyFile = apiserver_key;
tlsCertFile = apiserver_cert;
clientCaFile = ca_pem;
kubeletClientCaFile = ca_pem;
kubeletClientKeyFile = worker_key;
kubeletClientCertFile = worker_cert;
};
};
};
kubeWorkerConfig = { pkgs, ... }: {
require = [kubeConfig];
virtualisation.memorySize = 512;
# kubelet
networking.firewall.allowedTCPPorts = [ 10250 ];
services.kubernetes = {
roles = ["node"];
kubeconfig = {
server = "https://kubernetes:443";
caFile = ca_pem;
certFile = worker_cert;
keyFile = worker_key;
};
kubelet = {
tlsKeyFile = worker_key;
tlsCertFile = worker_cert;
};
};
};
in makeTest {
name = "kubernetes-cluster";
nodes = {
etcd1 = { config, pkgs, nodes, ... }: {
require = [etcdNodeConfig];
services.etcd = {
advertiseClientUrls = ["https://etcd1:2379"];
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
initialAdvertisePeerUrls = ["https://etcd1:2380"];
};
};
etcd2 = { config, pkgs, ... }: {
require = [etcdNodeConfig];
services.etcd = {
advertiseClientUrls = ["https://etcd2:2379"];
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
initialAdvertisePeerUrls = ["https://etcd2:2380"];
};
};
etcd3 = { config, pkgs, ... }: {
require = [etcdNodeConfig];
services.etcd = {
advertiseClientUrls = ["https://etcd3:2379"];
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
initialAdvertisePeerUrls = ["https://etcd3:2380"];
};
};
kubeMaster1 = { config, pkgs, lib, nodes, ... }: {
require = [kubeMasterConfig];
};
kubeMaster2 = { config, pkgs, lib, nodes, ... }: {
require = [kubeMasterConfig];
};
# Kubernetes TCP load balancer
kubernetes = { config, pkgs, ... }: {
# kubernetes
networking.firewall.allowedTCPPorts = [ 443 ];
services.haproxy.enable = true;
services.haproxy.config = ''
global
log 127.0.0.1 local0 notice
user haproxy
group haproxy
defaults
log global
retries 2
timeout connect 3000
timeout server 5000
timeout client 5000
listen kubernetes
bind 0.0.0.0:443
mode tcp
option ssl-hello-chk
balance roundrobin
server kube-master-1 kubeMaster1:443 check
server kube-master-2 kubeMaster2:443 check
'';
};
kubeWorker1 = { config, pkgs, lib, nodes, ... }: {
require = [kubeWorkerConfig];
};
kubeWorker2 = { config, pkgs, lib, nodes, ... }: {
require = [kubeWorkerConfig];
};
};
testScript = ''
startAll;
${testSimplePod}
'';
};
}

View file

@ -0,0 +1,113 @@
{ system ? builtins.currentSystem }:
with import ../../lib/testing.nix { inherit system; };
with import ../../lib/qemu-flags.nix;
with pkgs.lib;
let
mkKubernetesBaseTest =
{ name, domain ? "my.zyx", test, machines
, pkgs ? import <nixpkgs> { inherit system; }
, certs ? import ./certs.nix { inherit pkgs; externalDomain = domain; }
, extraConfiguration ? null }:
let
masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
master = machines.${masterName};
extraHosts = ''
${master.ip} etcd.${domain}
${master.ip} api.${domain}
${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip} ${machineName}.${domain}") (attrNames machines)}
'';
in makeTest {
inherit name;
nodes = mapAttrs (machineName: machine:
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = mkDefault 768;
virtualisation.diskSize = mkDefault 4096;
networking = {
inherit domain extraHosts;
primaryIPAddress = mkForce machine.ip;
firewall = {
allowedTCPPorts = [
10250 # kubelet
];
trustedInterfaces = ["docker0"];
extraCommands = concatMapStrings (node: ''
iptables -A INPUT -s ${node.config.networking.primaryIPAddress} -j ACCEPT
'') (attrValues nodes);
};
};
programs.bash.enableCompletion = true;
environment.variables = {
ETCDCTL_CERT_FILE = "${certs.worker}/etcd-client.pem";
ETCDCTL_KEY_FILE = "${certs.worker}/etcd-client-key.pem";
ETCDCTL_CA_FILE = "${certs.worker}/ca.pem";
ETCDCTL_PEERS = "https://etcd.${domain}:2379";
};
services.flannel.iface = "eth1";
services.kubernetes.apiserver.advertiseAddress = master.ip;
}
(optionalAttrs (any (role: role == "master") machine.roles) {
networking.firewall.allowedTCPPorts = [
2379 2380 # etcd
443 # kubernetes apiserver
];
services.etcd = {
enable = true;
certFile = "${certs.master}/etcd.pem";
keyFile = "${certs.master}/etcd-key.pem";
trustedCaFile = "${certs.master}/ca.pem";
peerClientCertAuth = true;
listenClientUrls = ["https://0.0.0.0:2379"];
listenPeerUrls = ["https://0.0.0.0:2380"];
advertiseClientUrls = ["https://etcd.${config.networking.domain}:2379"];
initialCluster = ["${masterName}=https://etcd.${config.networking.domain}:2380"];
initialAdvertisePeerUrls = ["https://etcd.${config.networking.domain}:2380"];
};
})
(import ./kubernetes-common.nix { inherit (machine) roles; inherit pkgs config certs; })
(optionalAttrs (machine ? "extraConfiguration") (machine.extraConfiguration { inherit config pkgs lib nodes; }))
(optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
]
) machines;
testScript = ''
startAll;
${test}
'';
};
mkKubernetesMultiNodeTest = attrs: mkKubernetesBaseTest ({
machines = {
machine1 = {
roles = ["master"];
ip = "192.168.1.1";
};
machine2 = {
roles = ["node"];
ip = "192.168.1.2";
};
};
} // attrs // {
name = "kubernetes-${attrs.name}-multinode";
});
mkKubernetesSingleNodeTest = attrs: mkKubernetesBaseTest ({
machines = {
machine1 = {
roles = ["master" "node"];
ip = "192.168.1.1";
};
};
} // attrs // {
name = "kubernetes-${attrs.name}-singlenode";
});
in {
inherit mkKubernetesBaseTest mkKubernetesSingleNodeTest mkKubernetesMultiNodeTest;
}

View file

@ -0,0 +1,185 @@
{
pkgs ? import <nixpkgs> {},
internalDomain ? "cloud.yourdomain.net",
externalDomain ? "myawesomecluster.cluster.yourdomain.net",
serviceClusterIp ? "10.0.0.1"
}:
let
runWithCFSSL = name: cmd:
builtins.fromJSON (builtins.readFile (
pkgs.runCommand "${name}-cfss.json" {
buildInputs = [ pkgs.cfssl ];
} "cfssl ${cmd} > $out"
));
writeCFSSL = content:
pkgs.runCommand content.name {
buildInputs = [ pkgs.cfssl ];
} ''
mkdir -p $out
cd $out
cat ${writeFile content} | cfssljson -bare ${content.name}
'';
noCSR = content: pkgs.lib.filterAttrs (n: v: n != "csr") content;
noKey = content: pkgs.lib.filterAttrs (n: v: n != "key") content;
writeFile = content: pkgs.writeText "content" (
if pkgs.lib.isAttrs content then builtins.toJSON content
else toString content
);
createServingCertKey = { ca, cn, hosts? [], size ? 2048, name ? cn }:
noCSR (
(runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=server -config=${writeFile ca.config} ${writeFile {
CN = cn;
hosts = hosts;
key = { algo = "rsa"; inherit size; };
}}") // { inherit name; }
);
createClientCertKey = { ca, cn, groups ? [], size ? 2048, name ? cn }:
noCSR (
(runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=client -config=${writeFile ca.config} ${writeFile {
CN = cn;
names = map (group: {O = group;}) groups;
hosts = [""];
key = { algo = "rsa"; inherit size; };
}}") // { inherit name; }
);
createSigningCertKey = { C ? "xx", ST ? "x", L ? "x", O ? "x", OU ? "x", CN ? "ca", emailAddress ? "x", expiry ? "43800h", size ? 2048, name ? CN }:
(noCSR (runWithCFSSL CN "genkey -initca ${writeFile {
key = { algo = "rsa"; inherit size; };
names = [{ inherit C ST L O OU CN emailAddress; }];
}}")) // {
inherit name;
config.signing = {
default.expiry = expiry;
profiles = {
server = {
inherit expiry;
usages = [
"signing"
"key encipherment"
"server auth"
];
};
client = {
inherit expiry;
usages = [
"signing"
"key encipherment"
"client auth"
];
};
peer = {
inherit expiry;
usages = [
"signing"
"key encipherment"
"server auth"
"client auth"
];
};
};
};
};
ca = createSigningCertKey {};
kube-apiserver = createServingCertKey {
inherit ca;
cn = "kube-apiserver";
hosts = ["kubernetes.default" "kubernetes.default.svc" "localhost" "api.${externalDomain}" serviceClusterIp];
};
kubelet = createServingCertKey {
inherit ca;
cn = "kubelet";
hosts = ["*.${externalDomain}"];
};
service-accounts = createServingCertKey {
inherit ca;
cn = "kube-service-accounts";
};
etcd = createServingCertKey {
inherit ca;
cn = "etcd";
hosts = ["etcd.${externalDomain}"];
};
etcd-client = createClientCertKey {
inherit ca;
cn = "etcd-client";
};
kubelet-client = createClientCertKey {
inherit ca;
cn = "kubelet-client";
groups = ["system:masters"];
};
apiserver-client = {
kubelet = createClientCertKey {
inherit ca;
cn = "apiserver-client-kubelet";
groups = ["system:nodes"];
};
kube-proxy = createClientCertKey {
inherit ca;
name = "apiserver-client-kube-proxy";
cn = "system:kube-proxy";
groups = ["system:kube-proxy" "system:nodes"];
};
kube-controller-manager = createClientCertKey {
inherit ca;
name = "apiserver-client-kube-controller-manager";
cn = "system:kube-controller-manager";
groups = ["system:masters"];
};
kube-scheduler = createClientCertKey {
inherit ca;
name = "apiserver-client-kube-scheduler";
cn = "system:kube-scheduler";
groups = ["system:kube-scheduler"];
};
admin = createClientCertKey {
inherit ca;
cn = "admin";
groups = ["system:masters"];
};
};
in {
master = pkgs.buildEnv {
name = "master-keys";
paths = [
(writeCFSSL (noKey ca))
(writeCFSSL kube-apiserver)
(writeCFSSL kubelet-client)
(writeCFSSL apiserver-client.kube-controller-manager)
(writeCFSSL apiserver-client.kube-scheduler)
(writeCFSSL service-accounts)
(writeCFSSL etcd)
];
};
worker = pkgs.buildEnv {
name = "worker-keys";
paths = [
(writeCFSSL (noKey ca))
(writeCFSSL kubelet)
(writeCFSSL apiserver-client.kubelet)
(writeCFSSL apiserver-client.kube-proxy)
(writeCFSSL etcd-client)
];
};
admin = writeCFSSL apiserver-client.admin;
}

View file

@ -0,0 +1,7 @@
{ system ? builtins.currentSystem }:
{
dns = import ./dns.nix { inherit system; };
# e2e = import ./e2e.nix { inherit system; }; # TODO: make it pass
# the following test(s) can be removed when e2e is working:
rbac = import ./rbac.nix { inherit system; };
}

View file

@ -0,0 +1,127 @@
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
let
domain = "my.zyx";
certs = import ./certs.nix { externalDomain = domain; };
redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "redis";
metadata.labels.name = "redis";
spec.containers = [{
name = "redis";
image = "redis";
args = ["--bind" "0.0.0.0"];
imagePullPolicy = "Never";
ports = [{
name = "redis-server";
containerPort = 6379;
}];
}];
});
redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
kind = "Service";
apiVersion = "v1";
metadata.name = "redis";
spec = {
ports = [{port = 6379; targetPort = 6379;}];
selector = {name = "redis";};
};
});
redisImage = pkgs.dockerTools.buildImage {
name = "redis";
tag = "latest";
contents = [ pkgs.redis pkgs.bind.host ];
config.Entrypoint = "/bin/redis-server";
};
probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "probe";
metadata.labels.name = "probe";
spec.containers = [{
name = "probe";
image = "probe";
args = [ "-f" ];
tty = true;
imagePullPolicy = "Never";
}];
});
probeImage = pkgs.dockerTools.buildImage {
name = "probe";
tag = "latest";
contents = [ pkgs.bind.host pkgs.busybox ];
config.Entrypoint = "/bin/tail";
};
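# Forward cluster.local queries on the host to kube-dns via dnsmasq so the host-level lookups below can resolve service names.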
extraConfiguration = { config, pkgs, lib, nodes, ... }: {
environment.systemPackages = [ pkgs.bind.host ];
# virtualisation.docker.extraOptions = "--dns=${config.services.kubernetes.addons.dns.clusterIp}";
services.dnsmasq.enable = true;
services.dnsmasq.servers = [
"/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
];
};
base = {
name = "dns";
inherit domain certs extraConfiguration;
};
singleNodeTest = {
test = ''
# prepare machine1 for test
$machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
$machine1->execute("docker load < ${redisImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
$machine1->execute("docker load < ${probeImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
# check if pods are running
$machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
$machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
$machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'kube-dns.*3/3'");
# check dns on host (dnsmasq)
$machine1->succeed("host redis.default.svc.cluster.local");
# check dns inside the container
$machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
'';
};
multiNodeTest = {
test = ''
# prepare machines for test
$machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
$machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
$machine2->execute("docker load < ${redisImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
$machine2->execute("docker load < ${probeImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
# check if pods are running
$machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
$machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
$machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'kube-dns.*3/3'");
# check dns on hosts (dnsmasq)
$machine1->succeed("host redis.default.svc.cluster.local");
$machine2->succeed("host redis.default.svc.cluster.local");
# check dns inside the container
$machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
'';
};
in {
singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
}

View file

@@ -0,0 +1,40 @@
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
let
domain = "my.zyx";
certs = import ./certs.nix { externalDomain = domain; };
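# Admin kubeconfig pointing at the test API server; consumed by the upstream e2e conformance binary below.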
kubeconfig = pkgs.writeText "kubeconfig.json" (builtins.toJSON {
apiVersion = "v1";
kind = "Config";
clusters = [{
name = "local";
cluster.certificate-authority = "${certs.master}/ca.pem";
cluster.server = "https://api.${domain}";
}];
users = [{
name = "kubelet";
user = {
client-certificate = "${certs.admin}/admin.pem";
client-key = "${certs.admin}/admin-key.pem";
};
}];
contexts = [{
context = {
cluster = "local";
user = "kubelet";
};
current-context = "kubelet-context";
}];
});
base = {
name = "e2e";
inherit domain certs;
test = ''
$machine1->succeed("e2e.test -kubeconfig ${kubeconfig} -provider local -ginkgo.focus '\\[Conformance\\]' -ginkgo.skip '\\[Flaky\\]|\\[Serial\\]'");
'';
};
in {
singlenode = mkKubernetesSingleNodeTest base;
multinode = mkKubernetesMultiNodeTest base;
}

View file

@@ -0,0 +1,59 @@
{ roles, config, pkgs, certs }:
with pkgs.lib;
let
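# services.kubernetes settings shared by the master and worker test nodes; all certificate paths come from certs.nix.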
base = {
inherit roles;
featureGates = ["AllAlpha"];
flannel.enable = true;
addons.dashboard.enable = true;
verbose = true;
caFile = "${certs.master}/ca.pem";
apiserver = {
tlsCertFile = "${certs.master}/kube-apiserver.pem";
tlsKeyFile = "${certs.master}/kube-apiserver-key.pem";
kubeletClientCertFile = "${certs.master}/kubelet-client.pem";
kubeletClientKeyFile = "${certs.master}/kubelet-client-key.pem";
serviceAccountKeyFile = "${certs.master}/kube-service-accounts.pem";
};
etcd = {
servers = ["https://etcd.${config.networking.domain}:2379"];
certFile = "${certs.worker}/etcd-client.pem";
keyFile = "${certs.worker}/etcd-client-key.pem";
};
kubeconfig = {
server = "https://api.${config.networking.domain}";
};
kubelet = {
tlsCertFile = "${certs.worker}/kubelet.pem";
tlsKeyFile = "${certs.worker}/kubelet-key.pem";
hostname = "${config.networking.hostName}.${config.networking.domain}";
kubeconfig = {
certFile = "${certs.worker}/apiserver-client-kubelet.pem";
keyFile = "${certs.worker}/apiserver-client-kubelet-key.pem";
};
};
controllerManager = {
serviceAccountKeyFile = "${certs.master}/kube-service-accounts-key.pem";
kubeconfig = {
certFile = "${certs.master}/apiserver-client-kube-controller-manager.pem";
keyFile = "${certs.master}/apiserver-client-kube-controller-manager-key.pem";
};
};
scheduler = {
kubeconfig = {
certFile = "${certs.master}/apiserver-client-kube-scheduler.pem";
keyFile = "${certs.master}/apiserver-client-kube-scheduler-key.pem";
};
};
proxy = {
kubeconfig = {
certFile = "${certs.worker}/apiserver-client-kube-proxy.pem";
keyFile = "${certs.worker}//apiserver-client-kube-proxy-key.pem";
};
};
};
in {
services.kubernetes = base;
}

View file

@@ -0,0 +1,137 @@
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
let
roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
kind = "ServiceAccount";
apiVersion = "v1";
metadata = {
name = "read-only";
namespace = "default";
};
});
roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "RoleBinding";
metadata = {
name = "read-pods";
namespace = "default";
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "Role";
name = "pod-reader";
};
subjects = [{
kind = "ServiceAccount";
name = "read-only";
namespace = "default";
}];
});
roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "Role";
metadata = {
name = "pod-reader";
namespace = "default";
};
rules = [{
apiGroups = [""];
resources = ["pods"];
verbs = ["get" "list" "watch"];
}];
});
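# Pod running kubectl under the read-only service account; the tests expect reads to succeed and writes to be denied.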
kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl";
metadata.namespace = "default";
metadata.labels.name = "kubectl";
spec.serviceAccountName = "read-only";
spec.containers = [{
name = "kubectl";
image = "kubectl:latest";
command = ["/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});
kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl-2";
metadata.namespace = "default";
metadata.labels.name = "kubectl-2";
spec.serviceAccountName = "read-only";
spec.containers = [{
name = "kubectl-2";
image = "kubectl:latest";
command = ["/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});
kubectl = pkgs.runCommand "copy-kubectl" { buildInputs = [ pkgs.kubernetes ]; } ''
mkdir -p $out/bin
cp ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl
'';
kubectlImage = pkgs.dockerTools.buildImage {
name = "kubectl";
tag = "latest";
contents = [ kubectl pkgs.busybox kubectlPod2 ];
config.Entrypoint = "/bin/sh";
};
base = {
name = "rbac";
};
singlenode = base // {
test = ''
$machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
$machine1->execute("docker load < ${kubectlImage}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
$machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
$machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
$machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
$machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
$machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
'';
};
multinode = base // {
test = ''
$machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
$machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
$machine2->execute("docker load < ${kubectlImage}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
$machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
$machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
$machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
$machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
$machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
'';
};
in {
singlenode = mkKubernetesSingleNodeTest singlenode;
multinode = mkKubernetesMultiNodeTest multinode;
}

View file

@@ -56,9 +56,7 @@ import ./make-test.nix ({ pkgs, ...} : rec {
src = ./mesos_test.py;
phases = [ "installPhase" "fixupPhase" ];
installPhase = ''
mkdir $out
cp $src $out/mesos_test.py
chmod +x $out/mesos_test.py
install -Dvm 0755 $src $out/bin/mesos_test.py
echo "done" > test.result
tar czf $out/test.tar.gz test.result
@@ -74,18 +72,18 @@ import ./make-test.nix ({ pkgs, ...} : rec {
$master->waitForOpenPort(5050);
$slave->waitForOpenPort(5051);
# is slave registred?
# is slave registered?
$master->waitUntilSucceeds("curl -s --fail http://master:5050/master/slaves".
" | grep -q \"\\\"hostname\\\":\\\"slave\\\"\"");
# try to run docker image
# try to run docker image
$master->succeed("${pkgs.mesos}/bin/mesos-execute --master=master:5050".
" --resources=\"cpus:0.1;mem:32\" --name=simple-docker".
" --containerizer=mesos --docker_image=echo:latest".
" --shell=true --command=\"echo done\" | grep -q TASK_FINISHED");
# simple command with .tar.gz uri
$master->succeed("${testFramework}/mesos_test.py master ".
$master->succeed("${testFramework}/bin/mesos_test.py master ".
"${testFramework}/test.tar.gz");
'';
})

View file

@@ -0,0 +1,42 @@
# Test whether mysqlBackup option works
import ./make-test.nix ({ pkgs, ... } : {
name = "mysql-backup";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ rvl ];
};
nodes = {
master = { config, pkgs, ... }: {
services.mysql = {
enable = true;
initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
package = pkgs.mysql;
};
services.mysqlBackup = {
enable = true;
databases = [ "doesnotexist" "testdb" ];
};
};
};
testScript =
'' startAll;
# Need to have mysql started so that it can be populated with data.
$master->waitForUnit("mysql.service");
# Wait for testdb to be populated.
$master->sleep(10);
# Do a backup and wait for it to finish.
$master->startJob("mysql-backup.service");
$master->waitForJob("mysql-backup.service");
# Check that data appears in backup
$master->succeed("${pkgs.gzip}/bin/zcat /var/backup/mysql/testdb.gz | grep hello");
# Check that a failed backup is logged
$master->succeed("journalctl -u mysql-backup.service | grep 'fail.*doesnotexist' > /dev/null");
'';
})

View file

@@ -43,6 +43,7 @@ in
});
})
];
system.stateVersion = "17.03";
};
radicale1_export = lib.recursiveUpdate radicale1 {
services.radicale.extraArgs = [

View file

@@ -8,3 +8,4 @@ insert into tests values (1, 'a');
insert into tests values (2, 'b');
insert into tests values (3, 'c');
insert into tests values (4, 'd');
insert into tests values (5, 'hello');

View file

@@ -461,11 +461,11 @@ in mapAttrs mkVBoxTest {
my $test1IP = waitForIP_test1 1;
my $test2IP = waitForIP_test2 1;
$machine->succeed("echo '$test2IP' | nc '$test1IP' 1234");
$machine->succeed("echo '$test1IP' | nc '$test2IP' 1234");
$machine->succeed("echo '$test2IP' | nc -N '$test1IP' 1234");
$machine->succeed("echo '$test1IP' | nc -N '$test2IP' 1234");
$machine->waitUntilSucceeds("nc '$test1IP' 5678 >&2");
$machine->waitUntilSucceeds("nc '$test2IP' 5678 >&2");
$machine->waitUntilSucceeds("nc -N '$test1IP' 5678 < /dev/null >&2");
$machine->waitUntilSucceeds("nc -N '$test2IP' 5678 < /dev/null >&2");
shutdownVM_test1;
shutdownVM_test2;

View file

@@ -0,0 +1,43 @@
{ stdenv, fetchFromGitHub, pkgconfig, autoreconfHook, openssl, db48, boost
, zlib, miniupnpc, qt5, utillinux, protobuf, qrencode, libevent
, withGui }:
with stdenv.lib;
stdenv.mkDerivation rec {
name = "bitcoin" + (toString (optional (!withGui) "d")) + "-abc-" + version;
version = "0.15.0";
src = fetchFromGitHub {
owner = "bitcoin-ABC";
repo = "bitcoin-abc";
rev = "v${version}";
sha256 = "1fygn6cc99iasg5g5jyps5ps873hfnn4ln4hsmcwlwiqd591qxyv";
};
patches = [ ./fix-bitcoin-qt-build.patch ];
nativeBuildInputs = [ pkgconfig autoreconfHook ];
buildInputs = [ openssl db48 boost zlib
miniupnpc utillinux protobuf libevent ]
++ optionals withGui [ qt5.qtbase qt5.qttools qrencode ];
configureFlags = [ "--with-boost-libdir=${boost.out}/lib" ]
++ optionals withGui [ "--with-gui=qt5" ];
meta = {
description = "Peer-to-peer electronic cash system (Cash client)";
longDescription= ''
Bitcoin ABC is the name of open source software which enables the use of Bitcoin.
It is designed to facilitate a hard fork to increase Bitcoin's block size limit.
"ABC" stands for "Adjustable Blocksize Cap".
Bitcoin ABC is a fork of the Bitcoin Core software project.
'';
homepage = https://bitcoinabc.org/;
maintainers = with maintainers; [ lassulus ];
license = licenses.mit;
platforms = platforms.unix;
};
}

View file

@@ -5,13 +5,11 @@
with stdenv.lib;
stdenv.mkDerivation rec{
name = "bitcoin" + (toString (optional (!withGui) "d")) + "-" + version;
version = "0.15.0";
version = "0.15.0.1";
src = fetchurl {
urls = [ "https://bitcoin.org/bin/bitcoin-core-${version}/bitcoin-${version}.tar.gz"
"mirror://sourceforge/bitcoin/Bitcoin/bitcoin-${version}/bitcoin-${version}.tar.gz"
];
sha256 = "18gj5gdscarv2a1hdgjps50czwi4hrmrrmhssaag55ysh94zbdjl";
url = "https://bitcoin.org/bin/bitcoin-core-${version}/bitcoin-${version}.tar.gz";
sha256 = "16si3skhm6jhw1pkniv2b9y1kkdhjmhj392palphir0qc1srwzmm";
};
nativeBuildInputs = [ pkgconfig autoreconfHook ];

View file

@@ -5,6 +5,9 @@ rec {
bitcoin = callPackage ./bitcoin.nix { withGui = true; };
bitcoind = callPackage ./bitcoin.nix { withGui = false; };
bitcoin-abc = callPackage ./bitcoin-abc.nix { withGui = true; };
bitcoind-abc = callPackage ./bitcoin-abc.nix { withGui = false; };
bitcoin-unlimited = callPackage ./bitcoin-unlimited.nix { withGui = true; };
bitcoind-unlimited = callPackage ./bitcoin-unlimited.nix { withGui = false; };

View file

@@ -0,0 +1,15 @@
--- bitcoin-abc-v0.15.0-src/build-aux/m4/bitcoin_qt.m4 1970-01-01 01:00:01.000000000 +0100
+++ bitcoin-abc-v0.15.0-src.org/build-aux/m4/bitcoin_qt.m4 2017-09-27 23:38:44.748384197 +0100
@@ -35,11 +35,7 @@
dnl Output: $1 is set to the path of $2 if found. $2 are searched in order.
AC_DEFUN([BITCOIN_QT_PATH_PROGS],[
BITCOIN_QT_CHECK([
- if test "x$3" != "x"; then
- AC_PATH_PROGS($1,$2,,$3)
- else
- AC_PATH_PROGS($1,$2)
- fi
+ AC_PATH_PROGS($1,$2)
if test "x$$1" = "x" && test "x$4" != "xyes"; then
BITCOIN_QT_FAIL([$1 not found])
fi

View file

@@ -40,6 +40,6 @@ stdenv.mkDerivation rec {
license = stdenv.lib.licenses.gpl2Plus;
maintainers = [ ];
platforms = stdenv.lib.platforms.unix;
platforms = stdenv.lib.platforms.linux;
};
}

View file

@@ -0,0 +1,37 @@
{ stdenv
, cmake
, extra-cmake-modules
, plasma-framework
, kwindowsystem
, fetchFromGitHub
}:
stdenv.mkDerivation rec {
name = "playbar2-${version}";
version = "2.5";
src = fetchFromGitHub {
owner = "audoban";
repo = "PlayBar2";
rev = "v${version}";
sha256 = "0iv2m4flgaz2r0k7f6l0ca8p6cw8j8j2gin1gci2pg3l5g5khbch";
};
nativeBuildInputs = [
cmake
extra-cmake-modules
];
buildInputs = [
plasma-framework
kwindowsystem
];
meta = with stdenv.lib; {
description = "Mpris2 Client for Plasma5";
homepage = https://github.com/audoban/PlayBar2;
license = licenses.gpl3;
platforms = platforms.linux;
maintainers = with maintainers; [ pjones ];
};
}

View file

@@ -0,0 +1,15 @@
Dump temacs in an empty environment to prevent -dev paths from ending
up in the dumped image.
diff -ru -x '*~' emacs-25.3/src/Makefile.in emacs-25.3-new/src/Makefile.in
--- emacs-25.3/src/Makefile.in 2017-04-14 17:02:47.000000000 +0200
+++ emacs-25.3-new/src/Makefile.in 2017-09-25 19:03:02.173861038 +0200
@@ -532,7 +532,7 @@
ifeq ($(CANNOT_DUMP),yes)
ln -f temacs$(EXEEXT) $@
else
- LC_ALL=C $(RUN_TEMACS) -batch -l loadup dump
+ env -i LC_ALL=C $(RUN_TEMACS) -batch -l loadup dump
ifneq ($(PAXCTL_dumped),)
$(PAXCTL_dumped) $@
endif

Some files were not shown because too many files have changed in this diff.