Merge remote-tracking branch 'origin/master' into gcc-6

Eelco Dolstra 2016-12-21 15:51:18 +01:00
commit 3373a55cac
No known key found for this signature in database
GPG key ID: 8170B4726D7198DE
1440 changed files with 47512 additions and 21655 deletions


@@ -32,4 +32,4 @@ See the nixpkgs manual for more details on how to [Submit changes to nixpkgs](ht
## Reviewing contributions
See the nixpkgs manual for more details on how to [Review contributions](http://hydra.nixos.org/job/nixpkgs/trunk/manual/latest/download-by-type/doc/manual#chap-reviewing-contributions).
See the nixpkgs manual for more details on how to [Review contributions](https://nixos.org/nixpkgs/manual/#sec-reviewing-contributions).


@@ -1,10 +1,12 @@
{
"userBlacklist": [
"civodul",
"jhasse"
"jhasse",
"shlevy"
],
"alwaysNotifyForPaths": [
{ "name": "FRidh", "files": ["pkgs/top-level/python-packages.nix", "pkgs/development/interpreters/python/*", "pkgs/development/python-modules/*" ] },
{ "name": "LnL7", "files": ["pkgs/stdenv/darwin/*", "pkgs/os-specific/darwin/*"] },
{ "name": "copumpkin", "files": ["pkgs/stdenv/darwin/*", "pkgs/os-specific/darwin/apple-source-releases/*"] }
],
"fileBlacklist": ["pkgs/top-level/all-packages.nix"]


@@ -4,7 +4,7 @@ matrix:
- os: linux
sudo: false
script:
- ./maintainers/scripts/travis-nox-review-pr.sh nixpkgs-verify nixpkgs-manual nixpkgs-tarball
- ./maintainers/scripts/travis-nox-review-pr.sh nixpkgs-verify nixpkgs-manual nixpkgs-tarball nixpkgs-unstable
- ./maintainers/scripts/travis-nox-review-pr.sh nixos-options nixos-manual
- os: linux
sudo: required


@@ -8,252 +8,295 @@
The nixpkgs repository has several utility functions to manipulate Nix expressions.
</para>
<section xml:id="sec-pkgs-overridePackages">
<title>pkgs.overridePackages</title>
<section xml:id="sec-overrides">
<title>Overriding</title>
<para>
This function inside the nixpkgs expression (<varname>pkgs</varname>)
can be used to override the set of packages itself.
</para>
<para>
Warning: this function is expensive and must not be used from within
the nixpkgs repository.
</para>
<para>
Example usage:
<programlisting>let
pkgs = import &lt;nixpkgs&gt; {};
newpkgs = pkgs.overridePackages (self: super: {
foo = super.foo.override { ... };
};
in ...</programlisting>
Sometimes one wants to override parts of
<literal>nixpkgs</literal>, e.g. derivation attributes, the results of
derivations or even the whole package set.
</para>
<para>
The resulting <varname>newpkgs</varname> will have the new <varname>foo</varname>
expression, and all other expressions depending on <varname>foo</varname> will also
use the new <varname>foo</varname> expression.
</para>
<section xml:id="sec-pkgs-overridePackages">
<title>pkgs.overridePackages</title>
<para>
The behavior of this function is similar to <link
linkend="sec-modify-via-packageOverrides">config.packageOverrides</link>.
</para>
<para>
The <varname>self</varname> parameter refers to the final package set with the
applied overrides. Using this parameter may lead to infinite recursion if not
used consciously.
</para>
<para>
The <varname>super</varname> parameter refers to the old package set.
It's equivalent to <varname>pkgs</varname> in the above example.
</para>
<para>
Note that in previous versions of nixpkgs, this method replaced any changes from <link
linkend="sec-modify-via-packageOverrides">config.packageOverrides</link>,
along with that from previous calls if this function was called repeatedly.
Now those previous changes will be preserved so this function can be "chained" meaningfully.
To recover the old behavior, make sure <varname>config.packageOverrides</varname> is unset,
and call this only once off a "freshly" imported nixpkgs:
<programlisting>let
pkgs = import &lt;nixpkgs&gt; { config: {}; };
newpkgs = pkgs.overridePackages ...;
in ...</programlisting>
</para>
</section>
<section xml:id="sec-pkg-override">
<title>&lt;pkg&gt;.override</title>
<para>
The function <varname>override</varname> is usually available for all the
derivations in the nixpkgs expression (<varname>pkgs</varname>).
</para>
<para>
It is used to override the arguments passed to a function.
</para>
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<programlisting>pkgs.overridePackages (self: super: {
foo = super.foo.override { barSupport = true; };
})</programlisting>
<programlisting>mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
};</programlisting>
</para>
<para>
In the first example, <varname>pkgs.foo</varname> is the result of a function call
with some default arguments, usually a derivation.
Using <varname>pkgs.foo.override</varname> will call the same function with
the given new arguments.
</para>
</section>
<section xml:id="sec-pkg-overrideAttrs">
<title>&lt;pkg&gt;.overrideAttrs</title>
<para>
The function <varname>overrideAttrs</varname> allows overriding the
attribute set passed to a <varname>stdenv.mkDerivation</varname> call,
producing a new derivation based on the original one.
This function is available on all derivations produced by the
<varname>stdenv.mkDerivation</varname> function, which is most packages
in the nixpkgs expression <varname>pkgs</varname>.
</para>
<para>
Example usage:
<programlisting>helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});</programlisting>
</para>
<para>
In the above example, the <varname>separateDebugInfo</varname> attribute is
overridden to be true, thus building debug info for
<varname>helloWithDebug</varname>, while all other attributes will be
retained from the original <varname>hello</varname> package.
</para>
<para>
The argument <varname>oldAttrs</varname> is conventionally used to refer to
the attr set originally passed to <varname>stdenv.mkDerivation</varname>.
</para>
<note>
<para>
Note that <varname>separateDebugInfo</varname> is processed only by the
<varname>stdenv.mkDerivation</varname> function, not the generated, raw
Nix derivation. Thus, using <varname>overrideDerivation</varname> will
not work in this case, as it overrides only the attributes of the final
derivation. It is for this reason that <varname>overrideAttrs</varname>
should be preferred in (almost) all cases to
<varname>overrideDerivation</varname>, i.e. to allow using
<varname>stdenv.mkDerivation</varname> to process input arguments, as well
as the fact that it is easier to use (you can use the same attribute
names you see in your Nix code, instead of the ones generated, e.g.
<varname>buildInputs</varname> vs <varname>nativeBuildInputs</varname>),
and involves less typing.
</para>
</note>
</section>
<section xml:id="sec-pkg-overrideDerivation">
<title>&lt;pkg&gt;.overrideDerivation</title>
<warning>
<para>You should prefer <varname>overrideAttrs</varname> in almost all
cases, see its documentation for the reasons why.
<varname>overrideDerivation</varname> is not deprecated and will continue
to work, but is less nice to use and does not have as many abilities as
<varname>overrideAttrs</varname>.
</para>
</warning>
<warning>
<para>Do not use this function in Nixpkgs as it evaluates a Derivation
before modifying it, which breaks package abstraction and removes
error-checking of function arguments. In addition, this
evaluation-per-function application incurs a performance penalty,
which can become a problem if many overrides are used.
It is only intended for ad-hoc customisation, such as in
<filename>~/.nixpkgs/config.nix</filename>.
</para>
</warning>
<para>
The function <varname>overrideDerivation</varname> creates a new derivation
based on an existing one by overriding the original's attributes with
the attribute set produced by the specified function.
This function is available on all
derivations defined using the <varname>makeOverridable</varname> function.
Most standard derivation-producing functions, such as
<varname>stdenv.mkDerivation</varname>, are defined using this
function, which means most packages in the nixpkgs expression,
<varname>pkgs</varname>, have this function.
</para>
<para>
Example usage:
<programlisting>mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});</programlisting>
</para>
<para>
In the above example, the <varname>name</varname>, <varname>src</varname>,
and <varname>patches</varname> of the derivation will be overridden, while
all other attributes will be retained from the original derivation.
</para>
<para>
The argument <varname>oldAttrs</varname> is used to refer to the attribute set of
the original derivation.
</para>
<note>
<para>
A package's attributes are evaluated *before* being modified by
the <varname>overrideDerivation</varname> function.
For example, the <varname>name</varname> attribute reference
in <varname>url = "mirror://gnu/hello/${name}.tar.gz";</varname>
is filled-in *before* the <varname>overrideDerivation</varname> function
modifies the attribute set. This means that overriding the
<varname>name</varname> attribute, in this example, *will not* change the
value of the <varname>url</varname> attribute. Instead, we need to override
both the <varname>name</varname> *and* <varname>url</varname> attributes.
</para>
</note>
</section>
<section xml:id="sec-pkg-override">
<title>&lt;pkg&gt;.override</title>
<para>
The function <varname>override</varname> is usually available for all the
derivations in the nixpkgs expression (<varname>pkgs</varname>).
</para>
<para>
It is used to override the arguments passed to a function.
</para>
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<programlisting>pkgs.overridePackages (self: super: {
foo = super.foo.override { barSupport = true ; };
})</programlisting>
<programlisting>mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
})</programlisting>
</para>
<para>
In the first example, <varname>pkgs.foo</varname> is the result of a function call
with some default arguments, usually a derivation.
Using <varname>pkgs.foo.override</varname> will call the same function with
the given new arguments.
</para>
</section>
<section xml:id="sec-pkg-overrideAttrs">
<title>&lt;pkg&gt;.overrideAttrs</title>
<para>
The function <varname>overrideAttrs</varname> allows overriding the
attribute set passed to a <varname>stdenv.mkDerivation</varname> call,
producing a new derivation based on the original one.
This function is available on all derivations produced by the
<varname>stdenv.mkDerivation</varname> function, which is most packages
in the nixpkgs expression <varname>pkgs</varname>.
</para>
<para>
Example usage:
<programlisting>helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});</programlisting>
</para>
<para>
In the above example, the <varname>separateDebugInfo</varname> attribute is
overriden to be true, thus building debug info for
<varname>helloWithDebug</varname>, while all other attributes will be
retained from the original <varname>hello</varname> package.
</para>
<para>
The argument <varname>oldAttrs</varname> is conventionally used to refer to
the attr set originally passed to <varname>stdenv.mkDerivation</varname>.
</para>
<note>
<para>
Note that <varname>separateDebugInfo</varname> is processed only by the
<varname>stdenv.mkDerivation</varname> function, not the generated, raw
Nix derivation. Thus, using <varname>overrideDerivation</varname> will
not work in this case, as it overrides only the attributes of the final
derivation. It is for this reason that <varname>overrideAttrs</varname>
should be preferred in (almost) all cases to
<varname>overrideDerivation</varname>, i.e. to allow using
<varname>sdenv.mkDerivation</varname> to process input arguments, as well
as the fact that it is easier to use (you can use the same attribute
names you see in your Nix code, instead of the ones generated (e.g.
<varname>buildInputs</varname> vs <varname>nativeBuildInputs</varname>,
and involves less typing.
</para>
</note>
</section>
<section xml:id="sec-pkg-overrideDerivation">
<title>&lt;pkg&gt;.overrideDerivation</title>
<warning>
<para>You should prefer <varname>overrideAttrs</varname> in almost all
cases, see its documentation for the reasons why.
<varname>overrideDerivation</varname> is not deprecated and will continue
to work, but is less nice to use and does not have as many abilities as
<varname>overrideAttrs</varname>.
</para>
</warning>
<warning>
<para>Do not use this function in Nixpkgs as it evaluates a Derivation
before modifying it, which breaks package abstraction and removes
error-checking of function arguments. In addition, this
evaluation-per-function application incurs a performance penalty,
which can become a problem if many overrides are used.
It is only intended for ad-hoc customisation, such as in
<filename>~/.nixpkgs/config.nix</filename>.
</para>
</warning>
<para>
The function <varname>overrideDerivation</varname> creates a new derivation
based on an existing one by overriding the original's attributes with
the attribute set produced by the specified function.
This function is available on all
derivations defined using the <varname>makeOverridable</varname> function.
Most standard derivation-producing functions, such as
<varname>stdenv.mkDerivation</varname>, are defined using this
function, which means most packages in the nixpkgs expression,
<varname>pkgs</varname>, have this function.
</para>
<para>
Example usage:
<programlisting>mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});</programlisting>
</para>
<para>
In the above example, the <varname>name</varname>, <varname>src</varname>,
and <varname>patches</varname> of the derivation will be overridden, while
all other attributes will be retained from the original derivation.
</para>
<para>
The argument <varname>oldAttrs</varname> is used to refer to the attribute set of
the original derivation.
</para>
<note>
<para>
A package's attributes are evaluated *before* being modified by
the <varname>overrideDerivation</varname> function.
For example, the <varname>name</varname> attribute reference
in <varname>url = "mirror://gnu/hello/${name}.tar.gz";</varname>
is filled-in *before* the <varname>overrideDerivation</varname> function
modifies the attribute set. This means that overriding the
<varname>name</varname> attribute, in this example, *will not* change the
value of the <varname>url</varname> attribute. Instead, we need to override
both the <varname>name</varname> *and* <varname>url</varname> attributes.
</para>
</note>
</section>
<section xml:id="sec-lib-makeOverridable">
<title>lib.makeOverridable</title>
<para>
The function <varname>lib.makeOverridable</varname> is used to make the result
of a function easily customizable. This utility only makes sense for functions
that accept an argument set and return an attribute set.
</para>
<para>
Example usage:
<programlisting>f = { a, b }: { result = a+b; }
c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</para>
<para>
The variable <varname>c</varname> is the value of the <varname>f</varname> function
applied with some default arguments. Hence the value of <varname>c.result</varname>
is <literal>3</literal>, in this example.
</para>
<para>
The variable <varname>c</varname> however also has some additional functions, like
<link linkend="sec-pkg-override">c.override</link> which can be used to
override the default arguments. In this example the value of
<varname>(c.override { a = 4; }).result</varname> is 6.
</para>
</section>
</section>
<section xml:id="sec-lib-makeOverridable">
<title>lib.makeOverridable</title>
<section xml:id="sec-generators">
<title>Generators</title>
<para>
The function <varname>lib.makeOverridable</varname> is used to make the result
of a function easily customizable. This utility only makes sense for functions
that accept an argument set and return an attribute set.
Generators are functions that create file formats from nix
data structures, e.g. for configuration files.
There are generators available for: <literal>INI</literal>,
<literal>JSON</literal> and <literal>YAML</literal>
</para>
<para>
Example usage:
<programlisting>f = { a, b }: { result = a+b; }
c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
All generators follow a similar call interface: <code>generatorName
configFunctions data</code>, where <literal>configFunctions</literal> is a
set of user-defined functions that format variable parts of the content.
They each have common defaults, so often they do not need to be set
manually. An example is <code>mkSectionName ? (name: libStr.escape [ "[" "]"
] name)</code> from the <literal>INI</literal> generator. It gets the name
of a section and returns a sanitized name. The default
<literal>mkSectionName</literal> escapes <literal>[</literal> and
<literal>]</literal> with a backslash.
</para>
<para>
The variable <varname>c</varname> is the value of the <varname>f</varname> function
applied with some default arguments. Hence the value of <varname>c.result</varname>
is <literal>3</literal>, in this example.
</para>
<note><para>Nix store paths can be converted to strings by enclosing a
derivation attribute like so: <code>"${drv}"</code>.</para></note>
<para>
The variable <varname>c</varname> however also has some additional functions, like
<link linkend="sec-pkg-override">c.override</link> which can be used to
override the default arguments. In this example the value of
<varname>(c.override { a = 4; }).result</varname> is 6.
Detailed documentation for each generator can be found in
<literal>lib/generators.nix</literal>.
</para>
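<para>
As a short sketch, adapted from the doc comments in
<literal>lib/generators.nix</literal>, an INI file can be generated from an
attrset of sections:
<programlisting>lib.generators.toINI {} {
foo = { hi = "${pkgs.hello}"; ciao = "bar"; };
baz = { "also, integers" = 42; };
}</programlisting>
This evaluates to a string with a <literal>[baz]</literal> and a
<literal>[foo]</literal> section, each containing its key-value lines.
</para>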
</section>
@@ -370,37 +413,37 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</section>
<section xml:id="sec-pkgs-dockerTools">
<title>pkgs.dockerTools</title>
<para>
<varname>pkgs.dockerTools</varname> is a set of functions for creating and
manipulating Docker images according to the
<link xlink:href="https://github.com/docker/docker/blob/master/image/spec/v1.md#docker-image-specification-v100">
Docker Image Specification v1.0.0
</link>. Docker itself is not used to perform any of the operations done by these
functions.
</para>
<warning>
<para>
The <varname>dockerTools</varname> API is unstable and may be subject to
backwards-incompatible changes in the future.
</para>
</warning>
<section xml:id="ssec-pkgs-dockerTools-buildImage">
<section xml:id="ssec-pkgs-dockerTools-buildImage">
<title>buildImage</title>
<para>
This function is analogous to the <command>docker build</command> command,
in that it can be used to build a Docker-compatible repository tarball containing
a single image with one or multiple layers. As such, the result
is suitable for being loaded in Docker with <command>docker load</command>.
</para>
<para>
The parameters of <varname>buildImage</varname> with relative example values are
described below:
</para>
<example xml:id='ex-dockerTools-buildImage'><title>Docker build</title>
@@ -408,11 +451,11 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${stdenv.shell}
@@ -431,131 +474,131 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</example>
<para>The above example will build a Docker image <literal>redis/latest</literal>
from the given base image. Loading and running this image in Docker results in
<literal>redis-server</literal> being started automatically.
</para>
<calloutlist>
<callout arearefs='ex-dockerTools-buildImage-1'>
<para>
<varname>name</varname> specifies the name of the resulting image.
This is the only required argument for <varname>buildImage</varname>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-2'>
<para>
<varname>tag</varname> specifies the tag of the resulting image.
By default it's <literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-3'>
<para>
<varname>fromImage</varname> is the repository tarball containing the base image.
It must be a valid Docker image, such as exported by <command>docker save</command>.
By default it's <literal>null</literal>, which can be seen as equivalent
to <literal>FROM scratch</literal> of a <filename>Dockerfile</filename>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-4'>
<para>
<varname>fromImageName</varname> can be used to further specify
the base image within the repository, in case it contains multiple images.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first image available
in the repository.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-5'>
<para>
<varname>fromImageTag</varname> can be used to further specify the tag
of the base image within the repository, in case an image contains multiple tags.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first tag available for the base image.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-6'>
<para>
<varname>contents</varname> is a derivation that will be copied in the new
layer of the resulting image. This can be similarly seen as
<command>ADD contents/ /</command> in a <filename>Dockerfile</filename>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-runAsRoot'>
<para>
<varname>runAsRoot</varname> is a bash script that will run as root
in an environment that overlays the existing layers of the base image with
the new resulting layer, including the previously copied
<varname>contents</varname> derivation.
This can be similarly seen as
<command>RUN ...</command> in a <filename>Dockerfile</filename>.
<note>
<para>
Using this parameter requires the <literal>kvm</literal>
device to be available.
</para>
</note>
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-8'>
<para>
<varname>config</varname> is used to specify the configuration of the
containers that will be started off the built image in Docker.
The available options are listed in the
<link xlink:href="https://github.com/docker/docker/blob/master/image/spec/v1.md#container-runconfig-field-descriptions">
Docker Image Specification v1.0.0
</link>.
</para>
</callout>
</calloutlist>
<para>
After the new layer has been created, its closure
(to which <varname>contents</varname>, <varname>config</varname> and
<varname>runAsRoot</varname> contribute) will be copied in the layer itself.
Only new dependencies that are not already in the existing layers will be copied.
</para>
<para>
At the end of the process, only one new single layer will be produced and
added to the resulting image.
</para>
<para>
The resulting repository will only list the single image
<varname>image/tag</varname>. In the case of <xref linkend='ex-dockerTools-buildImage'/>
it would be <varname>redis/latest</varname>.
</para>
<para>
It is possible to inspect the arguments with which an image was built
using its <varname>buildArgs</varname> attribute.
</para>
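<para>
For instance, a short sketch of using the result (the expression file
name here is hypothetical):
<programlisting>
$ nix-build redis-image.nix
$ docker load &lt; result
</programlisting>
</para>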
</section>
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry">
<title>pullImage</title>
<para>
This function is analogous to the <command>docker pull</command> command,
in that it can be used to fetch a Docker image from a Docker registry.
Currently only registry <literal>v1</literal> is supported.
By default <link xlink:href="https://hub.docker.com/">Docker Hub</link>
is used to pull images.
</para>
<para>
Its parameters are described in the example below:
</para>
<example xml:id='ex-dockerTools-pullImage'><title>Docker pull</title>
@@ -573,73 +616,73 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</example>
<calloutlist>
<callout arearefs='ex-dockerTools-pullImage-1'>
<para>
<varname>imageName</varname> specifies the name of the image to be downloaded,
which can also include the registry namespace (e.g. <literal>library/debian</literal>).
This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-2'>
<para>
<varname>imageTag</varname> specifies the tag of the image to be downloaded.
By default it's <literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-3'>
<para>
<varname>imageId</varname>, if specified, is the exact image to be fetched, instead
of <varname>imageName/imageTag</varname>. However, the resulting repository
will still be named <varname>imageName/imageTag</varname>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-4'>
<para>
<varname>sha256</varname> is the checksum of the whole fetched image.
This argument is required.
</para>
<note>
<para>The checksum is computed on the unpacked directory, not on the final tarball.</para>
</note>
</callout>
<callout arearefs='ex-dockerTools-pullImage-5'>
<para>
In the above example the default values are shown for the variables
<varname>indexUrl</varname> and <varname>registryVersion</varname>.
Hence by default the Docker.io registry is used to pull the images.
</para>
</callout>
</calloutlist>
</section>
<section xml:id="ssec-pkgs-dockerTools-exportImage">
<title>exportImage</title>
<para>
This function is analogous to the <command>docker export</command> command,
in that it can be used to flatten a Docker image that contains multiple layers.
It is in fact the result of the merge of all the layers of the image.
As such, the result is suitable for being imported in Docker
with <command>docker import</command>.
</para>
<note>
<para>
Using this function requires the <literal>kvm</literal>
device to be available.
</para>
</note>
<para>
The parameters of <varname>exportImage</varname> are the following:
</para>
<example xml:id='ex-dockerTools-exportImage'><title>Docker export</title>
@@ -648,35 +691,35 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
name = someLayeredImage.name;
}
</programlisting>
</example>
<para>
The parameters relative to the base image have the same synopsis as
described in <xref linkend='ssec-pkgs-dockerTools-buildImage'/>, except that
<varname>fromImage</varname> is the only required argument in this case.
</para>
<para>
The <varname>name</varname> argument is the name of the derivation output,
which defaults to <varname>fromImage.name</varname>.
</para>
</section>
<section xml:id="ssec-pkgs-dockerTools-shadowSetup">
<title>shadowSetup</title>
<para>
This constant string is a helper for setting up the base files for managing
users and groups, only if such files don't exist already.
It is suitable for being used in a
<varname>runAsRoot</varname> <xref linkend='ex-dockerTools-buildImage-runAsRoot'/> script for cases like
in the example below:
</para>
<example xml:id='ex-dockerTools-shadowSetup'><title>Shadow base files</title>
<programlisting>
buildImage {
@@ -695,13 +738,13 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</example>
<para>
Creating base files like <literal>/etc/passwd</literal> or
<literal>/etc/login.defs</literal> is necessary for shadow-utils to
manipulate users and groups.
</para>
</section>
</section>
</chapter>


@@ -248,7 +248,7 @@ $ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
development. Many times we need to create a
<literal>shell.nix</literal> file and do our development inside
of the environment specified by that file. This file looks a lot
like the packageing described above. The main difference is that
like the packaging described above. The main difference is that
<literal>src</literal> points to project root and we call the
package directly.
</para>


@@ -633,7 +633,7 @@ Now the build succeeds.
Of course, in the concrete example of `ghc-events` this whole exercise is not
an ideal solution, because `ghc-events` can analyze the output emitted by any
version of GHC later than 6.12 regardless of the compiler version that was used
to build the `ghc-events' executable, so strictly speaking there's no reason to
to build the `ghc-events` executable, so strictly speaking there's no reason to
prefer one built with GHC 7.8.x in the first place. However, for users who
cannot use GHC 7.10.x at all for some reason, the approach of downgrading to an
older version might be useful.


@@ -97,7 +97,7 @@ We will first have a look at how Python packages are packaged on Nix. Then, we w
#### Python packaging on Nix
On Nix all packages are built by functions. The main function in Nix for building Python packages is [`buildPythonPackage`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/python-modules/generic/default.nix).
On Nix all packages are built by functions. The main function in Nix for building Python packages is [`buildPythonPackage`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/interpreters/python/build-python-package.nix).
Let's see how we would build the `toolz` package. According to [`python-packages.nix`](https://raw.githubusercontent.com/NixOS/nixpkgs/master/pkgs/top-level/python-packages.nix) `toolz` is built using
```nix
@@ -141,13 +141,15 @@ with import <nixpkgs> {};
pkgs.python35Packages.buildPythonPackage rec {
name = "toolz-${version}";
version = "0.7.4";
version = "0.8.0";
src = pkgs.fetchurl{
url = "mirror://pypi/t/toolz/toolz-${version}.tar.gz";
sha256 = "43c2c9e5e7a16b6c88ba3088a9bfc82f7db8e13378be7c78d6c14a5f8ed05afd";
sha256 = "e8451af61face57b7c5d09e71c0d27b8005f001ead56e9fdf470417e5cc6d479";
};
doCheck = false;
meta = {
homepage = "http://github.com/pytoolz/toolz/";
description = "List processing tools and functional utilities";
@@ -170,18 +172,18 @@ with import <nixpkgs> {};
( let
toolz = pkgs.python35Packages.buildPythonPackage rec {
name = "toolz-${version}";
version = "0.7.4";
version = "0.8.0";
src = pkgs.fetchurl{
url = "mirror://pypi/t/toolz/toolz-${version}.tar.gz";
sha256 = "43c2c9e5e7a16b6c88ba3088a9bfc82f7db8e13378be7c78d6c14a5f8ed05afd";
sha256 = "e8451af61face57b7c5d09e71c0d27b8005f001ead56e9fdf470417e5cc6d479";
};
doCheck = false;
meta = {
homepage = "http://github.com/pytoolz/toolz/";
description = "List processing tools and functional utilities";
license = licenses.bsd3;
maintainers = with maintainers; [ fridh ];
};
};
@@ -308,11 +310,10 @@ Note also the line `doCheck = false;`, we explicitly disabled running the test-s
#### Develop local package
As a Python developer you're likely aware of [development mode](http://pythonhosted.org/setuptools/setuptools.html#development-mode) (`python setup.py develop`);
As a Python developer you're likely aware of [development mode](http://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode) (`python setup.py develop`);
instead of installing the package this command creates a special link to the project code.
That way, you can run updated code without having to reinstall after each and every change you make.
Development mode is also available on Nix as [explained](http://nixos.org/nixpkgs/manual/#ssec-python-development) in the Nixpkgs manual.
Let's see how you can use it.
Development mode is also available. Let's see how you can use it.
In the previous Nix expression the source was fetched from a URL. We can also refer to a local source instead using
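
a path for `src`. A minimal sketch (the path is hypothetical):

```nix
src = ./path/to/source;
```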
@@ -409,10 +410,10 @@ and in this case the `python35` interpreter is automatically used.
### Interpreters
Versions 2.6, 2.7, 3.3, 3.4 and 3.5 of the CPython interpreter are as respectively
Versions 2.6, 2.7, 3.3, 3.4 and 3.5 of the CPython interpreter are available as respectively
`python26`, `python27`, `python33`, `python34` and `python35`. The PyPy interpreter
is available as `pypy`. The aliases `python2` and `python3` correspond to respectively `python27` and
`python35`. The default interpreter, `python`, maps to `python3`.
`python35`. The default interpreter, `python`, maps to `python2`.
The Nix expressions for the interpreters can be found in
`pkgs/development/interpreters/python`.
@@ -434,17 +435,20 @@ Each interpreter has the following attributes:
- `withPackages`. Simpler interface to `buildEnv`. See section *python.withPackages function* for usage and documentation, and the short sketch after this list.
- `sitePackages`. Alias for `lib/${libPrefix}/site-packages`.
- `executable`. Name of the interpreter executable, e.g. `python3.4`.
- `pkgs`. Set of Python packages for that specific interpreter. The package set can be modified by overriding the interpreter and passing `packageOverrides`.
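
As a short sketch of the `withPackages` attribute mentioned above (the package choice is arbitrary), the following expression builds an environment containing the interpreter together with the listed packages:

```nix
with import <nixpkgs> {};

python35.withPackages (ps: [ ps.numpy ps.toolz ])
```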
### Building packages and applications
Python packages (libraries) and applications that use `setuptools` or
`distutils` are typically built with respectively the `buildPythonPackage` and
`buildPythonApplication` functions.
Python libraries and applications that use `setuptools` or
`distutils` are typically built with respectively the `buildPythonPackage` and
`buildPythonApplication` functions. These two functions also support installing a `wheel`.
All Python packages reside in `pkgs/top-level/python-packages.nix` and all
applications elsewhere. Some packages are also defined in
applications elsewhere. In case a package is used as both a library and an application,
then the package should be in `pkgs/top-level/python-packages.nix` since only those packages are made
available for all interpreter versions. The preferred location for library expressions is in
`pkgs/development/python-modules`. It is important that these packages are
called in `pkgs/top-level/python-packages.nix` and not elsewhere, to guarantee
called from `pkgs/top-level/python-packages.nix` and not elsewhere, to guarantee
the right version of the package is built.
Based on the packages defined in `pkgs/top-level/python-packages.nix` an
@@ -462,14 +466,14 @@ and the aliases
* `pkgs.python2Packages` pointing to `pkgs.python27Packages`
* `pkgs.python3Packages` pointing to `pkgs.python35Packages`
* `pkgs.pythonPackages` pointing to `pkgs.python3Packages`
* `pkgs.pythonPackages` pointing to `pkgs.python2Packages`
#### `buildPythonPackage` function
The `buildPythonPackage` function is implemented in
`pkgs/development/interpreters/python/build-python-package.nix`
and can be used as:
The following is an example:
twisted = buildPythonPackage {
name = "twisted-8.1.0";
@@ -520,7 +524,7 @@ All parameters from `mkDerivation` function are still supported.
* `postShellHook`: Hook to execute commands after `shellHook`.
* `makeWrapperArgs`: A list of strings. Arguments to be passed to `makeWrapper`, which wraps generated binaries. By default, the arguments to `makeWrapper` set `PATH` and `PYTHONPATH` environment variables before calling the binary. Additional arguments here can allow a developer to set environment variables which will be available when the binary is run. For example, `makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]`.
* `installFlags`: A list of strings. Arguments to be passed to `pip install`. To pass options to `python setup.py install`, use `--install-option`. E.g., `installFlags=["--install-option='--cpp_implementation'"]`.
* `format`: Format of the source. Options are `setup` for when the source has a `setup.py` and `setuptools` is used to build a wheel, and `wheel` in case the source is already a binary wheel. The default value is `setup`.
* `format`: Format of the source. Valid options are `setuptools` (default), `flit`, `wheel`, and `other`. `setuptools` is for when the source has a `setup.py` and `setuptools` is used to build a wheel, `flit` in case `flit` should be used to build a wheel, and `wheel` in case a wheel is provided. In case you need to provide your own `buildPhase` and `installPhase` you can use `other`. A short sketch of the `wheel` case follows this list.
* `catchConflicts` If `true`, abort package build if a package name appears more than once in dependency tree. Default is `true`.
* `checkInputs` Dependencies needed for running the `checkPhase`. These are added to `buildInputs` when `doCheck = true`.
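
As a short, hypothetical sketch of the `format` parameter (package name, URL and hash are placeholders, not a real package):

```nix
with import <nixpkgs> {};

python35Packages.buildPythonPackage {
  name = "example-1.0";
  format = "wheel";  # the source below is already a binary wheel, so no build step is run
  src = fetchurl {
    # placeholders only, for illustration
    url = "https://example.org/example-1.0-py2.py3-none-any.whl";
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };
}
```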
@@ -697,59 +701,55 @@ should also be done when packaging `A`.
### How to override a Python package?
Recursively updating a package can be done with `pkgs.overridePackages` as explained in the Nixpkgs manual.
Python attribute sets are created for each interpreter version. We will therefore override the attribute set for the interpreter version we're interested in.
In the following example we change the name of the package `pandas` to `foo`.
```
newpkgs = pkgs.overridePackages(self: super: rec {
python35Packages = (super.python35Packages.override { self = python35Packages;})
// { pandas = super.python35Packages.pandas.override {name = "foo";};
};
});
```
This can be tested with
```
We can override the interpreter and pass `packageOverrides`.
In the following example we rename the `pandas` package and build it.
```nix
with import <nixpkgs> {};
(let
let
python = let
packageOverrides = self: super: {
pandas = super.pandas.override {name="foo";};
};
in pkgs.python35.override {inherit packageOverrides;};
newpkgs = pkgs.overridePackages(self: super: rec {
python35Packages = (super.python35Packages.override { self = python35Packages;})
// { pandas = super.python35Packages.pandas.override {name = "foo";};
};
});
in newpkgs.python35.withPackages (ps: [ps.blaze])
).env
```
A typical use case is to switch to another version of a certain package. For example, in the Nixpkgs repository we have multiple versions of `django` and `scipy`.
In the following example we use a different version of `scipy`. All packages in `newpkgs` will now use the updated `scipy` version.
in python.pkgs.pandas
```
Using `nix-build` on this expression will build the package `pandas`
but with the new name `foo`.
All packages in the package set will use the renamed package.
A typical use case is to switch to another version of a certain package.
For example, in the Nixpkgs repository we have multiple versions of `django` and `scipy`.
In the following example we use a different version of `scipy` and create an environment that uses it.
All packages in the Python package set will now use the updated `scipy` version.
```nix
with import <nixpkgs> {};
(let
newpkgs = pkgs.overridePackages(self: super: rec {
python35Packages = super.python35Packages.override {
self = python35Packages // { scipy = python35Packages.scipy_0_17;};
(
let
packageOverrides = self: super: {
scipy = super.scipy_0_17;
};
});
in newpkgs.python35.withPackages (ps: [ps.blaze])
in (pkgs.python35.override {inherit packageOverrides;}).withPackages (ps: [ps.blaze])
).env
```
The requested package `blaze` depends upon `pandas` which itself depends on `scipy`.
The requested package `blaze` depends on `pandas` which itself depends on `scipy`.
A similar example but now using `django`
If you want the whole of Nixpkgs to use your modifications, then you can use `pkgs.overridePackages`
as explained in this manual. In the following example we build `inkscape` using a different version of `numpy`.
```
with import <nixpkgs> {};
(let
newpkgs = pkgs.overridePackages(self: super: rec {
python27Packages = (super.python27Packages.override {self = python27Packages;})
// { django = super.python27Packages.django_1_9; };
});
in newpkgs.python27.withPackages (ps: [ps.django_guardian ])
).env
let
pkgs = import <nixpkgs> {};
newpkgs = pkgs.overridePackages ( pkgsself: pkgssuper: {
python27 = let
packageOverrides = self: super: {
numpy = super.numpy_1_10;
};
in pkgssuper.python27.override {inherit packageOverrides;};
} );
in newpkgs.inkscape
```
### `python setup.py bdist_wheel` cannot create .whl
@@ -770,9 +770,9 @@ or the current time:
nix-shell --run "SOURCE_DATE_EPOCH=$(date +%s) python3 setup.py bdist_wheel"
```
or unset:
"""
```
nix-shell --run "unset SOURCE_DATE_EPOCH; python3 setup.py bdist_wheel"
"""
```
### `install_data` / `data_files` problems


@@ -20,6 +20,7 @@
<xi:include href="package-notes.xml" />
<xi:include href="coding-conventions.xml" />
<xi:include href="submitting-changes.xml" />
<xi:include href="reviewing-contributions.xml" />
<xi:include href="contributing.xml" />
</book>


@@ -382,4 +382,138 @@ it. Place the resulting <filename>package.nix</filename> file into
</section>
<section xml:id="sec-steam">
<title>Steam</title>
<section xml:id="sec-steam-nix">
<title>Steam in Nix</title>
<para>
Steam is distributed as a <filename>.deb</filename> file, for now only
as an i686 package (the amd64 package only has documentation).
When unpacked, it has a script called <filename>steam</filename> that
in Ubuntu (their target distro) would go to <filename>/usr/bin
</filename>. When run for the first time, this script copies some
files to the user's home, which include another script that is
ultimately responsible for launching the steam binary, which is also
in $HOME.
</para>
<para>
Nix problems and constraints:
<itemizedlist>
<listitem><para>We don't have <filename>/bin/bash</filename> and many
scripts point there. Similarly for <filename>/usr/bin/python</filename>.</para></listitem>
<listitem><para>We don't have the dynamic loader in <filename>/lib
</filename>.</para></listitem>
<listitem><para>The <filename>steam.sh</filename> script in $HOME cannot
be patched, as it is checked and rewritten by steam.</para></listitem>
<listitem><para>The steam binary cannot be patched, it's also checked.</para></listitem>
</itemizedlist>
</para>
<para>
The current approach to deploy Steam in NixOS is composing an FHS-compatible
chroot environment, as documented
<link xlink:href="http://sandervanderburg.blogspot.nl/2013/09/composing-fhs-compatible-chroot.html">here</link>.
This allows us to have binaries in the expected paths without disrupting the system,
and to avoid patching them to work in a non-FHS environment.
</para>
</section>
<section xml:id="sec-steam-play">
<title>How to play</title>
<para>
For 64-bit systems it's important to have
<programlisting>hardware.opengl.driSupport32Bit = true;</programlisting>
in your <filename>/etc/nixos/configuration.nix</filename>. You'll also need
<programlisting>hardware.pulseaudio.support32Bit = true;</programlisting>
if you are using PulseAudio; this will enable 32-bit ALSA apps integration.
To use the Steam controller, you need to add
<programlisting>services.udev.extraRules = ''
SUBSYSTEM=="usb", ATTRS{idVendor}=="28de", MODE="0666"
KERNEL=="uinput", MODE="0660", GROUP="users", OPTIONS+="static_node=uinput"
'';</programlisting>
to your configuration.
</para>
</section>
<section xml:id="sec-steam-troub">
<title>Troubleshooting</title>
<para>
<variablelist>
<varlistentry>
<term>Steam fails to start. What do I do?</term>
<listitem><para>Try to run
<programlisting>strace steam</programlisting>
to see what is causing steam to fail.</para></listitem>
</varlistentry>
<varlistentry>
<term>Using the FOSS Radeon drivers</term>
<listitem><itemizedlist><listitem><para>
The open source radeon drivers need a newer libc++ than is provided
by the default runtime, which leads to a crash on launch. Use
<programlisting>environment.systemPackages = [(pkgs.steam.override { newStdcpp = true; })];</programlisting>
in your config if you get an error like
<programlisting>
libGL error: unable to load driver: radeonsi_dri.so
libGL error: driver pointer missing
libGL error: failed to load driver: radeonsi
libGL error: unable to load driver: swrast_dri.so
libGL error: failed to load driver: swrast</programlisting></para></listitem>
<listitem><para>
Steam ships statically linked with a version of libcrypto that
conflicts with the one dynamically loaded by radeonsi_dri.so.
If you get the error
<programlisting>steam.sh: line 713: 7842 Segmentation fault (core dumped)</programlisting>
have a look at <link xlink:href="https://github.com/NixOS/nixpkgs/pull/20269">this pull request</link>.
</para></listitem>
</itemizedlist></listitem></varlistentry>
<varlistentry>
<term>Java</term>
<listitem><orderedlist>
<listitem><para>
There is no Java in the Steam chrootenv by default. If you get a message like
<programlisting>/home/foo/.local/share/Steam/SteamApps/common/towns/towns.sh: line 1: java: command not found</programlisting>
you need to add
<programlisting> steam.override { withJava = true; };</programlisting>
to your configuration.
</para></listitem>
</orderedlist></listitem></varlistentry>
</variablelist>
</para>
</section>
<section xml:id="sec-steam-run">
<title>steam-run</title>
<para>
The FHS-compatible chroot used for Steam can also be used to run
other Linux games that expect an FHS environment.
To do so, add
<programlisting>(pkgs.steam.override {
nativeOnly = true;
newStdcpp = true;
}).run</programlisting>
to your configuration, rebuild, and run the game with
<programlisting>steam-run ./foo</programlisting>
</para>
</section>
</section>
</chapter>


@@ -391,7 +391,7 @@ rec {
);
in f [] [rhs lhs];
/* A recursive variant of the update operator //. The recusion
/* A recursive variant of the update operator //. The recursion
stops when one of the attribute values is not an attribute set,
in which case the right hand side value takes precedence over the
left hand side value.
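For example (a short sketch of the semantics):
recursiveUpdate { a = { b = 1; c = 2; }; } { a = { b = 3; }; }
evaluates to { a = { b = 3; c = 2; }; }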


@@ -27,6 +27,7 @@ let
# misc
debug = import ./debug.nix;
generators = import ./generators.nix;
misc = import ./deprecated.nix;
# domain-specific
@@ -39,7 +40,7 @@ in
customisation maintainers meta sources
modules options types
licenses platforms systems
debug misc
debug generators misc
sandbox fetchers;
}
# !!! don't include everything at top-level; perhaps only the most

lib/generators.nix (new file, 93 lines)

@@ -0,0 +1,93 @@
/* Functions that generate widespread file
* formats from nix data structures.
*
* They all follow a similar interface:
* generator { config-attrs } data
*
* Tests can be found in ./tests.nix
* Documentation in the manual, #sec-generators
*/
with import ./trivial.nix;
let
libStr = import ./strings.nix;
libAttr = import ./attrsets.nix;
flipMapAttrs = flip libAttr.mapAttrs;
in
rec {
/* Generate a line of key k and value v, separated by
* character sep. If sep appears in k, it is escaped.
* Helper for syntaxes with different separators.
*
* mkKeyValueDefault ":" "f:oo" "bar"
* > "f\:oo:bar"
*/
mkKeyValueDefault = sep: k: v:
"${libStr.escape [sep] k}${sep}${toString v}";
/* Generate a key-value-style config file from an attrset.
*
* mkKeyValue is the same as in toINI.
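*
* A short usage sketch:
*
* toKeyValue {} { foo = "bar"; xyz = 42; }
* > "foo=bar\nxyz=42\n"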
*/
toKeyValue = {
mkKeyValue ? mkKeyValueDefault "="
}: attrs:
let mkLine = k: v: mkKeyValue k v + "\n";
in libStr.concatStrings (libAttr.mapAttrsToList mkLine attrs);
/* Generate an INI-style config file from an
* attrset of sections to an attrset of key-value pairs.
*
* generators.toINI {} {
* foo = { hi = "${pkgs.hello}"; ciao = "bar"; };
* baz = { "also, integers" = 42; };
* }
*
*> [baz]
*> also, integers=42
*>
*> [foo]
*> ciao=bar
*> hi=/nix/store/y93qql1p5ggfnaqjjqhxcw0vqw95rlz0-hello-2.10
*
* The mk* configuration attributes can generically change
* the way sections and key-value strings are generated.
*
* For more examples see the test cases in ./tests.nix.
*/
toINI = {
# apply transformations (e.g. escapes) to section names
mkSectionName ? (name: libStr.escape [ "[" "]" ] name),
# format a setting line from key and value
mkKeyValue ? mkKeyValueDefault "="
}: attrsOfAttrs:
let
# map function to string for each key val
mapAttrsToStringsSep = sep: mapFn: attrs:
libStr.concatStringsSep sep
(libAttr.mapAttrsToList mapFn attrs);
mkSection = sectName: sectValues: ''
[${mkSectionName sectName}]
'' + toKeyValue { inherit mkKeyValue; } sectValues;
in
# map input to ini sections
mapAttrsToStringsSep "\n" mkSection attrsOfAttrs;
/* Generates JSON from an arbitrary (non-function) value.
* For more information see the documentation of the builtin.
*/
toJSON = {}: builtins.toJSON;
/* YAML has been a strict superset of JSON since 1.2, so we
* use toJSON. Before 1.2 it only had a few differences relating
* to implicit typing rules, so it should work with older
* parsers as well.
*/
toYAML = {}@args: toJSON args;
}
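
A minimal usage sketch of the new generators (assuming <nixpkgs> points at a
tree containing this lib; the attribute names are illustrative):

    let
      lib = import <nixpkgs/lib>;
    in lib.generators.toINI {} {
      main = { answer = 42; greeting = "hello"; };
    }
    # evaluates to (attributes are emitted in alphabetical order):
    #   [main]
    #   answer=42
    #   greeting=hello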


@ -48,6 +48,7 @@
aske = "Kirill Boltaev <aske@fmap.me>";
asppsa = "Alastair Pharo <asppsa@gmail.com>";
astsmtl = "Alexander Tsamutali <astsmtl@yandex.ru>";
asymmetric = "Lorenzo Manacorda <lorenzo@mailbox.org>";
aszlig = "aszlig <aszlig@redmoonstudios.org>";
auntie = "Jonathan Glines <auntieNeo@gmail.com>";
avnik = "Alexander V. Nikolaev <avn@avnik.info>";
@ -88,6 +89,7 @@
chris-martin = "Chris Martin <ch.martin@gmail.com>";
chrisjefferson = "Christopher Jefferson <chris@bubblescope.net>";
christopherpoole = "Christopher Mark Poole <mail@christopherpoole.net>";
ckampka = "Christian Kampka <christian@kampka.net>";
cko = "Christine Koppelt <christine.koppelt@gmail.com>";
cleverca22 = "Michael Bishop <cleverca22@gmail.com>";
cmcdragonkai = "Roger Qiu <roger.qiu@matrix.ai>";
@ -116,6 +118,7 @@
deepfire = "Kosyrev Serge <_deepfire@feelingofgreen.ru>";
demin-dmitriy = "Dmitriy Demin <demindf@gmail.com>";
DerGuteMoritz = "Moritz Heidkamp <moritz@twoticketsplease.de>";
DerTim1 = "Tim Digel <tim.digel@active-group.de>";
desiderius = "Didier J. Devroye <didier@devroye.name>";
devhell = "devhell <\"^\"@regexmail.net>";
dezgeg = "Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>";
@ -128,6 +131,8 @@
doublec = "Chris Double <chris.double@double.co.nz>";
drets = "Dmytro Rets <dmitryrets@gmail.com>";
drewkett = "Andrew Burkett <burkett.andrew@gmail.com>";
dtzWill = "Will Dietz <nix@wdtz.org>";
e-user = "Alexander Kahl <nixos@sodosopa.io>";
ebzzry = "Rommel Martinez <ebzzry@gmail.com>";
ederoyd46 = "Matthew Brown <matt@ederoyd.co.uk>";
eduarrrd = "Eduard Bachmakov <e.bachmakov@gmail.com>";
@ -137,6 +142,7 @@
ehmry = "Emery Hemingway <emery@vfemail.net>";
eikek = "Eike Kettner <eike.kettner@posteo.de>";
elasticdog = "Aaron Bull Schaefer <aaron@elasticdog.com>";
eleanor = "Dejan Lukan <dejan@proteansec.com>";
elitak = "Eric Litak <elitak@gmail.com>";
ellis = "Ellis Whitehead <nixos@ellisw.net>";
epitrochoid = "Mabry Cervin <mpcervin@uncg.edu>";
@ -182,6 +188,7 @@
gridaphobe = "Eric Seidel <eric@seidel.io>";
guibert = "David Guibert <david.guibert@gmail.com>";
guillaumekoenig = "Guillaume Koenig <guillaume.edward.koenig@gmail.com>";
guyonvarch = "Joris Guyonvarch <joris@guyonvarch.me>";
hakuch = "Jesse Haber-Kucharsky <hakuch@gmail.com>";
havvy = "Ryan Scheel <ryan.havvy@gmail.com>";
hbunke = "Hendrik Bunke <bunke.hendrik@gmail.com>";
@ -197,8 +204,10 @@
jagajaga = "Arseniy Seroka <ars.seroka@gmail.com>";
javaguirre = "Javier Aguirre <contacto@javaguirre.net>";
jb55 = "William Casarin <bill@casarin.me>";
jbedo = "Justin Bedő <cu@cua0.org>";
jcumming = "Jack Cummings <jack@mudshark.org>";
jefdaj = "Jeffrey David Johnson <jefdaj@gmail.com>";
jerith666 = "Matt McHenry <github@matt.mchenryfamily.org>";
jfb = "James Felix Black <james@yamtime.com>";
jgeerds = "Jascha Geerds <jascha@jgeerds.name>";
jgillich = "Jakob Gillich <jakob@gillich.me>";
@ -235,6 +244,7 @@
leonardoce = "Leonardo Cecchi <leonardo.cecchi@gmail.com>";
lethalman = "Luca Bruno <lucabru@src.gnome.org>";
lewo = "Antoine Eiche <lewo@abesis.fr>";
lheckemann = "Linus Heckemann <git@sphalerite.org>";
lhvwb = "Nathaniel Baxter <nathaniel.baxter@gmail.com>";
lihop = "Leroy Hopson <nixos@leroy.geek.nz>";
linquize = "Linquize <linquize@yahoo.com.hk>";
@ -266,12 +276,14 @@
matthiasbeyer = "Matthias Beyer <mail@beyermatthias.de>";
maurer = "Matthew Maurer <matthew.r.maurer+nix@gmail.com>";
mbakke = "Marius Bakke <mbakke@fastmail.com>";
mbbx6spp = "Susan Potter <me@susanpotter.net>";
mbe = "Brandon Edens <brandonedens@gmail.com>";
mboes = "Mathieu Boespflug <mboes@tweag.net>";
mcmtroffaes = "Matthias C. M. Troffaes <matthias.troffaes@gmail.com>";
mdaiter = "Matthew S. Daiter <mdaiter8121@gmail.com>";
meditans = "Carlo Nucera <meditans@gmail.com>";
meisternu = "Matt Miemiec <meister@krutt.org>";
mguentner = "Maximilian Güntner <code@klandest.in>";
mic92 = "Jörg Thalheim <joerg@higgsboson.tk>";
michaelpj = "Michael Peyton Jones <michaelpj@gmail.com>";
michalrus = "Michal Rus <m@michalrus.com>";
@ -281,9 +293,11 @@
mingchuan = "Ming Chuan <ming@culpring.com>";
mirdhyn = "Merlin Gaillard <mirdhyn@gmail.com>";
mirrexagon = "Andrew Abbott <mirrexagon@mirrexagon.com>";
mjanczyk = "Marcin Janczyk <m@dragonvr.pl>";
mlieberman85 = "Michael Lieberman <mlieberman85@gmail.com>";
modulistic = "Pablo Costa <modulistic@gmail.com>";
mog = "Matthew O'Gorman <mog-lists@rldn.net>";
montag451 = "montag451 <montag451@laposte.net>";
moosingin3space = "Nathan Moos <moosingin3space@gmail.com>";
moretea = "Maarten Hoogendoorn <maarten@moretea.nl>";
mornfall = "Petr Ročkai <me@mornfall.net>";
@ -316,6 +330,7 @@
ocharles = "Oliver Charles <ollie@ocharles.org.uk>";
odi = "Oliver Dunkl <oliver.dunkl@gmail.com>";
offline = "Jaka Hudoklin <jakahudoklin@gmail.com>";
okasu = "Okasu <oka.sux@gmail.com>";
olcai = "Erik Timan <dev@timan.info>";
olejorgenb = "Ole Jørgen Brønner <olejorgenb@yahoo.no>";
orbekk = "KJ Ørbekk <kjetil.orbekk@gmail.com>";
@ -329,6 +344,7 @@
palo = "Ingolf Wanger <palipalo9@googlemail.com>";
pashev = "Igor Pashev <pashev.igor@gmail.com>";
pawelpacana = "Paweł Pacana <pawel.pacana@gmail.com>";
periklis = "theopompos@gmail.com";
pesterhazy = "Paulus Esterhazy <pesterhazy@gmail.com>";
peterhoeg = "Peter Hoeg <peter@hoeg.com>";
peti = "Peter Simons <simons@cryp.to>";
@ -372,6 +388,7 @@
retrry = "Tadas Barzdžius <retrry@gmail.com>";
rick68 = "Wei-Ming Yang <rick68@gmail.com>";
rickynils = "Rickard Nilsson <rickynils@gmail.com>";
rlupton20 = "Richard Lupton <richard.lupton@gmail.com>";
rnhmjoj = "Michele Guerini Rocco <micheleguerinirocco@me.com>";
rob = "Rob Vermaas <rob.vermaas@gmail.com>";
robberer = "Longrin Wischnewski <robberer@freakmail.de>";
@ -458,6 +475,7 @@
vbgl = "Vincent Laporte <Vincent.Laporte@gmail.com>";
vbmithr = "Vincent Bernardoff <vb@luminar.eu.org>";
vcunat = "Vladimír Čunát <vcunat@gmail.com>";
vdemeester = "Vincent Demeester <vincent@sbr.pm>";
veprbl = "Dmitry Kalinkin <veprbl@gmail.com>";
viric = "Lluís Batlle i Rossell <viric@viric.name>";
vizanto = "Danny Wilson <danny@prime.vc>";
@ -465,6 +483,7 @@
vlstill = "Vladimír Štill <xstill@fi.muni.cz>";
vmandela = "Venkateswara Rao Mandela <venkat.mandela@gmail.com>";
volhovm = "Mikhail Volkhov <volhovm.cs@gmail.com>";
volth = "Jaroslavas Pocepko <jaroslavas@volth.com>";
vozz = "Oliver Hunt <oliver.huntuk@gmail.com>";
vrthra = "Rahul Gopinath <rahul@gopinath.org>";
wedens = "wedens <kirill.wedens@gmail.com>";


@ -375,10 +375,13 @@ rec {
if def._type or "" == "merge" then
concatMap dischargeProperties def.contents
else if def._type or "" == "if" then
if def.condition then
dischargeProperties def.content
if isBool def.condition then
if def.condition then
dischargeProperties def.content
else
[ ]
else
[ ]
throw "mkIf called with a non-Boolean condition"
else
[ def ];
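
For illustration, a typical guard that satisfies the new check (the option
path is a placeholder):

    config = mkIf config.services.foo.enable {
      environment.systemPackages = [ pkgs.foo ];
    };
    # Passing a non-Boolean condition, e.g. mkIf "yes" { ... }, now aborts
    # with "mkIf called with a non-Boolean condition" rather than a less
    # helpful type error deep inside module evaluation.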


@ -12,19 +12,19 @@ rec {
# Bring in a path as a source, filtering out all Subversion and CVS
# directories, as well as backup files (*~).
cleanSource =
let filter = name: type: let baseName = baseNameOf (toString name); in ! (
# Filter out Subversion and CVS directories.
(type == "directory" && (baseName == ".git" || baseName == ".svn" || baseName == "CVS" || baseName == ".hg")) ||
# Filter out backup files.
lib.hasSuffix "~" baseName ||
# Filter out generated files.
lib.hasSuffix ".o" baseName ||
lib.hasSuffix ".so" baseName ||
# Filter out nix-build result symlinks
(type == "symlink" && lib.hasPrefix "result" baseName)
);
in src: builtins.filterSource filter src;
cleanSourceFilter = name: type: let baseName = baseNameOf (toString name); in ! (
# Filter out Subversion and CVS directories.
(type == "directory" && (baseName == ".git" || baseName == ".svn" || baseName == "CVS" || baseName == ".hg")) ||
# Filter out backup files.
lib.hasSuffix "~" baseName ||
# Filter out generated files.
lib.hasSuffix ".o" baseName ||
lib.hasSuffix ".so" baseName ||
# Filter out nix-build result symlinks
(type == "symlink" && lib.hasPrefix "result" baseName)
);
cleanSource = builtins.filterSource cleanSourceFilter;
# Get all files ending with the specified suffices from the given
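
With the filter exposed on its own, it can be composed with project-specific
rules; a small sketch (the extra predicate is illustrative):

    src = builtins.filterSource
      (name: type:
        lib.cleanSourceFilter name type
        # additionally drop a hypothetical local scratch directory
        && baseNameOf (toString name) != "scratch")
      ./.;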


@ -130,4 +130,94 @@ runTests {
expected = false;
};
/* Generator tests */
# these tests assume attributes are converted to lists
# in alphabetical order
testMkKeyValueDefault = {
expr = generators.mkKeyValueDefault ":" "f:oo" "bar";
expected = ''f\:oo:bar'';
};
testToKeyValue = {
expr = generators.toKeyValue {} {
key = "value";
"other=key" = "baz";
};
expected = ''
key=value
other\=key=baz
'';
};
testToINIEmpty = {
expr = generators.toINI {} {};
expected = "";
};
testToINIEmptySection = {
expr = generators.toINI {} { foo = {}; bar = {}; };
expected = ''
[bar]
[foo]
'';
};
testToINIDefaultEscapes = {
expr = generators.toINI {} {
"no [ and ] allowed unescaped" = {
"and also no = in keys" = 42;
};
};
expected = ''
[no \[ and \] allowed unescaped]
and also no \= in keys=42
'';
};
testToINIDefaultFull = {
expr = generators.toINI {} {
"section 1" = {
attribute1 = 5;
x = "Me-se JarJar Binx";
};
"foo[]" = {
"he\\h=he" = "this is okay";
};
};
expected = ''
[foo\[\]]
he\h\=he=this is okay
[section 1]
attribute1=5
x=Me-se JarJar Binx
'';
};
/* right now only invocation check */
testToJSONSimple =
let val = {
foobar = [ "baz" 1 2 3 ];
};
in {
expr = generators.toJSON {} val;
# trivial implementation
expected = builtins.toJSON val;
};
/* right now only invocation check */
testToYAMLSimple =
let val = {
list = [ { one = 1; } { two = 2; } ];
all = 42;
};
in {
expr = generators.toYAML {} val;
# trivial implementation
expected = builtins.toJSON val;
};
}


@ -138,7 +138,4 @@ rec {
*/
warn = msg: builtins.trace "WARNING: ${msg}";
info = msg: builtins.trace "INFO: ${msg}";
fetchMD5warn = name: context : data : info
"Deprecated use of MD5 hash in ${name} to fetch ${context}" data;
}


@ -333,7 +333,15 @@ rec {
name = "either";
description = "${t1.description} or ${t2.description}";
check = x: t1.check x || t2.check x;
merge = mergeOneOption;
merge = loc: defs:
let
defList = map (d: d.value) defs;
in
if all (x: t1.check x) defList
then t1.merge loc defs
else if all (x: t2.check x) defList
then t2.merge loc defs
else mergeOneOption loc defs;
typeMerge = f':
let mt1 = t1.typeMerge (elemAt f'.wrapped 0).functor;
mt2 = t2.typeMerge (elemAt f'.wrapped 1).functor;
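
With this change, definitions that all satisfy one branch are handed to that
branch's own merge function instead of being limited to a single definition.
A hedged sketch of an option using the type (the option name is made up):

    # Accepts a single int or a list of ints; several list definitions
    # can now merge via the listOf merge instead of aborting.
    fooLimits = mkOption {
      type = types.either types.int (types.listOf types.int);
      default = [ ];
    };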


@ -38,6 +38,12 @@ while test -n "$1"; do
nix-build $TRAVIS_BUILD_DIR/pkgs/top-level/release.nix --attr tarball --show-trace
;;
nixpkgs-unstable)
echo "=== Checking nixpkgs unstable job"
nix-instantiate $TRAVIS_BUILD_DIR/pkgs/top-level/release.nix --attr unstable --show-trace
;;
nixpkgs-lint)
echo "=== Checking nixpkgs lint"

maintainers/scripts/update.nix Executable file

@ -0,0 +1,131 @@
{ package ? null
, maintainer ? null
}:
# TODO: add assert statements
let
pkgs = import ./../../default.nix { };
packagesWith = cond: return: set:
pkgs.lib.flatten
(pkgs.lib.mapAttrsToList
(name: pkg:
let
result = builtins.tryEval (
if pkgs.lib.isDerivation pkg && cond name pkg
then [(return name pkg)]
else if pkg.recurseForDerivations or false || pkg.recurseForRelease or false
then packagesWith cond return pkg
else []
);
in
if result.success then result.value
else []
)
set
);
packagesWithUpdateScriptAndMaintainer = maintainer':
let
maintainer =
if ! builtins.hasAttr maintainer' pkgs.lib.maintainers then
builtins.throw "Maintainer with name `${maintainer'} does not exist in `lib/maintainers.nix`."
else
builtins.getAttr maintainer' pkgs.lib.maintainers;
in
packagesWith (name: pkg: builtins.hasAttr "updateScript" pkg &&
(if builtins.hasAttr "maintainers" pkg.meta
then (if builtins.isList pkg.meta.maintainers
then builtins.elem maintainer pkg.meta.maintainers
else maintainer == pkg.meta.maintainers
)
else false
)
)
(name: pkg: pkg)
pkgs;
packageByName = name:
let
package = pkgs.lib.attrByPath (pkgs.lib.splitString "." name) null pkgs;
in
if package == null then
builtins.throw "Package with an attribute name `${name}` does not exists."
else if ! builtins.hasAttr "updateScript" package then
builtins.throw "Package with an attribute name `${name}` does have an `passthru.updateScript` defined."
else
package;
packages =
if package != null then
[ (packageByName package) ]
else if maintainer != null then
packagesWithUpdateScriptAndMaintainer maintainer
else
builtins.throw "No arguments provided.\n\n${helpText}";
helpText = ''
Please run:
% nix-shell maintainers/scripts/update.nix --argstr maintainer garbas
to run all update scripts for all packages that list \`garbas\` as a maintainer
and have \`updateScript\` defined, or:
% nix-shell maintainers/scripts/update.nix --argstr package vim
to run the update script for a specific package.
'';
runUpdateScript = package: ''
echo -ne " - ${package.name}: UPDATING ..."\\r
${package.updateScript} &> ${(builtins.parseDrvName package.name).name}.log
CODE=$?
if [ "$CODE" != "0" ]; then
echo " - ${package.name}: ERROR "
echo ""
echo "--- SHOWING ERROR LOG FOR ${package.name} ----------------------"
echo ""
cat ${(builtins.parseDrvName package.name).name}.log
echo ""
echo "--- SHOWING ERROR LOG FOR ${package.name} ----------------------"
exit $CODE
else
rm ${(builtins.parseDrvName package.name).name}.log
fi
echo " - ${package.name}: DONE. "
'';
in pkgs.stdenv.mkDerivation {
name = "nixpkgs-update-script";
buildCommand = ''
echo ""
echo "----------------------------------------------------------------"
echo ""
echo "Not possible to update packages using \`nix-build\`"
echo ""
echo "${helpText}"
echo "----------------------------------------------------------------"
exit 1
'';
shellHook = ''
echo ""
echo "Going to be running update for following packages:"
echo "${builtins.concatStringsSep "\n" (map (x: " - ${x.name}") packages)}"
echo ""
read -n1 -r -p "Press space to continue..." confirm
if [ "$confirm" = "" ]; then
echo ""
echo "Running update for:"
${builtins.concatStringsSep "\n" (map runUpdateScript packages)}
echo ""
echo "Packages updated!"
exit 0
else
echo "Aborting!"
exit 1
fi
'';
}
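
For the shell hook above to find anything, a package must carry
passthru.updateScript; a hypothetical derivation sketch (the name and the
script file are made up):

    stdenv.mkDerivation {
      name = "frobnicator-1.0";
      src = ./.;
      # any executable; update.nix runs it and captures the log
      passthru.updateScript = ./update.sh;
    }

One would then invoke, per the help text above, nix-shell
maintainers/scripts/update.nix --argstr package frobnicator.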


@ -47,4 +47,12 @@ where <literal>eth0</literal> should be replaced with the desired
external interface. Note that <literal>ve-+</literal> is a wildcard
that matches all container interfaces.</para>
<para>If you are using Network Manager, you need to explicitly prevent
it from managing container interfaces:
<programlisting>
networking.networkmanager.unmanaged = [ "interface-name:ve-*" ];
</programlisting>
</para>
</section>


@ -129,7 +129,7 @@ default; run <literal>nix-env -i nix-repl</literal> to get it. A
typical use:
<screen>
$ nix-repl '&lt;nixos>'
$ nix-repl '&lt;nixpkgs/nixos>'
nix-repl> config.networking.hostName
"mandark"


@ -18,7 +18,6 @@ NixOS.</para>
<xi:include href="building-nixos.xml" />
<xi:include href="nixos-tests.xml" />
<xi:include href="testing-installer.xml" />
<xi:include href="reviewing-contributions.xml" />
<xi:include href="releases.xml" />
</part>


@ -101,6 +101,11 @@ channel by running
which is equivalent to the more verbose <literal>nix-channel --update
nixos; nixos-rebuild switch</literal>.</para>
<note><para>Channels are set per user. This means that running <literal>
nix-channel --add</literal> as a non-root user (or without sudo) will not
affect configuration in <literal>/etc/nixos/configuration.nix</literal>.
</para></note>
<warning><para>It is generally safe to switch back and forth between
channels. The only exception is that a newer NixOS may also have a
newer Nix version, which may involve an upgrade of Nix's database


@ -68,7 +68,7 @@ desired operation. It must be one of the following:
<listitem>
<para>Build and activate the new configuration, and make it the
boot default. That is, the configuration is added to the GRUB
boot menu as the default meny entry, so that subsequent reboots
boot menu as the default menu entry, so that subsequent reboots
will boot the system into the new configuration. Previous
configurations activated with <command>nixos-rebuild
switch</command> or <command>nixos-rebuild boot</command> remain


@ -68,6 +68,26 @@ following incompatible changes:</para>
that may be in /etc.
</para>
</listitem>
<listitem>
<para>
The Parsoid service now uses the YAML configuration format.
<literal>services.parsoid.interwikis</literal> is now called
<literal>services.parsoid.wikis</literal> and is a list of either API URLs
or attribute sets as specified in Parsoid's documentation.
</para>
</listitem>
<listitem>
<para>
<literal>Ntpd</literal> was replaced by
<literal>systemd-timesyncd</literal> as the default service to synchronize
system time with a remote NTP server. The old behavior can be restored by
setting <literal>services.ntp.enable</literal> to <literal>true</literal>.
Upstream time servers for all NTP implementations are now configured using
<literal>networking.timeServers</literal>.
</para>
</listitem>
</itemizedlist>


@ -9,6 +9,8 @@ rec {
inherit pkgs;
qemu = pkgs.qemu_test;
# Build a virtual network from an attribute set `{ machine1 =
# config1; ... machineN = configN; }', where `machineX' is the
@ -27,6 +29,7 @@ rec {
[ ../modules/virtualisation/qemu-vm.nix
../modules/testing/test-instrumentation.nix # !!! should only get added for automated test runs
{ key = "no-manual"; services.nixosManual.enable = false; }
{ key = "qemu"; system.build.qemu = qemu; }
] ++ optional minimal ../modules/testing/minimal-kernel.nix;
extraArgs = { inherit nodes; };
};


@ -27,6 +27,10 @@
, name ? "nixos-disk-image"
# This prevents errors while checking nix-store validity, see
# https://github.com/NixOS/nix/issues/1134
, fixValidity ? true
, format ? "raw"
}:
@ -61,9 +65,6 @@ pkgs.vmTools.runInLinuxVM (
# Create an empty filesystem and mount it.
mkfs.${fsType} -L nixos $rootDisk
${optionalString (fsType == "ext4") ''
tune2fs -c 0 -i 0 $rootDisk
''}
mkdir /mnt
mount $rootDisk /mnt
@ -71,9 +72,11 @@ pkgs.vmTools.runInLinuxVM (
printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
${config.nix.package.out}/bin/nix-store --load-db --option build-users-group ""
# Add missing size/hash fields to the database. FIXME:
# exportReferencesGraph should provide these directly.
${config.nix.package.out}/bin/nix-store --verify --check-contents --option build-users-group ""
${if fixValidity then ''
# Add missing size/hash fields to the database. FIXME:
# exportReferencesGraph should provide these directly.
${config.nix.package.out}/bin/nix-store --verify --check-contents --option build-users-group ""
'' else ""}
# In case the bootloader tries to write to /dev/sda…
ln -s vda /dev/xvda
@ -97,7 +100,9 @@ pkgs.vmTools.runInLinuxVM (
umount /mnt
# Do a fsck to make sure resize2fs works.
fsck.${fsType} -f -y $rootDisk
# Make sure resize2fs works
${optionalString (fsType == "ext4") ''
tune2fs -c 0 -i 0 $rootDisk
''}
''
)


@ -25,6 +25,6 @@ stdenv.mkDerivation {
# Generate the squashfs image.
mksquashfs nix-path-registration $storePaths $out \
-keep-as-directory -all-root
-keep-as-directory -all-root -comp xz
'';
}


@ -52,9 +52,10 @@ $extraCommands
mkdir -p $out/tarball
tar cvJf $out/tarball/$fileName.tar.xz * $extraArgs
rm env-vars
tar --sort=name --mtime='1970-01-01' -cvJf $out/tarball/$fileName.tar.xz * $extraArgs
mkdir -p $out/nix-support
echo $system > $out/nix-support/system
echo "file system-tarball $out/tarball/$fileName.tar.xz" > $out/nix-support/hydra-build-products


@ -504,6 +504,31 @@ sub screenshot {
}, { image => $name } );
}
# Get the text of TTY<n>
sub getTTYText {
my ($self, $tty) = @_;
my ($status, $out) = $self->execute("fold -w 80 /dev/vcs${tty}");
return $out;
}
# Wait until TTY<n>'s text matches a particular regular expression
sub waitUntilTTYMatches {
my ($self, $tty, $regexp) = @_;
$self->nest("waiting for $regexp to appear on tty $tty", sub {
retry sub {
return 1 if $self->getTTYText($tty) =~ /$regexp/;
}
});
}
# Debugging: Dump the contents of the TTY<n>
sub dumpTTYContents {
my ($self, $tty) = @_;
$self->execute("fold -w 80 /dev/vcs${tty} | systemd-cat");
}
# Take a screenshot and return the result as text using optical character
# recognition.
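
A hedged sketch of how a NixOS test might use the new helpers (machine name
and pattern are illustrative):

    testScript = ''
      $machine->waitUntilTTYMatches(2, "login: ");
      $machine->dumpTTYContents(2);
    '';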


@ -29,7 +29,7 @@ rec {
cp ${./test-driver/Logger.pm} $libDir/Logger.pm
wrapProgram $out/bin/nixos-test-driver \
--prefix PATH : "${lib.makeBinPath [ qemu_kvm vde2 netpbm coreutils ]}" \
--prefix PATH : "${lib.makeBinPath [ qemu vde2 netpbm coreutils ]}" \
--prefix PERL5LIB : "${with perlPackages; lib.makePerlPath [ TermReadLineGnu XMLWriter IOTty FileSlurp ]}:$out/lib/perl5/site_perl"
'';
};
@ -93,7 +93,7 @@ rec {
vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);
ocrProg = tesseract.override { enableLanguages = [ "eng" ]; };
ocrProg = tesseract;
# Generate convenience wrappers for running the test driver
# interactively with the specified network, and for starting the


@ -1,4 +1,8 @@
#! /bin/sh -e
#!/usr/bin/env nix-shell
#! nix-shell -i bash -p qemu ec2_ami_tools jq ec2_api_tools awscli
# To start with do: nix-shell -p awscli --run "aws configure"
set -o pipefail
#set -x
@ -15,7 +19,7 @@ rm -f ec2-amis.nix
types="hvm pv"
stores="ebs s3"
regions="eu-west-1 eu-central-1 us-east-1 us-west-1 us-west-2 ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2 sa-east-1 ap-south-1"
regions="eu-west-1 eu-central-1 us-east-1 us-east-2 us-west-1 us-west-2 ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2 sa-east-1 ap-south-1"
for type in $types; do
link=$stateDir/$type
@ -57,7 +61,7 @@ for type in $types; do
ami=$(aws ec2 copy-image \
--region "$region" \
--source-region "$prevRegion" --source-image-id "$prevAmi" \
--name "$name" --description "$description" | json -q .ImageId)
--name "$name" --description "$description" | jq -r '.ImageId')
if [ "$ami" = null ]; then break; fi
else


@ -301,9 +301,7 @@ in
};
style = mkOption {
type = types.str // {
check = flip elem ["none" "slight" "medium" "full"];
};
type = types.enum ["none" "slight" "medium" "full"];
default = "full";
description = ''
TrueType hinting style, one of <literal>none</literal>,
@ -329,9 +327,7 @@ in
default = "rgb";
type = types.enum ["rgb" "bgr" "vrgb" "vbgr" "none"];
description = ''
Subpixel order, one of <literal>none</literal>,
<literal>rgb</literal>, <literal>bgr</literal>,
<literal>vrgb</literal>, or <literal>vbgr</literal>.
Subpixel order.
'';
};
@ -339,9 +335,7 @@ in
default = "default";
type = types.enum ["none" "default" "light" "legacy"];
description = ''
FreeType LCD filter, one of <literal>none</literal>,
<literal>default</literal>, <literal>light</literal>, or
<literal>legacy</literal>.
FreeType LCD filter.
'';
};


@ -7,11 +7,11 @@ with lib;
gnu = mkOption {
type = types.bool;
default = false;
description =
'' When enabled, GNU software is chosen by default whenever there is
a choice between GNU and non-GNU software (e.g., GNU lsh
vs. OpenSSH).
'';
description = ''
When enabled, GNU software is chosen by default whenever there is
a choice between GNU and non-GNU software (e.g., GNU lsh
vs. OpenSSH).
'';
};
};


@ -44,8 +44,9 @@ in
consolePackages = mkOption {
type = types.listOf types.package;
default = with pkgs.kbdKeymaps; [ dvp neo ];
defaultText = ''with pkgs.kbdKeymaps; [ dvp neo ]'';
description = ''
List of additional packages that provide console fonts, keymaps and
List of additional packages that provide console fonts, keymaps and
other resources.
'';
};


@ -84,6 +84,18 @@ in
'';
};
networking.timeServers = mkOption {
default = [
"0.nixos.pool.ntp.org"
"1.nixos.pool.ntp.org"
"2.nixos.pool.ntp.org"
"3.nixos.pool.ntp.org"
];
description = ''
The set of NTP servers from which to synchronise.
'';
};
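
For example, to point every enabled NTP implementation at other servers
(hostnames illustrative):

    networking.timeServers = [
      "time.example.org"
      "0.pool.ntp.org"
    ];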
networking.proxy = {
default = lib.mkOption {


@ -10,9 +10,21 @@ let
inherit (config.services.samba) nsswins;
ldap = (config.users.ldap.enable && config.users.ldap.nsswitch);
in
hostArray = [ "files" "mymachines" ]
++ optionals nssmdns [ "mdns_minimal [!UNAVAIL=return]" ]
++ optionals nsswins [ "wins" ]
++ [ "dns" ]
++ optionals nssmdns [ "mdns" ]
++ ["myhostname" ];
{
passwdArray = [ "files" ]
++ optionals ldap [ "ldap" ]
++ [ "mymachines" ];
shadowArray = [ "files" ]
++ optionals ldap [ "ldap" ];
in {
options = {
# NSS modules. Hacky!
@ -39,24 +51,26 @@ in
# Name Service Switch configuration file. Required by the C
# library. !!! Factor out the mdns stuff. The avahi module
# should define an option used by this module.
environment.etc."nsswitch.conf".text =
''
passwd: files ${optionalString ldap "ldap"}
group: files ${optionalString ldap "ldap"}
shadow: files ${optionalString ldap "ldap"}
hosts: files ${optionalString nssmdns "mdns_minimal [NOTFOUND=return]"} dns ${optionalString nssmdns "mdns"} ${optionalString nsswins "wins"} myhostname mymachines
networks: files dns
ethers: files
services: files
protocols: files
'';
environment.etc."nsswitch.conf".text = ''
passwd: ${concatStringsSep " " passwdArray}
group: ${concatStringsSep " " passwdArray}
shadow: ${concatStringsSep " " shadowArray}
hosts: ${concatStringsSep " " hostArray}
networks: files
ethers: files
services: files
protocols: files
rpc: files
'';
# Systemd provides nss-myhostname to ensure that our hostname
# always resolves to a valid IP address. It returns all locally
# configured IP addresses, or ::1 and 127.0.0.2 as
# fallbacks. Systemd also provides nss-mymachines to return IP
# addresses of local containers.
system.nssModules = [ config.systemd.package ];
system.nssModules = [ config.systemd.package.out ];
};
}
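
As a concrete reading of the arrays above: with nssmdns enabled and nsswins
and LDAP disabled, the generated hosts line works out to

    hosts: files mymachines mdns_minimal [!UNAVAIL=return] dns mdns myhostname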


@ -118,6 +118,7 @@ in
"/share/terminfo"
"/share/themes"
"/share/vim-plugins"
"/share/vulkan"
];
system.path = pkgs.buildEnv {


@ -135,6 +135,12 @@ in
environment.sessionVariables.LD_LIBRARY_PATH =
[ "/run/opengl-driver/lib" "/run/opengl-driver-32/lib" ];
environment.extraInit = ''
export XDG_DATA_DIRS=$XDG_DATA_DIRS:/run/opengl-driver/share
'' + optionalString cfg.driSupport32Bit ''
export XDG_DATA_DIRS=$XDG_DATA_DIRS:/run/opengl-driver-32/share
'';
hardware.opengl.package = mkDefault (makePackage pkgs);
hardware.opengl.package32 = mkDefault (makePackage pkgs_i686);


@ -45,10 +45,8 @@ in
"amd/amdapfxx.blb".source = package + "/etc/amd/amdapfxx.blb";
"gbm/gbm.conf".source = package + "/etc/gbm/gbm.conf";
"OpenCL/vendors/amdocl64.icd".source = package + "/etc/OpenCL/vendors/amdocl64.icd";
"vulkan/icd.d/amd_icd64.json".source = package + "/etc/vulkan/icd.d/amd_icd64.json";
} // optionalAttrs opengl.driSupport32Bit {
"OpenCL/vendors/amdocl32.icd".source = package32 + "/etc/OpenCL/vendors/amdocl32.icd";
"vulkan/icd.d/amd_icd32.json".source = package32 + "/etc/vulkan/icd.d/amd_icd32.json";
};
};


@ -13,6 +13,8 @@ let
useDisplayDevice = cfg.connectDisplay;
};
useBbswitch = cfg.pmMethod == "bbswitch";
primus = pkgs.primus.override {
inherit useNvidia;
};
@ -22,58 +24,69 @@ in
{
options = {
hardware.bumblebee.enable = mkOption {
default = false;
type = types.bool;
description = ''
Enable the bumblebee daemon to manage Optimus hybrid video cards.
This should power off secondary GPU until its use is requested
by running an application with optirun.
hardware.bumblebee = {
Only nvidia driver is supported so far.
'';
};
hardware.bumblebee.group = mkOption {
default = "wheel";
example = "video";
type = types.str;
description = ''Group for bumblebee socket'';
};
enable = mkOption {
default = false;
type = types.bool;
description = ''
Enable the bumblebee daemon to manage Optimus hybrid video cards.
This should power off secondary GPU until its use is requested
by running an application with optirun.
'';
};
hardware.bumblebee.connectDisplay = mkOption {
default = false;
type = types.bool;
description = ''
Set to true if you intend to connect your discrete card to a
monitor. This option will set up your Nvidia card for EDID
discovery and to turn on the monitor signal.
group = mkOption {
default = "wheel";
example = "video";
type = types.str;
description = ''Group for bumblebee socket'';
};
Only nvidia driver is supported so far.
'';
};
connectDisplay = mkOption {
default = false;
type = types.bool;
description = ''
Set to true if you intend to connect your discrete card to a
monitor. This option will set up your Nvidia card for EDID
discovery and to turn on the monitor signal.
Only nvidia driver is supported so far.
'';
};
driver = mkOption {
default = "nvidia";
type = types.enum [ "nvidia" "nouveau" ];
description = ''
Set driver used by bumblebeed. Supported are nouveau and nvidia.
'';
};
pmMethod = mkOption {
default = "auto";
type = types.enum [ "auto" "bbswitch" "nouveau" "switcheroo" "none" ];
description = ''
Set preferred power management method for unused card.
'';
};
hardware.bumblebee.driver = mkOption {
default = "nvidia";
type = types.enum [ "nvidia" "nouveau" ];
description = ''
Set driver used by bumblebeed. Supported are nouveau and nvidia.
'';
};
};
config = mkIf config.hardware.bumblebee.enable {
boot.blacklistedKernelModules = [ "nouveau" "nvidia" ];
boot.kernelModules = [ "bbswitch" ];
boot.extraModulePackages = [ kernel.bbswitch ] ++ optional useNvidia kernel.nvidia_x11;
config = mkIf cfg.enable {
boot.blacklistedKernelModules = [ "nvidia-drm" "nvidia" "nouveau" ];
boot.kernelModules = optional useBbswitch [ "bbswitch" ];
boot.extraModulePackages = optional useBbswitch kernel.bbswitch ++ optional useNvidia kernel.nvidia_x11;
environment.systemPackages = [ bumblebee primus ];
systemd.services.bumblebeed = {
description = "Bumblebee Hybrid Graphics Switcher";
wantedBy = [ "display-manager.service" ];
path = [ kernel.bbswitch bumblebee ];
wantedBy = [ "multi-user.target" ];
before = [ "display-manager.service" ];
serviceConfig = {
ExecStart = "${bumblebee}/bin/bumblebeed --use-syslog -g ${cfg.group} --driver ${cfg.driver}";
ExecStart = "${bumblebee}/bin/bumblebeed --use-syslog -g ${cfg.group} --driver ${cfg.driver} --pm-method ${cfg.pmMethod}";
};
};
};
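
Under the new nested layout, a typical configuration reads (values are
illustrative):

    hardware.bumblebee = {
      enable = true;
      driver = "nouveau";
      pmMethod = "bbswitch";
      group = "video";
    };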


@ -1,20 +1,41 @@
# This module defines a NixOS installation CD that contains X11 and
# KDE 4.
# KDE 5.
{ config, lib, pkgs, ... }:
with lib;
{
imports = [ ./installation-cd-base.nix ../../profiles/graphical.nix ];
imports = [ ./installation-cd-base.nix ];
# Provide wicd for easy wireless configuration.
#networking.wicd.enable = true;
services.xserver = {
enable = true;
# Automatically login as root.
displayManager.slim = {
enable = true;
defaultUser = "root";
autoLogin = true;
};
desktopManager.kde5 = {
enable = true;
enableQt4Support = false;
};
# Enable touchpad support for many laptops.
synaptics.enable = true;
};
environment.systemPackages =
[ # Include gparted for partitioning disks.
[ pkgs.glxinfo
# Include gparted for partitioning disks.
pkgs.gparted
# Firefox for reading the manual.
pkgs.firefox
# Include some editors.
pkgs.vim
pkgs.bvi # binary editor
@ -32,80 +53,21 @@ with lib;
# Don't start the X server by default.
services.xserver.autorun = mkForce false;
# Auto-login as root.
services.xserver.displayManager.kdm.extraConfig =
''
[X-*-Core]
AllowRootLogin=true
AutoLoginEnable=true
AutoLoginUser=root
AutoLoginPass=""
'';
# Custom kde-workspace adding some icons on the desktop
system.activationScripts.installerDesktop = let
openManual = pkgs.writeScript "nixos-manual.sh" ''
#!${pkgs.stdenv.shell}
cd ${config.system.build.manual.manual}/share/doc/nixos/
konqueror ./index.html
'';
desktopFile = pkgs.writeText "nixos-manual.desktop" ''
[Desktop Entry]
Version=1.0
Type=Application
Name=NixOS Manual
Exec=${openManual}
Icon=konqueror
Exec=firefox ${config.system.build.manual.manual}/share/doc/nixos/index.html
Icon=text-html
'';
in ''
mkdir -p /root/Desktop
ln -sfT ${desktopFile} /root/Desktop/nixos-manual.desktop
ln -sfT ${pkgs.kde4.konsole}/share/applications/kde4/konsole.desktop /root/Desktop/konsole.desktop
ln -sfT ${pkgs.kde5.konsole}/share/applications/org.kde.konsole.desktop /root/Desktop/org.kde.konsole.desktop
ln -sfT ${pkgs.gparted}/share/applications/gparted.desktop /root/Desktop/gparted.desktop
'';
services.xserver.desktopManager.kde4.kdeWorkspacePackage = let
pkg = pkgs.kde4.kde_workspace;
plasmaInit = pkgs.writeText "00-defaultLayout.js" ''
loadTemplate("org.kde.plasma-desktop.defaultPanel")
for (var i = 0; i < screenCount; ++i) {
var desktop = new Activity
desktop.name = i18n("Desktop")
desktop.screen = i
desktop.wallpaperPlugin = 'image'
desktop.wallpaperMode = 'SingleImage'
var folderview = desktop.addWidget("folderview");
folderview.writeConfig("url", "desktop:/");
//Create more panels for other screens
if (i > 0){
var panel = new Panel
panel.screen = i
panel.location = 'bottom'
panel.height = screenGeometry(i).height > 1024 ? 35 : 27
var tasks = panel.addWidget("tasks")
tasks.writeConfig("showOnlyCurrentScreen", true);
}
}
'';
in
pkgs.runCommand pkg.name
{ inherit (pkg) meta; }
''
mkdir -p $out
cp -prf ${pkg}/* $out/
chmod a+w $out/share/apps/plasma-desktop/init
cp -f ${plasmaInit} $out/share/apps/plasma-desktop/init/00-defaultLayout.js
'';
# Disable large stuff that's not very useful on the installation CD.
services.xserver.desktopManager.kde4.enablePIM = false;
}


@ -26,11 +26,6 @@ with lib;
# here and it causes a cyclic dependency.
boot.loader.grub.enable = false;
boot.initrd.postMountCommands = ''
mkdir -p /mnt-root/nix/store
mount -t squashfs /nix-store.squashfs /mnt-root/nix/store
'';
# !!! Hack - attributes expected by other modules.
system.boot.loader.kernelFile = "bzImage";
environment.systemPackages = [ pkgs.grub2 pkgs.grub2_efi pkgs.syslinux ];
@ -42,13 +37,34 @@ with lib;
options = [ "mode=0755" ];
};
# In stage 1, mount a tmpfs on top of /nix/store (the squashfs
# image) to make this a live CD.
fileSystems."/nix/.ro-store" =
{ fsType = "squashfs";
device = "../nix-store.squashfs";
options = [ "loop" ];
neededForBoot = true;
};
fileSystems."/nix/.rw-store" =
{ fsType = "tmpfs";
options = [ "mode=0755" ];
neededForBoot = true;
};
fileSystems."/nix/store" =
{ fsType = "unionfs-fuse";
device = "unionfs";
options = [ "allow_other" "cow" "nonempty" "chroot=/mnt-root" "max_files=32768" "hide_meta_files" "dirs=/nix/.rw-store=rw:/nix/.ro-store=ro" ];
};
boot.initrd.availableKernelModules = [ "squashfs" ];
boot.initrd.kernelModules = [ "loop" ];
# Closures to be copied to the Nix store, namely the init
# script and the top-level system configuration directory.
netboot.storeContents =
netboot.storeContents =
[ config.system.build.toplevel ];
# Create the squashfs image that contains the Nix store.


@ -126,9 +126,9 @@ targetHostCmd() {
copyToTarget() {
if ! [ "$targetHost" = "$buildHost" ]; then
if [ -z "$targetHost" ]; then
NIX_SSHOPTS=$SSH_OPTS nix-copy-closure --from "$buildHost" "$1"
NIX_SSHOPTS=$SSHOPTS nix-copy-closure --from "$buildHost" "$1"
elif [ -z "$buildHost" ]; then
NIX_SSHOPTS=$SSH_OPTS nix-copy-closure --to "$targetHost" "$1"
NIX_SSHOPTS=$SSHOPTS nix-copy-closure --to "$targetHost" "$1"
else
buildHostCmd nix-copy-closure --to "$targetHost" "$1"
fi
@ -169,7 +169,7 @@ nixBuild() {
local drv="$(nix-instantiate "${instArgs[@]}" "${extraBuildFlags[@]}")"
if [ -a "$drv" ]; then
NIX_SSHOPTS=$SSH_OPTS nix-copy-closure --to "$buildHost" "$drv"
NIX_SSHOPTS=$SSHOPTS nix-copy-closure --to "$buildHost" "$drv"
buildHostCmd nix-store -r "$drv" "${buildArgs[@]}"
else
echo "nix-instantiate failed"


@ -18,5 +18,5 @@ with lib;
# Add some more video drivers to give X11 a shot at working in
# VMware and QEMU.
services.xserver.videoDrivers = mkOverride 40 [ "virtualbox" "vmware" "cirrus" "vesa" ];
services.xserver.videoDrivers = mkOverride 40 [ "virtualbox" "vmware" "cirrus" "vesa" "modesetting" ];
}


@ -58,7 +58,6 @@
#utmp = 29; # unused
ddclient = 30;
davfs2 = 31;
privoxy = 32;
#disnix = 33; # unused
osgi = 34;
tor = 35;
@ -84,7 +83,7 @@
spamd = 56;
#networkmanager = 57; # unused
nslcd = 58;
#scanner = 59; # unused
scanner = 59;
nginx = 60;
chrony = 61;
#systemd-journal = 62; # unused
@ -212,7 +211,6 @@
lambdabot = 191;
asterisk = 192;
plex = 193;
bird = 195;
grafana = 196;
skydns = 197;
ripple-rest = 198;
@ -279,6 +277,10 @@
hound = 259;
leaps = 260;
ipfs = 261;
stanchion = 262;
riak-cs = 263;
infinoted = 264;
keystone = 265;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -319,7 +321,6 @@
utmp = 29;
#ddclient = 30; # unused
davfs2 = 31;
privoxy = 32;
disnix = 33;
osgi = 34;
tor = 35;
@ -469,7 +470,6 @@
#asterisk = 192; # unused
plex = 193;
sabnzbd = 194;
bird = 195;
#grafana = 196; #unused
#skydns = 197; #unused
#ripple-rest = 198; #unused
@ -528,6 +528,10 @@
hound = 259;
leaps = 260;
ipfs = 261;
stanchion = 262;
riak-cs = 263;
infinoted = 264;
keystone = 265;
# When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal


@ -133,9 +133,11 @@
./services/cluster/fleet.nix
./services/cluster/kubernetes.nix
./services/cluster/panamax.nix
./services/computing/boinc/client.nix
./services/computing/torque/server.nix
./services/computing/torque/mom.nix
./services/computing/slurm/slurm.nix
./services/continuous-integration/buildbot/master.nix
./services/continuous-integration/buildkite-agent.nix
./services/continuous-integration/hydra/default.nix
./services/continuous-integration/gitlab-runner.nix
@ -159,6 +161,8 @@
./services/databases/postgresql.nix
./services/databases/redis.nix
./services/databases/riak.nix
./services/databases/riak-cs.nix
./services/databases/stanchion.nix
./services/databases/virtuoso.nix
./services/desktops/accountsservice.nix
./services/desktops/geoclue2.nix
@ -178,6 +182,7 @@
./services/desktops/telepathy.nix
./services/development/hoogle.nix
./services/editors/emacs.nix
./services/editors/infinoted.nix
./services/games/factorio.nix
./services/games/ghost-one.nix
./services/games/minecraft-server.nix
@ -346,6 +351,7 @@
./services/networking/connman.nix
./services/networking/consul.nix
./services/networking/coturn.nix
./services/networking/dante.nix
./services/networking/ddclient.nix
./services/networking/dhcpcd.nix
./services/networking/dhcpd.nix
@ -354,6 +360,7 @@
./services/networking/dnsmasq.nix
./services/networking/ejabberd.nix
./services/networking/fan.nix
./services/networking/fakeroute.nix
./services/networking/ferm.nix
./services/networking/firefox/sync-server.nix
./services/networking/firewall.nix
@ -478,6 +485,7 @@
./services/security/torify.nix
./services/security/tor.nix
./services/security/torsocks.nix
./services/system/cgmanager.nix
./services/system/cloud-init.nix
./services/system/dbus.nix
./services/system/kerberos.nix
@ -519,6 +527,7 @@
./services/x11/colord.nix
./services/x11/compton.nix
./services/x11/unclutter.nix
./services/x11/unclutter-xfixes.nix
./services/x11/desktop-managers/default.nix
./services/x11/display-managers/auto.nix
./services/x11/display-managers/default.nix
@ -539,7 +548,6 @@
./services/x11/window-managers/fluxbox.nix
./services/x11/window-managers/icewm.nix
./services/x11/window-managers/bspwm.nix
./services/x11/window-managers/bspwm-unstable.nix
./services/x11/window-managers/metacity.nix
./services/x11/window-managers/none.nix
./services/x11/window-managers/twm.nix
@ -612,6 +620,7 @@
./virtualisation/docker.nix
./virtualisation/libvirtd.nix
./virtualisation/lxc.nix
./virtualisation/lxcfs.nix
./virtualisation/lxd.nix
./virtualisation/amazon-options.nix
./virtualisation/openvswitch.nix
@ -622,4 +631,5 @@
./virtualisation/vmware-guest.nix
./virtualisation/xen-dom0.nix
./virtualisation/xe-guest-utilities.nix
./virtualisation/openstack/keystone.nix
]


@ -7,7 +7,7 @@
# Include some utilities that are useful for installing or repairing
# the system.
environment.systemPackages = [
pkgs.w3m # needed for the manual anyway
pkgs.w3m-nox # needed for the manual anyway
pkgs.testdisk # useful for repairing boot problems
pkgs.mssys # for writing Microsoft boot sectors / MBRs
pkgs.efibootmgr
@ -42,8 +42,6 @@
# Some compression/archiver tools.
pkgs.unzip
pkgs.zip
pkgs.dar # disk archiver
pkgs.cabextract
];
# Include support for various filesystems.


@ -14,4 +14,6 @@ with lib;
programs.man.enable = mkDefault false;
programs.info.enable = mkDefault false;
sound.enable = mkDefault false;
}


@ -34,6 +34,7 @@ in
package = mkOption {
default = pkgs.jdk;
defaultText = "pkgs.jdk";
description = ''
Java package to install. Typical values are pkgs.jdk or pkgs.jre.
'';


@ -9,14 +9,14 @@ let
in
{
options.programs.mosh = {
enable = mkOption {
description = ''
Whether to enable mosh. Note, this will open ports in your firewall!
'';
default = false;
example = true;
type = lib.types.bool;
};
enable = mkOption {
description = ''
Whether to enable mosh. Note, this will open ports in your firewall!
'';
default = false;
example = true;
type = lib.types.bool;
};
};
config = mkIf cfg.enable {


@ -165,7 +165,7 @@ in
config = {
programs.ssh.setXAuthLocation =
mkDefault (config.services.xserver.enable || config.programs.ssh.forwardX11);
mkDefault (config.services.xserver.enable || config.programs.ssh.forwardX11 || config.services.openssh.forwardX11);
assertions =
[ { assertion = cfg.forwardX11 -> cfg.setXAuthLocation;


@ -30,6 +30,8 @@ with lib;
(mkRenamedOptionModule [ "services" "gitlab" "stateDir" ] [ "services" "gitlab" "statePath" ])
(mkRemovedOptionModule [ "services" "gitlab" "satelliteDir" ] "")
(mkRenamedOptionModule [ "services" "clamav" "updater" "config" ] [ "services" "clamav" "updater" "extraConfig" ])
# Old Grub-related options.
(mkRenamedOptionModule [ "boot" "initrd" "extraKernelModules" ] [ "boot" "initrd" "kernelModules" ])
(mkRenamedOptionModule [ "boot" "extraKernelParams" ] [ "boot" "kernelParams" ])
@ -142,6 +144,12 @@ with lib;
# murmur
(mkRenamedOptionModule [ "services" "murmur" "welcome" ] [ "services" "murmur" "welcometext" ])
# parsoid
(mkRemovedOptionModule [ "services" "parsoid" "interwikis" ] [ "services" "parsoid" "wikis" ])
# tarsnap
(mkRemovedOptionModule [ "services" "tarsnap" "cachedir" ] "Use services.tarsnap.archives.<name>.cachedir")
# Options that are obsolete and have no replacement.
(mkRemovedOptionModule [ "boot" "initrd" "luks" "enable" ] "")
(mkRemovedOptionModule [ "programs" "bash" "enable" ] "")


@ -178,6 +178,7 @@ in
path = [ pkgs.simp_le ];
preStart = ''
mkdir -p '${cfg.directory}'
chown '${data.user}:${data.group}' '${cfg.directory}'
if [ ! -d '${cpath}' ]; then
mkdir '${cpath}'
fi


@ -75,7 +75,7 @@ options for the <literal>security.acme</literal> module.</para>
<programlisting>
security.acme.certs."foo.example.com" = {
webroot = "/var/www/challenges";
webroot = config.security.acme.directory + "/acme-challenge";
email = "foo@example.com";
user = "nginx";
group = "nginx";


@ -73,7 +73,7 @@ in
};
failmode = mkOption {
type = types.str;
type = types.enum [ "safe" "enum" ];
default = "safe";
description = ''
On service or configuration errors that prevent Duo
@ -115,7 +115,7 @@ in
};
prompts = mkOption {
type = types.int;
type = types.enum [ 1 2 3 ];
default = 3;
description = ''
If a user fails to authenticate with a second factor, Duo
@ -181,13 +181,7 @@ in
config = mkIf (cfg.ssh.enable || cfg.pam.enable) {
assertions =
[ { assertion = cfg.failmode == "safe" || cfg.failmode == "secure";
message = "Invalid value for failmode (must be safe or secure).";
}
{ assertion = cfg.prompts == 1 || cfg.prompts == 2 || cfg.prompts == 3;
message = "Invalid value for prompts (must be 1, 2, or 3).";
}
{ assertion = !cfg.pam.enable;
[ { assertion = !cfg.pam.enable;
message = "PAM support is currently not implemented.";
}
];
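
With the enum types, invalid values are now rejected at evaluation time rather
than by assertions; a configuration sketch (the security.duosec option prefix
is assumed from the module's location, values are illustrative):

    security.duosec = {
      ssh.enable = true;
      failmode = "secure";
      prompts = 2;
    };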


@ -6,14 +6,6 @@ let
cfg = config.security.grsecurity;
grsecLockPath = "/proc/sys/kernel/grsecurity/grsec_lock";
# Ascertain whether ZFS is required for booting the system; grsecurity is
# currently incompatible with ZFS, rendering the system unbootable.
zfsNeededForBoot = filter
(fs: (fs.neededForBoot
|| elem fs.mountPoint [ "/" "/nix" "/nix/store" "/var" "/var/log" "/var/lib" "/etc" ])
&& fs.fsType == "zfs")
config.system.build.fileSystems != [];
# Ascertain whether NixOS container support is required
containerSupportRequired =
config.boot.enableContainers && config.containers != {};
@ -27,7 +19,14 @@ in
options.security.grsecurity = {
enable = mkEnableOption "grsecurity/PaX";
enable = mkOption {
type = types.bool;
example = true;
default = false;
description = ''
Enable grsecurity/PaX.
'';
};
lockTunables = mkOption {
type = types.bool;
@ -58,19 +57,10 @@ in
config = mkIf cfg.enable {
# Allow the user to select a different package set, subject to the stated
# required kernel config
boot.kernelPackages = mkDefault pkgs.linuxPackages_grsec_nixos;
boot.kernelPackages = mkForce pkgs.linuxPackages_grsec_nixos;
boot.kernelParams = optional cfg.disableEfiRuntimeServices "noefi";
system.requiredKernelConfig = with config.lib.kernelConfig;
[ (isEnabled "GRKERNSEC")
(isEnabled "PAX")
(isYes "GRKERNSEC_SYSCTL")
(isYes "GRKERNSEC_SYSCTL_DISTRO")
(isNo "GRKERNSEC_NO_RBAC")
];
boot.kernelParams = [ "grsec_sysfs_restrict=0" ]
++ optional cfg.disableEfiRuntimeServices "noefi";
nixpkgs.config.grsecurity = true;
@ -120,26 +110,63 @@ in
boot.kernel.sysctl = {
# Read-only under grsecurity
"kernel.kptr_restrict" = mkForce null;
# All grsec tunables default to off, those not enabled below are
# *disabled*. We use mkDefault to allow expert users to override
# our choices, but use mkForce where tunables would outright
# conflict with other settings.
# Enable all chroot restrictions by default (overwritten as
# necessary below)
"kernel.grsecurity.chroot_caps" = mkDefault 1;
"kernel.grsecurity.chroot_deny_bad_rename" = mkDefault 1;
"kernel.grsecurity.chroot_deny_chmod" = mkDefault 1;
"kernel.grsecurity.chroot_deny_chroot" = mkDefault 1;
"kernel.grsecurity.chroot_deny_fchdir" = mkDefault 1;
"kernel.grsecurity.chroot_deny_mknod" = mkDefault 1;
"kernel.grsecurity.chroot_deny_mount" = mkDefault 1;
"kernel.grsecurity.chroot_deny_pivot" = mkDefault 1;
"kernel.grsecurity.chroot_deny_shmat" = mkDefault 1;
"kernel.grsecurity.chroot_deny_sysctl" = mkDefault 1;
"kernel.grsecurity.chroot_deny_unix" = mkDefault 1;
"kernel.grsecurity.chroot_enforce_chdir" = mkDefault 1;
"kernel.grsecurity.chroot_findtask" = mkDefault 1;
"kernel.grsecurity.chroot_restrict_nice" = mkDefault 1;
# Enable various grsec protections
"kernel.grsecurity.consistent_setxid" = mkDefault 1;
"kernel.grsecurity.deter_bruteforce" = mkDefault 1;
"kernel.grsecurity.fifo_restrictions" = mkDefault 1;
"kernel.grsecurity.harden_ipc" = mkDefault 1;
"kernel.grsecurity.harden_ptrace" = mkDefault 1;
"kernel.grsecurity.harden_tty" = mkDefault 1;
"kernel.grsecurity.ip_blackhole" = mkDefault 1;
"kernel.grsecurity.linking_restrictions" = mkDefault 1;
"kernel.grsecurity.ptrace_readexec" = mkDefault 1;
# Enable auditing
"kernel.grsecurity.audit_ptrace" = mkDefault 1;
"kernel.grsecurity.forkfail_logging" = mkDefault 1;
"kernel.grsecurity.rwxmap_logging" = mkDefault 1;
"kernel.grsecurity.signal_logging" = mkDefault 1;
"kernel.grsecurity.timechange_logging" = mkDefault 1;
} // optionalAttrs config.nix.useSandbox {
# chroot(2) restrictions that conflict with sandboxed Nix builds
"kernel.grsecurity.chroot_caps" = mkForce 0;
"kernel.grsecurity.chroot_deny_chmod" = mkForce 0;
"kernel.grsecurity.chroot_deny_chroot" = mkForce 0;
"kernel.grsecurity.chroot_deny_mount" = mkForce 0;
"kernel.grsecurity.chroot_deny_pivot" = mkForce 0;
"kernel.grsecurity.chroot_deny_chmod" = mkForce 0;
} // optionalAttrs containerSupportRequired {
# chroot(2) restrictions that conflict with NixOS lightweight containers
"kernel.grsecurity.chroot_caps" = mkForce 0;
"kernel.grsecurity.chroot_deny_chmod" = mkForce 0;
"kernel.grsecurity.chroot_deny_mount" = mkForce 0;
"kernel.grsecurity.chroot_restrict_nice" = mkForce 0;
"kernel.grsecurity.chroot_caps" = mkForce 0;
# Disable privileged IO by default, unless X is enabled
} // optionalAttrs (!config.services.xserver.enable) {
"kernel.grsecurity.disable_priv_io" = mkDefault 1;
};
assertions = [
{ assertion = !zfsNeededForBoot;
message = "grsecurity is currently incompatible with ZFS";
}
];
};
}


@ -51,6 +51,13 @@
# nixos-rebuild boot
# reboot
</programlisting>
<note><para>
Enabling the grsecurity module overrides
<option>boot.kernelPackages</option>, to reduce the risk of
misconfiguration. <xref linkend="sec-grsec-custom-kernel" />
describes how to use a custom kernel package set.
</para></note>
For most users, further configuration should be unnecessary. All users
are encouraged to look over <xref linkend="sec-grsec-security" /> before
using the system, however. If you experience problems, please refer to
@ -144,15 +151,8 @@
a TCP simultaneous OPEN on that port before the connection is actually
established.</para></listitem>
<listitem><para><filename class="directory">/sys</filename> hardening:
breaks systemd.</para></listitem>
<listitem><para>Trusted path execution: a desirable feature, but
requires some more work to operate smoothly on NixOS.</para></listitem>
<listitem><para>Module hardening: would break user initiated module
loading. Might enable this at some point, depending on the potential
breakage.</para></listitem>
</itemizedlist>
</para></listitem>
@ -205,31 +205,42 @@
</para>
<para>
To use a custom kernel with upstream's recommended settings for server
deployments:
To build a custom kernel using upstream's recommended settings for server
deployments, while still using the NixOS module:
<programlisting>
boot.kernelPackages =
let
kernel = pkgs.linux_grsec_nixos.override {
extraConfig = ''
GRKERNSEC_CONFIG_AUTO y
GRKERNSEC_CONFIG_SERVER y
GRKERNSEC_CONFIG_SECURITY y
'';
nixpkgs.config.packageOverrides = super: {
linux_grsec_nixos = super.linux_grsec_nixos.override {
extraConfig = ''
GRKERNSEC_CONFIG_AUTO y
GRKERNSEC_CONFIG_SERVER y
GRKERNSEC_CONFIG_SECURITY y
'';
};
self = pkgs.linuxPackagesFor kernel self;
in self;
}
</programlisting>
</para>
<para>
The wikibook provides an exhaustive listing of
<link xlink:href="https://en.wikibooks.org/wiki/Grsecurity/Appendix/Grsecurity_and_PaX_Configuration_Options">kernel configuration options</link>.
</para>
<para>
The NixOS module makes several assumptions about the kernel and so may be
incompatible with your customised kernel. Most of these assumptions are
encoded as assertions &#x2014; mismatches should ideally result in a build
failure. Currently, the only way to work around incompatibilities is to
eschew the NixOS module and do all configuration yourself.
The NixOS module makes several assumptions about the kernel and so
may be incompatible with your customised kernel. Currently, the only way
to work around incompatibilities is to eschew the NixOS module.
If not using the NixOS module, a custom grsecurity package set can
be specified inline instead, as in
<programlisting>
boot.kernelPackages =
let
kernel = pkgs.linux_grsec_nixos.override {
extraConfig = /* as above */;
};
self = pkgs.linuxPackagesFor kernel self;
in self;
</programlisting>
</para>
</sect1>
@ -277,6 +288,10 @@
<option>security.grsecurity.disableEfiRuntimeServices</option> to override
this behavior.</para></listitem>
<listitem><para>User-initiated autoloading of modules (e.g., when
using fuse or loop devices) is disallowed; either load requisite modules
as root or add them to <option>boot.kernelModules</option>.</para></listitem>
<listitem><para>Virtualization: KVM is the preferred virtualization
solution. Xen, Virtualbox, and VMWare are
<emphasis>unsupported</emphasis> and most likely require a custom kernel.
@ -310,6 +325,19 @@
</programlisting>
</para></listitem>
<listitem><para>
The gitlab service (<xref linkend="module-services-gitlab" />)
requires a variant of the <literal>ruby</literal> interpreter
built without <literal>mprotect()</literal> hardening, as in
<programlisting>
services.gitlab.packages.gitlab = pkgs.gitlab.override {
ruby = pkgs.ruby.overrideAttrs (attrs: {
postFixup = "paxmark m $out/bin/ruby";
});
};
</programlisting>
</para></listitem>
</itemizedlist>
</sect1>
@ -332,13 +360,19 @@
<listitem><para>
<literal>pax_sanitize_slab={off|fast|full}</literal>: control kernel
slab object sanitization
slab object sanitization. Defaults to <literal>fast</literal>
</para></listitem>
<listitem><para>
<literal>pax_size_overflow_report_only</literal>: log size overflow
violations but leave the violating task running
</para></listitem>
<listitem><para>
<literal>grsec_sysfs_restrict=[0|1]</literal>: toggle sysfs
restrictions. The NixOS module sets this to <literal>0</literal>
for systemd compatibility
</para></listitem>
</itemizedlist>
</para>


@ -19,7 +19,9 @@ with lib;
config = mkIf config.security.hideProcessInformation {
users.groups.proc.gid = config.ids.gids.proc;
users.groups.proc.members = [ "polkituser" ];
boot.specialFileSystems."/proc".options = [ "hidepid=2" "gid=${toString config.ids.gids.proc}" ];
systemd.services.systemd-logind.serviceConfig.SupplementaryGroups = [ "proc" ];
};
}


@ -49,7 +49,7 @@ with lib;
ensureDir ${crashplan.vardir}/backupArchives 700
ensureDir ${crashplan.vardir}/log 777
cp -avn ${crashplan}/conf.template/* ${crashplan.vardir}/conf
for x in app.asar bin EULA.txt install.vars lang lib libjniwrap64.so libjniwrap.so libjtux64.so libjtux.so libmd564.so libmd5.so share skin upgrade; do
for x in app.asar bin install.vars lang lib libc42archive64.so libc52archive.so libjniwrap64.so libjniwrap.so libjtux64.so libjtux.so libleveldb64.so libleveldb.so libmd564.so libmd5.so share skin upgrade; do
rm -f ${crashplan.vardir}/$x;
ln -sf ${crashplan}/$x ${crashplan.vardir}/$x;
done


@ -1,25 +1,25 @@
{ config, lib, pkgs, ... }:
{ config, lib, pkgs, utils, ... }:
with lib;
let
cfg = config.services.tarsnap;
gcfg = config.services.tarsnap;
configFile = name: cfg: ''
cachedir ${config.services.tarsnap.cachedir}/${name}
keyfile ${cfg.keyfile}
keyfile ${cfg.keyfile}
${optionalString (cfg.cachedir != null) "cachedir ${cfg.cachedir}"}
${optionalString cfg.nodump "nodump"}
${optionalString cfg.printStats "print-stats"}
${optionalString cfg.printStats "humanize-numbers"}
${optionalString (cfg.checkpointBytes != null) ("checkpoint-bytes "+cfg.checkpointBytes)}
${optionalString cfg.aggressiveNetworking "aggressive-networking"}
${concatStringsSep "\n" (map (v: "exclude "+v) cfg.excludes)}
${concatStringsSep "\n" (map (v: "include "+v) cfg.includes)}
${concatStringsSep "\n" (map (v: "exclude ${v}") cfg.excludes)}
${concatStringsSep "\n" (map (v: "include ${v}") cfg.includes)}
${optionalString cfg.lowmem "lowmem"}
${optionalString cfg.verylowmem "verylowmem"}
${optionalString (cfg.maxbw != null) ("maxbw "+toString cfg.maxbw)}
${optionalString (cfg.maxbwRateUp != null) ("maxbw-rate-up "+toString cfg.maxbwRateUp)}
${optionalString (cfg.maxbwRateDown != null) ("maxbw-rate-down "+toString cfg.maxbwRateDown)}
${optionalString (cfg.maxbw != null) "maxbw ${toString cfg.maxbw}"}
${optionalString (cfg.maxbwRateUp != null) "maxbw-rate-up ${toString cfg.maxbwRateUp}"}
${optionalString (cfg.maxbwRateDown != null) "maxbw-rate-down ${toString cfg.maxbwRateDown}"}
'';
in
{
@ -60,34 +60,13 @@ in
'';
};
cachedir = mkOption {
type = types.nullOr types.path;
default = "/var/cache/tarsnap";
description = ''
The cache allows tarsnap to identify previously stored data
blocks, reducing archival time and bandwidth usage.
Should the cache become desynchronized or corrupted, tarsnap
will refuse to run until you manually rebuild the cache with
<command>tarsnap --fsck</command>.
Note that each individual archive (specified below) has its own cache
directory specified under <literal>cachedir</literal>; this is because
tarsnap locks the cache during backups, meaning multiple services
archives cannot be backed up concurrently or overlap with a shared
cache.
Set to <literal>null</literal> to disable caching.
'';
};
archives = mkOption {
type = types.attrsOf (types.submodule (
type = types.attrsOf (types.submodule ({ config, ... }:
{
options = {
keyfile = mkOption {
type = types.str;
default = config.services.tarsnap.keyfile;
default = gcfg.keyfile;
description = ''
Set a specific keyfile for this archive. This defaults to
<literal>"/root/tarsnap.key"</literal> if left unspecified.
@ -107,6 +86,21 @@ in
'';
};
cachedir = mkOption {
type = types.nullOr types.path;
default = "/var/cache/tarsnap/${utils.escapeSystemdPath config.keyfile}";
description = ''
The cache allows tarsnap to identify previously stored data
blocks, reducing archival time and bandwidth usage.
Should the cache become desynchronized or corrupted, tarsnap
will refuse to run until you manually rebuild the cache with
<command>tarsnap --fsck</command>.
Set to <literal>null</literal> to disable caching.
'';
};
nodump = mkOption {
type = types.bool;
default = true;
@ -249,7 +243,7 @@ in
};
gamedata =
{ directories = [ "/var/lib/minecraft "];
{ directories = [ "/var/lib/minecraft" ];
period = "*:30";
};
}
@ -262,8 +256,8 @@ in
archive names are suffixed by a 1 second resolution timestamp.
For each member of the set, a timer is created which triggers the
instanced <literal>tarsnap@</literal> service unit. You may use
<command>systemctl start tarsnap@archive-name</command> to
instanced <literal>tarsnap-archive-name</literal> service unit. You may use
<command>systemctl start tarsnap-archive-name</command> to
manually trigger creation of <literal>archive-name</literal> at
any time.
'';
@ -271,63 +265,73 @@ in
};
};
config = mkIf cfg.enable {
config = mkIf gcfg.enable {
assertions =
(mapAttrsToList (name: cfg:
{ assertion = cfg.directories != [];
message = "Must specify paths for tarsnap to back up";
}) cfg.archives) ++
}) gcfg.archives) ++
(mapAttrsToList (name: cfg:
{ assertion = !(cfg.lowmem && cfg.verylowmem);
message = "You cannot set both lowmem and verylowmem";
}) cfg.archives);
}) gcfg.archives);
systemd.services."tarsnap@" = {
description = "Tarsnap archive '%i'";
requires = [ "network-online.target" ];
after = [ "network-online.target" ];
systemd.services =
mapAttrs' (name: cfg: nameValuePair "tarsnap-${name}" {
description = "Tarsnap archive '${name}'";
requires = [ "network-online.target" ];
after = [ "network-online.target" ];
path = [ pkgs.iputils pkgs.tarsnap pkgs.coreutils ];
path = [ pkgs.iputils pkgs.tarsnap pkgs.utillinux ];
# In order for the persistent tarsnap timer to work reliably, we have to
# make sure that the tarsnap server is reachable after systemd starts up
# the service - therefore we sleep in a loop until we can ping the
# endpoint.
preStart = "while ! ping -q -c 1 v1-0-0-server.tarsnap.com &> /dev/null; do sleep 3; done";
scriptArgs = "%i";
script = ''
mkdir -p -m 0755 ${dirOf cfg.cachedir}
mkdir -p -m 0700 ${cfg.cachedir}
chown root:root ${cfg.cachedir}
chmod 0700 ${cfg.cachedir}
mkdir -p -m 0700 ${cfg.cachedir}/$1
DIRS=`cat /etc/tarsnap/$1.dirs`
exec tarsnap --configfile /etc/tarsnap/$1.conf -c -f $1-$(date +"%Y%m%d%H%M%S") $DIRS
'';
# In order for the persistent tarsnap timer to work reliably, we have to
# make sure that the tarsnap server is reachable after systemd starts up
# the service - therefore we sleep in a loop until we can ping the
# endpoint.
preStart = ''
while ! ping -q -c 1 v1-0-0-server.tarsnap.com &> /dev/null; do sleep 3; done
'';
serviceConfig = {
IOSchedulingClass = "idle";
NoNewPrivileges = "true";
CapabilityBoundingSet = "CAP_DAC_READ_SEARCH";
PermissionsStartOnly = "true";
};
};
script =
let run = ''tarsnap --configfile "/etc/tarsnap/${name}.conf" -c -f "${name}-$(date +"%Y%m%d%H%M%S")" ${concatStringsSep " " cfg.directories}'';
in if (cfg.cachedir != null) then ''
mkdir -p ${cfg.cachedir}
chmod 0700 ${cfg.cachedir}
# Initialize the cache exactly once: fd 9 serializes access to the cache,
# fd 10 guards the firstrun marker while `tarsnap --fsck` rebuilds it.
( flock 9
if [ ! -e ${cfg.cachedir}/firstrun ]; then
( flock 10
flock -u 9
tarsnap --configfile "/etc/tarsnap/${name}.conf" --fsck
flock 9
) 10>${cfg.cachedir}/firstrun
fi
) 9>${cfg.cachedir}/lockf
# Hold the firstrun lock while archiving so runs sharing this cache cannot overlap.
exec flock ${cfg.cachedir}/firstrun ${run}
'' else "exec ${run}";
serviceConfig = {
Type = "oneshot";
IOSchedulingClass = "idle";
NoNewPrivileges = "true";
CapabilityBoundingSet = [ "CAP_DAC_READ_SEARCH" ];
PermissionsStartOnly = "true";
};
}) gcfg.archives;
# Note: the timer must be Persistent=true, so that systemd will start it even
# if e.g. your laptop was asleep while the latest interval occurred.
systemd.timers = mapAttrs' (name: cfg: nameValuePair "tarsnap@${name}"
systemd.timers = mapAttrs' (name: cfg: nameValuePair "tarsnap-${name}"
{ timerConfig.OnCalendar = cfg.period;
timerConfig.Persistent = "true";
wantedBy = [ "timers.target" ];
}) cfg.archives;
}) gcfg.archives;
environment.etc =
(mapAttrs' (name: cfg: nameValuePair "tarsnap/${name}.conf"
mapAttrs' (name: cfg: nameValuePair "tarsnap/${name}.conf"
{ text = configFile name cfg;
}) cfg.archives) //
(mapAttrs' (name: cfg: nameValuePair "tarsnap/${name}.dirs"
{ text = concatStringsSep " " cfg.directories;
}) cfg.archives);
}) gcfg.archives;
environment.systemPackages = [ pkgs.tarsnap ];
};
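
A minimal usage sketch of the reworked per-archive interface (the key path and archive name are hypothetical; enable and keyfile are the module's top-level options):

services.tarsnap = {
  enable = true;
  keyfile = "/root/tarsnap.key";   # generated beforehand with tarsnap-keygen
  archives.etc-backup = {
    directories = [ "/etc/nixos" ];
    period = "daily";              # systemd OnCalendar expression
    printStats = true;
  };
};

This yields a tarsnap-etc-backup service plus matching timer; `systemctl start tarsnap-etc-backup` triggers a run by hand, as described above.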

View file

@ -28,7 +28,7 @@ in {
etcdServers = mkOption {
type = types.listOf types.str;
default = [ "http://127.0.0.1:4001" ];
default = [ "http://127.0.0.1:2379" ];
description = ''
Fleet list of etcd endpoints to use.
'';

View file

@ -5,28 +5,62 @@ with lib;
let
cfg = config.services.kubernetes;
skipAttrs = attrs: map (filterAttrs (k: v: k != "enable"))
(filter (v: !(hasAttr "enable" v) || v.enable) attrs);
infraContainer = pkgs.dockerTools.buildImage {
name = "pause";
tag = "latest";
contents = cfg.package.pause;
config.Cmd = "/bin/pause";
};
kubeconfig = pkgs.writeText "kubeconfig" (builtins.toJSON {
apiVersion = "v1";
kind = "Config";
clusters = [{
name = "local";
cluster.certificate-authority = cfg.kubeconfig.caFile;
cluster.server = cfg.kubeconfig.server;
}];
users = [{
name = "kubelet";
user = {
client-certificate = cfg.kubeconfig.certFile;
client-key = cfg.kubeconfig.keyFile;
};
}];
contexts = [{
context = {
cluster = "local";
user = "kubelet";
};
current-context = "kubelet-context";
}];
});
policyFile = pkgs.writeText "kube-policy"
(concatStringsSep "\n" (map builtins.toJSON cfg.apiserver.authorizationPolicy));
cniConfig = pkgs.buildEnv {
name = "kubernetes-cni-config";
paths = imap (i: entry:
pkgs.writeTextDir "${10+i}-${entry.type}.conf" (builtins.toJSON entry)
) cfg.kubelet.cni.config;
};
manifests = pkgs.buildEnv {
name = "kubernetes-manifests";
paths = mapAttrsToList (name: manifest:
pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
) cfg.kubelet.manifests;
};
in {
###### interface
options.services.kubernetes = {
package = mkOption {
description = "Kubernetes package to use.";
type = types.package;
};
verbose = mkOption {
description = "Kubernetes enable verbose mode for debugging";
default = false;
type = types.bool;
};
etcdServers = mkOption {
description = "Kubernetes list of etcd servers to watch.";
default = [ "127.0.0.1:4001" ];
type = types.listOf types.str;
};
roles = mkOption {
description = ''
Kubernetes role that this machine should take.
@ -38,18 +72,76 @@ in {
type = types.listOf (types.enum ["master" "node"]);
};
package = mkOption {
description = "Kubernetes package to use.";
type = types.package;
default = pkgs.kubernetes;
};
verbose = mkOption {
description = "Kubernetes enable verbose mode for debugging";
default = false;
type = types.bool;
};
etcd = {
servers = mkOption {
description = "List of etcd servers. By default etcd is started, except if this option is changed.";
default = ["http://127.0.0.1:2379"];
type = types.listOf types.str;
};
keyFile = mkOption {
description = "Etcd key file";
default = null;
type = types.nullOr types.path;
};
certFile = mkOption {
description = "Etcd cert file";
default = null;
type = types.nullOr types.path;
};
caFile = mkOption {
description = "Etcd ca file";
default = null;
type = types.nullOr types.path;
};
};
kubeconfig = {
server = mkOption {
description = "Kubernetes apiserver server address";
default = "http://${cfg.apiserver.address}:${toString cfg.apiserver.port}";
type = types.str;
};
caFile = mkOption {
description = "Certificate authrority file to use to connect to kuberentes apiserver";
type = types.nullOr types.path;
default = null;
};
certFile = mkOption {
description = "Client certificate file to use to connect to kubernetes";
type = types.nullOr types.path;
default = null;
};
keyFile = mkOption {
description = "Client key file to use to connect to kubernetes";
type = types.nullOr types.path;
default = null;
};
};
dataDir = mkOption {
description = "Kubernetes root directory for managing kubelet files.";
default = "/var/lib/kubernetes";
type = types.path;
};
dockerCfg = mkOption {
description = "Kubernetes contents of dockercfg file.";
default = "";
type = types.lines;
};
apiserver = {
enable = mkOption {
description = "Whether to enable kubernetes apiserver.";
@ -72,6 +164,16 @@ in {
type = types.str;
};
advertiseAddress = mkOption {
description = ''
Kubernetes apiserver IP address on which to advertise the apiserver
to members of the cluster. This address must be reachable by the rest
of the cluster.
'';
default = null;
type = types.nullOr types.str;
};
port = mkOption {
description = "Kubernetes apiserver listening port.";
default = 8080;
@ -80,41 +182,36 @@ in {
securePort = mkOption {
description = "Kubernetes apiserver secure port.";
default = 6443;
default = 443;
type = types.int;
};
tlsCertFile = mkOption {
description = "Kubernetes apiserver certificate file.";
default = "";
type = types.str;
default = null;
type = types.nullOr types.path;
};
tlsPrivateKeyFile = mkOption {
tlsKeyFile = mkOption {
description = "Kubernetes apiserver private key file.";
default = "";
type = types.str;
default = null;
type = types.nullOr types.path;
};
clientCaFile = mkOption {
description = "Kubernetes apiserver CA file for client auth.";
default = "";
type = types.str;
default = null;
type = types.nullOr types.path;
};
tokenAuth = mkOption {
description = ''
Kubernetes apiserver token authentication file. See
<link xlink:href="http://kubernetes.io/v1.0/docs/admin/authentication.html"/>
<link xlink:href="http://kubernetes.io/docs/admin/authentication.html"/>
'';
default = {};
example = literalExample ''
{
alice = "abc123";
bob = "xyz987";
}
'';
type = types.attrsOf types.str;
default = null;
example = ''token,user,uid,"group1,group2,group3"'';
type = types.nullOr types.lines;
};
authorizationMode = mkOption {
@ -148,13 +245,13 @@ in {
allowPrivileged = mkOption {
description = "Whether to allow privileged containers on kubernetes.";
default = false;
default = true;
type = types.bool;
};
portalNet = mkOption {
description = "Kubernetes CIDR notation IP range from which to assign portal IPs";
default = "10.10.10.10/16";
default = "10.10.10.10/24";
type = types.str;
};
@ -171,9 +268,9 @@ in {
admissionControl = mkOption {
description = ''
Kubernetes admission control plugins to use. See
<link xlink:href="http://kubernetes.io/v1.0/docs/admin/admission-controllers.html"/>
<link xlink:href="http://kubernetes.io/docs/admin/admission-controllers/"/>
'';
default = ["AlwaysAdmit"];
default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota"];
example = [
"NamespaceLifecycle" "NamespaceExists" "LimitRanger"
"SecurityContextDeny" "ServiceAccount" "ResourceQuota"
@ -181,15 +278,40 @@ in {
type = types.listOf types.str;
};
serviceAccountKey = mkOption {
serviceAccountKeyFile = mkOption {
description = ''
Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
used to verify ServiceAccount tokens.
used to verify ServiceAccount tokens. By default the TLS private key file
is used.
'';
default = null;
type = types.nullOr types.path;
};
kubeletClientCaFile = mkOption {
description = "Path to a cert file for connecting to kubelet";
default = null;
type = types.nullOr types.path;
};
kubeletClientCertFile = mkOption {
description = "Client certificate to use for connections to kubelet";
default = null;
type = types.nullOr types.path;
};
kubeletClientKeyFile = mkOption {
description = "Key to use for connections to kubelet";
default = null;
type = types.nullOr types.path;
};
kubeletHttps = mkOption {
description = "Whether to use https for connections to kubelet";
default = true;
type = types.bool;
};
extraOpts = mkOption {
description = "Kubernetes apiserver extra command line options.";
default = "";
@ -216,10 +338,10 @@ in {
type = types.int;
};
master = mkOption {
description = "Kubernetes apiserver address";
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
type = types.str;
leaderElect = mkOption {
description = "Whether to start leader election before executing main loop";
type = types.bool;
default = false;
};
extraOpts = mkOption {
@ -248,13 +370,13 @@ in {
type = types.int;
};
master = mkOption {
description = "Kubernetes apiserver address";
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
type = types.str;
leaderElect = mkOption {
description = "Whether to start leader election before executing main loop";
type = types.bool;
default = false;
};
serviceAccountPrivateKey = mkOption {
serviceAccountKeyFile = mkOption {
description = ''
Kubernetes controller manager PEM-encoded private RSA key file used to
sign service account tokens
@ -272,6 +394,12 @@ in {
type = types.nullOr types.path;
};
clusterCidr = mkOption {
description = "Kubernetes controller manager CIDR Range for Pods in cluster";
default = "10.10.0.0/16";
type = types.str;
};
extraOpts = mkOption {
description = "Kubernetes controller manager extra command line options.";
default = "";
@ -292,6 +420,12 @@ in {
type = types.bool;
};
registerSchedulable = mkOption {
description = "Register the node as schedulable. No-op if register-node is false.";
default = true;
type = types.bool;
};
address = mkOption {
description = "Kubernetes kubelet info server listening address.";
default = "0.0.0.0";
@ -304,6 +438,18 @@ in {
type = types.int;
};
tlsCertFile = mkOption {
description = "File containing x509 Certificate for HTTPS.";
default = null;
type = types.nullOr types.path;
};
tlsKeyFile = mkOption {
description = "File containing x509 private key matching tlsCertFile.";
default = null;
type = types.nullOr types.path;
};
healthz = {
bind = mkOption {
description = "Kubernetes kubelet healthz listening address.";
@ -326,19 +472,10 @@ in {
allowPrivileged = mkOption {
description = "Whether to allow kubernetes containers to request privileged mode.";
default = false;
default = true;
type = types.bool;
};
apiServers = mkOption {
description = ''
Kubernetes kubelet list of Kubernetes API servers for publishing events,
and reading pods and services.
'';
default = ["${cfg.apiserver.address}:${toString cfg.apiserver.port}"];
type = types.listOf types.str;
};
cadvisorPort = mkOption {
description = "Kubernetes kubelet local cadvisor port.";
default = 4194;
@ -347,16 +484,62 @@ in {
clusterDns = mkOption {
description = "Use alternative dns.";
default = "";
default = "10.10.0.1";
type = types.str;
};
clusterDomain = mkOption {
description = "Use alternative domain.";
default = "kubernetes.io";
default = "cluster.local";
type = types.str;
};
networkPlugin = mkOption {
description = "Network plugin to use by kubernetes";
type = types.nullOr (types.enum ["cni" "kubenet"]);
default = "kubenet";
};
cni = {
packages = mkOption {
description = "List of network plugin packages to install";
type = types.listOf types.package;
default = [];
};
config = mkOption {
description = "Kubernetes CNI configuration";
type = types.listOf types.attrs;
default = [];
example = literalExample ''
[{
"cniVersion": "0.2.0",
"name": "mynet",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "10.22.0.0/16",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
} {
"cniVersion": "0.2.0",
"type": "loopback"
}]
'';
};
};
manifests = mkOption {
description = "List of manifests to bootstrap with kubelet";
type = types.attrsOf types.attrs;
default = {};
};
extraOpts = mkOption {
description = "Kubernetes kubelet extra command line options.";
default = "";
@ -377,12 +560,6 @@ in {
type = types.str;
};
master = mkOption {
description = "Kubernetes apiserver address";
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
type = types.str;
};
extraOpts = mkOption {
description = "Kubernetes proxy extra command line options.";
default = "";
@ -390,23 +567,23 @@ in {
};
};
kube2sky = {
enable = mkEnableOption "Whether to enable kube2sky dns service.";
dns = {
enable = mkEnableOption "kubernetes dns service.";
port = mkOption {
description = "Kubernetes dns listening port";
default = 53;
type = types.int;
};
domain = mkOption {
description = "Kuberntes kube2sky domain under which all DNS names will be hosted.";
description = "Kuberntes dns domain under which to create names.";
default = cfg.kubelet.clusterDomain;
type = types.str;
};
master = mkOption {
description = "Kubernetes apiserver address";
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
type = types.str;
};
extraOpts = mkOption {
description = "Kubernetes kube2sky extra command line options.";
description = "Kubernetes dns extra command line options.";
default = "";
type = types.str;
};
@ -416,50 +593,118 @@ in {
###### implementation
config = mkMerge [
(mkIf cfg.kubelet.enable {
systemd.services.kubelet = {
description = "Kubernetes Kubelet Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "docker.service" "kube-apiserver.service" ];
path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables ];
preStart = ''
docker load < ${infraContainer}
rm /opt/cni/bin/* || true
${concatMapStringsSep "\n" (p: "ln -fs ${p.plugins}/* /opt/cni/bin") cfg.kubelet.cni.packages}
'';
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kubelet \
--pod-manifest-path=${manifests} \
--kubeconfig=${kubeconfig} \
--require-kubeconfig \
--address=${cfg.kubelet.address} \
--port=${toString cfg.kubelet.port} \
--register-node=${if cfg.kubelet.registerNode then "true" else "false"} \
--register-schedulable=${if cfg.kubelet.registerSchedulable then "true" else "false"} \
${optionalString (cfg.kubelet.tlsCertFile != null)
"--tls-cert-file=${cfg.kubelet.tlsCertFile}"} \
${optionalString (cfg.kubelet.tlsKeyFile != null)
"--tls-private-key-file=${cfg.kubelet.tlsKeyFile}"} \
--healthz-bind-address=${cfg.kubelet.healthz.bind} \
--healthz-port=${toString cfg.kubelet.healthz.port} \
--hostname-override=${cfg.kubelet.hostname} \
--allow-privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
--root-dir=${cfg.dataDir} \
--cadvisor_port=${toString cfg.kubelet.cadvisorPort} \
${optionalString (cfg.kubelet.clusterDns != "")
"--cluster-dns=${cfg.kubelet.clusterDns}"} \
${optionalString (cfg.kubelet.clusterDomain != "")
"--cluster-domain=${cfg.kubelet.clusterDomain}"} \
--pod-infra-container-image=pause \
${optionalString (cfg.kubelet.networkPlugin != null)
"--network-plugin=${cfg.kubelet.networkPlugin}"} \
--cni-conf-dir=${cniConfig} \
--reconcile-cidr \
--hairpin-mode=hairpin-veth \
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${cfg.kubelet.extraOpts}
'';
WorkingDirectory = cfg.dataDir;
};
};
environment.etc = mapAttrs' (name: manifest:
nameValuePair "kubernetes/manifests/${name}.json" {
text = builtins.toJSON manifest;
mode = "0755";
}
) cfg.kubelet.manifests;
# Always include the CNI plugins
services.kubernetes.kubelet.cni.packages = [pkgs.cni];
})
(mkIf cfg.apiserver.enable {
systemd.services.kube-apiserver = {
description = "Kubernetes Api Server";
description = "Kubernetes Kubelet Service";
wantedBy = [ "multi-user.target" ];
requires = ["kubernetes-setup.service"];
after = [ "network.target" "etcd.service" "kubernetes-setup.service" ];
after = [ "network.target" "docker.service" ];
serviceConfig = {
ExecStart = let
authorizationPolicyFile =
pkgs.writeText "kubernetes-policy"
(builtins.toJSON cfg.apiserver.authorizationPolicy);
tokenAuthFile =
pkgs.writeText "kubernetes-auth"
(concatImapStringsSep "\n" (i: v: v + "," + (toString i))
(mapAttrsToList (name: token: token + "," + name) cfg.apiserver.tokenAuth));
in ''${cfg.package}/bin/kube-apiserver \
--etcd-servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
--insecure-bind-address=${cfg.apiserver.address} \
ExecStart = ''${cfg.package}/bin/kube-apiserver \
--etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
${optionalString (cfg.etcd.caFile != null)
"--etcd-cafile=${cfg.etcd.caFile}"} \
${optionalString (cfg.etcd.certFile != null)
"--etcd-certfile=${cfg.etcd.certFile}"} \
${optionalString (cfg.etcd.keyFile != null)
"--etcd-keyfile=${cfg.etcd.keyFile}"} \
--insecure-port=${toString cfg.apiserver.port} \
--bind-address=${cfg.apiserver.publicAddress} \
--bind-address=0.0.0.0 \
${optionalString (cfg.apiserver.advertiseAddress != null)
"--advertise-address=${cfg.apiserver.advertiseAddress}"} \
--allow-privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \
${optionalString (cfg.apiserver.tlsCertFile!="")
${optionalString (cfg.apiserver.tlsCertFile != null)
"--tls-cert-file=${cfg.apiserver.tlsCertFile}"} \
${optionalString (cfg.apiserver.tlsPrivateKeyFile!="")
"--tls-private-key-file=${cfg.apiserver.tlsPrivateKeyFile}"} \
${optionalString (cfg.apiserver.tokenAuth!=[])
"--token-auth-file=${tokenAuthFile}"} \
${optionalString (cfg.apiserver.clientCaFile!="")
${optionalString (cfg.apiserver.tlsKeyFile != null)
"--tls-private-key-file=${cfg.apiserver.tlsKeyFile}"} \
${optionalString (cfg.apiserver.tokenAuth != null)
"--token-auth-file=${cfg.apiserver.tokenAuth}"} \
--kubelet-https=${if cfg.apiserver.kubeletHttps then "true" else "false"} \
${optionalString (cfg.apiserver.kubeletClientCaFile != null)
"--kubelet-certificate-authority=${cfg.apiserver.kubeletClientCaFile}"} \
${optionalString (cfg.apiserver.kubeletClientCertFile != null)
"--kubelet-client-certificate=${cfg.apiserver.kubeletClientCertFile}"} \
${optionalString (cfg.apiserver.kubeletClientKeyFile != null)
"--kubelet-client-key=${cfg.apiserver.kubeletClientKeyFile}"} \
${optionalString (cfg.apiserver.clientCaFile != null)
"--client-ca-file=${cfg.apiserver.clientCaFile}"} \
--authorization-mode=${cfg.apiserver.authorizationMode} \
${optionalString (cfg.apiserver.authorizationMode == "ABAC")
"--authorization-policy-file=${authorizationPolicyFile}"} \
"--authorization-policy-file=${policyFile}"} \
--secure-port=${toString cfg.apiserver.securePort} \
--service-cluster-ip-range=${cfg.apiserver.portalNet} \
${optionalString (cfg.apiserver.runtimeConfig!="")
${optionalString (cfg.apiserver.runtimeConfig != "")
"--runtime-config=${cfg.apiserver.runtimeConfig}"} \
--admission_control=${concatStringsSep "," cfg.apiserver.admissionControl} \
${optionalString (cfg.apiserver.serviceAccountKey!=null)
"--service-account-key-file=${cfg.apiserver.serviceAccountKey}"} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
${optionalString (cfg.apiserver.serviceAccountKeyFile!=null)
"--service-account-key-file=${cfg.apiserver.serviceAccountKeyFile}"} \
${optionalString cfg.verbose "--v=6"} \
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
${cfg.apiserver.extraOpts}
'';
WorkingDirectory = cfg.dataDir;
User = "kubernetes";
Group = "kubernetes";
AmbientCapabilities = "cap_net_bind_service";
Restart = "on-failure";
RestartSec = 5;
};
};
})
@ -468,17 +713,20 @@ in {
systemd.services.kube-scheduler = {
description = "Kubernetes Scheduler Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "kubernetes-apiserver.service" ];
after = [ "kube-apiserver.service" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-scheduler \
--address=${cfg.scheduler.address} \
--port=${toString cfg.scheduler.port} \
--master=${cfg.scheduler.master} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
--leader-elect=${if cfg.scheduler.leaderElect then "true" else "false"} \
--kubeconfig=${kubeconfig} \
${optionalString cfg.verbose "--v=6"} \
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
${cfg.scheduler.extraOpts}
'';
WorkingDirectory = cfg.dataDir;
User = "kubernetes";
Group = "kubernetes";
};
};
})
@ -487,113 +735,94 @@ in {
systemd.services.kube-controller-manager = {
description = "Kubernetes Controller Manager Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "kubernetes-apiserver.service" ];
after = [ "kube-apiserver.service" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-controller-manager \
--address=${cfg.controllerManager.address} \
--port=${toString cfg.controllerManager.port} \
--master=${cfg.controllerManager.master} \
${optionalString (cfg.controllerManager.serviceAccountPrivateKey!=null)
"--service-account-private-key-file=${cfg.controllerManager.serviceAccountPrivateKey}"} \
--kubeconfig=${kubeconfig} \
--leader-elect=${if cfg.controllerManager.leaderElect then "true" else "false"} \
${if (cfg.controllerManager.serviceAccountKeyFile!=null)
then "--service-account-private-key-file=${cfg.controllerManager.serviceAccountKeyFile}"
else "--service-account-private-key-file=/var/run/kubernetes/apiserver.key"} \
${optionalString (cfg.controllerManager.rootCaFile!=null)
"--root-ca-file=${cfg.controllerManager.rootCaFile}"} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
${optionalString (cfg.controllerManager.clusterCidr!=null)
"--cluster-cidr=${cfg.controllerManager.clusterCidr}"} \
--allocate-node-cidrs=true \
${optionalString cfg.verbose "--v=6"} \
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
${cfg.controllerManager.extraOpts}
'';
WorkingDirectory = cfg.dataDir;
User = "kubernetes";
Group = "kubernetes";
};
};
})
(mkIf cfg.kubelet.enable {
systemd.services.kubelet = {
description = "Kubernetes Kubelet Service";
wantedBy = [ "multi-user.target" ];
requires = ["kubernetes-setup.service"];
after = [ "network.target" "etcd.service" "docker.service" "kubernetes-setup.service" ];
path = [ pkgs.gitMinimal pkgs.openssh ];
script = ''
export PATH="/bin:/sbin:/usr/bin:/usr/sbin:$PATH"
exec ${cfg.package}/bin/kubelet \
--api-servers=${concatMapStringsSep "," (f: "http://${f}") cfg.kubelet.apiServers} \
--register-node=${if cfg.kubelet.registerNode then "true" else "false"} \
--address=${cfg.kubelet.address} \
--port=${toString cfg.kubelet.port} \
--healthz-bind-address=${cfg.kubelet.healthz.bind} \
--healthz-port=${toString cfg.kubelet.healthz.port} \
--hostname-override=${cfg.kubelet.hostname} \
--allow-privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
--root-dir=${cfg.dataDir} \
--cadvisor_port=${toString cfg.kubelet.cadvisorPort} \
${optionalString (cfg.kubelet.clusterDns != "")
''--cluster-dns=${cfg.kubelet.clusterDns}''} \
${optionalString (cfg.kubelet.clusterDomain != "")
''--cluster-domain=${cfg.kubelet.clusterDomain}''} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${cfg.kubelet.extraOpts}
'';
serviceConfig.WorkingDirectory = cfg.dataDir;
};
})
(mkIf cfg.proxy.enable {
systemd.services.kube-proxy = {
description = "Kubernetes Proxy Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "etcd.service" ];
after = [ "kube-apiserver.service" ];
path = [pkgs.iptables];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-proxy \
--master=${cfg.proxy.master} \
--kubeconfig=${kubeconfig} \
--bind-address=${cfg.proxy.address} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
${cfg.proxy.extraOpts}
${optionalString cfg.verbose "--v=6"} \
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
${cfg.proxy.extraOpts}
'';
Restart = "always"; # Retry connection
RestartSec = "5s";
WorkingDirectory = cfg.dataDir;
};
};
})
(mkIf cfg.kube2sky.enable {
systemd.services.kube2sky = {
description = "Kubernetes Dns Bridge Service";
(mkIf cfg.dns.enable {
systemd.services.kube-dns = {
description = "Kubernetes Dns Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "skydns.service" "etcd.service" "kubernetes-apiserver.service" ];
after = [ "kube-apiserver.service" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube2sky \
-etcd-server=http://${head cfg.etcdServers} \
-domain=${cfg.kube2sky.domain} \
-kube_master_url=http://${cfg.kube2sky.master} \
-logtostderr=true \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
${cfg.kube2sky.extraOpts}
ExecStart = ''${cfg.package}/bin/kube-dns \
--kubecfg-file=${kubeconfig} \
--dns-port=${toString cfg.dns.port} \
--domain=${cfg.dns.domain} \
${optionalString cfg.verbose "--v=6"} \
${optionalString cfg.verbose "--log-flush-frequency=1s"} \
${cfg.dns.extraOpts}
'';
WorkingDirectory = cfg.dataDir;
User = "kubernetes";
Group = "kubernetes";
AmbientCapabilities = "cap_net_bind_service";
SendSIGHUP = true;
};
};
})
(mkIf cfg.kubelet.enable {
boot.kernelModules = ["br_netfilter"];
})
(mkIf (any (el: el == "master") cfg.roles) {
virtualisation.docker.enable = mkDefault true;
services.kubernetes.kubelet.enable = mkDefault true;
services.kubernetes.kubelet.allowPrivileged = mkDefault true;
services.kubernetes.apiserver.enable = mkDefault true;
services.kubernetes.scheduler.enable = mkDefault true;
services.kubernetes.controllerManager.enable = mkDefault true;
services.kubernetes.kube2sky.enable = mkDefault true;
services.etcd.enable = mkDefault (cfg.etcd.servers == ["http://127.0.0.1:2379"]);
})
(mkIf (any (el: el == "node") cfg.roles) {
virtualisation.docker.enable = mkDefault true;
virtualisation.docker.logDriver = mkDefault "json-file";
services.kubernetes.kubelet.enable = mkDefault true;
services.kubernetes.proxy.enable = mkDefault true;
})
(mkIf (any (el: el == "node" || el == "master") cfg.roles) {
services.etcd.enable = mkDefault true;
services.skydns.enable = mkDefault true;
services.skydns.domain = mkDefault cfg.kubelet.clusterDomain;
services.kubernetes.dns.enable = mkDefault true;
})
(mkIf (
@ -601,24 +830,16 @@ in {
cfg.scheduler.enable ||
cfg.controllerManager.enable ||
cfg.kubelet.enable ||
cfg.proxy.enable
cfg.proxy.enable ||
cfg.dns.enable
) {
systemd.services.kubernetes-setup = {
description = "Kubernetes setup.";
serviceConfig.Type = "oneshot";
script = ''
mkdir -p /var/run/kubernetes
chown kubernetes /var/lib/kubernetes
rm ${cfg.dataDir}/.dockercfg || true
ln -fs ${pkgs.writeText "kubernetes-dockercfg" cfg.dockerCfg} ${cfg.dataDir}/.dockercfg
'';
};
services.kubernetes.package = mkDefault pkgs.kubernetes;
systemd.tmpfiles.rules = [
"d /opt/cni/bin 0755 root root -"
"d /var/run/kubernetes 0755 kubernetes kubernetes -"
"d /var/lib/kubernetes 0755 kubernetes kubernetes -"
];
environment.systemPackages = [ cfg.package ];
users.extraUsers = singleton {
name = "kubernetes";
uid = config.ids.uids.kubernetes;
@ -630,6 +851,5 @@ in {
};
users.extraGroups.kubernetes.gid = config.ids.gids.kubernetes;
})
];
}
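
For orientation, a sketch of a single-machine cluster relying on the role wiring above (everything else at its default):

services.kubernetes = {
  # "master" pulls in apiserver, scheduler, controller manager and dns;
  # "node" pulls in kubelet and proxy; etcd is enabled automatically
  # while etcd.servers keeps its default.
  roles = [ "master" "node" ];
};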

View file

@ -46,7 +46,7 @@ in {
fleetctlEndpoint = mkOption {
type = types.str;
default = "http://127.0.0.1:4001";
default = "http://127.0.0.1:2379";
description = ''
Panamax fleetctl endpoint.
'';

View file

@ -0,0 +1,88 @@
{config, lib, pkgs, ...}:
with lib;
let
cfg = config.services.boinc;
allowRemoteGuiRpcFlag = optionalString cfg.allowRemoteGuiRpc "--allow_remote_gui_rpc";
in
{
options.services.boinc = {
enable = mkOption {
type = types.bool;
default = false;
example = true;
description = ''
Whether to enable the BOINC distributed computing client. If this
option is set to true, the boinc_client daemon will be run as a
background service. The boinccmd command can be used to control the
daemon.
'';
};
package = mkOption {
type = types.package;
default = pkgs.boinc;
defaultText = "pkgs.boinc";
description = ''
Which BOINC package to use.
'';
};
dataDir = mkOption {
type = types.path;
default = "/var/lib/boinc";
description = ''
The directory in which to store BOINC's configuration and data files.
'';
};
allowRemoteGuiRpc = mkOption {
type = types.bool;
default = false;
example = true;
description = ''
If set to true, any remote host can connect to and control this BOINC
client (subject to password authentication). If instead set to false,
only the hosts listed in <varname>dataDir</varname>/remote_hosts.cfg will be allowed to
connect.
See also: <link xlink:href="http://boinc.berkeley.edu/wiki/Controlling_BOINC_remotely#Remote_access"/>
'';
};
};
config = mkIf cfg.enable {
environment.systemPackages = [cfg.package];
users.users.boinc = {
createHome = false;
description = "BOINC Client";
home = cfg.dataDir;
isSystemUser = true;
};
systemd.services.boinc = {
description = "BOINC Client";
after = ["network.target" "local-fs.target"];
wantedBy = ["multi-user.target"];
preStart = ''
mkdir -p ${cfg.dataDir}
chown boinc ${cfg.dataDir}
'';
script = ''
${cfg.package}/bin/boinc_client --dir ${cfg.dataDir} --redirectio ${allowRemoteGuiRpcFlag}
'';
serviceConfig = {
PermissionsStartOnly = true; # preStart must be run as root
User = "boinc";
Nice = 10;
};
};
};
meta = {
maintainers = with lib.maintainers; [kierdavis];
};
}
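
A usage sketch for the new module (values are illustrative):

services.boinc = {
  enable = true;
  allowRemoteGuiRpc = true;  # any host may connect, subject to password auth
};

The running daemon can then be controlled with boinccmd, as noted in the enable description.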

View file

@ -0,0 +1,250 @@
# NixOS module for the Buildbot continuous integration server.
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.buildbot-master;
escapeStr = s: escape ["'"] s;
masterCfg = pkgs.writeText "master.cfg" ''
from buildbot.plugins import *
factory = util.BuildFactory()
c = BuildmasterConfig = dict(
workers = [${concatStringsSep "," cfg.workers}],
protocols = { 'pb': {'port': ${cfg.bpPort} } },
title = '${escapeStr cfg.title}',
titleURL = '${escapeStr cfg.titleUrl}',
buildbotURL = '${escapeStr cfg.buildbotUrl}',
db = dict(db_url='${escapeStr cfg.dbUrl}'),
www = dict(port=${toString cfg.port}),
change_source = [ ${concatStringsSep "," cfg.changeSource} ],
schedulers = [ ${concatStringsSep "," cfg.schedulers} ],
builders = [ ${concatStringsSep "," cfg.builders} ],
status = [ ${concatStringsSep "," cfg.status} ],
)
for step in [ ${concatStringsSep "," cfg.factorySteps} ]:
factory.addStep(step)
${cfg.extraConfig}
'';
configFile = if cfg.masterCfg == null then masterCfg else cfg.masterCfg;
in {
options = {
services.buildbot-master = {
factorySteps = mkOption {
type = types.listOf types.str;
description = "Factory Steps";
default = [];
example = [
"steps.Git(repourl='git://github.com/buildbot/pyflakes.git', mode='incremental')"
"steps.ShellCommand(command=['trial', 'pyflakes'])"
];
};
changeSource = mkOption {
type = types.listOf types.str;
description = "List of Change Sources.";
default = [];
example = [
"changes.GitPoller('git://github.com/buildbot/pyflakes.git', workdir='gitpoller-workdir', branch='master', pollinterval=300)"
];
};
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable the Buildbot continuous integration server.";
};
extraConfig = mkOption {
type = types.str;
description = "Extra configuration to append to master.cfg";
default = "";
};
masterCfg = mkOption {
type = with types; nullOr path;
description = ''
Optionally pass path to raw master.cfg file.
Other options in this configuration will be ignored.
'';
default = null;
example = literalExample ''
pkgs.writeText "master.cfg" "BuildmasterConfig = c = {}"
'';
};
schedulers = mkOption {
type = types.listOf types.str;
description = "List of Schedulers.";
default = [
"schedulers.SingleBranchScheduler(name='all', change_filter=util.ChangeFilter(branch='master'), treeStableTimer=None, builderNames=['runtests'])"
"schedulers.ForceScheduler(name='force',builderNames=['runtests'])"
];
};
builders = mkOption {
type = types.listOf types.str;
description = "List of Builders.";
default = [
"util.BuilderConfig(name='runtests',workernames=['default-worker'],factory=factory)"
];
};
workers = mkOption {
type = types.listOf types.str;
description = "List of Workers.";
default = [
"worker.Worker('default-worker', 'password')"
];
example = [ "worker.LocalWorker('default-worker')" ];
};
status = mkOption {
default = [];
type = types.listOf types.str;
description = "List of status notification endpoints.";
};
user = mkOption {
default = "buildbot";
type = types.str;
description = "User the buildbot server should execute under.";
};
group = mkOption {
default = "buildbot";
type = types.str;
description = "Primary group of buildbot user.";
};
extraGroups = mkOption {
type = types.listOf types.str;
default = [ "nixbld" ];
description = "List of extra groups that the buildbot user should be a part of.";
};
home = mkOption {
default = "/home/buildbot";
type = types.path;
description = "Buildbot home directory.";
};
buildbotDir = mkOption {
default = "${cfg.home}/master";
type = types.path;
description = "Specifies the Buildbot directory.";
};
bpPort = mkOption {
default = "9989";
type = types.string;
example = "tcp:10000:interface=127.0.0.1";
description = "Port where the master will listen to Buildbot Worker.";
};
listenAddress = mkOption {
default = "0.0.0.0";
type = types.str;
description = "Specifies the bind address on which the buildbot HTTP interface listens.";
};
buildbotUrl = mkOption {
default = "http://localhost:8010/";
type = types.str;
description = "Specifies the Buildbot URL.";
};
title = mkOption {
default = "Buildbot";
type = types.str;
description = "Specifies the Buildbot Title.";
};
titleUrl = mkOption {
default = "Buildbot";
type = types.str;
description = "Specifies the Buildbot TitleURL.";
};
dbUrl = mkOption {
default = "sqlite:///state.sqlite";
type = types.str;
description = "Specifies the database connection string.";
};
port = mkOption {
default = 8010;
type = types.int;
description = "Specifies port number on which the buildbot HTTP interface listens.";
};
package = mkOption {
type = types.package;
default = pkgs.buildbot-ui;
description = ''
Package to use for buildbot.
<literal>buildbot-full</literal> is required in order to use local workers.
'';
example = pkgs.buildbot-full;
};
packages = mkOption {
default = [ ];
example = [ pkgs.git ];
type = types.listOf types.package;
description = "Packages to add to PATH for the buildbot process.";
};
};
};
config = mkIf cfg.enable {
users.extraGroups = optional (cfg.group == "buildbot") {
name = "buildbot";
};
users.extraUsers = optional (cfg.user == "buildbot") {
name = "buildbot";
description = "buildbot user";
isNormalUser = true;
createHome = true;
home = cfg.home;
group = cfg.group;
extraGroups = cfg.extraGroups;
useDefaultShell = true;
};
systemd.services.buildbot-master = {
description = "Buildbot Continuous Integration Server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = cfg.packages;
serviceConfig = {
Type = "forking";
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.home;
ExecStart = "${cfg.package}/bin/buildbot start ${cfg.buildbotDir}";
};
preStart = ''
mkdir -vp ${cfg.buildbotDir}
chown -c ${cfg.user}:${cfg.group} ${cfg.buildbotDir}
ln -sf ${configFile} ${cfg.buildbotDir}/master.cfg
${cfg.package}/bin/buildbot create-master ${cfg.buildbotDir}
'';
postStart = ''
until [[ $(${pkgs.curl}/bin/curl -s --head -w '\n%{http_code}' http://localhost:${toString cfg.port} | tail -n1) =~ ^(200|403)$ ]]; do
sleep 1
done
'';
};
};
}
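
A minimal sketch with a local worker (buildbot-full is required for LocalWorker, as the package option notes):

services.buildbot-master = {
  enable = true;
  package = pkgs.buildbot-full;
  workers = [ "worker.LocalWorker('default-worker')" ];
};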

View file

@ -37,6 +37,7 @@ in {
packages = mkOption {
default = [ pkgs.stdenv pkgs.jre pkgs.git config.programs.ssh.package pkgs.nix ];
defaultText = "[ pkgs.stdenv pkgs.jre pkgs.git config.programs.ssh.package pkgs.nix ]";
type = types.listOf types.package;
description = ''
Packages to add to PATH for the Go.CD agent process.

View file

@ -68,6 +68,7 @@ in {
packages = mkOption {
default = [ pkgs.stdenv pkgs.jre pkgs.git config.programs.ssh.package pkgs.nix ];
defaultText = "[ pkgs.stdenv pkgs.jre pkgs.git config.programs.ssh.package pkgs.nix ]";
type = types.listOf types.package;
description = ''
Packages to add to PATH for the Go.CD server's process.

View file

@ -343,7 +343,7 @@ in
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" "network.target" ];
path = [ pkgs.nettools ];
path = [ cfg.package pkgs.nettools ];
environment = env;
serviceConfig =
{ ExecStart = "@${cfg.package}/bin/hydra-evaluator hydra-evaluator";

View file

@ -162,7 +162,7 @@ in {
if [ "$(id -u)" = 0 ]; then
chown ${cfg.user}:${cfg.group} `dirname ${cfg.uriFile}`;
(-f ${cfg.uriFile} && chown ${cfg.user}:${cfg.group} ${cfg.uriFile}) || true
(test -f ${cfg.uriFile} && chown ${cfg.user}:${cfg.group} ${cfg.uriFile}) || true
chown ${cfg.user}:${cfg.group} ${cfg.databaseDir}
chown ${cfg.user}:${cfg.group} ${cfg.viewIndexDir}
chown ${cfg.user}:${cfg.group} ${cfg.configFile}

View file

@ -52,9 +52,7 @@ in
package = mkOption {
type = types.package;
default = pkgs.mysql;
defaultText = "pkgs.mysql";
example = literalExample "pkgs.mysql55";
example = literalExample "pkgs.mysql";
description = "
Which MySQL derivation to use.
";

View file

@ -0,0 +1,202 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.riak-cs;
in
{
###### interface
options = {
services.riak-cs = {
enable = mkEnableOption "riak-cs";
package = mkOption {
type = types.package;
default = pkgs.riak-cs;
defaultText = "pkgs.riak-cs";
example = literalExample "pkgs.riak-cs";
description = ''
Riak package to use.
'';
};
nodeName = mkOption {
type = types.str;
default = "riak-cs@127.0.0.1";
description = ''
Name of the Erlang node.
'';
};
anonymousUserCreation = mkOption {
type = types.bool;
default = false;
description = ''
Anonymous user creation.
'';
};
riakHost = mkOption {
type = types.str;
default = "127.0.0.1:8087";
description = ''
Name of riak hosting service.
'';
};
listener = mkOption {
type = types.str;
default = "127.0.0.1:8080";
description = ''
Name of Riak CS listening service.
'';
};
stanchionHost = mkOption {
type = types.str;
default = "127.0.0.1:8085";
description = ''
Name of stanchion hosting service.
'';
};
stanchionSsl = mkOption {
type = types.bool;
default = true;
description = ''
Tell stanchion to use SSL.
'';
};
distributedCookie = mkOption {
type = types.str;
default = "riak";
description = ''
Cookie for distributed node communication. All nodes in the
same cluster should use the same cookie or they will not be able to
communicate.
'';
};
dataDir = mkOption {
type = types.path;
default = "/var/db/riak-cs";
description = ''
Data directory for Riak CS.
'';
};
logDir = mkOption {
type = types.path;
default = "/var/log/riak-cs";
description = ''
Log directory for Riak CS.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Additional text to be appended to <filename>riak-cs.conf</filename>.
'';
};
extraAdvancedConfig = mkOption {
type = types.lines;
default = "";
description = ''
Additional text to be appended to <filename>advanced.config</filename>.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
environment.etc."riak-cs/riak-cs.conf".text = ''
nodename = ${cfg.nodeName}
distributed_cookie = ${cfg.distributedCookie}
platform_log_dir = ${cfg.logDir}
riak_host = ${cfg.riakHost}
listener = ${cfg.listener}
stanchion_host = ${cfg.stanchionHost}
anonymous_user_creation = ${if cfg.anonymousUserCreation then "on" else "off"}
${cfg.extraConfig}
'';
environment.etc."riak-cs/advanced.config".text = ''
${cfg.extraAdvancedConfig}
'';
users.extraUsers.riak-cs = {
name = "riak-cs";
uid = config.ids.uids.riak-cs;
group = "riak";
description = "Riak CS server user";
};
systemd.services.riak-cs = {
description = "Riak CS Server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = [
pkgs.utillinux # for `logger`
pkgs.bash
];
environment.HOME = "${cfg.dataDir}";
environment.RIAK_CS_DATA_DIR = "${cfg.dataDir}";
environment.RIAK_CS_LOG_DIR = "${cfg.logDir}";
environment.RIAK_CS_ETC_DIR = "/etc/riak";
preStart = ''
if ! test -e ${cfg.logDir}; then
mkdir -m 0755 -p ${cfg.logDir}
chown -R riak-cs ${cfg.logDir}
fi
if ! test -e ${cfg.dataDir}; then
mkdir -m 0700 -p ${cfg.dataDir}
chown -R riak-cs ${cfg.dataDir}
fi
'';
serviceConfig = {
ExecStart = "${cfg.package}/bin/riak-cs console";
ExecStop = "${cfg.package}/bin/riak-cs stop";
StandardInput = "tty";
User = "riak-cs";
Group = "riak-cs";
PermissionsStartOnly = true;
# Give Riak a decent amount of time to clean up.
TimeoutStopSec = 120;
LimitNOFILE = 65536;
};
unitConfig.RequiresMountsFor = [
"${cfg.dataDir}"
"${cfg.logDir}"
"/etc/riak"
];
};
};
}

View file

@ -20,6 +20,8 @@ in
package = mkOption {
type = types.package;
default = pkgs.riak;
defaultText = "pkgs.riak";
example = literalExample "pkgs.riak";
description = ''
Riak package to use.
@ -68,6 +70,14 @@ in
'';
};
extraAdvancedConfig = mkOption {
type = types.lines;
default = "";
description = ''
Additional text to be appended to <filename>advanced.config</filename>.
'';
};
};
};
@ -88,6 +98,10 @@ in
${cfg.extraConfig}
'';
environment.etc."riak/advanced.config".text = ''
${cfg.extraAdvancedConfig}
'';
users.extraUsers.riak = {
name = "riak";
uid = config.ids.uids.riak;

View file

@ -0,0 +1,212 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.stanchion;
in
{
###### interface
options = {
services.stanchion = {
enable = mkEnableOption "stanchion";
package = mkOption {
type = types.package;
default = pkgs.stanchion;
defaultText = "pkgs.stanchion";
example = literalExample "pkgs.stanchion";
description = ''
Stanchion package to use.
'';
};
nodeName = mkOption {
type = types.str;
default = "stanchion@127.0.0.1";
description = ''
Name of the Erlang node.
'';
};
adminKey = mkOption {
type = types.str;
default = "";
description = ''
Name of admin user.
'';
};
adminSecret = mkOption {
type = types.str;
default = "";
description = ''
Name of the admin secret.
'';
};
riakHost = mkOption {
type = types.str;
default = "127.0.0.1:8087";
description = ''
Name of riak hosting service.
'';
};
listener = mkOption {
type = types.str;
default = "127.0.0.1:8085";
description = ''
Name of Riak CS listening service.
'';
};
stanchionHost = mkOption {
type = types.str;
default = "127.0.0.1:8085";
description = ''
Name of stanchion hosting service.
'';
};
stanchionSsl = mkOption {
type = types.bool;
default = true;
description = ''
Tell stanchion to use SSL.
'';
};
distributedCookie = mkOption {
type = types.str;
default = "riak";
description = ''
Cookie for distributed node communication. All nodes in the
same cluster should use the same cookie or they will not be able to
communicate.
'';
};
dataDir = mkOption {
type = types.path;
default = "/var/db/stanchion";
description = ''
Data directory for Stanchion.
'';
};
logDir = mkOption {
type = types.path;
default = "/var/log/stanchion";
description = ''
Log directory for Stanchion.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Additional text to be appended to <filename>stanchion.conf</filename>.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
environment.etc."stanchion/advanced.config".text = ''
[{stanchion, []}].
'';
environment.etc."stanchion/stanchion.conf".text = ''
listener = ${cfg.listener}
riak_host = ${cfg.riakHost}
${optionalString (cfg.adminKey == "") "#"} admin.key=${optionalString (cfg.adminKey != "") cfg.adminKey}
${optionalString (cfg.adminSecret == "") "#"} admin.secret=${optionalString (cfg.adminSecret != "") cfg.adminSecret}
platform_bin_dir = ${pkgs.stanchion}/bin
platform_data_dir = ${cfg.dataDir}
platform_etc_dir = /etc/stanchion
platform_lib_dir = ${pkgs.stanchion}/lib
platform_log_dir = ${cfg.logDir}
nodename = ${cfg.nodeName}
distributed_cookie = ${cfg.distributedCookie}
stanchion_ssl=${if cfg.stanchionSsl then "on" else "off"}
${cfg.extraConfig}
'';
users.extraUsers.stanchion = {
name = "stanchion";
uid = config.ids.uids.stanchion;
group = "stanchion";
description = "Stanchion server user";
};
users.extraGroups.stanchion.gid = config.ids.gids.stanchion;
systemd.services.stanchion = {
description = "Stanchion Server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = [
pkgs.utillinux # for `logger`
pkgs.bash
];
environment.HOME = "${cfg.dataDir}";
environment.STANCHION_DATA_DIR = "${cfg.dataDir}";
environment.STANCHION_LOG_DIR = "${cfg.logDir}";
environment.STANCHION_ETC_DIR = "/etc/stanchion";
preStart = ''
if ! test -e ${cfg.logDir}; then
mkdir -m 0755 -p ${cfg.logDir}
chown -R stanchion:stanchion ${cfg.logDir}
fi
if ! test -e ${cfg.dataDir}; then
mkdir -m 0700 -p ${cfg.dataDir}
chown -R stanchion:stanchion ${cfg.dataDir}
fi
'';
serviceConfig = {
ExecStart = "${cfg.package}/bin/stanchion console";
ExecStop = "${cfg.package}/bin/stanchion stop";
StandardInput = "tty";
User = "stanchion";
Group = "stanchion";
PermissionsStartOnly = true;
# Give Stanchion a decent amount of time to clean up.
TimeoutStopSec = 120;
LimitNOFILE = 65536;
};
unitConfig.RequiresMountsFor = [
"${cfg.dataDir}"
"${cfg.logDir}"
"/etc/stanchion"
];
};
};
}
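
A sketch wiring the new Riak CS and Stanchion modules together with Riak on one host (admin credentials are placeholders; services.riak.enable is assumed from the riak module):

services.riak.enable = true;       # riak-cs talks to 127.0.0.1:8087 by default
services.riak-cs.enable = true;
services.stanchion = {
  enable = true;
  adminKey = "admin-key";          # placeholder
  adminSecret = "admin-secret";    # placeholder
};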

View file

@ -86,6 +86,12 @@ in {
};
config = mkIf cfg.enable {
assertions = [
{ assertion = cfg.users != [];
message = "services.psd.users must contain at least one user";
}
];
systemd = {
services = {
psd = {

View file

@ -0,0 +1,158 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.infinoted;
in {
options.services.infinoted = {
enable = mkEnableOption "infinoted";
package = mkOption {
type = types.package;
default = pkgs.libinfinity.override { daemon = true; };
defaultText = "pkgs.libinfinity.override { daemon = true; }";
description = ''
Package providing infinoted
'';
};
keyFile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Private key to use for TLS
'';
};
certificateFile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Server certificate to use for TLS
'';
};
certificateChain = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Chain of CA certificates to which our `certificateFile` is relative.
Optional for TLS.
'';
};
securityPolicy = mkOption {
type = types.enum ["no-tls" "allow-tls" "require-tls"];
default = "require-tls";
description = ''
How strictly to enforce clients connection with TLS.
'';
};
port = mkOption {
type = types.int;
default = 6523;
description = ''
Port to listen on
'';
};
rootDirectory = mkOption {
type = types.path;
default = "/var/lib/infinoted/documents/";
description = ''
Root of the directory structure to serve
'';
};
plugins = mkOption {
type = types.listOf types.str;
default = [ "note-text" "note-chat" "logging" "autosave" ];
description = ''
Plugins to enable
'';
};
passwordFile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
File to read server-wide password from
'';
};
extraConfig = mkOption {
type = types.lines;
default = ''
[autosave]
interval=10
'';
description = ''
Additional configuration to append to infinoted.conf
'';
};
user = mkOption {
type = types.str;
default = "infinoted";
description = ''
What to call the dedicated user under which infinoted is run
'';
};
group = mkOption {
type = types.str;
default = "infinoted";
description = ''
What to call the primary group of the dedicated user under which infinoted is run
'';
};
};
config = mkIf (cfg.enable) {
users.extraUsers = optional (cfg.user == "infinoted")
{ name = "infinoted";
description = "Infinoted user";
group = cfg.group;
};
users.extraGroups = optional (cfg.group == "infinoted")
{ name = "infinoted";
};
systemd.services.infinoted =
{ description = "Gobby Dedicated Server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
Type = "simple";
Restart = "always";
ExecStart = "${cfg.package}/bin/infinoted-0.6 --config-file=/var/lib/infinoted/infinoted.conf";
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = true;
};
preStart = ''
mkdir -p /var/lib/infinoted
install -o ${cfg.user} -g ${cfg.group} -m 0600 /dev/null /var/lib/infinoted/infinoted.conf
cat >>/var/lib/infinoted/infinoted.conf <<EOF
[infinoted]
${optionalString (cfg.keyFile != null) ''key-file=${cfg.keyFile}''}
${optionalString (cfg.certificateFile != null) ''certificate-file=${cfg.certificateFile}''}
${optionalString (cfg.certificateChain != null) ''certificate-chain=${cfg.certificateChain}''}
port=${toString cfg.port}
security-policy=${cfg.securityPolicy}
root-directory=${cfg.rootDirectory}
plugins=${concatStringsSep ";" cfg.plugins}
${optionalString (cfg.passwordFile != null) ''password=$(head -n 1 ${cfg.passwordFile})''}
${cfg.extraConfig}
EOF
install -o ${cfg.user} -g ${cfg.group} -m 0750 -d ${cfg.rootDirectory}
'';
};
};
}
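
A sketch enabling the server under the default require-tls policy (certificate paths are hypothetical):

services.infinoted = {
  enable = true;
  keyFile = "/var/lib/infinoted/key.pem";
  certificateFile = "/var/lib/infinoted/cert.pem";
};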

View file

@ -64,7 +64,7 @@ in
};
worldPath = mkOption {
type = types.path;
type = types.nullOr types.path;
default = null;
description = ''
The path to the world file (<literal>.wld</literal>) which should be loaded.
@ -126,8 +126,8 @@ in
User = "terraria";
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "${pkgs.tmux.bin}/bin/tmux -S /var/lib/terraria/terraria.sock new -d ${pkgs.terraria-server}/bin/TerrariaServer ${concatStringsSep " " flags}";
ExecStop = "${pkgs.tmux.bin}/bin/tmux -S /var/lib/terraria/terraria.sock send-keys Enter \"exit\" Enter";
ExecStart = "${getBin pkgs.tmux}/bin/tmux -S /var/lib/terraria/terraria.sock new -d ${pkgs.terraria-server}/bin/TerrariaServer ${concatStringsSep " " flags}";
ExecStop = "${getBin pkgs.tmux}/bin/tmux -S /var/lib/terraria/terraria.sock send-keys Enter \"exit\" Enter";
};
postStart = ''

View file

@ -39,6 +39,8 @@ in {
ProtectSystem = "full";
SystemCallArchitectures = "native";
};
wants = [ "systemd-udev-settle.service" ];
after = [ "local-fs.target" "systemd-udev-settle.service" ];
before = [ "sysinit.target" ];
wantedBy = [ "sysinit.target" ];
};

View file

@ -7,9 +7,35 @@ let
pkg = if config.hardware.sane.snapshot
then pkgs.sane-backends-git
else pkgs.sane-backends;
backends = [ pkg ] ++ config.hardware.sane.extraBackends;
sanedConf = pkgs.writeTextFile {
name = "saned.conf";
destination = "/etc/sane.d/saned.conf";
text = ''
localhost
${config.services.saned.extraConfig}
'';
};
netConf = pkgs.writeTextFile {
name = "net.conf";
destination = "/etc/sane.d/net.conf";
text = ''
${lib.optionalString config.services.saned.enable "localhost"}
${config.hardware.sane.netConf}
'';
};
env = {
SANE_CONFIG_DIR = config.hardware.sane.configDir;
LD_LIBRARY_PATH = [ "${saneConfig}/lib/sane" ];
};
backends = [ pkg netConf ] ++ optional config.services.saned.enable sanedConf ++ config.hardware.sane.extraBackends;
saneConfig = pkgs.mkSaneConfig { paths = backends; };
enabled = config.hardware.sane.enable || config.services.saned.enable;
in
{
@ -51,27 +77,86 @@ in
hardware.sane.configDir = mkOption {
type = types.string;
internal = true;
description = "The value of SANE_CONFIG_DIR.";
};
hardware.sane.netConf = mkOption {
type = types.lines;
default = "";
example = "192.168.0.16";
description = ''
Network hosts that should be probed for remote scanners.
'';
};
services.saned.enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable the saned network daemon for remote connections to scanners.
saned will be run as the <literal>scanner</literal> user; to allow
access to hardware that isn't in the <literal>scanner</literal> group,
add the needed groups to this user.
'';
};
services.saned.extraConfig = mkOption {
type = types.lines;
default = "";
example = "192.168.0.0/24";
description = ''
Extra saned configuration lines.
'';
};
};
###### implementation
config = mkIf config.hardware.sane.enable {
config = mkMerge [
(mkIf enabled {
hardware.sane.configDir = mkDefault "${saneConfig}/etc/sane.d";
hardware.sane.configDir = mkDefault "${saneConfig}/etc/sane.d";
environment.systemPackages = backends;
environment.sessionVariables = env;
services.udev.packages = backends;
environment.systemPackages = backends;
environment.sessionVariables = {
SANE_CONFIG_DIR = config.hardware.sane.configDir;
LD_LIBRARY_PATH = [ "${saneConfig}/lib/sane" ];
};
services.udev.packages = backends;
users.extraGroups."scanner".gid = config.ids.gids.scanner;
})
users.extraGroups."scanner".gid = config.ids.gids.scanner;
(mkIf config.services.saned.enable {
networking.firewall.connectionTrackingModules = [ "sane" ];
};
systemd.services."saned@" = {
description = "Scanner Service";
environment = mapAttrs (name: val: toString val) env;
serviceConfig = {
User = "scanner";
Group = "scanner";
ExecStart = "${pkg}/bin/saned";
};
};
systemd.sockets.saned = {
description = "saned incoming socket";
wantedBy = [ "sockets.target" ];
listenStreams = [ "0.0.0.0:6566" "[::]:6566" ];
socketConfig = {
# saned needs to distinguish between IPv4 and IPv6 to open matching data sockets.
BindIPv6Only = "ipv6-only";
Accept = true;
MaxConnections = 1;
};
};
users.extraUsers."scanner" = {
uid = config.ids.uids.scanner;
group = "scanner";
};
})
];
}
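
A sketch of the two halves of the new saned support (addresses are examples):

# On the machine with the scanner:
services.saned.enable = true;
services.saned.extraConfig = "192.168.0.0/24";  # hosts allowed to connect

# On a client:
hardware.sane.enable = true;
hardware.sane.netConf = "192.168.0.16";         # address of the saned host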

View file

@ -100,6 +100,12 @@ in
config = mkIf cfg.enable {
assertions =
[ { assertion = !config.services.rsyslogd.enable;
message = "rsyslogd conflicts with syslogd";
}
];
environment.systemPackages = [ pkgs.sysklogd ];
services.syslogd.extraParams = optional cfg.enableNetworkInput "-r";

View file

@ -20,17 +20,29 @@ in {
description = "Whether to enable the postsrsd SRS server for Postfix.";
};
secretsFile = mkOption {
type = types.path;
default = "/var/lib/postsrsd/postsrsd.secret";
description = "Secret keys used for signing and verification";
};
domain = mkOption {
type = types.str;
description = "Domain name for rewrite";
};
separator = mkOption {
type = types.enum ["-" "=" "+"];
default = "=";
description = "First separator character in generated addresses";
};
# bindAddress = mkOption { # uncomment once 1.5 is released
#   type = types.str;
#   default = "127.0.0.1";
#   description = "Socket listen address";
# };
forwardPort = mkOption {
type = types.int;
default = 10001;
@ -43,6 +55,18 @@ in {
description = "Port for the reverse SRS lookup";
};
timeout = mkOption {
type = types.int;
default = 1800;
description = "Timeout for idle client connections in seconds";
};
excludeDomains = mkOption {
type = types.listOf types.str;
default = [];
description = "Origin domains to exclude from rewriting in addition to primary domain";
};
user = mkOption {
type = types.str;
default = "postsrsd";
@ -86,7 +110,7 @@ in {
path = [ pkgs.coreutils ];
serviceConfig = {
ExecStart = ''${pkgs.postsrsd}/sbin/postsrsd "-s${cfg.secretsFile}" "-d${cfg.domain}" -a${cfg.separator} -f${toString cfg.forwardPort} -r${toString cfg.reversePort} -t${toString cfg.timeout} "-X${concatStringsSep "," cfg.excludeDomains}"'';
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = true;
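A hedged usage sketch for the new postsrsd options (the domain and values below are made up for illustration):

  {
    services.postsrsd = {
      enable = true;
      domain = "example.org";
      separator = "+";
      timeout = 900;
      excludeDomains = [ "lists.example.org" ];
    };
  }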


@ -203,7 +203,7 @@ milter_default_action = accept
PermissionsStartOnly = true;
Restart = "always";
RuntimeDirectory = "rmilter";
RuntimeDirectoryMode = "0755";
};
};


@ -25,7 +25,8 @@ in
DBs = mkOption {
type = types.listOf types.package;
default = with pkgs.dictdDBs; [ wiktionary wordnet ];
defaultText = "with pkgs.dictdDBs; [ wiktionary wordnet ]";
example = literalExample "[ pkgs.dictdDBs.nld2eng ]";
description = ''List of databases to make available.'';
};


@ -41,6 +41,7 @@ in
type = types.path;
description = "The Disnix package";
default = pkgs.disnix;
defaultText = "pkgs.disnix";
};
};


@ -164,18 +164,21 @@ in {
packages.gitlab = mkOption {
type = types.package;
default = pkgs.gitlab;
defaultText = "pkgs.gitlab";
description = "Reference to the gitlab package";
};
packages.gitlab-shell = mkOption {
type = types.package;
default = pkgs.gitlab-shell;
defaultText = "pkgs.gitlab-shell";
description = "Reference to the gitlab-shell package";
};
packages.gitlab-workhorse = mkOption {
type = types.package;
default = pkgs.gitlab-workhorse;
defaultText = "pkgs.gitlab-workhorse";
description = "Reference to the gitlab-workhorse package";
};
@ -425,7 +428,7 @@ in {
TimeoutSec = "300";
Restart = "on-failure";
WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
ExecStart="${cfg.packages.gitlab.env}/bin/bundle exec \"sidekiq -q post_receive -q mailers -q system_hook -q project_web_hook -q gitlab_shell -q common -q default -e production -P ${cfg.statePath}/tmp/sidekiq.pid\"";
ExecStart="${cfg.packages.gitlab.env}/bin/bundle exec \"sidekiq -C \"${cfg.packages.gitlab}/share/gitlab/config/sidekiq_queues.yml\" -e production -P ${cfg.statePath}/tmp/sidekiq.pid\"";
};
};


@ -59,7 +59,12 @@ uploads_path: "/var/lib/matrix-synapse/uploads"
max_upload_size: "${cfg.max_upload_size}"
max_image_pixels: "${cfg.max_image_pixels}"
dynamic_thumbnails: ${fromBool cfg.dynamic_thumbnails}
url_preview_enabled: ${fromBool cfg.url_preview_enabled}
${optionalString (cfg.url_preview_enabled == true) ''
url_preview_ip_range_blacklist: ${builtins.toJSON cfg.url_preview_ip_range_blacklist}
url_preview_ip_range_whitelist: ${builtins.toJSON cfg.url_preview_ip_range_whitelist}
url_preview_url_blacklist: ${builtins.toJSON cfg.url_preview_url_blacklist}
''}
recaptcha_private_key: "${cfg.recaptcha_private_key}"
recaptcha_public_key: "${cfg.recaptcha_public_key}"
enable_registration_captcha: ${fromBool cfg.enable_registration_captcha}
@ -355,6 +360,47 @@ in {
default = "10K";
description = "Number of events to cache in memory.";
};
url_preview_enabled = mkOption {
type = types.bool;
default = false;
description = ''
Is the preview URL API enabled? If enabled, you *must* specify an
explicit url_preview_ip_range_blacklist of IPs that the spider is
denied from accessing.
'';
};
url_preview_ip_range_blacklist = mkOption {
type = types.listOf types.str;
default = [
"127.0.0.0/8"
"10.0.0.0/8"
"172.16.0.0/12"
"192.168.0.0/16"
"100.64.0.0/10"
"169.254.0.0/16"
];
description = ''
List of IP address CIDR ranges that the URL preview spider is denied
from accessing.
'';
};
url_preview_ip_range_whitelist = mkOption {
type = types.listOf types.str;
default = [];
description = ''
List of IP address CIDR ranges that the URL preview spider is allowed
to access even if they are specified in
url_preview_ip_range_blacklist.
'';
};
url_preview_url_blacklist = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Optional list of URL matches that the URL preview spider is
denied from accessing.
'';
};
recaptcha_private_key = mkOption {
type = types.str;
default = "";


@ -172,8 +172,8 @@ in
sshKey = "/root/.ssh/id_buildfarm";
system = "x86_64-linux";
maxJobs = 2;
supportedFeatures = "kvm";
mandatoryFeatures = "perf";
supportedFeatures = [ "kvm" ];
mandatoryFeatures = [ "perf" ];
}
];
description = ''


@ -6,20 +6,21 @@ let
cfg = config.services.parsoid;
confTree = {
worker_heartbeat_timeout = 300000;
logging = { level = "info"; };
services = [{
module = "lib/index.js";
entrypoint = "apiServiceWorker";
conf = {
mwApis = map (x: if isAttrs x then x else { uri = x; }) cfg.wikis;
serverInterface = cfg.interface;
serverPort = cfg.port;
};
}];
};
confFile = pkgs.writeText "config.yml" (builtins.toJSON (recursiveUpdate confTree cfg.extraConfig));
in
{
@ -38,9 +39,9 @@ in
'';
};
wikis = mkOption {
type = types.listOf (types.either types.str types.attrs);
example = [ "http://localhost/api.php" ];
description = ''
Used MediaWiki API endpoints.
'';
@ -71,8 +72,8 @@ in
};
extraConfig = mkOption {
type = types.attrs;
default = {};
description = ''
Extra configuration to add to parsoid configuration.
'';


@ -9,7 +9,7 @@ let
BaseDir "${cfg.dataDir}"
PIDFile "${cfg.pidFile}"
AutoLoadPlugin ${if cfg.autoLoadPlugin then "true" else "false"}
Hostname "${config.networking.hostName}"
LoadPlugin syslog
<Plugin "syslog">
@ -108,7 +108,8 @@ in {
};
preStart = ''
mkdir -p ${cfg.dataDir}
chmod 755 ${cfg.dataDir}
install -D /dev/null ${cfg.pidFile}
if [ "$(id -u)" = 0 ]; then
chown -R ${cfg.user} ${cfg.dataDir};


@ -25,9 +25,16 @@ let
scrape_configs = cfg.scrapeConfigs;
};
generatedPrometheusYml = writePrettyJSON "prometheus.yml" promConfig;
prometheusYml =
if cfg.configText != null then
pkgs.writeText "prometheus.yml" cfg.configText
else generatedPrometheusYml;
cmdlineArgs = cfg.extraFlags ++ [
"-storage.local.path=${cfg.dataDir}/metrics"
"-config.file=${writePrettyJSON "prometheus.yml" promConfig}"
"-config.file=${prometheusYml}"
"-web.listen-address=${cfg.listenAddress}"
"-alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
"-alertmanager.timeout=${toString cfg.alertmanagerTimeout}s"
@ -359,6 +366,16 @@ in {
'';
};
configText = mkOption {
type = types.nullOr types.lines;
default = null;
description = ''
If non-null, this option defines the text that is written to
prometheus.yml. If null, the contents of prometheus.yml are generated
from the structured config options.
'';
};
globalConfig = mkOption {
type = promTypes.globalConfig;
default = {};
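A sketch of the new configText escape hatch; when configText is non-null it takes precedence over the structured options. The scrape config shown is an assumed example, not taken from this diff:

  {
    services.prometheus = {
      enable = true;
      configText = ''
        global:
          scrape_interval: 15s
        scrape_configs:
          - job_name: 'node'
            static_configs:
              - targets: ['localhost:9100']
      '';
    };
  }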


@ -47,6 +47,18 @@ in
'';
};
gatewayAddress = mkOption {
type = types.str;
default = "/ip4/127.0.0.1/tcp/8080";
description = "Where the IPFS Gateway can be reached";
};
apiAddress = mkOption {
type = types.str;
default = "/ip4/127.0.0.1/tcp/5001";
description = "Where IPFS exposes its API to";
};
enableGC = mkOption {
type = types.bool;
default = false;
@ -98,6 +110,8 @@ in
cd ${cfg.dataDir}
${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${cfg.user} -c "${ipfs}/bin/ipfs init"
fi
${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${cfg.user} -c "${ipfs}/bin/ipfs config Addresses.API ${cfg.apiAddress}"
${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${cfg.user} -c "${ipfs}/bin/ipfs config Addresses.Gateway ${cfg.gatewayAddress}"
'';
serviceConfig = {

View file

@ -138,6 +138,45 @@ in
'';
};
helper.enable = mkEnableOption "helper service";
sftpd.enable = mkEnableOption "SFTP service";
sftpd.port = mkOption {
default = null;
type = types.nullOr types.int;
description = ''
The port on which the SFTP server will listen.
'';
};
sftpd.hostPublicKeyFile = mkOption {
default = null;
type = types.nullOr types.path;
description = ''
Path to the SSH host public key.
'';
};
sftpd.hostPrivateKeyFile = mkOption {
default = null;
type = types.nullOr types.path;
description = ''
Path to the SSH host private key.
'';
};
sftpd.accounts.file = mkOption {
default = null;
type = types.nullOr types.path;
description = ''
Path to the accounts file.
'';
};
sftpd.accounts.url = mkOption {
default = null;
type = types.nullOr types.str;
description = ''
URL of the accounts server.
'';
};
package = mkOption {
default = pkgs.tahoelafs;
defaultText = "pkgs.tahoelafs";
@ -194,6 +233,12 @@ in
serviceConfig = {
Type = "simple";
PIDFile = pidfile;
# Believe it or not, Tahoe is very brittle about the order of
# arguments to $(tahoe start). The node directory must come first,
# and arguments which alter Twisted's behavior come afterwards.
ExecStart = ''
${settings.package}/bin/tahoe start ${nodedir} -n -l- --pidfile=${pidfile}
'';
};
preStart = ''
if [ \! -d ${nodedir} ]; then
@ -209,12 +254,6 @@ in
# ln -s /etc/tahoe-lafs/introducer-${node}.cfg ${nodedir}/tahoe.cfg
cp /etc/tahoe-lafs/introducer-${node}.cfg ${nodedir}/tahoe.cfg
'';
});
users.extraUsers = flip mapAttrs' cfg.introducers (node: _:
nameValuePair "tahoe.introducer-${node}" {
@ -256,6 +295,19 @@ in
[helper]
enabled = ${if settings.helper.enable then "true" else "false"}
[sftpd]
enabled = ${if settings.sftpd.enable then "true" else "false"}
${optionalString (settings.sftpd.port != null)
"port = ${toString settings.sftpd.port}"}
${optionalString (settings.sftpd.hostPublicKeyFile != null)
"host_pubkey_file = ${settings.sftpd.hostPublicKeyFile}"}
${optionalString (settings.sftpd.hostPrivateKeyFile != null)
"host_privkey_file = ${settings.sftpd.hostPrivateKeyFile}"}
${optionalString (settings.sftpd.accounts.file != null)
"accounts.file = ${settings.sftpd.accounts.file}"}
${optionalString (settings.sftpd.accounts.url != null)
"accounts.url = ${settings.sftpd.accounts.url}"}
'';
});
# Actually require Tahoe, so that we will have it installed.
@ -281,6 +333,12 @@ in
serviceConfig = {
Type = "simple";
PIDFile = pidfile;
# Believe it or not, Tahoe is very brittle about the order of
# arguments to $(tahoe start). The node directory must come first,
# and arguments which alter Twisted's behavior come afterwards.
ExecStart = ''
${settings.package}/bin/tahoe start ${nodedir} -n -l- --pidfile=${pidfile}
'';
};
preStart = ''
if [ \! -d ${nodedir} ]; then
@ -296,12 +354,6 @@ in
# ln -s /etc/tahoe-lafs/${node}.cfg ${nodedir}/tahoe.cfg
cp /etc/tahoe-lafs/${node}.cfg ${nodedir}/tahoe.cfg
'';
});
users.extraUsers = flip mapAttrs' cfg.nodes (node: _:
nameValuePair "tahoe.${node}" {


@ -55,6 +55,15 @@ in
description = "The directory to use for Yandex.Disk storage";
};
excludes = mkOption {
default = "";
type = types.string;
example = "data,backup";
description = ''
Comma-separated list of directories which are excluded from synchronization.
'';
};
};
};
@ -86,7 +95,7 @@ in
chown ${u} ${dir}
if ! test -d "${cfg.directory}" ; then
(mkdir -p -m 755 ${cfg.directory} && chown ${u} ${cfg.directory}) ||
exit 1
fi
@ -94,7 +103,7 @@ in
-c '${pkgs.yandex-disk}/bin/yandex-disk token -p ${cfg.password} ${cfg.username} ${dir}/token'
${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${u} \
-c '${pkgs.yandex-disk}/bin/yandex-disk start --no-daemon -a ${dir}/token -d ${cfg.directory} --exclude-dirs=${cfg.excludes}'
'';
};
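A sketch of the new excludes option in context (the credentials are placeholders):

  {
    services.yandex-disk = {
      enable = true;
      username = "someuser";
      password = "secret";
      excludes = "data,backup";
    };
  }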


@ -1,76 +1,68 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) mkEnableOption mkIf mkOption types;
generic = variant:
let
cfg = config.services.${variant};
pkg = pkgs.${variant};
birdc = if variant == "bird6" then "birdc6" else "birdc";
configFile = pkgs.stdenv.mkDerivation {
name = "${variant}.conf";
text = cfg.config;
preferLocalBuild = true;
buildCommand = ''
echo -n "$text" > $out
${pkg}/bin/${variant} -d -p -c $out
'';
};
in {
###### interface
options = {
services.${variant} = {
enable = mkEnableOption "BIRD Internet Routing Daemon";
config = mkOption {
type = types.lines;
description = ''
BIRD Internet Routing Daemon configuration file.
<link xlink:href='http://bird.network.cz/'/>
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.${variant} = {
description = "BIRD Internet Routing Daemon";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
Restart = "on-failure";
ExecStart = "${pkg}/bin/${variant} -c ${configFile} -u ${variant} -g ${variant}";
ExecReload = "${pkg}/bin/${birdc} configure";
ExecStop = "${pkg}/bin/${birdc} down";
CapabilityBoundingSet = [ "CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_SETUID" "CAP_SETGID"
# see bird/sysdep/linux/syspriv.h
"CAP_NET_BIND_SERVICE" "CAP_NET_BROADCAST" "CAP_NET_ADMIN" "CAP_NET_RAW" ];
ProtectSystem = "full";
ProtectHome = "yes";
SystemCallFilter = "~@cpu-emulation @debug @keyring @module @mount @obsolete @raw-io";
MemoryDenyWriteExecute = "yes";
};
};
users = {
extraUsers.${variant} = {
description = "BIRD Internet Routing Daemon user";
group = "${variant}";
};
extraGroups.${variant} = {};
};
};
};
inherit (config.services) bird bird6;
in {
imports = [ (generic "bird") (generic "bird6") ];
}
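With the module now generated per variant, services.bird and services.bird6 take a plain configuration string; a minimal sketch (the router id and protocol stanzas are assumed BIRD 1.x examples, not values from this diff):

  {
    services.bird = {
      enable = true;
      config = ''
        router id 192.0.2.1;

        protocol kernel {
          export all;
        }

        protocol device {
        }
      '';
    };
  }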


@ -31,7 +31,7 @@ in
};
servers = mkOption {
default = config.networking.timeServers;
description = ''
The set of NTP servers from which to synchronise.
'';
@ -102,7 +102,7 @@ in
home = stateDir;
};
systemd.services.timesyncd.enable = mkForce false;
systemd.services.chronyd =
{ description = "chrony NTP daemon";


@ -19,30 +19,21 @@ let
type = types.str;
description = "Public key at the opposite end of the tunnel.";
};
};
};
# check for the required attributes, otherwise
# permit attributes not undefined here
checkPeers = x:
x // {
connectTo = mapAttrs
(name: value:
if !hasAttr "publicKey" value then abort "cjdns peer ${name} missing a publicKey" else
if !hasAttr "password" value then abort "cjdns peer ${name} missing a password" else
value
)
x.connectTo;
};
# would be nice to merge 'cfg' with a //,
# but the json nesting is wacky.
@ -53,8 +44,8 @@ let
};
authorizedPasswords = map (p: { password = p; }) cfg.authorizedPasswords;
interfaces = {
ETHInterface = if (cfg.ETHInterface.bind != "") then [ (parseModules cfg.ETHInterface) ] else [ ];
UDPInterface = if (cfg.UDPInterface.bind != "") then [ (parseModules cfg.UDPInterface) ] else [ ];
ETHInterface = if (cfg.ETHInterface.bind != "") then [ (checkPeers cfg.ETHInterface) ] else [ ];
UDPInterface = if (cfg.UDPInterface.bind != "") then [ (checkPeers cfg.UDPInterface) ] else [ ];
};
privateKey = "@CJDNS_PRIVATE_KEY@";
@ -134,12 +125,12 @@ in
'';
};
connectTo = mkOption {
type = types.attrsOf (types.attrsOf types.str);
default = { };
example = {
"192.168.1.1:27313" = {
hostname = "homer.hype";
password = "5kG15EfpdcKNX3f2GSQ0H1HC7yIfxoCoImnO5FHM";
user = "foobar";
password = "5kG15EfpdcKNX3f2GSQ0H1HC7yIfxoCoImnO5FHM";
publicKey = "371zpkgs8ss387tmr81q04mp0hg1skb51hw34vk1cq644mjqhup0.k";
};
};
@ -179,12 +170,12 @@ in
};
connectTo = mkOption {
type = types.attrsOf (types.attrsOf types.str);
default = { };
example = {
"01:02:03:04:05:06" = {
hostname = "homer.hype";
password = "5kG15EfpdcKNX3f2GSQ0H1HC7yIfxoCoImnO5FHM";
user = "foobar";
password = "5kG15EfpdcKNX3f2GSQ0H1HC7yIfxoCoImnO5FHM";
publicKey = "371zpkgs8ss387tmr81q04mp0hg1skb51hw34vk1cq644mjqhup0.k";
};
};
@ -207,8 +198,9 @@ in
systemd.services.cjdns = {
description = "cjdns: routing engine designed for security, scalability, speed and ease of use";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
wantedBy = [ "multi-user.target" "sleep.target"];
after = [ "network-online.target" ];
bindsTo = [ "network-online.target" ];
preStart = if cfg.confFile != null then "" else ''
[ -e /etc/cjdns.keys ] && source /etc/cjdns.keys
@ -244,7 +236,9 @@ in
serviceConfig = {
Type = "forking";
Restart = "on-failure";
Restart = "always";
StartLimitInterval = 0;
RestartSec = 1;
CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW";
AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_RAW";
ProtectSystem = "full";
@ -254,8 +248,6 @@ in
};
};
assertions = [
{ assertion = ( cfg.ETHInterface.bind != "" || cfg.UDPInterface.bind != "" || cfg.confFile != null );
message = "Neither cjdns.ETHInterface.bind nor cjdns.UDPInterface.bind defined.";


@ -0,0 +1,61 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.dante;
confFile = pkgs.writeText "dante-sockd.conf" ''
user.privileged: root
user.unprivileged: dante
${cfg.config}
'';
in
{
meta = {
maintainers = with maintainers; [ arobyn ];
};
options = {
services.dante = {
enable = mkEnableOption "Dante SOCKS proxy";
config = mkOption {
default = null;
type = types.nullOr types.str;
description = ''
Contents of Dante's configuration file.
NOTE: user.privileged/user.unprivileged are set by the service
'';
};
};
};
config = mkIf cfg.enable {
assertions = [
{ assertion = cfg.config != null;
message = "please provide Dante configuration file contents";
}
];
users.users.dante = {
description = "Dante SOCKS proxy daemon user";
isSystemUser = true;
group = "dante";
};
users.groups.dante = {};
systemd.services.dante = {
description = "Dante SOCKS v4 and v5 compatible proxy server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "simple";
ExecStart = "${pkgs.dante}/bin/sockd -f ${confFile}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
Restart = "always";
};
};
};
}
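A usage sketch for the new module (the sockd.conf directives are standard Dante configuration, assumed here rather than taken from this diff; user.privileged and user.unprivileged are injected by the service itself):

  {
    services.dante = {
      enable = true;
      config = ''
        internal: 127.0.0.1 port = 1080
        external: eth0

        clientmethod: none
        socksmethod: none

        client pass { from: 127.0.0.0/8 to: 0.0.0.0/0 }
        socks pass { from: 127.0.0.0/8 to: 0.0.0.0/0 }
      '';
    };
  }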


@ -0,0 +1,63 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.fakeroute;
routeConf = pkgs.writeText "route.conf" (concatStringsSep "\n" cfg.route);
in
{
###### interface
options = {
services.fakeroute = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable the fakeroute service.
'';
};
route = mkOption {
type = types.listOf types.str;
default = [];
example = [
"216.102.187.130"
"4.0.1.122"
"198.116.142.34"
"63.199.8.242"
];
description = ''
Fake route that will appear after the real
one to any host running a traceroute.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.fakeroute = {
description = "Fakeroute Daemon";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
User = "root";
ExecStart = "${pkgs.fakeroute}/bin/fakeroute -f ${routeConf}";
};
};
};
}
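A usage sketch reusing the option's own example hops:

  {
    services.fakeroute = {
      enable = true;
      route = [
        "216.102.187.130"
        "4.0.1.122"
        "198.116.142.34"
        "63.199.8.242"
      ];
    };
  }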
