Merge remote-tracking branch 'origin/staging' into systemd-219

Conflicts:
	pkgs/os-specific/linux/kernel/linux-3.4.nix
	pkgs/os-specific/linux/systemd/default.nix
Eelco Dolstra 2015-07-20 22:57:23 +02:00
commit bc1773fe16
2207 changed files with 83880 additions and 41719 deletions


@ -1 +1 @@
15.06
15.07

CONTRIBUTING.md Normal file

@ -0,0 +1,12 @@
# How to contribute
## Opening issues
* Make sure you have a [GitHub account](https://github.com/signup/free)
* [Submit an issue](https://github.com/NixOS/nixpkgs/issues) - assuming one does not already exist.
* Clearly describe the issue, including steps to reproduce when it is a bug.
* Include information about which versions of nixpkgs and Nix you are using (nixos-version or git revision).
## Submitting changes
See the nixpkgs manual for details on how to [Submit changes to nixpkgs](http://hydra.nixos.org/job/nixpkgs/trunk/manual/latest/download-by-type/doc/manual#chap-submitting-changes).


@ -5,7 +5,7 @@
[![Issue Stats](http://www.issuestats.com/github/nixos/nixpkgs/badge/issue)](http://www.issuestats.com/github/nixos/nixpkgs)
Nixpkgs is a collection of packages for the [Nix](https://nixos.org/nix/) package
manager. It is periodically build and tested by the [hydra](http://hydra.nixos.org/)
manager. It is periodically built and tested by the [hydra](http://hydra.nixos.org/)
build daemon as so-called channels. To get channel information via git, add
[nixpkgs-channels](https://github.com/NixOS/nixpkgs-channels.git) as a remote:
@ -15,7 +15,7 @@ build daemon as so-called channels. To get channel information via git, add
For stability and maximum binary package support, it is recommended to maintain
custom changes on top of one of the channels, e.g. `nixos-14.12` for the latest
release and `nixos-unstable` for the latest successfully build master:
release and `nixos-unstable` for the latest successful build of master:
```
% git remote update channels
@ -40,6 +40,3 @@ Communication:
* [Mailing list](http://lists.science.uu.nl/mailman/listinfo/nix-dev)
* [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
---
[![Throughput Graph](https://graphs.waffle.io/nixos/nixpkgs/throughput.svg)](https://waffle.io/nixos/nixpkgs/metrics)


@ -5,7 +5,7 @@
<title>Coding conventions</title>
<section><title>Syntax</title>
<section xml:id="sec-syntax"><title>Syntax</title>
<itemizedlist>
@ -207,7 +207,7 @@ args.stdenv.mkDerivation (args // {
</section>
<section><title>Package naming</title>
<section xml:id="sec-package-naming"><title>Package naming</title>
<para>In Nixpkgs, there are generally three different names associated with a package:
@ -292,7 +292,7 @@ dashes between words — not in camel case. For instance, it should be
<filename>allPackages.nix</filename> or
<filename>AllPackages.nix</filename>.</para>
<section><title>Hierarchy</title>
<section xml:id="sec-hierarchy"><title>Hierarchy</title>
<para>Each package should be stored in its own directory somewhere in
the <filename>pkgs/</filename> tree, i.e. in
@ -451,12 +451,17 @@ splitting up an existing category.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>If it’s a <emphasis>desktop environment</emphasis>
(including <emphasis>window managers</emphasis>):</term>
<term>If it’s a <emphasis>desktop environment</emphasis>:</term>
<listitem>
<para><filename>desktops</filename> (e.g. <filename>kde</filename>, <filename>gnome</filename>, <filename>enlightenment</filename>)</para>
</listitem>
</varlistentry>
<varlistentry>
<term>If it’s a <emphasis>window manager</emphasis>:</term>
<listitem>
<para><filename>applications/window-managers</filename> (e.g. <filename>awesome</filename>, <filename>compiz</filename>, <filename>stumpwm</filename>)</para>
</listitem>
</varlistentry>
<varlistentry>
<term>If it’s an <emphasis>application</emphasis>:</term>
<listitem>
@ -620,33 +625,39 @@ evaluate correctly.</para>
fetchers from <literal>pkgs/build-support/</literal>. As an example going
from bad to good:
<itemizedlist>
<listitem><para>Uses <literal>git://</literal> which won't be proxied.
<programlisting>
src = fetchgit {
url = "git://github.com/NixOS/nix.git";
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
sha256 = "1cw5fszffl5pkpa6s6wjnkiv6lm5k618s32sp60kvmvpy7a2v9kg";
}
</programlisting></para>
<listitem>
<para>Uses <literal>git://</literal> which won't be proxied.
<programlisting>
src = fetchgit {
url = "git://github.com/NixOS/nix.git";
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
sha256 = "1cw5fszffl5pkpa6s6wjnkiv6lm5k618s32sp60kvmvpy7a2v9kg";
}
</programlisting>
</para>
</listitem>
<listitem><para>This is ok, but an archive fetch will still be faster.
<programlisting>
src = fetchgit {
url = "https://github.com/NixOS/nix.git";
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
sha256 = "1cw5fszffl5pkpa6s6wjnkiv6lm5k618s32sp60kvmvpy7a2v9kg";
}
</programlisting></para>
<listitem>
<para>This is ok, but an archive fetch will still be faster.
<programlisting>
src = fetchgit {
url = "https://github.com/NixOS/nix.git";
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
sha256 = "1cw5fszffl5pkpa6s6wjnkiv6lm5k618s32sp60kvmvpy7a2v9kg";
}
</programlisting>
</para>
</listitem>
<listitem><para>Fetches a snapshot archive and you get the rev you want.
<programlisting>
src = fetchFromGitHub {
owner = "NixOS";
repo = "nix";
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
sha256 = "04yri911rj9j19qqqn6m82266fl05pz98inasni0vxr1cf1gdgv9";
}
</programlisting></para>
<listitem>
<para>Fetches a snapshot archive and you get the rev you want.
<programlisting>
src = fetchFromGitHub {
owner = "NixOS";
repo = "nix";
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
sha256 = "04yri911rj9j19qqqn6m82266fl05pz98inasni0vxr1cf1gdgv9";
}
</programlisting>
</para>
</listitem>
</itemizedlist>
</para>


@ -2,18 +2,19 @@
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-contributing">
<title>Contributing</title>
<title>Contributing to this documentation</title>
<para>If you make modifications to the manual, it's important to build the manual before contributing:</para>
<para>The DocBook sources of the Nixpkgs manual are in the <filename
xlink:href="https://github.com/NixOS/nixpkgs/tree/master/doc">doc</filename>
subdirectory of the Nixpkgs repository. If you make modifications to
the manual, it's important to build it before committing. You can do that as follows:
<orderedlist>
<screen>
$ cd /path/to/nixpkgs
$ nix-build doc
</screen>
<listitem><para><command>$ git clone git://github.com/NixOS/nixpkgs.git</command></para></listitem>
<listitem><para><command>$ nix-build -A manual nixpkgs/pkgs/top-level/release.nix</command></para></listitem>
<listitem><para>Inside the built derivation you shall see <literal>manual/index.html</literal> file.</para></listitem>
</orderedlist>
If the build succeeds, the manual will be in
<filename>./result/share/doc/nixpkgs/manual.html</filename>.</para>
</chapter>


@ -36,6 +36,9 @@ stdenv.mkDerivation {
cp ${./style.css} $dst/style.css
mkdir -p $dst/images/callouts
cp ${docbook5_xsl}/xml/xsl/docbook/images/callouts/*.gif $dst/images/callouts/
mkdir -p $out/nix-support
echo "doc manual $dst manual.html" >> $out/nix-support/hydra-build-products
'';

doc/functions.xml Normal file

@ -0,0 +1,120 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-functions">
<title>Functions reference</title>
<para>
The nixpkgs repository has several utility functions to manipulate Nix expressions.
</para>
<section xml:id="sec-pkgs-overridePackages">
<title>pkgs.overridePackages</title>
<para>
This function inside the nixpkgs expression (<varname>pkgs</varname>)
can be used to override the set of packages itself.
</para>
<para>
Warning: this function is expensive and must not be used from within
the nixpkgs repository.
</para>
<para>
Example usage:
<programlisting>let
pkgs = import &lt;nixpkgs&gt; {};
newpkgs = pkgs.overridePackages (self: super: {
foo = super.foo.override { ... };
});
in ...</programlisting>
</para>
<para>
The resulting <varname>newpkgs</varname> will have the new <varname>foo</varname>
expression, and all other expressions depending on <varname>foo</varname> will also
use the new <varname>foo</varname> expression.
</para>
<para>
The behavior of this function is similar to <link
linkend="sec-modify-via-packageOverrides">config.packageOverrides</link>.
</para>
<para>
The <varname>self</varname> parameter refers to the final package set with the
applied overrides. Using this parameter may lead to infinite recursion if not
used consciously.
</para>
<para>
The <varname>super</varname> parameter refers to the old package set.
It's equivalent to <varname>pkgs</varname> in the above example.
</para>
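<para>
As a minimal sketch of the difference between the two parameters
(the attribute <varname>bar</varname> and the flag <varname>enableFoo</varname>
are hypothetical):
<programlisting>newpkgs = pkgs.overridePackages (self: super: {
  # super.foo is the original foo from the old package set
  foo = super.foo.override { enableFoo = true; };
  # self.foo is the overridden foo as seen in the final package set
  bar = super.bar.override { foo = self.foo; };
})</programlisting>
</para>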
</section>
<section xml:id="sec-pkg-override">
<title>&lt;pkg&gt;.override</title>
<para>
The function <varname>override</varname> is usually available for all the
derivations in the nixpkgs expression (<varname>pkgs</varname>).
</para>
<para>
It is used to override the arguments passed to a function.
</para>
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<programlisting>pkgs.overridePackages (self: super: {
foo = super.foo.override { barSupport = true ; };
})</programlisting>
<programlisting>mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
};</programlisting>
</para>
<para>
In the first example, <varname>pkgs.foo</varname> is the result of a function call
with some default arguments, usually a derivation.
Using <varname>pkgs.foo.override</varname> will call the same function with
the given new arguments.
</para>
</section>
<section xml:id="sec-lib-makeOverridable">
<title>lib.makeOverridable</title>
<para>
The function <varname>lib.makeOverridable</varname> is used to make the result
of a function easily customizable. This utility only makes sense for functions
that accept an argument set and return an attribute set.
</para>
<para>
Example usage:
<programlisting>f = { a, b }: { result = a+b; }
c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</para>
<para>
The variable <varname>c</varname> is the value of the <varname>f</varname> function
applied with some default arguments. Hence the value of <varname>c.result</varname>
is <literal>3</literal>, in this example.
</para>
<para>
The variable <varname>c</varname> however also has some additional functions, like
<link linkend="sec-pkg-override">c.override</link> which can be used to
override the default arguments. In this example the value of
<varname>(c.override { a = 4; }).result</varname> is 6.
</para>
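<para>
Continuing the example above, a short sketch of that usage:
<programlisting>d = c.override { a = 4; }
# d.result is 6, while c.result is still 3</programlisting>
</para>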
</section>
</chapter>

doc/haskell-users-guide.xml Normal file

@ -0,0 +1,758 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="users-guide-to-the-haskell-infrastructure">
<title>User's Guide to the Haskell Infrastructure</title>
<section xml:id="how-to-install-haskell-packages">
<title>How to install Haskell packages</title>
<para>
Nixpkgs distributes build instructions for all Haskell packages
registered on
<link xlink:href="http://hackage.haskell.org/">Hackage</link>, but
strangely enough normal Nix package lookups don't seem to discover
any of them:
</para>
<programlisting>
$ nix-env -qa cabal-install
error: selector cabal-install matches no derivations
$ nix-env -i ghc
error: selector ghc matches no derivations
</programlisting>
<para>
The Haskell package set is not registered in the top-level namespace
because it is <emphasis>huge</emphasis>. If all Haskell packages
were visible to these commands, then name-based search/install
operations would be much slower than they are now. We avoided that
by keeping all Haskell-related packages in a separate attribute set
called <literal>haskellPackages</literal>, which the following
command will list:
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A haskellPackages
haskellPackages.a50 a50-0.5
haskellPackages.abacate haskell-abacate-0.0.0.0
haskellPackages.abcBridge haskell-abcBridge-0.12
haskellPackages.afv afv-0.1.1
haskellPackages.alex alex-3.1.4
haskellPackages.Allure Allure-0.4.101.1
haskellPackages.alms alms-0.6.7
[... some 8000 entries omitted ...]
</programlisting>
<para>
To install any of those packages into your profile, refer to them by
their attribute path (first column):
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.Allure ...
</programlisting>
<para>
The attribute path of any Haskell package corresponds to the name
of that particular package on Hackage: the package
<literal>cabal-install</literal> has the attribute
<literal>haskellPackages.cabal-install</literal>, and so on.
(Actually, this convention causes trouble with packages like
<literal>3dmodels</literal> and <literal>4Blocks</literal>, because
these names are invalid identifiers in the Nix language. The issue
of how to deal with these rare corner cases is currently
unresolved.)
</para>
<para>
Haskell packages whose Nix name (second column) begins with a
<literal>haskell-</literal> prefix are packages that provide a
library whereas packages without that prefix provide just
executables. Libraries may provide executables too, though: the
package <literal>haskell-pandoc</literal>, for example, installs
both a library and an application. You can install and use Haskell
executables just like any other program in Nixpkgs, but using
Haskell libraries for development is a bit trickier and we'll
address that subject in great detail in section
<link linkend="how-to-create-a-development-environment">How to
create a development environment</link>.
</para>
<para>
Attribute paths are deterministic inside of Nixpkgs, but the path
necessary to reach Nixpkgs varies from system to system. We dodged
that problem by giving <literal>nix-env</literal> an explicit
<literal>-f &quot;&lt;nixpkgs&gt;&quot;</literal> parameter, but if
you call <literal>nix-env</literal> without that flag, then chances
are the invocation fails:
</para>
<programlisting>
$ nix-env -iA haskellPackages.cabal-install
error: attribute haskellPackages in selection path
haskellPackages.cabal-install not found
</programlisting>
<para>
On NixOS, for example, Nixpkgs does <emphasis>not</emphasis> exist
in the top-level namespace by default. To figure out the proper
attribute path, it's easiest to query for the path of a well-known
Nixpkgs package, i.e.:
</para>
<programlisting>
$ nix-env -qaP coreutils
nixos.pkgs.coreutils coreutils-8.23
</programlisting>
<para>
If your system responds like that (most NixOS installations will),
then the attribute path to <literal>haskellPackages</literal> is
<literal>nixos.pkgs.haskellPackages</literal>. Thus, if you want to
use <literal>nix-env</literal> without giving an explicit
<literal>-f</literal> flag, then that's the way to do it:
</para>
<programlisting>
$ nix-env -qaP -A nixos.pkgs.haskellPackages
$ nix-env -iA nixos.pkgs.haskellPackages.cabal-install
</programlisting>
<para>
Our current default compiler is GHC 7.10.x and the
<literal>haskellPackages</literal> set contains packages built with
that particular version. Nixpkgs contains the latest major release
of every GHC since 6.10.4, however, and there is a whole family of
package sets available that define Hackage packages built with each
of those compilers, too:
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A haskell.packages.ghc6123
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A haskell.packages.ghc763
</programlisting>
<para>
The name <literal>haskellPackages</literal> is really just a synonym
for <literal>haskell.packages.ghc7101</literal>, because we prefer
that package set internally and recommend it to our users as their
default choice, but ultimately you are free to compile your Haskell
packages with any GHC version you please. The following command
displays the complete list of available compilers:
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A haskell.compiler
haskell.compiler.ghc6104 ghc-6.10.4
haskell.compiler.ghc6123 ghc-6.12.3
haskell.compiler.ghc704 ghc-7.0.4
haskell.compiler.ghc722 ghc-7.2.2
haskell.compiler.ghc742 ghc-7.4.2
haskell.compiler.ghc763 ghc-7.6.3
haskell.compiler.ghc784 ghc-7.8.4
haskell.compiler.ghc7101 ghc-7.10.1
haskell.compiler.ghcHEAD ghc-7.11.20150402
haskell.compiler.ghcNokinds ghc-nokinds-7.11.20150704
haskell.compiler.ghcjs ghcjs-0.1.0
haskell.compiler.jhc jhc-0.8.2
haskell.compiler.uhc uhc-1.1.9.0
</programlisting>
<para>
We have no package sets for <literal>jhc</literal> or
<literal>uhc</literal> yet, unfortunately, but for every version of
GHC listed above, there exists a package set based on that compiler.
Also, the attributes <literal>haskell.compiler.ghcXYC</literal> and
<literal>haskell.packages.ghcXYC.ghc</literal> are synonymous for
the sake of convenience.
</para>
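<para>
For example, either of the following commands should install the same
GHC 7.8.4 derivation, since the two attribute paths are synonymous:
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskell.compiler.ghc784
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskell.packages.ghc784.ghc
</programlisting>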
</section>
<section xml:id="how-to-create-a-development-environment">
<title>How to create a development environment</title>
<section xml:id="how-to-install-a-compiler">
<title>How to install a compiler</title>
<para>
A simple development environment consists of a Haskell compiler
and the tool <literal>cabal-install</literal>, and we saw in
section <link linkend="how-to-install-haskell-packages">How to
install Haskell packages</link> how you can install those programs
into your user profile:
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.ghc haskellPackages.cabal-install
</programlisting>
<para>
Instead of the default package set
<literal>haskellPackages</literal>, you can also use the more
precise name <literal>haskell.compiler.ghc7101</literal>, which
has the advantage that it refers to the same GHC version
regardless of what Nixpkgs considers &quot;default&quot; at any
given time.
</para>
<para>
Once you've made those tools available in
<literal>$PATH</literal>, it's possible to build Hackage packages
the same way people without access to Nix do it all the time:
</para>
<programlisting>
$ cabal get lens-4.11 &amp;&amp; cd lens-4.11
$ cabal install -j --dependencies-only
$ cabal configure
$ cabal build
</programlisting>
<para>
If you enjoy working with Cabal sandboxes, then that's entirely
possible too: just execute the command
</para>
<programlisting>
$ cabal sandbox init
</programlisting>
<para>
before installing the required dependencies.
</para>
<para>
The <literal>nix-shell</literal> utility makes it easy to switch
to a different compiler version; just enter the Nix shell
environment with the command
</para>
<programlisting>
$ nix-shell -p haskell.compiler.ghc784
</programlisting>
<para>
to bring GHC 7.8.4 into <literal>$PATH</literal>. Re-running
<literal>cabal configure</literal> switches your build to use that
compiler instead. If you're working on a project that doesn't
depend on any additional system libraries outside of GHC, then
it's sufficient even to run the <literal>cabal configure</literal>
command inside of the shell:
</para>
<programlisting>
$ nix-shell -p haskell.compiler.ghc784 --command &quot;cabal configure&quot;
</programlisting>
<para>
Afterwards, all other commands like <literal>cabal build</literal>
work just fine in any shell environment, because the configure
phase recorded the absolute paths to all required tools like GHC
in its build configuration inside of the <literal>dist/</literal>
directory. Please note, however, that
<literal>nix-collect-garbage</literal> can break such an
environment because the Nix store paths created by
<literal>nix-shell</literal> aren't &quot;alive&quot; anymore once
<literal>nix-shell</literal> has terminated. If you find that your
Haskell builds no longer work after garbage collection, then
you'll have to re-run <literal>cabal configure</literal> inside of
a new <literal>nix-shell</literal> environment.
</para>
</section>
<section xml:id="how-to-install-a-compiler-with-libraries">
<title>How to install a compiler with libraries</title>
<para>
GHC expects to find all installed libraries inside of its own
<literal>lib</literal> directory. This approach works fine on
traditional Unix systems, but it doesn't work for Nix, because
GHC's store path is immutable once it's built. We cannot install
additional libraries into that location. As a consequence, our
copies of GHC don't know any packages except their own core
libraries, like <literal>base</literal>,
<literal>containers</literal>, <literal>Cabal</literal>, etc.
</para>
<para>
We can register additional libraries to GHC, however, using a
special build function called <literal>ghcWithPackages</literal>.
That function expects one argument: a function that maps from an
attribute set of Haskell packages to a list of packages, which
determines the libraries known to that particular version of GHC.
For example, the Nix expression
<literal>ghcWithPackages (pkgs: [pkgs.mtl])</literal> generates a
copy of GHC that has the <literal>mtl</literal> library registered
in addition to its normal core packages:
</para>
<programlisting>
$ nix-shell -p &quot;haskellPackages.ghcWithPackages (pkgs: [pkgs.mtl])&quot;
[nix-shell:~]$ ghc-pkg list mtl
/nix/store/zy79...-ghc-7.10.1/lib/ghc-7.10.1/package.conf.d:
mtl-2.2.1
</programlisting>
<para>
This function allows users to define their own development
environment by means of an override. After adding the following
snippet to <literal>~/.nixpkgs/config.nix</literal>,
</para>
<programlisting>
{
packageOverrides = super: let self = super.pkgs; in
{
myHaskellEnv = self.haskell.packages.ghc7101.ghcWithPackages
(haskellPackages: with haskellPackages; [
# libraries
arrows async cgi criterion
# tools
cabal-install haskintex
]);
};
}
</programlisting>
<para>
it's possible to install that compiler with
<literal>nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA myHaskellEnv</literal>.
If you'd like to switch that development environment to a
different version of GHC, just replace the
<literal>ghc7101</literal> bit in the previous definition with the
appropriate name. Of course, it's also possible to define any
number of these development environments! (You can't install two
of them into the same profile at the same time, though, because
that would result in file conflicts.)
</para>
<para>
The generated <literal>ghc</literal> program is a wrapper script
that re-directs the real GHC executable to use a new
<literal>lib</literal> directory --- one that we specifically
constructed to contain all those packages the user requested:
</para>
<programlisting>
$ cat $(type -p ghc)
#! /nix/store/xlxj...-bash-4.3-p33/bin/bash -e
export NIX_GHC=/nix/store/19sm...-ghc-7.10.1/bin/ghc
export NIX_GHCPKG=/nix/store/19sm...-ghc-7.10.1/bin/ghc-pkg
export NIX_GHC_DOCDIR=/nix/store/19sm...-ghc-7.10.1/share/doc/ghc/html
export NIX_GHC_LIBDIR=/nix/store/19sm...-ghc-7.10.1/lib/ghc-7.10.1
exec /nix/store/j50p...-ghc-7.10.1/bin/ghc &quot;-B$NIX_GHC_LIBDIR&quot; &quot;$@&quot;
</programlisting>
<para>
The variables <literal>$NIX_GHC</literal>,
<literal>$NIX_GHCPKG</literal>, etc. point to the
<emphasis>new</emphasis> store path
<literal>ghcWithPackages</literal> constructed specifically for
this environment. The last line of the wrapper script then
executes the real <literal>ghc</literal>, but passes the path to
the new <literal>lib</literal> directory using GHC's
<literal>-B</literal> flag.
</para>
<para>
The purpose of those environment variables is to work around an
impurity in the popular
<link xlink:href="http://hackage.haskell.org/package/ghc-paths">ghc-paths</link>
library. That library promises to give its users access to GHC's
installation paths. Only, the library can't possibly know that
path when it's compiled, because the path GHC considers its own is
determined only much later, when the user configures it through
<literal>ghcWithPackages</literal>. So we
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/haskell-modules/ghc-paths-nix.patch">patched
ghc-paths</link> to return the paths found in those environment
variables at run-time rather than trying to guess them at
compile-time.
</para>
<para>
To make sure that mechanism works properly all the time, we
recommend that you set those variables to meaningful values in
your shell environment, too, i.e. by adding the following code to
your <literal>~/.bashrc</literal>:
</para>
<programlisting>
if type &gt;/dev/null 2&gt;&amp;1 -p ghc; then
eval &quot;$(egrep ^export &quot;$(type -p ghc)&quot;)&quot;
fi
</programlisting>
<para>
If you are certain that you'll use only one GHC environment which
is located in your user profile, then you can use the following
code, too, which has the advantage that it doesn't contain any
paths from the Nix store, i.e. those settings always remain valid
even if a <literal>nix-env -u</literal> operation updates the GHC
environment in your profile:
</para>
<programlisting>
if [ -e ~/.nix-profile/bin/ghc ]; then
export NIX_GHC=&quot;$HOME/.nix-profile/bin/ghc&quot;
export NIX_GHCPKG=&quot;$HOME/.nix-profile/bin/ghc-pkg&quot;
export NIX_GHC_DOCDIR=&quot;$HOME/.nix-profile/share/doc/ghc/html&quot;
export NIX_GHC_LIBDIR=&quot;$HOME/.nix-profile/lib/ghc-$($NIX_GHC --numeric-version)&quot;
fi
</programlisting>
</section>
<section xml:id="how-to-create-ad-hoc-environments-for-nix-shell">
<title>How to create ad hoc environments for
<literal>nix-shell</literal></title>
<para>
The easiest way to create an ad hoc development environment is to
run <literal>nix-shell</literal> with the appropriate GHC
environment given on the command-line:
</para>
<programlisting>
nix-shell -p &quot;haskellPackages.ghcWithPackages (pkgs: with pkgs; [mtl pandoc])&quot;
</programlisting>
<para>
For more sophisticated use-cases, however, it's more convenient to
save the desired configuration in a file called
<literal>shell.nix</literal> that looks like this:
</para>
<programlisting>
{ nixpkgs ? import &lt;nixpkgs&gt; {}, compiler ? &quot;ghc7101&quot; }:
let
inherit (nixpkgs) pkgs;
ghc = pkgs.haskell.packages.${compiler}.ghcWithPackages (ps: with ps; [
monad-par mtl
]);
in
pkgs.stdenv.mkDerivation {
name = &quot;my-haskell-env-0&quot;;
buildInputs = [ ghc ];
shellHook = &quot;eval $(egrep ^export ${ghc}/bin/ghc)&quot;;
}
</programlisting>
<para>
Now run <literal>nix-shell</literal> --- or even
<literal>nix-shell --pure</literal> --- to enter a shell
environment that has the appropriate compiler in
<literal>$PATH</literal>. If you use <literal>--pure</literal>,
then add all other packages that your development environment
needs into the <literal>buildInputs</literal> attribute. If you'd
like to switch to a different compiler version, then pass an
appropriate <literal>compiler</literal> argument to the
expression, i.e.
<literal>nix-shell --argstr compiler ghc784</literal>.
</para>
<para>
If you need such an environment because you'd like to compile a
Hackage package outside of Nix --- i.e. because you're hacking on
the latest version from Git ---, then the package set provides
suitable nix-shell environments for you already! Every Haskell
package has an <literal>env</literal> attribute that provides a
shell environment suitable for compiling that particular package.
If you'd like to hack the <literal>lens</literal> library, for
example, then you just have to check out the source code and enter
the appropriate environment:
</para>
<programlisting>
$ cabal get lens-4.11 &amp;&amp; cd lens-4.11
Downloading lens-4.11...
Unpacking to lens-4.11/
$ nix-shell &quot;&lt;nixpkgs&gt;&quot; -A haskellPackages.lens.env
[nix-shell:/tmp/lens-4.11]$
</programlisting>
<para>
At this point, you can run <literal>cabal configure</literal>,
<literal>cabal build</literal>, and all the other development
commands. Note that you need <literal>cabal-install</literal>
installed in your <literal>$PATH</literal> already to use it here
--- the <literal>nix-shell</literal> environment does not provide
it.
</para>
</section>
</section>
<section xml:id="how-to-create-nix-builds-for-your-own-private-haskell-packages">
<title>How to create Nix builds for your own private Haskell
packages</title>
<para>
If your own Haskell packages have build instructions for Cabal, then
you can convert those automatically into build instructions for Nix
using the <literal>cabal2nix</literal> utility, which you can
install into your profile by running
<literal>nix-env -i cabal2nix</literal>.
</para>
<section xml:id="how-to-build-a-stand-alone-project">
<title>How to build a stand-alone project</title>
<para>
For example, let's assume that you're working on a private project
called <literal>foo</literal>. To generate a Nix build expression
for it, change into the project's top-level directory and run the
command:
</para>
<programlisting>
$ cabal2nix . &gt;foo.nix
</programlisting>
<para>
Then write the following snippet into a file called
<literal>default.nix</literal>:
</para>
<programlisting>
{ nixpkgs ? import &lt;nixpkgs&gt; {}, compiler ? &quot;ghc7101&quot; }:
nixpkgs.pkgs.haskell.packages.${compiler}.callPackage ./foo.nix { }
</programlisting>
<para>
Finally, store the following code in a file called
<literal>shell.nix</literal>:
</para>
<programlisting>
{ nixpkgs ? import &lt;nixpkgs&gt; {}, compiler ? &quot;ghc7101&quot; }:
(import ./default.nix { inherit nixpkgs compiler; }).env
</programlisting>
<para>
At this point, you can run <literal>nix-build</literal> to have
Nix compile your project and install it into a Nix store path. The
local directory will contain a symlink called
<literal>result</literal> after <literal>nix-build</literal>
returns that points into that location. Of course, passing the
flag <literal>--argstr compiler ghc763</literal> allows switching
the build to any version of GHC currently supported.
</para>
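<para>
A brief sketch of that workflow:
</para>
<programlisting>
$ nix-build
$ ls -l result                        # symlink into the Nix store
$ nix-build --argstr compiler ghc763  # same project built with GHC 7.6.3
</programlisting>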
<para>
Furthermore, you can call <literal>nix-shell</literal> to enter an
interactive development environment in which you can use
<literal>cabal configure</literal> and
<literal>cabal build</literal> to develop your code. That
environment will automatically contain a proper GHC derivation
with all the required libraries registered as well as all the
system-level libraries your package might need.
</para>
<para>
If your package does not depend on any system-level libraries,
then it's sufficient to run
</para>
<programlisting>
$ nix-shell --command &quot;cabal configure&quot;
</programlisting>
<para>
once to set up your build. <literal>cabal-install</literal>
determines the absolute paths to all resources required for the
build and writes them into a config file in the
<literal>dist/</literal> directory. Once that's done, you can run
<literal>cabal build</literal> and any other command for that
project even outside of the <literal>nix-shell</literal>
environment. This feature is particularly nice for those of us who
like to edit their code with an IDE, like Emacs'
<literal>haskell-mode</literal>, because it's not necessary to
start Emacs inside of nix-shell just to make it find out the
necessary settings for building the project;
<literal>cabal-install</literal> has already done that for us.
</para>
<para>
If you want to do some quick-and-dirty hacking and don't want to
bother setting up a <literal>default.nix</literal> and
<literal>shell.nix</literal> file manually, then you can use the
<literal>--shell</literal> flag offered by
<literal>cabal2nix</literal> to have it generate a stand-alone
<literal>nix-shell</literal> environment for you. With that
feature, running
</para>
<programlisting>
$ cabal2nix --shell . &gt;shell.nix
$ nix-shell --command &quot;cabal configure&quot;
</programlisting>
<para>
is usually enough to set up a build environment for any given
Haskell package. You can even use that generated file to run
<literal>nix-build</literal>, too:
</para>
<programlisting>
$ nix-build shell.nix
</programlisting>
</section>
<section xml:id="how-to-build-projects-that-depend-on-each-other">
<title>How to build projects that depend on each other</title>
<para>
If you have multiple private Haskell packages that depend on each
other, then you'll have to register those packages in the Nixpkgs
set to make them visible for the dependency resolution performed
by <literal>callPackage</literal>. First of all, change into each
of your projects' top-level directories and generate a
<literal>default.nix</literal> file with
<literal>cabal2nix</literal>:
</para>
<programlisting>
$ cd ~/src/foo &amp;&amp; cabal2nix . &gt;default.nix
$ cd ~/src/bar &amp;&amp; cabal2nix . &gt;default.nix
</programlisting>
<para>
Then edit your <literal>~/.nixpkgs/config.nix</literal> file to
register those builds in the default Haskell package set:
</para>
<programlisting>
{
packageOverrides = super: let self = super.pkgs; in
{
haskellPackages = super.haskellPackages.override {
overrides = self: super: {
foo = self.callPackage ../src/foo {};
bar = self.callPackage ../src/bar {};
};
};
};
}
</programlisting>
<para>
Once that's accomplished,
<literal>nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qA haskellPackages</literal>
will show your packages like any other package from Hackage, and
you can build them
</para>
<programlisting>
$ nix-build &quot;&lt;nixpkgs&gt;&quot; -A haskellPackages.foo
</programlisting>
<para>
or enter an interactive shell environment suitable for building
them:
</para>
<programlisting>
$ nix-shell &quot;&lt;nixpkgs&gt;&quot; -A haskellPackages.bar.env
</programlisting>
</section>
</section>
<section xml:id="miscellaneous-topics">
<title>Miscellaneous Topics</title>
<section xml:id="how-to-build-with-profiling-enabled">
<title>How to build with profiling enabled</title>
<para>
Every Haskell package set takes a function called
<literal>overrides</literal> that you can use to manipulate the
package set as much as you please. One useful application of this
feature is to replace the default <literal>mkDerivation</literal>
function with one that enables library profiling for all packages.
To accomplish that, add the following snippet to your
<literal>~/.nixpkgs/config.nix</literal> file:
</para>
<programlisting>
{
packageOverrides = super: let self = super.pkgs; in
{
profiledHaskellPackages = self.haskellPackages.override {
overrides = self: super: {
mkDerivation = args: super.mkDerivation (args // {
enableLibraryProfiling = true;
});
};
};
};
}
</programlisting>
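<para>
With that override in place, packages from the profiled set can be
built like any other attribute; for example (<literal>lens</literal> is
just an illustrative choice):
</para>
<programlisting>
$ nix-build &quot;&lt;nixpkgs&gt;&quot; -A profiledHaskellPackages.lens
</programlisting>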
</section>
<section xml:id="how-to-override-package-versions-in-a-compiler-specific-package-set">
<title>How to override package versions in a compiler-specific
package set</title>
<para>
Nixpkgs provides the latest version of
<link xlink:href="http://hackage.haskell.org/package/ghc-events"><literal>ghc-events</literal></link>,
which is 0.4.4.0 at the time of this writing. This is fine for
users of GHC 7.10.x, but GHC 7.8.4 cannot compile that binary.
Now, one way to solve that problem is to register an older version
of <literal>ghc-events</literal> in the 7.8.x-specific package
set. The first step is to generate Nix build instructions with
<literal>cabal2nix</literal>:
</para>
<programlisting>
$ cabal2nix cabal://ghc-events-0.4.3.0 &gt;~/.nixpkgs/ghc-events-0.4.3.0.nix
</programlisting>
<para>
Then add the override in <literal>~/.nixpkgs/config.nix</literal>:
</para>
<programlisting>
{
packageOverrides = super: let self = super.pkgs; in
{
haskell = super.haskell // {
packages = super.haskell.packages // {
ghc784 = super.haskell.packages.ghc784.override {
overrides = self: super: {
ghc-events = self.callPackage ./ghc-events-0.4.3.0.nix {};
};
};
};
};
};
}
</programlisting>
<para>
This code is a little crazy, no doubt, but it's necessary because
the intuitive version
</para>
<programlisting>
haskell.packages.ghc784 = super.haskell.packages.ghc784.override {
overrides = self: super: {
ghc-events = self.callPackage ./ghc-events-0.4.3.0.nix {};
};
};
</programlisting>
<para>
doesn't do what we want it to: that code replaces the
<literal>haskell</literal> package set in Nixpkgs with one that
contains only one entry, <literal>packages</literal>, which
contains only one entry, <literal>ghc784</literal>. This override
loses the <literal>haskell.compiler</literal> set, and it loses
the <literal>haskell.packages.ghcXYZ</literal> sets for all
compilers but GHC 7.8.4. To avoid that problem, we have to perform
the convoluted little dance from above, iterating over each step
in the hierarchy.
</para>
<para>
Once it's accomplished, however, we can install a variant of
<literal>ghc-events</literal> that's compiled with GHC 7.8.4:
</para>
<programlisting>
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskell.packages.ghc784.ghc-events
</programlisting>
<para>
Unfortunately, it turns out that this build fails again while
executing the test suite! Apparently, the release archive on
Hackage is missing some data files that the test suite requires,
so we cannot run it. We can disable the test suite by re-generating the Nix
expression with the <literal>--no-check</literal> flag:
</para>
<programlisting>
$ cabal2nix --no-check cabal://ghc-events-0.4.3.0 &gt;~/.nixpkgs/ghc-events-0.4.3.0.nix
</programlisting>
<para>
Now the build succeeds.
</para>
<para>
Of course, in the concrete example of
<literal>ghc-events</literal> this whole exercise is not an ideal
solution, because <literal>ghc-events</literal> can analyze the
output emitted by any version of GHC later than 6.12 regardless of
the compiler version that was used to build the <literal>ghc-events</literal>
executable, so strictly speaking there's no reason to prefer one
built with GHC 7.8.x in the first place. However, for users who
cannot use GHC 7.10.x at all for some reason, the approach of
downgrading to an older version might be useful.
</para>
</section>
<section xml:id="how-to-recover-from-ghcs-infamous-non-deterministic-library-id-bug">
<title>How to recover from GHC's infamous non-deterministic library
ID bug</title>
<para>
GHC and distributed build farms don't get along well:
</para>
<programlisting>
https://ghc.haskell.org/trac/ghc/ticket/4012
</programlisting>
<para>
When you see an error like this one
</para>
<programlisting>
package foo-0.7.1.0 is broken due to missing package
text-1.2.0.4-98506efb1b9ada233bb5c2b2db516d91
</programlisting>
<para>
then you have to download and re-install <literal>foo</literal>
and all its dependents from scratch:
</para>
<programlisting>
# nix-store -q --referrers /nix/store/*-haskell-text-1.2.0.4 \
| nix-store --repair-path --option binary-caches http://hydra.nixos.org
</programlisting>
<para>
If you're using additional Hydra servers other than
<literal>hydra.nixos.org</literal>, then it might be necessary to
purge the local caches that store data from those machines to
disable these binary channels for the duration of the previous
command, i.e. by running:
</para>
<programlisting>
rm /nix/var/nix/binary-cache-v3.sqlite
rm /nix/var/nix/manifests/*
rm /nix/var/nix/channel-cache/*
</programlisting>
</section>
<section xml:id="builds-on-darwin-fail-with-math.h-not-found">
<title>Builds on Darwin fail with <literal>math.h</literal> not
found</title>
<para>
Users of GHC on Darwin have occasionally reported that builds
fail, because the compiler complains about a missing include file:
</para>
<programlisting>
fatal error: 'math.h' file not found
</programlisting>
<para>
The issue has been discussed at length in
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/6390">ticket
6390</link>, and so far no good solution has been proposed. As a
work-around, users who run into this problem can configure the
environment variables
</para>
<programlisting>
export NIX_CFLAGS_COMPILE=&quot;-idirafter /usr/include&quot;
export NIX_CFLAGS_LINK=&quot;-L/usr/lib&quot;
</programlisting>
<para>
in their <literal>~/.bashrc</literal> file to avoid the compiler
error.
</para>
</section>
</section>
</chapter>


@ -13,7 +13,7 @@ in Nixpkgs to easily build packages for other programming languages,
such as Perl or Haskell. These are described in this chapter.</para>
<section xml:id="ssec-language-perl"><title>Perl</title>
<section xml:id="sec-language-perl"><title>Perl</title>
<para>Nixpkgs provides a function <varname>buildPerlPackage</varname>,
a generic package builder function for any Perl package that has a
@ -151,7 +151,7 @@ ClassC3Componentised = buildPerlPackage rec {
</para>
<section><title>Generation from CPAN</title>
<section xml:id="ssec-generation-from-CPAN"><title>Generation from CPAN</title>
<para>Nix expressions for Perl packages can be generated (almost)
automatically from CPAN. This is done by the program
@ -191,7 +191,7 @@ you need it.</para>
</section>
<section xml:id="python"><title>Python</title>
<section xml:id="sec-python"><title>Python</title>
<para>
Currently supported interpreters are <varname>python26</varname>, <varname>python27</varname>,
@ -245,44 +245,44 @@ are provided with all modules included.</para>
Name of the folder in <literal>${python}/lib/</literal> for corresponding interpreter.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>interpreter</varname></term>
<listitem><para>
Alias for <literal>${python}/bin/${executable}.</literal>
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>buildEnv</varname></term>
<listitem><para>
Function to build python interpreter environments with extra packages bundled together.
See <xref linkend="python-build-env" /> for usage and documentation.
See <xref linkend="ssec-python-build-env" /> for usage and documentation.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>sitePackages</varname></term>
<listitem><para>
Alias for <literal>lib/${libPrefix}/site-packages</literal>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>executable</varname></term>
<listitem><para>
Name of the interpreter executable, e.g. <literal>python3.4</literal>.
</para></listitem>
</varlistentry>
</variablelist>
<section xml:id="build-python-package"><title><varname>buildPythonPackage</varname> function</title>
<section xml:id="ssec-build-python-package"><title><varname>buildPythonPackage</varname> function</title>
<para>
The function is implemented in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/python-modules/generic/default.nix">
<filename>pkgs/development/python-modules/generic/default.nix</filename></link>.
Example usage:
<programlisting language="nix">
twisted = buildPythonPackage {
name = "twisted-8.1.0";
@ -308,27 +308,27 @@ twisted = buildPythonPackage {
<varname>python27Packages</varname>, <varname>python32Packages</varname>, <varname>python33Packages</varname>,
<varname>python34Packages</varname> and <varname>pypyPackages</varname>.
</para>
<para>
<function>buildPythonPackage</function> mainly does four things:
<orderedlist>
<listitem><para>
In the <varname>configurePhase</varname>, it patches
<literal>setup.py</literal> to always include setuptools before
distutils for monkeypatching machinery to take place.
</para></listitem>
<listitem><para>
In the <varname>buildPhase</varname>, it calls
In the <varname>buildPhase</varname>, it calls
<literal>${python.interpreter} setup.py build ...</literal>
</para></listitem>
<listitem><para>
In the <varname>installPhase</varname>, it calls
In the <varname>installPhase</varname>, it calls
<literal>${python.interpreter} setup.py install ...</literal>
</para></listitem>
<listitem><para>
In the <varname>postFixup</varname> phase, <literal>wrapPythonPrograms</literal>
bash function is called to wrap all programs in <filename>$out/bin/*</filename>
@ -337,23 +337,23 @@ twisted = buildPythonPackage {
</para></listitem>
</orderedlist>
</para>
<para>By default <varname>doCheck = true</varname> is set and tests are run with
<para>By default <varname>doCheck = true</varname> is set and tests are run with
<literal>${python.interpreter} setup.py test</literal> command in <varname>checkPhase</varname>.</para>
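<para>
A package whose test suite cannot run in the build sandbox can opt out
of that behaviour; a minimal sketch (the package name is hypothetical):
<programlisting language="nix">
buildPythonPackage {
  name = "foo-1.0";
  src = ./.;
  # skip the `setup.py test` run in checkPhase
  doCheck = false;
}
</programlisting>
</para>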
<para><varname>propagatedBuildInputs</varname> packages are propagated to the user environment.</para>
<para>
By default <varname>meta.platforms</varname> is set to the same value
as the interpreter unless overridden.
</para>
<variablelist>
<title>
<varname>buildPythonPackage</varname> parameters
(all parameters from <varname>mkDerivation</varname> function are still supported)
</title>
<varlistentry>
<term><varname>namePrefix</varname></term>
<listitem><para>
@ -363,7 +363,7 @@ twisted = buildPythonPackage {
if you're packaging an application or a command line tool.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>disabled</varname></term>
<listitem><para>
@ -373,21 +373,21 @@ twisted = buildPythonPackage {
for examples.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>setupPyInstallFlags</varname></term>
<listitem><para>
List of flags passed to <command>setup.py install</command> command.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>setupPyBuildFlags</varname></term>
<listitem><para>
List of flags passed to <command>setup.py build</command> command.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>pythonPath</varname></term>
<listitem><para>
@ -396,21 +396,21 @@ twisted = buildPythonPackage {
(contrary to <varname>propagatedBuildInputs</varname>).
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>preShellHook</varname></term>
<listitem><para>
Hook to execute commands before <varname>shellHook</varname>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>postShellHook</varname></term>
<listitem><para>
Hook to execute commands after <varname>shellHook</varname>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>distutilsExtraCfg</varname></term>
<listitem><para>
@ -419,15 +419,29 @@ twisted = buildPythonPackage {
configuration).
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>makeWrapperArgs</varname></term>
<listitem><para>
A list of strings. Arguments to be passed to
<varname>makeWrapper</varname>, which wraps generated binaries. By
default, the arguments to <varname>makeWrapper</varname> set
<varname>PATH</varname> and <varname>PYTHONPATH</varname> environment
variables before calling the binary. Additional arguments here can
allow a developer to set environment variables which will be
available when the binary is run. For example,
<varname>makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]</varname>.
</para></listitem>
</varlistentry>
</variablelist>
</section>
<section xml:id="python-build-env"><title><function>python.buildEnv</function> function</title>
<section xml:id="ssec-python-build-env"><title><function>python.buildEnv</function> function</title>
<para>
Create Python environments using low-level <function>pkgs.buildEnv</function> function. Example <filename>default.nix</filename>:
<programlisting language="nix">
<![CDATA[with import <nixpkgs> {};
@ -436,31 +450,31 @@ python.buildEnv.override {
ignoreCollisions = true;
}]]>
</programlisting>
Running <command>nix-build</command> will create
<filename>/nix/store/cf1xhjwzmdki7fasgr4kz6di72ykicl5-python-2.7.8-env</filename>
with wrapped binaries in <filename>bin/</filename>.
</para>
<variablelist>
<title>
<function>python.buildEnv</function> arguments
</title>
<varlistentry>
<term><varname>extraLibs</varname></term>
<listitem><para>
List of packages installed inside the environment.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>postBuild</varname></term>
<listitem><para>
Shell command executed after the build of environment.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>ignoreCollisions</varname></term>
<listitem><para>
@ -470,7 +484,7 @@ python.buildEnv.override {
</variablelist>
</section>
<section xml:id="python-tools"><title>Tools</title>
<section xml:id="ssec-python-tools"><title>Tools</title>
<para>Packages inside nixpkgs are written by hand. However, many tools
exist in the community to help save time. No tool is preferred at the moment.
@ -497,20 +511,20 @@ exist in community to help save time. No tool is preferred at the moment.
</section>
<section xml:id="python-development"><title>Development</title>
<section xml:id="ssec-python-development"><title>Development</title>
<para>
To develop Python packages <function>buildPythonPackage</function> has
additional logic inside <varname>shellPhase</varname> to run
<command>${python.interpreter} setup.py develop</command> for the package.
</para>
<warning><para><varname>shellPhase</varname> is executed only if <filename>setup.py</filename>
exists.</para></warning>
<para>
Given a <filename>default.nix</filename>:
<programlisting language="nix">
<![CDATA[with import <nixpkgs> {};
@ -522,18 +536,18 @@ buildPythonPackage {
src = ./.;
}]]>
</programlisting>
Running <command>nix-shell</command> with no arguments should give you
the environment in which the package would be built with
<command>nix-build</command>.
</para>
<para>
Shortcut to set up environments with C headers/libraries and Python packages:
<programlisting language="bash">$ nix-shell -p pythonPackages.pyramid zlib libjpeg git</programlisting>
</para>
<note><para>
There is a boolean value <varname>lib.inNixShell</varname> set to
<varname>true</varname> if nix-shell is invoked.
@ -541,7 +555,7 @@ buildPythonPackage {
</section>
<section xml:id="python-faq"><title>FAQ</title>
<section xml:id="ssec-python-faq"><title>FAQ</title>
<variablelist>
@ -562,18 +576,18 @@ buildPythonPackage {
Known bug in setuptools <varname>install_data</varname> does not respect --prefix</link>. An example of
such a package using the feature is <filename>pkgs/tools/X11/xpra/default.nix</filename>. As a workaround,
install it as an extra <varname>preInstall</varname> step:
<programlisting>${python.interpreter} setup.py install_data --install-dir=$out --root=$out
sed -i '/ = data_files/d' setup.py</programlisting>
</para></listitem>
</varlistentry>
<varlistentry>
<term>Rationale of non-existent global site-packages</term>
<listitem><para>
There is no need to have a global site-packages in Nix. Each package has an isolated
dependency tree, and installing any Python package will only populate <varname>$PATH</varname>
inside the user environment. See <xref linkend="python-build-env" /> to create a self-contained
inside the user environment. See <xref linkend="ssec-python-build-env" /> to create a self-contained
interpreter with a set of packages.
</para></listitem>
</varlistentry>
@ -583,7 +597,7 @@ sed -i '/ = data_files/d' setup.py</programlisting>
</section>
<section xml:id="python-contrib"><title>Contributing guidelines</title>
<section xml:id="ssec-python-contrib"><title>Contributing guidelines</title>
<para>
The following rules should be respected:
</para>
@ -611,12 +625,12 @@ sed -i '/ = data_files/d' setup.py</programlisting>
</section>
<section xml:id="ssec-language-ruby"><title>Ruby</title>
<section xml:id="sec-language-ruby"><title>Ruby</title>
<para>There currently is support to bundle applications that are packaged as Ruby gems. The utility "bundix" allows you to write a <filename>Gemfile</filename>, let bundler create a <filename>Gemfile.lock</filename>, and then convert
this into a nix expression that contains all Gem dependencies automatically.</para>
<para>For example, to package sensu, we did:</para>
<screen>
<![CDATA[$ cd pkgs/servers/monitoring
$ mkdir sensu
@ -652,7 +666,7 @@ and scalable.";
</section>
<section xml:id="ssec-language-go"><title>Go</title>
<section xml:id="sec-language-go"><title>Go</title>
<para>The function <varname>buildGoPackage</varname> builds
standard Go packages.
@ -773,7 +787,7 @@ done
</section>
<section xml:id="ssec-language-java"><title>Java</title>
<section xml:id="sec-language-java"><title>Java</title>
<para>Ant-based Java packages are typically built from source as follows:
@ -854,7 +868,7 @@ Runtime) instead of the OpenJRE.</para>
</section>
<section xml:id="ssec-language-lua"><title>Lua</title>
<section xml:id="sec-language-lua"><title>Lua</title>
<para>
Lua packages are built by the <varname>buildLuaPackage</varname> function. This function is
@ -862,7 +876,7 @@ Runtime) instead of the OpenJRE.</para>
in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/lua-modules/generic/default.nix">
<filename>pkgs/development/lua-modules/generic/default.nix</filename></link>
and works similarly to <varname>buildPerlPackage</varname>. (See
<xref linkend="ssec-language-perl"/> for details.)
<xref linkend="sec-language-perl"/> for details.)
</para>
<para>
@ -876,7 +890,7 @@ fileSystem = buildLuaPackage {
src = fetchurl {
url = "https://github.com/keplerproject/luafilesystem/archive/v1_6_2.tar.gz";
sha256 = "1n8qdwa20ypbrny99vhkmx8q04zd2jjycdb5196xdhgvqzk10abz";
};
};
meta = {
homepage = "https://github.com/keplerproject/luafilesystem";
hydraPlatforms = stdenv.lib.platforms.linux;
@ -887,7 +901,7 @@ fileSystem = buildLuaPackage {
</para>
<para>
More complicated packages, though, should be placed in a separate file in
More complicated packages, though, should be placed in a separate file in
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/lua-modules"><filename>pkgs/development/lua-modules</filename></link>.
</para>
@ -901,7 +915,7 @@ fileSystem = buildLuaPackage {
</section>
<section xml:id="ssec-language-coq"><title>Coq</title>
<section xml:id="sec-language-coq"><title>Coq</title>
<para>
Coq libraries should be installed in
<literal>$(out)/lib/coq/${coq.coq-version}/user-contrib/</literal>.


@ -13,10 +13,13 @@
<xi:include href="quick-start.xml" />
<xi:include href="stdenv.xml" />
<xi:include href="packageconfig.xml" />
<xi:include href="functions.xml" />
<xi:include href="meta.xml" />
<xi:include href="language-support.xml" />
<xi:include href="package-notes.xml" />
<xi:include href="coding-conventions.xml" />
<xi:include href="submitting-changes.xml" />
<xi:include href="haskell-users-guide.xml" />
<xi:include href="contributing.xml" />
</book>


@ -82,7 +82,8 @@ hello-2.3 A program that produces a familiar, friendly greeting
</para>
<section><title>Standard meta-attributes</title>
<section xml:id="sec-standard-meta-attributes"><title>Standard
meta-attributes</title>
<para>It is expected that each meta-attribute is one of the following:</para>
@ -137,12 +138,39 @@ hello-2.3 A program that produces a familiar, friendly greeting
<varlistentry>
<term><varname>license</varname></term>
<listitem><para>The license for the package. One from the
attribute set defined in <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix">
<filename>nixpkgs/lib/licenses.nix</filename></link>. Example:
<literal>stdenv.lib.licenses.gpl3</literal>. For details, see
<xref linkend='sec-meta-license'/>.</para></listitem>
<listitem>
<para>
The license, or licenses, for the package. One from the attribute set
defined in <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix">
<filename>nixpkgs/lib/licenses.nix</filename></link>. At this moment
using both a list of licenses and a single license is valid. If the
license field is in the form of a list representation, then it means
that parts of the package are licensed differently. Each license
should preferably be referenced by its attribute. The non-list
attribute value can also be a space-delimited string representation of
the contained attribute shortNames or spdxIds. The following are all valid
examples:
<itemizedlist>
<listitem><para>Single license referenced by attribute (preferred)
<literal>stdenv.lib.licenses.gpl3</literal>.
</para></listitem>
<listitem><para>Single license referenced by its attribute shortName (frowned upon)
<literal>"gpl3"</literal>.
</para></listitem>
<listitem><para>Single license referenced by its attribute spdxId (frowned upon)
<literal>"GPL-3.0"</literal>.
</para></listitem>
<listitem><para>Multiple licenses referenced by attribute (preferred)
<literal>with stdenv.lib.licenses; [ asl20 free ofl ]</literal>.
</para></listitem>
<listitem><para>Multiple licenses referenced as a space delimited string of attribute shortNames (frowned upon)
<literal>"asl20 free ofl"</literal>.
</para></listitem>
</itemizedlist>
For details, see <xref linkend='sec-meta-license'/>.
</para>
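<para>
For instance, a minimal sketch of a <varname>meta</varname> section using
the preferred list form (the description text is illustrative):
<programlisting>
meta = {
  description = "An example package";
  license = with stdenv.lib.licenses; [ asl20 free ofl ];
};
</programlisting>
</para>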
</listitem>
</varlistentry>
<varlistentry>


@ -141,7 +141,7 @@ $ make menuconfig ARCH=<replaceable>arch</replaceable></screen>
<!--============================================================-->
<section>
<section xml:id="sec-xorg">
<title>X.org</title>


@ -67,7 +67,8 @@
<filename>lib/licenses.nix</filename> of the nix package tree.
</para>
<section><title>Modify packages via <literal>packageOverrides</literal></title>
<section xml:id="sec-modify-via-packageOverrides"><title>Modify
packages via <literal>packageOverrides</literal></title>
<para>


@ -55,18 +55,18 @@ $ git add pkgs/development/libraries/libfoo/default.nix</screen>
<itemizedlist>
<listitem>
<para>GNU cpio: <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/archivers/cpio/default.nix"><filename>pkgs/tools/archivers/cpio/default.nix</filename></link>.
The simplest possible package. The generic builder in
<varname>stdenv</varname> does everything for you. It has
no dependencies beyond <varname>stdenv</varname>.</para>
<para>GNU Hello: <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/misc/hello/ex-2/default.nix"><filename>pkgs/applications/misc/hello/ex-2/default.nix</filename></link>.
A trivial package that specifies some <varname>meta</varname>
attributes, which is good practice.</para>
</listitem>
<listitem>
<para>GNU Hello: <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/misc/hello/ex-2/default.nix"><filename>pkgs/applications/misc/hello/ex-2/default.nix</filename></link>.
Also trivial, but it specifies some <varname>meta</varname>
attributes which is good practice.</para>
<para>GNU cpio: <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/archivers/cpio/default.nix"><filename>pkgs/tools/archivers/cpio/default.nix</filename></link>.
Also a simple package. The generic builder in
<varname>stdenv</varname> does everything for you. It has
no dependencies beyond <varname>stdenv</varname>.</para>
</listitem>
<listitem>

View file

@ -15,7 +15,8 @@ environment does everything automatically. If
can easily customise or override the various build phases.</para>
<section><title>Using <literal>stdenv</literal></title>
<section xml:id="sec-using-stdenv"><title>Using
<literal>stdenv</literal></title>
<para>To build a package with the standard environment, you use the
function <varname>stdenv.mkDerivation</varname>, instead of the
@ -58,7 +59,7 @@ build. To make this easier, the standard environment breaks the
package build into a number of <emphasis>phases</emphasis>, all of
which can be overridden or modified individually: unpacking the
sources, applying patches, configuring, building, and installing.
(There are some others; see <xref linkend="ssec-stdenv-phases"/>.)
(There are some others; see <xref linkend="sec-stdenv-phases"/>.)
For instance, a package that doesnt supply a makefile but instead has
to be compiled “manually” could be handled like this:
@ -124,7 +125,8 @@ genericBuild
</section>
<section><title>Tools provided by <literal>stdenv</literal></title>
<section xml:id="sec-tools-of-stdenv"><title>Tools provided by
<literal>stdenv</literal></title>
<para>The standard environment provides the following packages:
@ -225,7 +227,7 @@ genericBuild
</section>
<section xml:id="ssec-stdenv-phases"><title>Phases</title>
<section xml:id="sec-stdenv-phases"><title>Phases</title>
<para>The generic builder has a number of <emphasis>phases</emphasis>.
Package builds are split into phases to make it easier to override
@ -243,7 +245,8 @@ is convenient to override a phase from the derivation, while the
latter is convenient from a build script.</para>
<section><title>Controlling phases</title>
<section xml:id="ssec-controlling-phases"><title>Controlling
phases</title>
<para>There are a number of variables that control what phases are
executed and in what order:
@ -327,7 +330,7 @@ executed and in what order:
</section>
<section><title>The unpack phase</title>
<section xml:id="ssec-unpack-phase"><title>The unpack phase</title>
<para>The unpack phase is responsible for unpacking the source code of
the package. The default implementation of
@ -434,7 +437,7 @@ Additional file types can be supported by setting the
</section>
<section><title>The patch phase</title>
<section xml:id="ssec-patch-phase"><title>The patch phase</title>
<para>The patch phase applies the list of patches defined in the
<varname>patches</varname> variable.</para>
@ -477,7 +480,7 @@ Additional file types can be supported by setting the
</section>
<section><title>The configure phase</title>
<section xml:id="ssec-configure-phase"><title>The configure phase</title>
<para>The configure phase prepares the source tree for building. The
default <function>configurePhase</function> runs
@ -573,7 +576,7 @@ script) if it exists.</para>
</section>
<section><title>The build phase</title>
<section xml:id="build-phase"><title>The build phase</title>
<para>The build phase is responsible for actually building the package
(e.g. compiling it). The default <function>buildPhase</function>
@ -657,7 +660,7 @@ called, respectively.</para>
</section>
<section><title>The check phase</title>
<section xml:id="ssec-check-phase"><title>The check phase</title>
<para>The check phase checks whether the package was built correctly
by running its test suite. The default
@ -717,7 +720,7 @@ doCheck = true;</programlisting>
</section>
<section><title>The install phase</title>
<section xml:id="ssec-install-phase"><title>The install phase</title>
<para>The install phase is responsible for installing the package in
the Nix store under <envar>out</envar>. The default
@ -772,7 +775,7 @@ installTargets = "install-bin install-doc";</programlisting>
</section>
<section><title>The fixup phase</title>
<section xml:id="ssec-fixup-phase"><title>The fixup phase</title>
<para>The fixup phase performs some (Nix-specific) post-processing
actions on the files installed under <filename>$out</filename> by the
@ -813,6 +816,12 @@ following:
stripped. By default, they are.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>dontMoveSbin</varname></term>
<listitem><para>If set, files in <filename>$out/sbin</filename> are not moved
to <filename>$out/bin</filename>. By default, they are.</para></listitem>
</varlistentry>
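<para>A minimal sketch of how this flag is used in practice (the
package is hypothetical):
<programlisting>
stdenv.mkDerivation {
  name = "example-1.0";
  src = ./.;
  # keep installed binaries under $out/sbin instead of moving them to $out/bin
  dontMoveSbin = true;
}
</programlisting>
</para>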
<varlistentry>
<term><varname>stripAllList</varname></term>
<listitem><para>List of directories to search for libraries and
@ -895,7 +904,8 @@ following:
</section>
<section><title>The distribution phase</title>
<section xml:id="ssec-distribution-phase"><title>The distribution
phase</title>
<para>The distribution phase is intended to produce a source
distribution of the package. The default
@ -1199,7 +1209,7 @@ echo @foo@
</section>
<section><title>Purity in Nixpkgs</title>
<section xml:id="sec-purity-in-nixpkgs"><title>Purity in Nixpkgs</title>
<para>[measures taken to prevent dependencies on packages outside the
store, and what you can do to prevent them]</para>

283
doc/submitting-changes.xml Normal file
View file

@ -0,0 +1,283 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-submitting-changes">
<title>Submitting changes</title>
<section>
<title>Making patches</title>
<itemizedlist>
<listitem>
<para>Read <link xlink:href="https://nixos.org/nixpkgs/manual/">Manual (How to write packages for Nix)</link>.</para>
</listitem>
<listitem>
<para>Fork the repository on GitHub.</para>
</listitem>
<listitem>
<para>Create a branch for your future fix.
<itemizedlist>
<listitem>
<para>You can create the branch from the commit of your local <command>nixos-version</command>. That helps you avoid additional local compilation, because you will receive packages from the binary cache.
<itemizedlist>
<listitem>
<para>For example, <command>nixos-version</command> returns <command>15.05.git.0998212 (Dingo)</command>, so you can do:</para>
</listitem>
</itemizedlist>
<screen>
$ git checkout 0998212
$ git checkout -b 'fix/pkg-name-update'
</screen>
</para>
</listitem>
<listitem>
<para>Please avoid working directly on the <command>master</command> branch.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>Make commits of logical units.
<itemizedlist>
<listitem>
<para>If you removed packages, made major NixOS changes, etc., describe them in <command>nixos/doc/manual/release-notes/rl-unstable.xml</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>Check for unnecessary whitespace with <command>git diff --check</command> before committing.</para>
</listitem>
<listitem>
<para>Format the commit message in the following way:</para>
<programlisting>
(pkg-name | service-name): (from -> to | init at version | refactor | etc)
Additional information.
</programlisting>
<itemizedlist>
<listitem>
<para>Examples:
<itemizedlist>
<listitem>
<para>
<command>nginx: init at 2.0.1</command>
</para>
</listitem>
<listitem>
<para>
<command>firefox: 3.0 -> 3.1.1</command>
</para>
</listitem>
<listitem>
<para>
<command>hydra service: add bazBaz option</command>
</para>
</listitem>
<listitem>
<para>
<command>nginx service: refactor config generation</command>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>Test your changes. If you work with
<itemizedlist>
<listitem>
<para>nixpkgs:
<itemizedlist>
<listitem>
<para>update pkg ->
<itemizedlist>
<listitem>
<para>
<command>nix-env -i pkg-name -f &lt;path to your local nixpkgs folder&gt;</command>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>add pkg ->
<itemizedlist>
<listitem>
<para>Make sure it's in <command>pkgs/top-level/all-packages.nix</command>
</para>
</listitem>
<listitem>
<para>
<command>nix-env -i pkg-name -f &lt;path to your local nixpkgs folder&gt;</command>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
<emphasis>If you don't want to install the pkg in your profile</emphasis>.
<itemizedlist>
<listitem>
<para>
<command>nix-build -A pkg-attribute-name &lt;path to your local nixpkgs folder&gt;/default.nix</command> and check the results in the <command>result</command> folder; it will appear in the same directory where you ran <command>nix-build</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>If you did <command>nix-env -i pkg-name</command> you can do <command>nix-env -e pkg-name</command> to uninstall it from your system.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>NixOS and its modules:
<itemizedlist>
<listitem>
<para>You can add the new module to your NixOS configuration file (usually <command>/etc/nixos/configuration.nix</command>)
and run <command>sudo nixos-rebuild test -I nixpkgs=&lt;path to your local nixpkgs folder&gt; --fast</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>If you have commits like <command>pkg-name: oh, forgot to insert whitespace</command>, squash them with <command>git rebase -i</command>.</para>
</listitem>
<listitem>
<para>Rebase your branch against the current <command>master</command>; a consolidated sketch of this workflow follows the list.</para>
</listitem>
</itemizedlist>
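<para>Taken together, the steps above correspond roughly to the
following session (the commit hash and branch name are only the
examples used earlier):</para>
<screen>
$ git checkout 0998212                   # commit matching your local nixos-version
$ git checkout -b 'fix/pkg-name-update'  # work on a topic branch, not on master
  ...edit, build and test the change...
$ git diff --check                       # no unnecessary whitespace
$ git commit                             # one commit per logical unit
$ git rebase -i master                   # squash fix-up commits, rebase on master
</screen>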
</section>
<section>
<title>Submitting changes</title>
<itemizedlist>
<listitem>
<para>Push your changes to your fork of nixpkgs.</para>
</listitem>
<listitem>
<para>Create pull request:
<itemizedlist>
<listitem>
<para>Write the title in the format <command>(pkg-name | service): improvement</command>.
<itemizedlist>
<listitem>
<para>If you update a package, include the versions: <command>from -> to</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>State in a comment whether you have tested your patch. Do not rely too much on <command>TravisCI</command>.</para>
</listitem>
<listitem>
<para>If you make an improvement, write about your motivation.</para>
</listitem>
<listitem>
<para>Notify maintainers of the package. For example add to the message: <command>cc @jagajaga @domenkozar</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</section>
<section>
<title>Hotfixing pull requests</title>
<itemizedlist>
<listitem>
<para>Make the appropriate changes in your branch.</para>
</listitem>
<listitem>
<para>Don't create additional fix-up commits; instead (see the sketch after this list), do
<itemizedlist>
<listitem>
<para><command>git rebase -i</command></para>
</listitem>
<listitem>
<para>
<command>git push --force</command> to your branch.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
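<para>A minimal sketch of such a hotfix (the branch name is
hypothetical):</para>
<screen>
$ git checkout fix/pkg-name-update
  ...apply the fix and git add the changed files...
$ git commit -m "temporary fixup"
$ git rebase -i master                          # squash the fixup into the original commit
$ git push --force origin fix/pkg-name-update   # updates the open pull request in place
</screen>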
</section>
<section>
<title>Commit policy</title>
<itemizedlist>
<listitem>
<para>Commits must be sufficiently tested before being merged, both for the master and staging branches.</para>
</listitem>
<listitem>
<para>Hydra builds for master and staging should not be used as a testing platform; Hydra is a build farm for changes that have already been tested.</para>
</listitem>
<listitem>
<para>Master should only see non-breaking commits that do not cause mass rebuilds.</para>
</listitem>
<listitem>
<para>Staging should only see non-breaking mass-rebuild commits. That means it's not to be used for testing, and changes must have been well tested already. <link xlink:href="http://comments.gmane.org/gmane.linux.distributions.nixos/13447">Read policy here</link>.</para>
</listitem>
<listitem>
<para>If staging is already in a broken state, please refrain from adding extra new breakages. Stabilize it for a few days, merge into master, then resume development on staging. <link xlink:href="http://hydra.nixos.org/jobset/nixpkgs/staging#tabs-evaluations">Keep an eye on the staging evaluations here</link>.</para>
</listitem>
<listitem>
<para>When changing the bootloader installation process, extra care must be taken. Grub installations cannot be rolled back, hence changes may break people's installations forever. For any non-trivial change to the bootloader please file a PR asking for review, especially from @edolstra.</para>
</listitem>
</itemizedlist>
</section>
</chapter>

View file

@ -85,6 +85,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Creative Commons Zero v1.0 Universal";
};
cc-by-sa-25 = spdx {
spdxId = "CC-BY-SA-2.5";
fullName = "Creative Commons Attribution Share Alike 2.5";
};
cc-by-30 = spdx {
spdxId = "CC-BY-3.0";
fullName = "Creative Commons Attribution 3.0";
@ -322,11 +327,21 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "University of Illinois/NCSA Open Source License";
};
notion_lgpl = {
url = "https://raw.githubusercontent.com/raboof/notion/master/LICENSE";
fullName = "Notion modified LGPL";
};
ofl = spdx {
spdxId = "OFL-1.1";
fullName = "SIL Open Font License 1.1";
};
openldap = spdx {
spdxId = "OLDAP-2.8";
fullName = "Open LDAP Public License v2.8";
};
openssl = spdx {
spdxId = "OpenSSL";
fullName = "OpenSSL License";
@ -403,6 +418,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "The Unlicense";
};
vim = spdx {
spdxId = "Vim";
fullName = "Vim License";
};
vsl10 = spdx {
spdxId = "VSL-1.0";
fullName = "Vovida Software License v1.0";

View file

@ -1,13 +1,17 @@
/* -*- coding: utf-8; -*- */
{
/* Add your name and email address here. Keep the list
alphabetically sorted. */
/* Add your name and email address here.
Keep the list alphabetically sorted.
Prefer the same attrname as your github username, please,
so it's easy to ping a package @maintainer.
*/
_1126 = "Christian Lask <mail@elfsechsundzwanzig.de>";
abaldeau = "Andreas Baldeau <andreas@baldeau.net>";
abbradar = "Nikolay Amiantov <ab@fmap.me>";
adev = "Adrien Devresse <adev@adev.name>";
aforemny = "Alexander Foremny <alexanderforemny@googlemail.com>";
aflatter = "Alexander Flatter <flatter@fastmail.fm>";
aherrmann = "Andreas Herrmann <andreash87@gmx.ch>";
ak = "Alexander Kjeldaas <ak@formalprivacy.com>";
akc = "Anders Claesson <akc@akc.is>";
@ -16,7 +20,9 @@
amiddelk = "Arie Middelkoop <amiddelk@gmail.com>";
amorsillo = "Andrew Morsillo <andrew.morsillo@gmail.com>";
AndersonTorres = "Anderson Torres <torres.anderson.85@gmail.com>";
anderspapitto = "Anders Papitto <anderspapitto@gmail.com>";
andres = "Andres Loeh <ksnixos@andres-loeh.de>";
andrewrk = "Andrew Kelley <superjoe30@gmail.com>";
antono = "Antono Vasiljev <self@antono.info>";
ardumont = "Antoine R. Dumont <eniotna.t@gmail.com>";
aristid = "Aristid Breitkreuz <aristidb@gmail.com>";
@ -25,9 +31,11 @@
astsmtl = "Alexander Tsamutali <astsmtl@yandex.ru>";
aszlig = "aszlig <aszlig@redmoonstudios.org>";
auntie = "Jonathan Glines <auntieNeo@gmail.com>";
avnik = "Alexander V. Nikolaev <avn@avnik.info>";
aycanirican = "Aycan iRiCAN <iricanaycan@gmail.com>";
balajisivaraman = "Balaji Sivaraman<sivaraman.balaji@gmail.com>";
bbenoist = "Baptist BENOIST <return_0@live.com>";
bcarrell = "Brandon Carrell <brandoncarrell@gmail.com>";
bcdarwin = "Ben Darwin <bcdarwin@gmail.com>";
bdimcheff = "Brandon Dimcheff <brandon@dimcheff.com>";
bennofs = "Benno Fünfstück <benno.fuenfstueck@gmail.com>";
@ -49,11 +57,13 @@
cdepillabout = "Dennis Gosnell <cdep.illabout@gmail.com>";
cfouche = "Chaddaï Fouché <chaddai.fouche@gmail.com>";
chaoflow = "Florian Friesdorf <flo@chaoflow.net>";
chattered = "Phil Scott <me@philscotted.com>";
christopherpoole = "Christopher Mark Poole <mail@christopherpoole.net>";
coconnor = "Corey O'Connor <coreyoconnor@gmail.com>";
codyopel = "Cody Opel <codyopel@gmail.com>";
copumpkin = "Dan Peebles <pumpkingod@gmail.com>";
coroa = "Jonas Hörsch <jonas@chaoflow.net>";
couchemar = "Andrey Pavlov <couchemar@yandex.ru>";
cstrahan = "Charles Strahan <charles.c.strahan@gmail.com>";
cwoac = "Oliver Matthews <oliver@codersoffortune.net>";
DamienCassou = "Damien Cassou <damien.cassou@gmail.com>";
@ -71,6 +81,7 @@
eikek = "Eike Kettner <eike.kettner@posteo.de>";
ellis = "Ellis Whitehead <nixos@ellisw.net>";
emery = "Emery Hemingway <emery@vfemail.net>";
ericbmerritt = "Eric Merritt <eric@afiniate.com>";
ertes = "Ertugrul Söylemez <ertesx@gmx.de>";
exlevan = "Alexey Levan <exlevan@gmail.com>";
falsifian = "James Cook <james.cook@utoronto.ca>";
@ -78,6 +89,7 @@
fluffynukeit = "Daniel Austin <dan@fluffynukeit.com>";
forkk = "Andrew Okin <forkk@forkk.net>";
fpletz = "Franz Pletz <fpletz@fnordicwalking.de>";
fro_ozen = "fro_ozen <fro_ozen@gmx.de>";
ftrvxmtrx = "Siarhei Zirukin <ftrvxmtrx@gmail.com>";
funfunctor = "Edward O'Callaghan <eocallaghan@alterapraxis.com>";
fuuzetsu = "Mateusz Kowalczyk <fuuzetsu@fuuzetsu.co.uk>";
@ -91,18 +103,22 @@
goibhniu = "Cillian de Róiste <cillian.deroiste@gmail.com>";
gridaphobe = "Eric Seidel <eric@seidel.io>";
guibert = "David Guibert <david.guibert@gmail.com>";
havvy = "Ryan Scheel <ryan.havvy@gmail.com>";
hbunke = "Hendrik Bunke <bunke.hendrik@gmail.com>";
henrytill = "Henry Till <henrytill@gmail.com>";
hiberno = "Christian Lask <mail@elfsechsundzwanzig.de>";
hinton = "Tom Hinton <t@larkery.com>";
hrdinka = "Christoph Hrdinka <c.nix@hrdinka.at>";
iand675 = "Ian Duncan <ian@iankduncan.com>";
ianwookim = "Ian-Woo Kim <ianwookim@gmail.com>";
iElectric = "Domen Kozar <domen@dev.si>";
ikervagyok = "Balázs Lengyel <ikervagyok@gmail.com>";
iyzsong = "Song Wenwu <iyzsong@gmail.com>";
j-keck = "Jürgen Keck <jhyphenkeck@gmail.com>";
jagajaga = "Arseniy Seroka <ars.seroka@gmail.com>";
jb55 = "William Casarin <bill@casarin.me>";
jcumming = "Jack Cummings <jack@mudshark.org>";
jfb = "James Felix Black <james@yamtime.com>";
jgeerds = "Jascha Geerds <jg@ekby.de>";
jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>";
joachifm = "Joachim Fasting <joachifm@fastmail.fm>";
@ -111,6 +127,7 @@
joelteon = "Joel Taylor <me@joelt.io>";
jpbernardy = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jwiegley = "John Wiegley <johnw@newartisans.com>";
jwilberding = "Jordan Wilberding <jwilberding@afiniate.com>";
jzellner = "Jeff Zellner <jeffz@eml.cc>";
kkallio = "Karn Kallio <tierpluspluslists@gmail.com>";
koral = "Koral <koral@mailoo.org>";
@ -118,28 +135,36 @@
kragniz = "Louis Taylor <kragniz@gmail.com>";
ktosiek = "Tomasz Kontusz <tomasz.kontusz@gmail.com>";
lassulus = "Lassulus <lassulus@gmail.com>";
leonardoce = "Leonardo Cecchi <leonardo.cecchi@gmail.com>";
lethalman = "Luca Bruno <lucabru@src.gnome.org>";
lhvwb = "Nathaniel Baxter <nathaniel.baxter@gmail.com>";
lihop = "Leroy Hopson <nixos@leroy.geek.nz>";
linquize = "Linquize <linquize@yahoo.com.hk>";
linus = "Linus Arver <linusarver@gmail.com>";
lnl7 = "Daiderd Jordan <daiderd@gmail.com>";
lovek323 = "Jason O'Conal <jason@oconal.id.au>";
lsix = "Lancelot SIX <lsix@lancelotsix.com>";
ludo = "Ludovic Courtès <ludo@gnu.org>";
madjar = "Georges Dubus <georges.dubus@compiletoi.net>";
magnetophon = "Bart Brouns <bart@magnetophon.nl>";
malyn = "Michael Alyn Miller <malyn@strangeGizmo.com>";
manveru = "Michael Fellinger <m.fellinger@gmail.com>";
marcweber = "Marc Weber <marco-oweber@gmx.de>";
maurer = "Matthew Maurer <matthew.r.maurer+nix@gmail.com>";
matejc = "Matej Cotman <cotman.matej@gmail.com>";
matthiasbeyer = "Matthias Beyer <mail@beyermatthias.de>";
mbakke = "Marius Bakke <ymse@tuta.io>";
meditans = "Carlo Nucera <meditans@gmail.com>";
meisternu = "Matt Miemiec <meister@krutt.org>";
michelk = "Michel Kuhlmann <michel@kuhlmanns.info>";
mirdhyn = "Merlin Gaillard <mirdhyn@gmail.com>";
mschristiansen = "Mikkel Christiansen <mikkel@rheosystems.com>";
modulistic = "Pablo Costa <modulistic@gmail.com>";
mornfall = "Petr Ročkai <me@mornfall.net>";
MP2E = "Cray Elliott <MP2E@archlinux.us>";
msackman = "Matthew Sackman <matthew@wellquite.org>";
mtreskin = "Max Treskin <zerthurd@gmail.com>";
mudri = "James Wood <lamudri@gmail.com>";
muflax = "Stefan Dorn <mail@muflax.com>";
nathan-gs = "Nathan Bijnens <nathan@nathan.gs>";
nckx = "Tobias Geerinckx-Rice <tobias.geerinckx.rice@gmail.com>";
@ -180,6 +205,7 @@
rickynils = "Rickard Nilsson <rickynils@gmail.com>";
rob = "Rob Vermaas <rob.vermaas@gmail.com>";
robberer = "Longrin Wischnewski <robberer@freakmail.de>";
robbinch = "Robbin C. <robbinch33@gmail.com>";
roconnor = "Russell O'Connor <roconnor@theorem.ca>";
roelof = "Roelof Wobben <rwobben@hotmail.com>";
romildo = "José Romildo Malaquias <malaquias@gmail.com>";
@ -194,21 +220,26 @@
shell = "Shell Turner <cam.turn@gmail.com>";
shlevy = "Shea Levy <shea@shealevy.com>";
simons = "Peter Simons <simons@cryp.to>";
simonvandel = "Simon Vandel Sillesen <simon.vandel@gmail.com>";
sjagoe = "Simon Jagoe <simon@simonjagoe.com>";
sjmackenzie = "Stewart Mackenzie <setori88@gmail.com>";
skeidel = "Sven Keidel <svenkeidel@gmail.com>";
smironov = "Sergey Mironov <ierton@gmail.com>";
spacefrogg = "Michael Raitza <spacefrogg-nixos@meterriblecrew.net>";
sprock = "Roger Mason <rmason@mun.ca>";
spwhitt = "Spencer Whitt <sw@swhitt.me>";
stephenmw = "Stephen Weinberg <stephen@q5comm.com>";
sztupi = "Attila Sztupak <attila.sztupak@gmail.com>";
tailhook = "Paul Colomiets <paul@colomiets.name>";
taktoa = "Remy Goldschmidt <taktoa@gmail.com>";
telotortium = "Robert Irelan <rirelan@gmail.com>";
thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>";
theuni = "Christian Theune <ct@flyingcircus.io>";
thoughtpolice = "Austin Seipp <aseipp@pobox.com>";
titanous = "Jonathan Rudenberg <jonathan@titanous.com>";
tomberek = "Thomas Bereknyei <tomberek@gmail.com>";
travisbhartwell = "Travis B. Hartwell <nafai@travishartwell.net>";
trino = "Hubert Mühlhans <muehlhans.hubert@ekodia.de>";
tstrobel = "Thomas Strobel <ts468@cam.ac.uk>";
ttuegel = "Thomas Tuegel <ttuegel@gmail.com>";

View file

@ -17,6 +17,10 @@ rec {
evalModules) and the less declarative the module set is. */
evalModules = { modules
, prefix ? []
, # This should only be used for special arguments that need to be evaluated
# when resolving module structure (like in imports). For everything else,
# there's _module.args.
specialArgs ? {}
, # This will be removed in the future; prefer the _module.args option instead.
args ? {}
, # This will be removed in the future; prefer the _module.check option instead.
@ -39,7 +43,7 @@ rec {
};
_module.check = mkOption {
type = types.uniq types.bool;
type = types.bool;
internal = true;
default = check;
description = "Whether to check whether all option definitions have matching declarations.";
@ -51,7 +55,7 @@ rec {
};
};
closed = closeModules (modules ++ [ internalModule ]) { inherit config options; lib = import ./.; };
closed = closeModules (modules ++ [ internalModule ]) (specialArgs // { inherit config options; lib = import ./.; });
# Note: the list of modules is reversed to maintain backward
# compatibility with the old module system. Not sure if this is
@ -87,9 +91,11 @@ rec {
let
toClosureList = file: parentKey: imap (n: x:
if isAttrs x || isFunction x then
unifyModuleSyntax file "${parentKey}:anon-${toString n}" (unpackSubmodule applyIfFunction x args)
let key = "${parentKey}:anon-${toString n}"; in
unifyModuleSyntax file key (unpackSubmodule (applyIfFunction key) x args)
else
unifyModuleSyntax (toString x) (toString x) (applyIfFunction (import x) args));
let file = toString x; key = toString x; in
unifyModuleSyntax file key (applyIfFunction key (import x) args));
in
builtins.genericClosure {
startSet = toClosureList unknownModule "" modules;
@ -118,7 +124,7 @@ rec {
config = removeAttrs m ["key" "_file" "require" "imports"];
};
applyIfFunction = f: arg@{ config, options, lib }: if isFunction f then
applyIfFunction = key: f: args@{ config, options, lib, ... }: if isFunction f then
let
# Module arguments are resolved in a strict manner when attribute set
# deconstruction is used. As the arguments are now defined with the
@ -133,11 +139,18 @@ rec {
# not their values. The values are forwarding the result of the
# evaluation of the option.
requiredArgs = builtins.attrNames (builtins.functionArgs f);
context = name: ''while evaluating the module argument `${name}' in "${key}":'';
extraArgs = builtins.listToAttrs (map (name: {
inherit name;
value = config._module.args.${name};
value = addErrorContext (context name)
(args.${name} or config._module.args.${name});
}) requiredArgs);
in f (extraArgs // arg)
# Note: we append in the opposite order such that we can add an error
# context on the explicitly passed arguments of "args" too. This update
# operator is used to make the "args@{ ... }: with args.lib;" notation
# work.
in f (args // extraArgs)
else
f;
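# A hedged sketch of the new specialArgs parameter (the module and paths
# below are hypothetical): modulesPath must be passed via specialArgs
# because it is used while the module structure (imports) is resolved;
# ordinary extra arguments should go through _module.args instead.
lib.evalModules {
  modules = [
    ({ modulesPath, ... }: {
      imports = [ "${modulesPath}/some-module.nix" ];
    })
  ];
  specialArgs = { modulesPath = ../modules; };
}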

View file

@ -59,26 +59,21 @@ rec {
else if all isInt list && all (x: x == head list) list then head list
else throw "Cannot merge definitions of `${showOption loc}' given in ${showFiles (getFiles defs)}.";
/* Obsolete, will remove soon. Specify an option type or apply
function instead. */
mergeTypedOption = typeName: predicate: merge: loc: list:
let list' = map (x: x.value) list; in
if all predicate list then merge list'
else throw "Expected a ${typeName}.";
mergeEnableOption = mergeTypedOption "boolean"
(x: true == x || false == x) (fold lib.or false);
mergeListOption = mergeTypedOption "list" isList concatLists;
mergeStringOption = mergeTypedOption "string" isString lib.concatStrings;
mergeOneOption = loc: defs:
if defs == [] then abort "This case should never happen."
else if length defs != 1 then
throw "The unique option `${showOption loc}' is defined multiple times, in ${showFiles (getFiles defs)}."
else (head defs).value;
/* "Merge" option definitions by checking that they all have the same value. */
mergeEqualOption = loc: defs:
if defs == [] then abort "This case should never happen."
else fold (def: val:
if def.value != val then
throw "The option `${showOption loc}' has conflicting definitions, in ${showFiles (getFiles defs)}."
else
val) (head defs).value defs;
getValues = map (x: x.value);
getFiles = map (x: x.file);
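# A hedged sketch of mergeEqualOption's behaviour (file names are made up;
# definitions are { file, value } pairs as produced by the module system).
# Equal values merge to that value; differing values abort evaluation with
# a "conflicting definitions" error naming both files. Evaluate it e.g.
# with `nix-instantiate --eval`, assuming mergeEqualOption is reachable
# from <nixpkgs/lib>:
with import <nixpkgs/lib>;
mergeEqualOption [ "example" ]
  [ { file = "a.nix"; value = true; }
    { file = "b.nix"; value = true; } ]   # => true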

View file

@ -107,11 +107,13 @@ rec {
# replaceChars ["<" ">"] ["&lt;" "&gt;"] "<foo>" returns "&lt;foo&gt;".
replaceChars = del: new: s:
let
substList = lib.zipLists del new;
subst = c:
(lib.fold
(sub: res: if sub.fst == c then sub else res)
{fst = c; snd = c;} (lib.zipLists del new)
).snd;
let found = lib.findFirst (sub: sub.fst == c) null substList; in
if found == null then
c
else
found.snd;
in
stringAsChars subst s;

View file

@ -12,7 +12,7 @@ evalConfig() {
local attr=$1
shift;
local script="import ./default.nix { modules = [ $@ ];}"
nix-instantiate --timeout 1 -E "$script" -A "$attr" --eval-only
nix-instantiate --timeout 1 -E "$script" -A "$attr" --eval-only --show-trace
}
reportFailure() {
@ -100,7 +100,15 @@ checkConfigOutput 'true' "$@" ./define-enable.nix ./define-loaOfSub-foo-if-enabl
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-loaOfSub-foo-enable-if.nix
# Check _module.args.
checkConfigOutput "true" config.enable ./declare-enable.nix ./custom-arg-define-enable.nix
set -- config.enable ./declare-enable.nix ./define-enable-with-custom-arg.nix
checkConfigError 'while evaluating the module argument .*custom.* in .*define-enable-with-custom-arg.nix.*:' "$@"
checkConfigOutput "true" "$@" ./define-_module-args-custom.nix
# Check that using _module.args on imports causes an infinite recursion, with
# the proper error context.
set -- "$@" ./define-_module-args-custom.nix ./import-custom-arg.nix
checkConfigError 'while evaluating the module argument .*custom.* in .*import-custom-arg.nix.*:' "$@"
checkConfigError 'infinite recursion encountered' "$@"
# Check _module.check.
set -- config.enable ./declare-enable.nix ./define-enable.nix ./define-loaOfSub-foo.nix

View file

@ -0,0 +1,7 @@
{ lib, ... }:
{
config = {
_module.args.custom = true;
};
}

View file

@ -2,7 +2,6 @@
{
config = {
_module.args.custom = true;
enable = custom;
};
}

View file

@ -0,0 +1,6 @@
{ lib, custom, ... }:
{
imports = []
++ lib.optional custom ./define-enable-force.nix;
}

View file

@ -54,7 +54,7 @@ rec {
bool = mkOptionType {
name = "boolean";
check = isBool;
merge = loc: fold (x: y: x.value || y) false;
merge = mergeEqualOption;
};
int = mkOptionType {

View file

@ -1,4 +1,4 @@
#!/bin/sh
#!/usr/bin/env bash
GNOME_FTP="ftp.gnome.org/pub/GNOME/sources"

View file

@ -6,6 +6,7 @@ hydra_eval_jobs \
--argstr system i686-linux \
--argstr system x86_64-darwin \
--argstr system i686-cygwin \
--argstr system x86_64-cygwin \
--argstr system i686-freebsd \
--arg officialRelease false \
--arg nixpkgs "{ outPath = builtins.storePath ./. ; rev = 1234; }" \

View file

@ -31,7 +31,15 @@ elif [[ $1 == build ]]; then
echo "=== Not a pull request"
else
echo "=== Checking PR"
nox-review pr ${TRAVIS_PULL_REQUEST}
if ! nox-review pr ${TRAVIS_PULL_REQUEST}; then
if sudo dmesg | egrep 'Out of memory|Killed process' > /tmp/oom-log; then
echo "=== The build failed due to running out of memory:"
cat /tmp/oom-log
echo "=== Please disregard the result of this Travis build."
fi
exit 1
fi
fi
# echo "=== Checking tarball creation"
# nix-build pkgs/top-level/release.nix -A tarball

View file

@ -158,7 +158,7 @@ let locatedb = "/var/cache/locatedb"; in
script =
''
mkdir -m 0755 -p $(dirname ${locatedb})
exec updatedb --localuser=nobody --output=${locatedb} --prunepaths='/tmp /var/tmp /media /run'
exec updatedb --localuser=nobody --output=${locatedb} --prunepaths='/tmp /var/tmp /run'
'';
};
@ -172,4 +172,4 @@ let locatedb = "/var/cache/locatedb"; in
<xi:include href="option-declarations.xml" />
<xi:include href="option-def.xml" />
</chapter>
</chapter>

View file

@ -41,10 +41,6 @@ changes:
<option>boot.loader.efi</option> and <option>boot.loader.gummiboot</option>
as well.</para>
</listitem>
<listitem>
<para>To see console messages during early boot, add <literal>"fbcon"</literal>
to your <option>boot.initrd.kernelModules</option>.</para>
</listitem>
</itemizedlist>
</para>

View file

@ -120,7 +120,11 @@ $ nixos-generate-config --root /mnt</screen>
$ nano /mnt/etc/nixos/configuration.nix
</screen>
The <command>vim</command> text editor is also available.</para>
If youre using the graphical ISO image, other editors may be
available (such as <command>vim</command>). If you have network
access, you can also install other editors — for instance, you can
install Emacs by running <literal>nix-env -i
emacs</literal>.</para>
<para>You <emphasis>must</emphasis> set the option
<option>boot.loader.grub.device</option> to specify on which disk
@ -189,11 +193,13 @@ $ reboot</screen>
<listitem>
<para>You should now be able to boot into the installed NixOS. The GRUB boot menu shows a list
of <emphasis>available configurations</emphasis> (initially just one). Every time
you change the NixOS configuration (see<link linkend="sec-changing-config">Changing
Configuration</link> ), a new item appears in the menu. This allows you to
easily roll back to another configuration if something goes wrong.</para>
<para>You should now be able to boot into the installed NixOS. The
GRUB boot menu shows a list of <emphasis>available
configurations</emphasis> (initially just one). Every time you
change the NixOS configuration (see <link
linkend="sec-changing-config">Changing Configuration</link> ), a
new item is added to the menu. This allows you to easily roll back
to a previous configuration if something goes wrong.</para>
<para>You should log in and change the <literal>root</literal>
password with <command>passwd</command>.</para>

View file

@ -8,9 +8,32 @@
<para>In addition to numerous new and upgraded packages, this release has the following highlights:
<!--<itemizedlist>
<itemizedlist>
<listitem>
<para>
The Haskell packages infrastructure has been re-designed from the ground up.
NixOS now distributes the latest version of every single package registered on
<link xlink:href="http://hackage.haskell.org/">Hackage</link>, i.e. well over
8000 Haskell packages. Further information and usage instructions for the
improved infrastructure are available at <link
xlink:href="https://nixos.org/wiki/Haskell">https://nixos.org/wiki/Haskell</link>.
Users migrating from an earlier release will also find helpful information
below, in the list of backwards-incompatible changes.
</para>
</listitem>
<listitem>
<para>
Users running an SSH server who worry about the quality of their
<literal>/etc/ssh/moduli</literal> file with respect to the <link
xlink:href="https://stribika.github.io/2015/01/04/secure-secure-shell.html">vulnerabilities
discovered in the Diffie-Hellman key exchange</link> can now replace OpenSSH's
default version with one they generated themselves using the new
<literal>services.openssh.moduliFile</literal> option.
</para>
</listitem>
</itemizedlist>
</itemizedlist>-->
</para>
<para>The following new services were added since the last release:
@ -18,6 +41,7 @@
<itemizedlist>
<listitem><para><literal>brltty</literal></para></listitem>
<listitem><para><literal>marathon</literal></para></listitem>
<listitem><para><literal>Tvheadend</literal></para></listitem>
</itemizedlist>
</para>
@ -36,6 +60,10 @@ and old <literal>steam</literal> package -- to <literal>steamOriginal</literal>.
was accordingly renamed to <literal>bomi</literal>
</para></listitem>
<listitem><para>Atom Shell has been renamed to Electron upstream. Package <literal>atom-shell</literal>
was accordingly renamed to <literal>electron</literal>
</para></listitem>
<listitem>
<para>
The default <literal>NIX_PATH</literal> for NixOS now includes
@ -71,6 +99,84 @@ was accordingly renamed to <literal>bomi</literal>
</para>
</listitem>
<listitem>
<para>
Haskell packages can no longer be found by name, i.e. the commands
<literal>nix-env -qa cabal-install</literal> and <literal>nix-env -i
ghc</literal> will fail, even though we <emphasis>do</emphasis> ship
both <literal>cabal-install</literal> and <literal>ghc</literal>.
The reason for this inconvenience is the sheer size of the Haskell
package set: name-based lookups such as these would become much
slower than they are today if we added the entire Hackage database
to the top-level attribute set. Instead, the list of Haskell
packages can be displayed by
</para>
<programlisting>
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A haskellPackages
</programlisting>
<para>
and packages can be installed with:
</para>
<programlisting>
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.cabal-install
</programlisting>
</listitem>
<listitem>
<para>
Previous versions of NixOS came with a feature called
<literal>ghc-wrapper</literal>, a small wrapper script that allowed
GHC to transparently pick up on libraries installed in the user's
profile. This feature has been deprecated;
<literal>ghc-wrapper</literal> was removed from the distribution.
The proper way to register Haskell libraries with the compiler now
is the <literal>haskellPackages.ghcWithPackages</literal>
function; a short sketch of its use follows this list.
<link xlink:href="https://nixos.org/wiki/Haskell">https://nixos.org/wiki/Haskell</link>
provides much information about this subject.
</para>
</listitem>
<listitem>
<para>
All Haskell builds that have been generated with version 1.x of
the <literal>cabal2nix</literal> utility are now invalid and need
to be re-generated with a current version of
<literal>cabal2nix</literal> to function. The most recent version
of this tool can be installed by running
<literal>nix-env -i cabal2nix</literal>.
</para>
</listitem>
<listitem>
<para>
The <literal>haskellPackages</literal> set in Nixpkgs used to have a
function attribute called <literal>extension</literal> that users
could override in their <literal>~/.nixpkgs/config.nix</literal>
files to configure additional attributes, etc. That function still
exists, but it's now called <literal>overrides</literal>.
</para>
</listitem>
<listitem>
<para>
The OpenBLAS library has been updated to version
<literal>0.2.14</literal>. Support for the
<literal>x86_64-darwin</literal> platform was added. Dynamic
architecture detection was enabled; OpenBLAS now selects
microarchitecture-optimized routines at runtime, so optimal
performance is achieved without the need to rebuild OpenBLAS
locally. OpenBLAS has replaced ATLAS in most packages which use an
optimized BLAS or LAPACK implementation.
</para>
</listitem>
<listitem>
<para>
The <literal>phpfpm</literal> service now uses the default PHP version
(<literal>pkgs.php</literal>) instead of PHP 5.4 (<literal>pkgs.php54</literal>).
</para>
</listitem>
</itemizedlist>
</para>
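<para>As a hedged illustration of the
<literal>haskellPackages.ghcWithPackages</literal> mechanism mentioned
above (the attribute name <literal>myGhc</literal> and the chosen
libraries are only examples), a compiler that can see extra libraries
can be declared in <filename>~/.nixpkgs/config.nix</filename>:
<programlisting>
{
  packageOverrides = pkgs: {
    # sketch only: a GHC that knows about mtl and text
    myGhc = pkgs.haskellPackages.ghcWithPackages (hsPkgs: [
      hsPkgs.mtl
      hsPkgs.text
    ]);
  };
}
</programlisting>
It can then be installed with <literal>nix-env -f
&quot;&lt;nixpkgs&gt;&quot; -iA myGhc</literal>.
</para>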

View file

@ -47,6 +47,7 @@ in rec {
inherit prefix check;
modules = modules ++ extraModules ++ baseModules ++ [ pkgsModule ];
args = extraArgs;
specialArgs = { modulesPath = ../modules; };
}) config options;
# These are the extra arguments passed to every module. In

View file

@ -0,0 +1,31 @@
{ pkgs, nixpkgs, version, versionSuffix }:
pkgs.releaseTools.makeSourceTarball {
name = "nixos-channel";
src = nixpkgs;
officialRelease = false; # FIXME: fix this in makeSourceTarball
inherit version versionSuffix;
buildInputs = [ pkgs.nixUnstable ];
expr = builtins.readFile ./channel-expr.nix;
distPhase = ''
rm -rf .git
echo -n $VERSION_SUFFIX > .version-suffix
echo -n ${nixpkgs.rev or nixpkgs.shortRev} > .git-revision
releaseName=nixos-$VERSION$VERSION_SUFFIX
mkdir -p $out/tarballs
mkdir ../$releaseName
cp -prd . ../$releaseName/nixpkgs
chmod -R u+w ../$releaseName
ln -s nixpkgs/nixos ../$releaseName/nixos
echo "$expr" > ../$releaseName/default.nix
NIX_STATE_DIR=$TMPDIR nix-env -f ../$releaseName/default.nix -qaP --meta --xml \* > /dev/null
cd ..
chmod -R u+w $releaseName
tar cfJ $out/tarballs/$releaseName.tar.xz $releaseName
'';
}

View file

@ -21,7 +21,7 @@ sub new {
my ($class, $args) = @_;
my $startCommand = $args->{startCommand};
my $name = $args->{name};
if (!$name) {
$startCommand =~ /run-(.*)-vm$/ if defined $startCommand;
@ -34,7 +34,7 @@ sub new {
"qemu-kvm -m 384 " .
"-net nic,model=virtio \$QEMU_OPTS ";
my $iface = $args->{hdaInterface} || "virtio";
$startCommand .= "-drive file=" . Cwd::abs_path($args->{hda}) . ",if=$iface,boot=on,werror=report "
$startCommand .= "-drive file=" . Cwd::abs_path($args->{hda}) . ",if=$iface,werror=report "
if defined $args->{hda};
$startCommand .= "-cdrom $args->{cdrom} "
if defined $args->{cdrom};
@ -43,8 +43,6 @@ sub new {
$startCommand .= "-bios $args->{bios} "
if defined $args->{bios};
$startCommand .= $args->{qemuFlags} || "";
} else {
$startCommand = Cwd::abs_path $startCommand;
}
my $tmpDir = $ENV{'TMPDIR'} || "/tmp";
@ -171,7 +169,7 @@ sub start {
eval {
local $SIG{CHLD} = sub { die "QEMU died prematurely\n"; };
# Wait until QEMU connects to the monitor.
accept($self->{monitor}, $monitorS) or die;
@ -182,11 +180,11 @@ sub start {
$self->{socket}->autoflush(1);
};
die "$@" if $@;
$self->waitForMonitorPrompt;
$self->log("QEMU running (pid $pid)");
$self->{pid} = $pid;
$self->{booted} = 1;
}
@ -241,7 +239,7 @@ sub connect {
alarm 300;
readline $self->{socket} or die "the VM quit before connecting\n";
alarm 0;
$self->log("connected to guest root shell");
$self->{connected} = 1;
@ -270,7 +268,7 @@ sub isUp {
sub execute_ {
my ($self, $command) = @_;
$self->connect;
print { $self->{socket} } ("( $command ); echo '|!=EOF' \$?\n");
@ -453,7 +451,7 @@ sub shutdown {
sub crash {
my ($self) = @_;
return unless $self->{booted};
$self->log("forced crash");
$self->sendMonitorCommand("quit");

View file

@ -122,12 +122,16 @@ rec {
${lib.optionalString (builtins.length vms == 1) "--set USE_SERIAL 1"}
''; # "
test = runTests driver;
passMeta = drv: drv // lib.optionalAttrs (t ? meta) {
meta = (drv.meta or {}) // t.meta;
};
report = releaseTools.gcovReport { coverageRuns = [ test ]; };
in (if makeCoverageReport then report else test) // { inherit nodes driver test; };
test = passMeta (runTests driver);
report = passMeta (releaseTools.gcovReport { coverageRuns = [ test ]; });
in (if makeCoverageReport then report else test) // {
inherit nodes driver test;
};
runInMachine =
{ drv

View file

@ -1,5 +1,5 @@
{ modulesPath, ...}:
{
imports = [ "${modulesPath}/virtualisation/amazon-config.nix" ];
imports = [ "${modulesPath}/virtualisation/amazon-init.nix" ];
services.journald.rateLimitBurst = 0;
}

View file

@ -1,3 +1,6 @@
# This module is deprecated, since you can just say fonts.fonts = [
# pkgs.corefonts ]; instead.
{ config, lib, pkgs, ... }:
with lib;
@ -9,6 +12,7 @@ with lib;
fonts = {
enableCoreFonts = mkOption {
visible = false;
default = false;
description = ''
Whether to include Microsoft's proprietary Core Fonts. These fonts

View file

@ -43,7 +43,7 @@ in
consoleFont = mkOption {
type = types.str;
default = "lat9w-16";
default = "Lat2-Terminus16";
example = "LatArCyrHeb-16";
description = ''
The font used for the virtual consoles. Leave empty to use

View file

@ -48,7 +48,7 @@ in
config = mkIf config.krb5.enable {
environment.systemPackages = [ pkgs.krb5 ];
environment.systemPackages = [ pkgs.krb5Full ];
environment.etc."krb5.conf".text =
''

View file

@ -12,7 +12,7 @@ let
# Forces 32bit pulseaudio and alsaPlugins to be built/supported for apps
# using 32bit alsa on 64bit linux.
enable32BitAlsaPlugins = stdenv.isx86_64 && (pkgs_i686.alsaLib != null && pkgs_i686.pulseaudio != null);
enable32BitAlsaPlugins = cfg.support32Bit && stdenv.isx86_64 && (pkgs_i686.alsaLib != null && pkgs_i686.libpulseaudio != null);
ids = config.ids;
@ -78,6 +78,15 @@ in {
'';
};
support32Bit = mkOption {
type = types.bool;
default = false;
description = ''
Whether to include the 32-bit pulseaudio libraries in the system or not.
This is only useful on 64-bit systems and currently limited to x86_64-linux.
'';
};
configFile = mkOption {
type = types.path;
description = ''
@ -89,12 +98,12 @@ in {
package = mkOption {
type = types.package;
default = pulseaudioFull;
default = pulseaudioLight;
example = literalExample "pkgs.pulseaudioFull";
description = ''
The PulseAudio derivation to use. This can be used to disable
features (such as JACK support, Bluetooth) that are enabled in the
pulseaudioFull package in Nixpkgs.
The PulseAudio derivation to use. This can be used to enable
features (such as JACK support, Bluetooth) via the
<literal>pulseaudioFull</literal> package.
'';
};
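# A hedged configuration sketch tying the new options together; the
# hardware.pulseaudio prefix is assumed to be the one this module defines.
{ pkgs, ... }:
{
  hardware.pulseaudio = {
    enable = true;
    support32Bit = true;            # 32-bit ALSA/Pulse libraries, x86_64-linux only
    package = pkgs.pulseaudioFull;  # opt back into JACK/Bluetooth support
  };
}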

View file

@ -63,7 +63,7 @@ in
description = ''
A list of profiles used to setup the global environment.
'';
type = types.listOf types.string;
type = types.listOf types.str;
};
environment.profileRelativeEnvVars = mkOption {

View file

@ -38,7 +38,7 @@ let
pkgs.nano
pkgs.ncurses
pkgs.netcat
pkgs.openssh
config.programs.ssh.package
pkgs.perl
pkgs.procps
pkgs.rsync

View file

@ -108,6 +108,15 @@ let
description = "The user's home directory.";
};
cryptHomeLuks = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Path to the encrypted LUKS device that contains
the user's home directory.
'';
};
shell = mkOption {
type = types.str;
default = "/run/current-system/sw/bin/nologin";

View file

@ -26,7 +26,7 @@ in
hardware.bumblebee.group = mkOption {
default = "wheel";
example = "video";
type = types.uniq types.str;
type = types.str;
description = ''Group for bumblebee socket'';
};
hardware.bumblebee.connectDisplay = mkOption {

View file

@ -7,8 +7,7 @@ with lib;
{
imports =
[ ./channel.nix
./iso-image.nix
[ ./iso-image.nix
# Profiles of this basic installation CD.
../../profiles/all-hardware.nix
@ -21,18 +20,6 @@ with lib;
isoImage.volumeID = substring 0 11 "NIXOS_ISO";
# Make the installer more likely to succeed in low memory
# environments. The kernel's overcommit heuristics bite us
# fairly often, preventing processes such as nix-worker or
# download-using-manifests.pl from forking even if there is
# plenty of free memory.
boot.kernel.sysctl."vm.overcommit_memory" = "1";
# To speed up installation a little bit, include the complete stdenv
# in the Nix store on the CD. Archive::Cpio is needed for the
# initrd builder.
isoImage.storeContents = [ pkgs.stdenv pkgs.busybox pkgs.perlPackages.ArchiveCpio ];
# EFI booting
isoImage.makeEfiBootable = true;
@ -42,9 +29,6 @@ with lib;
# Add Memtest86+ to the CD.
boot.loader.grub.memtest86.enable = true;
# Get a console as soon as the initrd loads fbcon on EFI boot.
boot.initrd.kernelModules = [ "fbcon" ];
# Allow the user to log in as root without a password.
users.extraUsers.root.initialHashedPassword = "";
}

View file

@ -11,9 +11,16 @@ with lib;
# Provide wicd for easy wireless configuration.
#networking.wicd.enable = true;
# Include gparted for partitioning disks
environment.systemPackages = [ pkgs.gparted ];
environment.systemPackages =
[ # Include gparted for partitioning disks.
pkgs.gparted
# Include some editors.
pkgs.vim
pkgs.bvi # binary editor
pkgs.joe
];
# Provide networkmanager for easy wireless configuration.
networking.networkmanager.enable = true;
networking.wireless.enable = mkForce false;
@ -67,7 +74,7 @@ with lib;
loadTemplate("org.kde.plasma-desktop.defaultPanel")
for (var i = 0; i < screenCount; ++i) {
var desktop = new Activity
var desktop = new Activity
desktop.name = i18n("Desktop")
desktop.screen = i
desktop.wallpaperPlugin = 'image'
@ -75,7 +82,7 @@ with lib;
var folderview = desktop.addWidget("folderview");
folderview.writeConfig("url", "desktop:/");
//Create more panels for other screens
if (i > 0){
var panel = new Panel

View file

@ -1,7 +1,7 @@
# This module defines a small NixOS installation CD. It does not
# contain any graphical stuff.
{ config, pkgs, ... }:
{ config, lib, ... }:
{
imports =

View file

@ -40,7 +40,7 @@ let
DEFAULT boot
LABEL boot
MENU LABEL NixOS ${config.system.nixosVersion} Installer
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
INITRD /boot/initrd
@ -192,6 +192,18 @@ in
'';
};
isoImage.appendToMenuLabel = mkOption {
default = " Installer";
example = " Live System";
description = ''
The string to append after the menu label for the NixOS system.
This will be directly appended (without whitespace) to the NixOS version
string; for example, if it is set to <literal>XXX</literal>:
<para><literal>NixOS 99.99-pre666XXX</literal></para>
'';
};
};
config = {
@ -204,7 +216,7 @@ in
# !!! Hack - attributes expected by other modules.
system.boot.loader.kernelFile = "bzImage";
environment.systemPackages = [ pkgs.grub2 pkgs.syslinux ];
environment.systemPackages = [ pkgs.grub2 pkgs.grub2_efi pkgs.syslinux ];
# In stage 1 of the boot, mount the CD as the root FS by label so
# that we don't need to know its device. We pass the label of the

View file

@ -70,7 +70,7 @@ my @attrs = ();
my @kernelModules = ();
my @initrdKernelModules = ();
my @modulePackages = ();
my @imports = ("<nixpkgs/nixos/modules/installer/scan/not-detected.nix>");
my @imports;
sub debug {
@ -245,6 +245,18 @@ if ($virt eq "qemu" || $virt eq "kvm" || $virt eq "bochs") {
}
# Pull in NixOS configuration for containers.
if ($virt eq "systemd-nspawn") {
push @attrs, "boot.isContainer = true;";
}
# Provide firmware for devices that are not detected by this script,
# unless we're in a VM/container.
push @imports, "<nixpkgs/nixos/modules/installer/scan/not-detected.nix>"
if $virt eq "none";
# For a device name like /dev/sda1, find a more stable path like
# /dev/disk/by-uuid/X or /dev/disk/by-label/Y.
sub findStableDevPath {
@ -311,9 +323,9 @@ foreach my $fs (read_file("/proc/self/mountinfo")) {
# Maybe this is a bind-mount of a filesystem we saw earlier?
if (defined $fsByDev{$fields[2]}) {
# Make sure this isn't a btrfs subvolume
my ($status, @msg) = runCommand("btrfs subvol show $rootDir$mountPoint");
if (join("", @msg) =~ /ERROR:/) {
# Make sure this isn't a btrfs subvolume.
my $msg = `btrfs subvol show $rootDir$mountPoint`;
if ($? != 0 || $msg =~ /ERROR:/s) {
my $path = $fields[3]; $path = "" if $path eq "/";
my $base = $fsByDev{$fields[2]};
$base = "" if $base eq "/";
@ -354,7 +366,7 @@ EOF
if ($status != 0 || join("", @msg) =~ /ERROR:/) {
die "Failed to retrieve subvolume info for $mountPoint\n";
}
my @ids = join("", @id_info) =~ m/Object ID:[ \t\n]*([^ \t\n]*)/;
my @ids = join("", @id_info) =~ m/Subvolume ID:[ \t\n]*([^ \t\n]*)/;
if ($#ids > 0) {
die "Btrfs subvol name for $mountPoint listed multiple times in mount\n"
} elsif ($#ids == 0) {
@ -459,14 +471,14 @@ if ($showHardwareConfig) {
if ($force || ! -e $fn) {
print STDERR "writing $fn...\n";
my $bootloaderConfig;
my $bootloaderConfig = "";
if (-e "/sys/firmware/efi/efivars") {
$bootLoaderConfig = <<EOF;
# Use the gummiboot efi boot loader.
boot.loader.gummiboot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
EOF
} else {
} elsif ($virt ne "systemd-nspawn") {
$bootLoaderConfig = <<EOF;
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
@ -495,7 +507,7 @@ $bootLoaderConfig
# Select internationalisation properties.
# i18n = {
# consoleFont = "lat9w-16";
# consoleFont = "Lat2-Terminus16";
# consoleKeyMap = "us";
# defaultLocale = "en_US.UTF-8";
# };

View file

@ -21,7 +21,7 @@ with lib;
warnings = mkOption {
internal = true;
default = [];
type = types.listOf types.string;
type = types.listOf types.str;
example = [ "The `foo' service is deprecated and will go away soon!" ];
description = ''
This option allows modules to show warnings to users during

View file

@ -2,8 +2,6 @@
{
_module.args = {
modulesPath = ../.;
pkgs_i686 = import ../../lib/nixpkgs.nix {
system = "i686-linux";
config.allowUnfree = true;

View file

@ -217,6 +217,15 @@
asterisk = 192;
plex = 193;
bird = 195;
grafana = 196;
skydns = 197;
ripple-rest = 198;
nix-serve = 199;
tvheadend = 200;
uwsgi = 201;
gitit = 202;
riemanntools = 203;
subsonic = 204;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -373,7 +382,7 @@
seeks = 148;
prosody = 149;
i2pd = 150;
#dnscrypt-proxy = 151; # unused
dnscrypt-proxy = 151;
systemd-network = 152;
systemd-resolve = 153;
systemd-timesync = 154;
@ -412,6 +421,15 @@
plex = 193;
sabnzbd = 194;
bird = 195;
#grafana = 196; #unused
#skydns = 197; #unused
#ripple-rest = 198; #unused
#nix-serve = 199; #unused
#tvheadend = 200; #unused
uwsgi = 201;
gitit = 202;
riemanntools = 203;
subsonic = 204;
# When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal

View file

@ -59,7 +59,7 @@ in
};
nixpkgs.system = mkOption {
type = types.uniq types.str;
type = types.str;
example = "i686-linux";
description = ''
Specifies the Nix platform type for which NixOS should be built.

View file

@ -84,6 +84,7 @@
./security/grsecurity.nix
./security/pam.nix
./security/pam_usb.nix
./security/pam_mount.nix
./security/polkit.nix
./security/prey.nix
./security/rngd.nix
@ -188,12 +189,14 @@
./services/misc/cpuminer-cryptonight.nix
./services/misc/cgminer.nix
./services/misc/confd.nix
./services/misc/devmon.nix
./services/misc/dictd.nix
./services/misc/disnix.nix
./services/misc/docker-registry.nix
./services/misc/etcd.nix
./services/misc/felix.nix
./services/misc/folding-at-home.nix
./services/misc/gitit.nix
./services/misc/gitlab.nix
./services/misc/gitolite.nix
./services/misc/gpsd.nix
@ -202,6 +205,7 @@
./services/misc/mediatomb.nix
./services/misc/mesos-master.nix
./services/misc/mesos-slave.nix
./services/misc/mwlib.nix
./services/misc/nix-daemon.nix
./services/misc/nix-gc.nix
./services/misc/nixos-manual.nix
@ -211,9 +215,12 @@
./services/misc/plex.nix
./services/misc/redmine.nix
./services/misc/rippled.nix
./services/misc/ripple-rest.nix
./services/misc/ripple-data-api.nix
./services/misc/rogue.nix
./services/misc/siproxd.nix
./services/misc/subsonic.nix
./services/misc/sundtek.nix
./services/misc/svnserve.nix
./services/misc/synergy.nix
./services/misc/uhub.nix
@ -222,13 +229,16 @@
./services/monitoring/bosun.nix
./services/monitoring/cadvisor.nix
./services/monitoring/collectd.nix
./services/monitoring/das_watchdog.nix
./services/monitoring/dd-agent.nix
./services/monitoring/grafana.nix
./services/monitoring/graphite.nix
./services/monitoring/monit.nix
./services/monitoring/munin.nix
./services/monitoring/nagios.nix
./services/monitoring/riemann.nix
./services/monitoring/riemann-dash.nix
./services/monitoring/riemann-tools.nix
./services/monitoring/scollector.nix
./services/monitoring/smartd.nix
./services/monitoring/statsd.nix
@ -266,6 +276,7 @@
./services/networking/dhcpd.nix
./services/networking/dnscrypt-proxy.nix
./services/networking/dnsmasq.nix
./services/networking/docker-registry-server.nix
./services/networking/ejabberd.nix
./services/networking/firefox/sync-server.nix
./services/networking/firewall.nix
@ -306,6 +317,7 @@
./services/networking/privoxy.nix
./services/networking/prosody.nix
./services/networking/quassel.nix
./services/networking/racoon.nix
./services/networking/radicale.nix
./services/networking/radvd.nix
./services/networking/rdnssd.nix
@ -313,6 +325,7 @@
./services/networking/sabnzbd.nix
./services/networking/searx.nix
./services/networking/seeks.nix
./services/networking/skydns.nix
./services/networking/spiped.nix
./services/networking/sslh.nix
./services/networking/ssh/lshd.nix
@ -326,6 +339,7 @@
./services/networking/tftpd.nix
./services/networking/tlsdated.nix
./services/networking/tox-bootstrapd.nix
./services/networking/tvheadend.nix
./services/networking/unbound.nix
./services/networking/unifi.nix
./services/networking/vsftpd.nix
@ -334,6 +348,7 @@
./services/networking/wicd.nix
./services/networking/wpa_supplicant.nix
./services/networking/xinetd.nix
./services/networking/zerotierone.nix
./services/networking/znc.nix
./services/printing/cupsd.nix
./services/scheduling/atd.nix
@ -373,6 +388,7 @@
./services/web-servers/lighttpd/gitweb.nix
./services/web-servers/nginx/default.nix
./services/web-servers/phpfpm.nix
./services/web-servers/shellinabox.nix
./services/web-servers/tomcat.nix
./services/web-servers/uwsgi.nix
./services/web-servers/varnish/default.nix
@ -406,12 +422,14 @@
./services/x11/xserver.nix
./system/activation/activation-script.nix
./system/activation/top-level.nix
./system/boot/coredump.nix
./system/boot/emergency-mode.nix
./system/boot/kernel.nix
./system/boot/kexec.nix
./system/boot/loader/efi.nix
./system/boot/loader/loader.nix
./system/boot/loader/generations-dir/generations-dir.nix
./system/boot/loader/generic-extlinux-compatible
./system/boot/loader/grub/grub.nix
./system/boot/loader/grub/ipxe.nix
./system/boot/loader/grub/memtest.nix
@ -436,6 +454,7 @@
./tasks/filesystems.nix
./tasks/filesystems/btrfs.nix
./tasks/filesystems/cifs.nix
./tasks/filesystems/exfat.nix
./tasks/filesystems/ext.nix
./tasks/filesystems/f2fs.nix
./tasks/filesystems/jfs.nix

View file

@ -40,7 +40,7 @@
"ohci1394" "sbp2"
# Virtio (QEMU, KVM etc.) support.
"virtio_net" "virtio_pci" "virtio_blk" "virtio_balloon" "virtio_console"
"virtio_net" "virtio_pci" "virtio_blk" "virtio_scsi" "virtio_balloon" "virtio_console"
# Keyboards
"usbhid" "hid_apple" "hid_logitech_dj" "hid_lenovo_tpkbd" "hid_roccat"

View file

@ -44,11 +44,6 @@
pkgs.zip
pkgs.dar # disk archiver
pkgs.cabextract
# Some editors.
pkgs.vim
pkgs.bvi # binary editor
pkgs.joe
];
# Include support for various filesystems.

View file

@ -1,5 +1,5 @@
# Provide a basic configuration for installation devices like CDs.
{ config, lib, ... }:
{ config, pkgs, lib, ... }:
with lib;
@ -13,10 +13,17 @@ with lib;
# Allow "nixos-rebuild" to work properly by providing
# /etc/nixos/configuration.nix.
./clone-config.nix
# Include a copy of Nixpkgs so that nixos-install works out of
# the box.
../installer/cd-dvd/channel.nix
];
config = {
# Enable in installer, even if the minimal profile disables it.
services.nixosManual.enable = mkForce true;
# Show the manual.
services.nixosManual.showManual = true;
@ -43,7 +50,7 @@ with lib;
systemd.services.sshd.wantedBy = mkOverride 50 [];
# Enable wpa_supplicant, but don't start it by default.
networking.wireless.enable = true;
networking.wireless.enable = mkDefault true;
jobs.wpa_supplicant.startOn = mkOverride 50 "";
# Tell the Nix evaluator to garbage collect more aggressively.
@ -51,5 +58,17 @@ with lib;
# (yet) have swap set up.
environment.variables.GC_INITIAL_HEAP_SIZE = "100000";
# Make the installer more likely to succeed in low memory
# environments. The kernel's overcommit heuristics bite us
# fairly often, preventing processes such as nix-worker or
# download-using-manifests.pl from forking even if there is
# plenty of free memory.
boot.kernel.sysctl."vm.overcommit_memory" = "1";
# To speed up installation a little bit, include the complete
# stdenv in the Nix store on the CD. Archive::Cpio is needed for
# the initrd builder.
system.extraDependencies = [ pkgs.stdenv pkgs.busybox pkgs.perlPackages.ArchiveCpio ];
};
}

View file

@ -8,4 +8,5 @@ with lib;
{
environment.noXlibs = mkDefault true;
i18n.supportedLocales = [ config.i18n.defaultLocale ];
services.nixosManual.enable = mkDefault false;
}

View file

@ -4,7 +4,7 @@
{ config, pkgs, ... }:
{
boot.initrd.availableKernelModules = [ "virtio_net" "virtio_pci" "virtio_blk" "9p" "9pnet_virtio" ];
boot.initrd.availableKernelModules = [ "virtio_net" "virtio_pci" "virtio_blk" "virtio_scsi" "9p" "9pnet_virtio" ];
boot.initrd.kernelModules = [ "virtio_balloon" "virtio_console" "virtio_rng" ];
boot.initrd.postDeviceCommands =

View file

@ -27,7 +27,7 @@ in
programs.ssh = {
askPassword = mkOption {
type = types.string;
type = types.str;
default = "${pkgs.x11_ssh_askpass}/libexec/x11-ssh-askpass";
description = ''Program used by SSH to ask for passwords.'';
};
@ -77,7 +77,7 @@ in
};
agentTimeout = mkOption {
type = types.nullOr types.string;
type = types.nullOr types.str;
default = null;
example = "1h";
description = ''

View file

@ -166,7 +166,7 @@ in
script = "exec venus-planet ${configFile}";
serviceConfig.User = "${cfg.user}";
serviceConfig.Group = "${cfg.group}";
environment.OPENSSL_X509_CERT_FILE = "/etc/ssl/certs/ca-bundle.crt";
environment.SSL_CERT_FILE = "/etc/ssl/certs/ca-bundle.crt";
startAt = cfg.dates;
};

View file

@ -141,6 +141,9 @@ in zipModules ([]
++ obsolete [ "services" "xserver" "windowManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ]
++ obsolete [ "services" "xserver" "desktopManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ]
# DNSCrypt-proxy
++ obsolete [ "services" "dnscrypt-proxy" "port" ] [ "services" "dnscrypt-proxy" "localPort" ]
# Options that are obsolete and have no replacement.
++ obsolete' [ "boot" "loader" "grub" "bootDevice" ]
++ obsolete' [ "boot" "initrd" "luks" "enable" ]
@ -148,5 +151,6 @@ in zipModules ([]
++ obsolete' [ "services" "samba" "defaultShare" ]
++ obsolete' [ "services" "syslog-ng" "serviceName" ]
++ obsolete' [ "services" "syslog-ng" "listenToJournal" ]
++ obsolete' [ "ec2" "metadata" ]
)

View file

@ -22,7 +22,7 @@ in
security.pki.certificateFiles = mkOption {
type = types.listOf types.path;
default = [];
example = literalExample "[ \"\${pkgs.cacert}/etc/ca-bundle.crt\" ]";
example = literalExample "[ \"\${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt\" ]";
description = ''
A list of files containing trusted root certificates in PEM
format. These are concatenated to form
@ -33,7 +33,7 @@ in
};
security.pki.certificates = mkOption {
type = types.listOf types.string;
type = types.listOf types.str;
default = [];
example = singleton ''
NixOS.org
@ -53,7 +53,7 @@ in
config = {
security.pki.certificateFiles = [ "${pkgs.cacert}/etc/ca-bundle.crt" ];
security.pki.certificateFiles = [ "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ];
# NixOS canonical location + Debian/Ubuntu/Arch/Gentoo compatibility.
environment.etc."ssl/certs/ca-certificates.crt".source = caBundle;
@ -66,8 +66,6 @@ in
environment.sessionVariables =
{ SSL_CERT_FILE = "/etc/ssl/certs/ca-certificates.crt";
# FIXME: unneeded - remove eventually.
OPENSSL_X509_CERT_FILE = "/etc/ssl/certs/ca-certificates.crt";
# FIXME: unneeded - remove eventually.
GIT_SSL_CAINFO = "/etc/ssl/certs/ca-certificates.crt";
};

View file

@ -126,6 +126,14 @@ let
'';
};
pamMount = mkOption {
default = config.security.pam.mount.enable;
type = types.bool;
description = ''
Enable PAM mount (pam_mount) system to mount filesystems on user login.
'';
};
allowNullPassword = mkOption {
default = false;
type = types.bool;
@ -224,7 +232,9 @@ let
${optionalString cfg.usbAuth
"auth sufficient ${pkgs.pam_usb}/lib/security/pam_usb.so"}
${optionalString cfg.unixAuth
"auth ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth"}
"auth ${if (config.security.pam.enableEcryptfs || cfg.pamMount) then "required" else "sufficient"} pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth"}
${optionalString cfg.pamMount
"auth optional ${pkgs.pam_mount}/lib/security/pam_mount.so"}
${optionalString config.security.pam.enableEcryptfs
"auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
${optionalString cfg.otpwAuth
@ -238,12 +248,14 @@ let
auth [default=die success=done] ${pam_ccreds}/lib/security/pam_ccreds.so action=validate use_first_pass
auth sufficient ${pam_ccreds}/lib/security/pam_ccreds.so action=store use_first_pass
''}
${optionalString (! config.security.pam.enableEcryptfs) "auth required pam_deny.so"}
${optionalString (!(config.security.pam.enableEcryptfs || cfg.pamMount)) "auth required pam_deny.so"}
# Password management.
${optionalString config.security.pam.enableEcryptfs
"password optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
password requisite pam_unix.so nullok sha512
${optionalString cfg.pamMount
"password optional ${pkgs.pam_mount}/lib/security/pam_mount.so"}
${optionalString config.users.ldap.enable
"password sufficient ${pam_ldap}/lib/security/pam_ldap.so"}
${optionalString config.krb5.enable
@ -280,6 +292,8 @@ let
"session required ${pkgs.pam}/lib/security/pam_limits.so conf=${makeLimitsConf cfg.limits}"}
${optionalString (cfg.showMotd && config.users.motd != null)
"session optional ${pkgs.pam}/lib/security/pam_motd.so motd=${motd}"}
${optionalString cfg.pamMount
"session optional ${pkgs.pam_mount}/lib/security/pam_mount.so"}
'';
};

View file

@ -0,0 +1,72 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.security.pam.mount;
anyPamMount = any (attrByPath ["pamMount"] false) (attrValues config.security.pam.services);
in
{
options = {
security.pam.mount = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable PAM mount system to mount filesystems on user login.
'';
};
extraVolumes = mkOption {
type = types.listOf types.str;
default = [];
description = ''
List of volume definitions for pam_mount.
For more information, visit <link
xlink:href="http://pam-mount.sourceforge.net/pam_mount.conf.5.html" />.
'';
};
};
};
config = mkIf (cfg.enable || anyPamMount) {
environment.systemPackages = [ pkgs.pam_mount ];
environment.etc = [{
target = "security/pam_mount.conf.xml";
source =
let
extraUserVolumes = filterAttrs (n: u: u.cryptHomeLuks != null) config.users.extraUsers;
userVolumeEntry = user: "<volume user=\"${user.name}\" path=\"${user.cryptHomeLuks}\" mountpoint=\"${user.home}\" />\n";
in
pkgs.writeText "pam_mount.conf.xml" ''
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE pam_mount SYSTEM "pam_mount.conf.xml.dtd">
<!-- auto generated from NixOS: modules/config/users-groups.nix -->
<pam_mount>
<debug enable="0" />
${concatStrings (map userVolumeEntry (attrValues extraUserVolumes))}
${concatStringsSep "\n" cfg.extraVolumes}
<!-- if activated, requires ofl from hxtools to be present -->
<logout wait="0" hup="no" term="no" kill="no" />
<!-- set PATH variable for pam_mount module -->
<path>${pkgs.utillinux}/bin</path>
<!-- create mount point if not present -->
<mkmountpoint enable="1" remove="true" />
<!-- specify the binaries to be called -->
<cryptmount>${pkgs.pam_mount}/bin/mount.crypt %(VOLUME) %(MNTPT)</cryptmount>
<cryptumount>${pkgs.pam_mount}/bin/umount.crypt %(MNTPT)</cryptumount>
<pmvarrun>${pkgs.pam_mount}/bin/pmvarrun -u %(USER) -o %(OPERATION)</pmvarrun>
</pam_mount>
'';
}];
};
}
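
The module above activates whenever security.pam.mount.enable is set or any PAM service sets pamMount, and it derives per-user volumes from users.extraUsers.<name>.cryptHomeLuks. A minimal sketch of how it might be used from configuration.nix (the user name and device path are hypothetical):

    security.pam.mount.enable = true;
    users.extraUsers.alice = {                          # hypothetical user
      home = "/home/alice";
      cryptHomeLuks = "/dev/disk/by-uuid/0000-0000";    # hypothetical LUKS-encrypted home volume
    };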

View file

@ -118,7 +118,7 @@ in {
preStart = "mkdir -p ${cfg.dataDir} && chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}";
script = "exec mpd --no-daemon ${mpdConf}";
serviceConfig = {
User = "mpd";
User = "${cfg.user}";
PermissionsStartOnly = true;
};
};

View file

@ -95,7 +95,7 @@ in {
port = mkOption {
default = 35000;
type = types.uniq types.int;
type = types.int;
description = ''
Port for Almir web server to listen on.
'';

View file

@ -182,7 +182,7 @@ in {
port = mkOption {
default = 9102;
type = types.uniq types.int;
type = types.int;
description = ''
This specifies the port number on which the Client listens for Director connections. It must agree with the FDPort specified in the Client resource of the Director's configuration file. The default is 9102.
'';
@ -237,7 +237,7 @@ in {
port = mkOption {
default = 9103;
type = types.uniq types.int;
type = types.int;
description = ''
Specifies port number on which the Storage daemon listens for Director connections. The default is 9103.
'';
@ -302,7 +302,7 @@ in {
port = mkOption {
default = 9101;
type = types.uniq types.int;
type = types.int;
description = ''
Specify the port (a positive integer) on which the Director daemon will listen for Bacula Console connections. This same port number must be specified in the Director resource of the Console configuration file. The default is 9101, so normally this directive need not be specified. This directive should not be used if you specify DirAddresses (N.B plural) directive.
'';

View file

@ -224,7 +224,7 @@ in {
machines = mkOption {
description = "Kubernetes controller list of machines to schedule to schedule onto";
default = [config.networking.hostName];
default = [];
type = types.listOf types.str;
};
@ -242,6 +242,12 @@ in {
type = types.bool;
};
registerNode = mkOption {
description = "Whether to auto register kubelet with API server.";
default = true;
type = types.bool;
};
address = mkOption {
description = "Kubernetes kubelet info server listening address.";
default = "0.0.0.0";
@ -274,7 +280,7 @@ in {
cadvisorPort = mkOption {
description = "Kubernetes kubelet local cadvisor port.";
default = config.services.cadvisor.port;
default = 4194;
type = types.int;
};
@ -286,7 +292,7 @@ in {
clusterDomain = mkOption {
description = "Use alternative domain.";
default = "";
default = "kubernetes.io";
type = types.str;
};
@ -322,13 +328,35 @@ in {
type = types.str;
};
};
kube2sky = {
enable = mkEnableOption "Whether to enable kube2sky dns service.";
domain = mkOption {
description = "Kuberntes kube2sky domain under which all DNS names will be hosted.";
default = cfg.kubelet.clusterDomain;
type = types.str;
};
master = mkOption {
description = "Kubernetes apiserver address";
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
type = types.str;
};
extraOpts = mkOption {
description = "Kubernetes kube2sky extra command line options.";
default = "";
type = types.str;
};
};
};
###### implementation
config = mkMerge [
(mkIf cfg.apiserver.enable {
systemd.services.kubernetes-apiserver = {
systemd.services.kube-apiserver = {
description = "Kubernetes Api Server";
wantedBy = [ "multi-user.target" ];
requires = ["kubernetes-setup.service"];
@ -343,26 +371,25 @@ in {
(concatImapStringsSep "\n" (i: v: v + "," + (toString i))
(mapAttrsToList (name: token: token + "," + name) cfg.apiserver.tokenAuth));
in ''${cfg.package}/bin/kube-apiserver \
--etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
--address=${cfg.apiserver.address} \
--port=${toString cfg.apiserver.port} \
--read_only_port=${toString cfg.apiserver.readOnlyPort} \
--public_address_override=${cfg.apiserver.publicAddress} \
--allow_privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \
--etcd-servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
--insecure-bind-address=${cfg.apiserver.address} \
--insecure-port=${toString cfg.apiserver.port} \
--read-only-port=${toString cfg.apiserver.readOnlyPort} \
--bind-address=${cfg.apiserver.publicAddress} \
--allow-privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \
${optionalString (cfg.apiserver.tlsCertFile!="")
"--tls_cert_file=${cfg.apiserver.tlsCertFile}"} \
"--tls-cert-file=${cfg.apiserver.tlsCertFile}"} \
${optionalString (cfg.apiserver.tlsPrivateKeyFile!="")
"--tls_private_key_file=${cfg.apiserver.tlsPrivateKeyFile}"} \
"--tls-private-key-file=${cfg.apiserver.tlsPrivateKeyFile}"} \
${optionalString (cfg.apiserver.tokenAuth!=[])
"--token_auth_file=${tokenAuthFile}"} \
--authorization_mode=${cfg.apiserver.authorizationMode} \
"--token-auth-file=${tokenAuthFile}"} \
--authorization-mode=${cfg.apiserver.authorizationMode} \
${optionalString (cfg.apiserver.authorizationMode == "ABAC")
"--authorization_policy_file=${authorizationPolicyFile}"} \
--secure_port=${toString cfg.apiserver.securePort} \
--portal_net=${cfg.apiserver.portalNet} \
"--authorization-policy-file=${authorizationPolicyFile}"} \
--secure-port=${toString cfg.apiserver.securePort} \
--service-cluster-ip-range=${cfg.apiserver.portalNet} \
--logtostderr=true \
--runtime_config=api/v1beta3 \
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
${cfg.apiserver.extraOpts}
'';
User = "kubernetes";
@ -376,7 +403,7 @@ in {
})
(mkIf cfg.scheduler.enable {
systemd.services.kubernetes-scheduler = {
systemd.services.kube-scheduler = {
description = "Kubernetes Scheduler Service";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" "kubernetes-apiserver.service" ];
@ -386,7 +413,7 @@ in {
--port=${toString cfg.scheduler.port} \
--master=${cfg.scheduler.master} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
${cfg.scheduler.extraOpts}
'';
User = "kubernetes";
@ -395,7 +422,7 @@ in {
})
(mkIf cfg.controllerManager.enable {
systemd.services.kubernetes-controller-manager = {
systemd.services.kube-controller-manager = {
description = "Kubernetes Controller Manager Service";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" "kubernetes-apiserver.service" ];
@ -406,7 +433,7 @@ in {
--master=${cfg.controllerManager.master} \
--machines=${concatStringsSep "," cfg.controllerManager.machines} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
${cfg.controllerManager.extraOpts}
'';
User = "kubernetes";
@ -415,7 +442,7 @@ in {
})
(mkIf cfg.kubelet.enable {
systemd.services.kubernetes-kubelet = {
systemd.services.kubelet = {
description = "Kubernetes Kubelet Service";
wantedBy = [ "multi-user.target" ];
requires = ["kubernetes-setup.service"];
@ -423,17 +450,18 @@ in {
script = ''
export PATH="/bin:/sbin:/usr/bin:/usr/sbin:$PATH"
exec ${cfg.package}/bin/kubelet \
--api_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.kubelet.apiServers} \
--api-servers=${concatMapStringsSep "," (f: "http://${f}") cfg.kubelet.apiServers} \
--register-node=${if cfg.kubelet.registerNode then "true" else "false"} \
--address=${cfg.kubelet.address} \
--port=${toString cfg.kubelet.port} \
--hostname_override=${cfg.kubelet.hostname} \
--allow_privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
--root_dir=${cfg.dataDir} \
--hostname-override=${cfg.kubelet.hostname} \
--allow-privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
--root-dir=${cfg.dataDir} \
--cadvisor_port=${toString cfg.kubelet.cadvisorPort} \
${optionalString (cfg.kubelet.clusterDns != "")
''--cluster_dns=${cfg.kubelet.clusterDns}''} \
''--cluster-dns=${cfg.kubelet.clusterDns}''} \
${optionalString (cfg.kubelet.clusterDomain != "")
''--cluster_domain=${cfg.kubelet.clusterDomain}''} \
''--cluster-domain=${cfg.kubelet.clusterDomain}''} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${cfg.kubelet.extraOpts}
@ -443,32 +471,53 @@ in {
})
(mkIf cfg.proxy.enable {
systemd.services.kubernetes-proxy = {
systemd.services.kube-proxy = {
description = "Kubernetes Proxy Service";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" "etcd.service" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-proxy \
--master=${cfg.proxy.master} \
--bind_address=${cfg.proxy.address} \
--bind-address=${cfg.proxy.address} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
${cfg.proxy.extraOpts}
'';
};
};
})
(mkIf cfg.kube2sky.enable {
systemd.services.kube2sky = {
description = "Kubernetes Dns Bridge Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "skydns.service" "etcd.service" "kubernetes-apiserver.service" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube2sky \
-etcd-server=http://${head cfg.etcdServers} \
-domain=${cfg.kube2sky.domain} \
-kube_master_url=http://${cfg.kube2sky.master} \
-logtostderr=true \
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
${cfg.kube2sky.extraOpts}
'';
User = "kubernetes";
};
};
services.skydns.enable = mkDefault true;
services.skydns.domain = mkDefault cfg.kubelet.clusterDomain;
})
(mkIf (any (el: el == "master") cfg.roles) {
services.kubernetes.apiserver.enable = mkDefault true;
services.kubernetes.scheduler.enable = mkDefault true;
services.kubernetes.controllerManager.enable = mkDefault true;
services.kubernetes.kube2sky.enable = mkDefault true;
})
(mkIf (any (el: el == "node") cfg.roles) {
virtualisation.docker.enable = mkDefault true;
services.cadvisor.enable = mkDefault true;
services.cadvisor.port = mkDefault 4194;
services.kubernetes.kubelet.enable = mkDefault true;
services.kubernetes.proxy.enable = mkDefault true;
})
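
With the units renamed to kube-* and kube2sky/skydns wired in above, the roles option is enough to bring up a combined master/node on one machine. A minimal sketch, assuming only the option paths visible in this diff (the etcd address is illustrative):

    services.kubernetes = {
      roles = [ "master" "node" ];        # master pulls in apiserver, scheduler,
                                          # controllerManager and kube2sky via mkDefault;
                                          # node pulls in kubelet, proxy, docker and cadvisor
      etcdServers = [ "127.0.0.1:4001" ]; # hypothetical etcd endpoint
    };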

View file

@ -50,14 +50,14 @@ in {
port = mkOption {
default = 8080;
type = types.uniq types.int;
type = types.int;
description = ''
Specifies port number on which the jenkins HTTP interface listens. The default is 8080.
'';
};
packages = mkOption {
default = [ pkgs.stdenv pkgs.git pkgs.jdk pkgs.openssh pkgs.nix ];
default = [ pkgs.stdenv pkgs.git pkgs.jdk config.programs.ssh.package pkgs.nix ];
type = types.listOf types.package;
description = ''
Packages to add to PATH for the jenkins process.

View file

@ -55,7 +55,7 @@ in
enable = mkOption {
default = false;
description = "Whether to enable the influxdb server";
type = types.uniq types.bool;
type = types.bool;
};
package = mkOption {

View file

@ -180,7 +180,8 @@ in
chown -R ${cfg.user} ${cfg.pidDir}
# Make the socket directory
mkdir -m 0755 -p /run/mysqld
mkdir -p /run/mysqld
chmod 0755 /run/mysqld
chown -R ${cfg.user} /run/mysqld
'';

View file

@ -43,7 +43,7 @@ in {
enable = mkOption {
description = "Whether to enable neo4j.";
default = false;
type = types.uniq types.bool;
type = types.bool;
};
package = mkOption {

View file

@ -154,7 +154,7 @@ in
config = mkIf config.services.postgresql.enable {
services.postgresql.authentication =
services.postgresql.authentication = mkAfter
''
# Generated file; do not edit!
local all all ident ${optionalString pre84 "sameuser"}
@ -186,8 +186,9 @@ in
preStart =
''
# Initialise the database.
if ! test -e ${cfg.dataDir}; then
if ! test -e ${cfg.dataDir}/PG_VERSION; then
mkdir -m 0700 -p ${cfg.dataDir}
rm -f ${cfg.dataDir}/*.conf
if [ "$(id -u)" = 0 ]; then
chown -R postgres ${cfg.dataDir}
su -s ${pkgs.stdenv.shell} postgres -c 'initdb -U root'
@ -195,8 +196,6 @@ in
# For non-root operation.
initdb
fi
rm -f ${cfg.dataDir}/*.conf
touch "${cfg.dataDir}/.first_startup"
fi
ln -sfn "${configFile}" "${cfg.dataDir}/postgresql.conf"

View file

@ -21,7 +21,7 @@ in
description = ''
Whether to enable GNOME Keyring daemon, a service designed to
take care of the user's security credentials,
such as user names and passwordsa search engine.
such as user names and passwords.
'';
};

View file

@ -28,7 +28,10 @@ let
# Perform substitutions in all udev rules files.
udevRules = stdenv.mkDerivation {
name = "udev-rules";
preferLocalBuild = true;
allowSubstitutes = false;
buildCommand = ''
mkdir -p $out
shopt -s nullglob

View file

@ -46,7 +46,7 @@ with lib;
serviceConfig = {
Type = "dbus";
BusName = "org.freedesktop.UDisks2";
ExecStart = "${pkgs.udisks2}/lib/udisks2/udisksd --no-debug";
ExecStart = "${pkgs.udisks2}/libexec/udisks2/udisksd --no-debug";
};
};
};

View file

@ -192,7 +192,7 @@ in
extraGroups = mkOption {
default = [];
type = types.listOf types.string;
type = types.listOf types.str;
example = [ "postdrop" "mongodb" ];
description = ''
Extra groups for the logcheck user, for example to be able to use sendmail,

View file

@ -66,7 +66,7 @@ in
};
extraParams = mkOption {
type = types.listOf types.string;
type = types.listOf types.str;
default = [ ];
example = [ "-m 0" ];
description = ''

View file

@ -83,7 +83,7 @@ in
};
extraParams = mkOption {
type = types.listOf types.string;
type = types.listOf types.str;
default = [ ];
example = [ "-m 0" ];
description = ''

View file

@ -24,7 +24,7 @@ in {
};
extraServerArgs = mkOption {
type = types.listOf types.string;
type = types.listOf types.str;
default = [];
example = [ "-v" "-P mta" ];
description = ''

View file

@ -78,7 +78,7 @@ let
smtpd_use_tls = yes
recipientDelimiter = ${cfg.recipientDelimiter}
recipient_delimiter = ${cfg.recipientDelimiter}
''
+ optionalString (cfg.virtual != "") ''
virtual_alias_maps = hash:/etc/postfix/virtual
@ -369,30 +369,30 @@ in
daemonType = "fork";
preStart =
''
if ! [ -d /var/spool/postfix ]; then
${pkgs.coreutils}/bin/mkdir -p /var/spool/mail /var/postfix/conf /var/postfix/queue
fi
preStart = ''
if ! [ -d /var/spool/postfix ]; then
${pkgs.coreutils}/bin/mkdir -p /var/spool/mail /var/postfix/conf /var/postfix/queue
fi
${pkgs.coreutils}/bin/chown -R ${user}:${group} /var/postfix
${pkgs.coreutils}/bin/chown -R ${user}:${setgidGroup} /var/postfix/queue
${pkgs.coreutils}/bin/chmod -R ug+rwX /var/postfix/queue
${pkgs.coreutils}/bin/chown root:root /var/spool/mail
${pkgs.coreutils}/bin/chmod a+rwxt /var/spool/mail
${pkgs.coreutils}/bin/chown -R ${user}:${group} /var/postfix
${pkgs.coreutils}/bin/chown -R ${user}:${setgidGroup} /var/postfix/queue
${pkgs.coreutils}/bin/chmod -R ug+rwX /var/postfix/queue
${pkgs.coreutils}/bin/chown root:root /var/spool/mail
${pkgs.coreutils}/bin/chmod a+rwxt /var/spool/mail
${pkgs.coreutils}/bin/ln -sf /var/spool/mail /var/mail
ln -sf "${pkgs.postfix}/share/postfix/conf/"* /var/postfix/conf
ln -sf "${pkgs.postfix}/etc/postfix/"* /var/postfix/conf
ln -sf ${aliasesFile} /var/postfix/conf/aliases
ln -sf ${virtualFile} /var/postfix/conf/virtual
ln -sf ${mainCfFile} /var/postfix/conf/main.cf
ln -sf ${masterCfFile} /var/postfix/conf/master.cf
ln -sf ${aliasesFile} /var/postfix/conf/aliases
ln -sf ${virtualFile} /var/postfix/conf/virtual
ln -sf ${mainCfFile} /var/postfix/conf/main.cf
ln -sf ${masterCfFile} /var/postfix/conf/master.cf
${pkgs.postfix}/sbin/postalias -c /var/postfix/conf /var/postfix/conf/aliases
${pkgs.postfix}/sbin/postmap -c /var/postfix/conf /var/postfix/conf/virtual
${pkgs.postfix}/sbin/postalias -c /var/postfix/conf /var/postfix/conf/aliases
${pkgs.postfix}/sbin/postmap -c /var/postfix/conf /var/postfix/conf/virtual
${pkgs.postfix}/sbin/postfix -c /var/postfix/conf start
'';
${pkgs.postfix}/sbin/postfix -c /var/postfix/conf start
'';
preStop = ''
${pkgs.postfix}/sbin/postfix -c /var/postfix/conf stop

View file

@ -33,7 +33,7 @@ in {
enable = mkOption {
description = "Whether to enable Apache Kafka.";
default = false;
type = types.uniq types.bool;
type = types.bool;
};
brokerId = mkOption {
@ -108,7 +108,7 @@ in {
"-Djava.awt.headless=true"
"-Djava.net.preferIPv4Stack=true"
];
type = types.listOf types.string;
type = types.listOf types.str;
example = [
"-Djava.net.preferIPv4Stack=true"
"-Dcom.sun.management.jmxremote"
@ -116,11 +116,19 @@ in {
];
};
package = mkOption {
description = "The kafka package to use";
default = pkgs.apacheKafka;
type = types.package;
};
};
config = mkIf cfg.enable {
environment.systemPackages = [pkgs.apacheKafka];
environment.systemPackages = [cfg.package];
users.extraUsers = singleton {
name = "apache-kafka";
@ -136,13 +144,14 @@ in {
serviceConfig = {
ExecStart = ''
${pkgs.jre}/bin/java \
-cp "${pkgs.apacheKafka}/libs/*:${configDir}" \
-cp "${cfg.package}/libs/*:${configDir}" \
${toString cfg.jvmOptions} \
kafka.Kafka \
${configDir}/server.properties
'';
User = "apache-kafka";
PermissionsStartOnly = true;
SuccessExitStatus = "0 143";
};
preStart = ''
mkdir -m 0700 -p ${concatStringsSep " " cfg.logDirs}

View file

@ -17,7 +17,7 @@ let
in {
options.services.confd = {
enable = mkEnableOption "Whether to enable confd service.";
enable = mkEnableOption "confd service";
backend = mkOption {
description = "Confd config storage backend to use.";

View file

@ -0,0 +1,28 @@
{ pkgs, config, lib, ... }:
with lib;
let
cfg = config.services.devmon;
in {
options = {
services.devmon = {
enable = mkOption {
default = false;
description = ''
Whether to enable devmon, an automatic device mounting daemon.
'';
};
};
};
config = mkIf cfg.enable {
systemd.services.devmon = {
description = "devmon automatic device mounting daemon";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.udevil ];
serviceConfig.ExecStart = "${pkgs.udevil}/bin/devmon";
};
};
}
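
Since the devmon module above only defines an enable flag and a systemd unit, using it is a single setting; a sketch:

    services.devmon.enable = true;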

View file

@ -67,7 +67,7 @@ in
###### implementation
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.disnix ] ++ optional cfg.useWebServiceInterface pkgs.DisnixWebService;
environment.systemPackages = [ pkgs.disnix pkgs.dysnomia ] ++ optional cfg.useWebServiceInterface pkgs.DisnixWebService;
services.dbus.enable = true;
services.dbus.packages = [ pkgs.disnix ];

View file

@ -29,7 +29,7 @@ in {
storagePath = mkOption {
type = types.path;
default = "/var/lib/docker/registry";
default = "/var/lib/docker-registry";
description = "Docker registry storage path.";
};
@ -61,14 +61,9 @@ in {
User = "docker-registry";
Group = "docker";
PermissionsStartOnly = true;
WorkingDirectory = cfg.storagePath;
};
preStart = ''
mkdir -p ${cfg.storagePath}
if [ "$(id -u)" = 0 ]; then
chown -R docker-registry:docker ${cfg.storagePath}
fi
'';
postStart = ''
until ${pkgs.curl}/bin/curl -s -o /dev/null 'http://${cfg.host}:${toString cfg.port}/'; do
sleep 1;
@ -77,6 +72,10 @@ in {
};
users.extraGroups.docker.gid = mkDefault config.ids.gids.docker;
users.extraUsers.docker-registry.uid = config.ids.uids.docker-registry;
users.extraUsers.docker-registry = {
createHome = true;
home = cfg.storagePath;
uid = config.ids.uids.docker-registry;
};
};
}

View file

@ -0,0 +1,659 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.gitit;
homeDir = "/var/lib/gitit";
gititShared = with cfg.haskellPackages; gitit + "/share/" + pkgs.stdenv.system + "-" + ghc.name + "/" + gitit.pname + "-" + gitit.version;
gititWithPkgs = hsPkgs: extras: hsPkgs.ghcWithPackages (self: with self; [ gitit ] ++ (extras self));
gititSh = hsPkgs: extras: with pkgs; let
env = gititWithPkgs hsPkgs extras;
in writeScript "gitit" ''
#!${stdenv.shell}
cd $HOME
export PATH="${makeSearchPath "bin" (
[ git curl ] ++ (if cfg.pdfExport == "yes" then [texLiveFull] else [])
)}:$PATH";
export NIX_GHC="${env}/bin/ghc"
export NIX_GHCPKG="${env}/bin/ghc-pkg"
export NIX_GHC_DOCDIR="${env}/share/doc/ghc/html"
export NIX_GHC_LIBDIR=$( $NIX_GHC --print-libdir )
${env}/bin/gitit -f ${configFile}
'';
gititOptions = let
yesNo = types.enum [ "yes" "no" ];
in {
enable = mkOption {
type = types.bool;
default = false;
description = "Enable the gitit service.";
};
haskellPackages = mkOption {
default = pkgs.haskellPackages;
defaultText = "pkgs.haskellPackages";
example = literalExample "pkgs.haskell.packages.ghc784";
description = "haskellPackages used to build gitit and plugins.";
};
extraPackages = mkOption {
default = self: [];
example = literalExample ''
haskellPackages: [
haskellPackages.wreq
]
'';
description = ''
Extra packages available to ghc when running gitit. The
value must be a function which receives the attrset defined
in <varname>haskellPackages</varname> as the sole argument.
'';
};
address = mkOption {
type = types.str;
default = "0.0.0.0";
description = "IP address on which the web server will listen.";
};
port = mkOption {
type = types.int;
default = 5001;
description = "Port on which the web server will run.";
};
wikiTitle = mkOption {
type = types.str;
default = "Gitit!";
description = "The wiki title.";
};
repositoryType = mkOption {
type = types.enum ["git" "darcs" "mercurial"];
default = "git";
description = "Specifies the type of repository used for wiki content.";
};
repositoryPath = mkOption {
type = types.path;
default = homeDir + "/wiki";
description = ''
Specifies the path of the repository directory. If it does not
exist, gitit will create it on startup.
'';
};
requireAuthentication = mkOption {
type = types.enum [ "none" "modify" "read" ];
default = "modify";
description = ''
If 'none', login is never required, and pages can be edited
anonymously. If 'modify', login is required to modify the wiki
(edit, add, delete pages, upload files). If 'read', login is
required to see any wiki pages.
'';
};
authenticationMethod = mkOption {
type = types.enum [ "form" "http" "generic"];
default = "form";
description = ''
'form' means that users will be logged in and registered using forms
in the gitit web interface. 'http' means that gitit will assume that
HTTP authentication is in place and take the logged in username from
the "Authorization" field of the HTTP request header (in addition,
the login/logout and registration links will be suppressed).
'generic' means that gitit will assume that some form of
authentication is in place that directly sets REMOTE_USER to the name
of the authenticated user (e.g. mod_auth_cas on apache). 'rpx' means
that gitit will attempt to log in through https://rpxnow.com. This
requires that 'rpx-domain', 'rpx-key', and 'base-url' be set below,
and that 'curl' be in the system path.
'';
};
userFile = mkOption {
type = types.path;
default = homeDir + "/gitit-users";
description = ''
Specifies the path of the file containing user login information. If
it does not exist, gitit will create it (with an empty user list).
This file is not used if 'http' is selected for
authentication-method.
'';
};
sessionTimeout = mkOption {
type = types.int;
default = 60;
description = ''
Number of minutes of inactivity before a session expires.
'';
};
staticDir = mkOption {
type = types.path;
default = gititShared + "/data/static";
description = ''
Specifies the path of the static directory (containing javascript,
css, and images). If it does not exist, gitit will create it and
populate it with required scripts, stylesheets, and images.
'';
};
defaultPageType = mkOption {
type = types.enum [ "markdown" "rst" "latex" "html" "markdown+lhs" "rst+lhs" "latex+lhs" ];
default = "markdown";
description = ''
Specifies the type of markup used to interpret pages in the wiki.
Possible values are markdown, rst, latex, html, markdown+lhs,
rst+lhs, and latex+lhs. (the +lhs variants treat the input as
literate Haskell. See pandoc's documentation for more details.) If
Markdown is selected, pandoc's syntax extensions (for footnotes,
delimited code blocks, etc.) will be enabled. Note that pandoc's
restructuredtext parser is not complete, so some pages may not be
rendered correctly if rst is selected. The same goes for latex and
html.
'';
};
math = mkOption {
type = types.enum [ "mathml" "raw" "mathjax" "jsmath" "google" ];
default = "mathml";
description = ''
Specifies how LaTeX math is to be displayed. Possible values are
mathml, raw, mathjax, jsmath, and google. If mathml is selected,
gitit will convert LaTeX math to MathML and link in a script,
MathMLinHTML.js, that allows the MathML to be seen in Gecko browsers,
IE + mathplayer, and Opera. In other browsers you may get a jumble of
characters. If raw is selected, the LaTeX math will be displayed as
raw LaTeX math. If mathjax is selected, gitit will link to the
remote mathjax script. If jsMath is selected, gitit will link to the
script /js/jsMath/easy/load.js, and will assume that jsMath has been
installed into the js/jsMath directory. This is the most portable
solution. If google is selected, the google chart API is called to
render the formula as an image. This requires a connection to google,
and might raise a technical or a privacy problem.
'';
};
mathJaxScript = mkOption {
type = types.str;
default = "https://d3eoax9i5htok0.cloudfront.net/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML";
description = ''
Specifies the path to MathJax rendering script. You might want to
use your own MathJax script to render formulas without Internet
connection or if you want to use some special LaTeX packages. Note:
path specified there cannot be an absolute path to a script on your
hdd, instead you should run your (local if you wish) HTTP server
which will serve the MathJax.js script. You can easily (in four lines
of code) serve MathJax.js using
http://happstack.com/docs/crashcourse/FileServing.html Do not forget
the "http://" prefix (e.g. http://localhost:1234/MathJax.js).
'';
};
showLhsBirdTracks = mkOption {
type = yesNo;
default = "no";
description = ''
Specifies whether to show Haskell code blocks in "bird style", with
"> " at the beginning of each line.
'';
};
templatesDir = mkOption {
type = types.path;
default = gititShared + "/data/templates";
description = ''
Specifies the path of the directory containing page templates. If it
does not exist, gitit will create it with default templates. Users
may wish to edit the templates to customize the appearance of their
wiki. The template files are HStringTemplate templates. Variables to
be interpolated appear between $\'s. Literal $\'s must be
backslash-escaped.
'';
};
logFile = mkOption {
type = types.path;
default = homeDir + "/gitit.log";
description = ''
Specifies the path of gitit's log file. If it does not exist, gitit
will create it. The log is in Apache combined log format.
'';
};
logLevel = mkOption {
type = types.enum [ "DEBUG" "INFO" "NOTICE" "WARNING" "ERROR" "CRITICAL" "ALERT" "EMERGENCY" ];
default = "ERROR";
description = ''
Determines how much information is logged. Possible values (from
most to least verbose) are DEBUG, INFO, NOTICE, WARNING, ERROR,
CRITICAL, ALERT, EMERGENCY.
'';
};
frontPage = mkOption {
type = types.str;
default = "Front Page";
description = ''
Specifies which wiki page is to be used as the wiki's front page.
Gitit creates a default front page on startup, if one does not exist
already.
'';
};
noDelete = mkOption {
type = types.str;
default = "Front Page, Help";
description = ''
Specifies pages that cannot be deleted through the web interface.
(They can still be deleted directly using git or darcs.) A
comma-separated list of page names. Leave blank to allow every page
to be deleted.
'';
};
noEdit = mkOption {
type = types.str;
default = "Help";
description = ''
Specifies pages that cannot be edited through the web interface.
Leave blank to allow every page to be edited.
'';
};
defaultSummary = mkOption {
type = types.str;
default = "";
description = ''
Specifies text to be used in the change description if the author
leaves the "description" field blank. If default-summary is blank
(the default), the author will be required to fill in the description
field.
'';
};
tableOfContents = mkOption {
type = yesNo;
default = "yes";
description = ''
Specifies whether to print a tables of contents (with links to
sections) on each wiki page.
'';
};
plugins = mkOption {
type = types.path;
default = gititShared + "/plugins/Dot.hs";
description = ''
Specifies a list of plugins to load. Plugins may be specified either
by their path or by their module name. If the plugin name starts
with Gitit.Plugin., gitit will assume that the plugin is an installed
module and will not try to find a source file.
Examples:
plugins: plugins/DotPlugin.hs, CapitalizeEmphasisPlugin.hs
plugins: plugins/DotPlugin
plugins: Gitit.Plugin.InterwikiLinks
'';
};
useCache = mkOption {
type = yesNo;
default = "no";
description = ''
Specifies whether to cache rendered pages. Note that if use-feed is
selected, feeds will be cached regardless of the value of use-cache.
'';
};
cacheDir = mkOption {
type = types.path;
default = homeDir + "/cache";
description = "Path where rendered pages will be cached.";
};
maxUploadSize = mkOption {
type = types.str;
default = "1000K";
description = ''
Specifies an upper limit on the size (in bytes) of files uploaded
through the wiki's web interface. To disable uploads, set this to
0K. This will result in the uploads link disappearing and the
_upload url becoming inactive.
'';
};
maxPageSize = mkOption {
type = types.str;
default = "1000K";
description = "Specifies an upper limit on the size (in bytes) of pages.";
};
debugMode = mkOption {
type = yesNo;
default = "no";
description = "Causes debug information to be logged while gitit is running.";
};
compressResponses = mkOption {
type = yesNo;
default = "yes";
description = "Specifies whether HTTP responses should be compressed.";
};
mimeTypesFile = mkOption {
type = types.path;
default = "/etc/mime/types.info";
description = ''
Specifies the path of a file containing mime type mappings. Each
line of the file should contain two fields, separated by whitespace.
The first field is the mime type, the second is a file extension.
For example:
video/x-ms-wmx wmx
If the file is not found, some simple defaults will be used.
'';
};
useReCaptcha = mkOption {
type = yesNo;
default = "no";
description = ''
If "yes", causes gitit to use the reCAPTCHA service
(http://recaptcha.net) to prevent bots from creating accounts.
'';
};
reCaptchaPrivateKey = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Specifies the private key for the reCAPTCHA service. To get
these, you need to create an account at http://recaptcha.net.
'';
};
reCaptchaPublicKey = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Specifies the public key for the reCAPTCHA service. To get
these, you need to create an account at http://recaptcha.net.
'';
};
accessQuestion = mkOption {
type = types.str;
default = "What is the code given to you by Ms. X?";
description = ''
Specifies a question that users must answer when they attempt to
create an account
'';
};
accessQuestionAnswers = mkOption {
type = types.str;
default = "RED DOG, red dog";
description = ''
Specifies a question that users must answer when they attempt to
create an account, along with a comma-separated list of acceptable
answers. This can be used to institute a rudimentary password for
signing up as a user on the wiki, or as an alternative to reCAPTCHA.
Example:
access-question: What is the code given to you by Ms. X?
access-question-answers: RED DOG, red dog
'';
};
rpxDomain = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Specifies the domain and key of your RPX account. The domain is just
the prefix of the complete RPX domain, so if your full domain is
'https://foo.rpxnow.com/', use 'foo' as the value of rpx-domain.
'';
};
rpxKey = mkOption {
type = with types; nullOr str;
default = null;
description = "RPX account access key.";
};
mailCommand = mkOption {
type = types.str;
default = "sendmail %s";
description = ''
Specifies the command to use to send notification emails. '%s' will
be replaced by the destination email address. The body of the
message will be read from stdin. If this field is left blank,
password reset will not be offered.
'';
};
resetPasswordMessage = mkOption {
type = types.lines;
default = ''
> From: gitit@$hostname$
> To: $useremail$
> Subject: Wiki password reset
>
> Hello $username$,
>
> To reset your password, please follow the link below:
> http://$hostname$:$port$$resetlink$
>
> Regards
'';
description = ''
Gives the text of the message that will be sent to the user should
she want to reset her password, or change other registration info.
The lines must be indented, and must begin with '>'. The initial
spaces and '> ' will be stripped off. $username$ will be replaced by
the user's username, $useremail$ by her email address, $hostname$ by
the hostname on which the wiki is running (as returned by the
hostname system call), $port$ by the port on which the wiki is
running, and $resetlink$ by the relative path of a reset link derived
from the user's existing hashed password. If your gitit wiki is being
proxied to a location other than the root path of $port$, you should
change the link to reflect this: for example, to
http://$hostname$/path/to/wiki$resetlink$ or
http://gitit.$hostname$$resetlink$
'';
};
useFeed = mkOption {
type = yesNo;
default = "no";
description = ''
Specifies whether an ATOM feed should be enabled (for the site and
for individual pages).
'';
};
baseUrl = mkOption {
type = with types; nullOr str;
default = null;
description = ''
The base URL of the wiki, to be used in constructing feed IDs and RPX
token_urls. Set this if use-feed is 'yes' or authentication-method
is 'rpx'.
'';
};
absoluteUrls = mkOption {
type = yesNo;
default = "no";
description = ''
Make wikilinks absolute with respect to the base-url. So, for
example, in a wiki served at the base URL '/wiki', on a page
Sub/Page, the wikilink '[Cactus]()' will produce a link to
'/wiki/Cactus' if absolute-urls is 'yes', and a relative link to
'Cactus' (referring to '/wiki/Sub/Cactus') if absolute-urls is 'no'.
'';
};
feedDays = mkOption {
type = types.int;
default = 14;
description = "Number of days to be included in feeds.";
};
feedRefreshTime = mkOption {
type = types.int;
default = 60;
description = "Number of minutes to cache feeds before refreshing.";
};
pdfExport = mkOption {
type = yesNo;
default = "no";
description = ''
If yes, PDF will appear in export options. PDF will be created using
pdflatex, which must be installed and in the path. Note that PDF
exports create significant additional server load.
'';
};
pandocUserData = mkOption {
type = with types; nullOr path;
default = null;
description = ''
If a directory is specified, this will be searched for pandoc
customizations. These can include a templates/ directory for custom
templates for various export formats, an S5 directory for custom S5
styles, and a reference.odt for ODT exports. If no directory is
specified, $HOME/.pandoc will be searched. See pandoc's README for
more information.
'';
};
xssSanitize = mkOption {
type = yesNo;
default = "yes";
description = ''
If yes, all HTML (including that produced by pandoc) is filtered
through xss-sanitize. Set to no only if you trust all of your users.
'';
};
};
configFile = pkgs.writeText "gitit.conf" ''
address: ${cfg.address}
port: ${toString cfg.port}
wiki-title: ${cfg.wikiTitle}
repository-type: ${cfg.repositoryType}
repository-path: ${cfg.repositoryPath}
require-authentication: ${cfg.requireAuthentication}
authentication-method: ${cfg.authenticationMethod}
user-file: ${cfg.userFile}
session-timeout: ${toString cfg.sessionTimeout}
static-dir: ${cfg.staticDir}
default-page-type: ${cfg.defaultPageType}
math: ${cfg.math}
mathjax-script: ${cfg.mathJaxScript}
show-lhs-bird-tracks: ${cfg.showLhsBirdTracks}
templates-dir: ${cfg.templatesDir}
log-file: ${cfg.logFile}
log-level: ${cfg.logLevel}
front-page: ${cfg.frontPage}
no-delete: ${cfg.noDelete}
no-edit: ${cfg.noEdit}
default-summary: ${cfg.defaultSummary}
table-of-contents: ${cfg.tableOfContents}
plugins: ${cfg.plugins}
use-cache: ${cfg.useCache}
cache-dir: ${cfg.cacheDir}
max-upload-size: ${cfg.maxUploadSize}
max-page-size: ${cfg.maxPageSize}
debug-mode: ${cfg.debugMode}
compress-responses: ${cfg.compressResponses}
mime-types-file: ${cfg.mimeTypesFile}
use-recaptcha: ${cfg.useReCaptcha}
recaptcha-private-key: ${toString cfg.reCaptchaPrivateKey}
recaptcha-public-key: ${toString cfg.reCaptchaPublicKey}
access-question: ${cfg.accessQuestion}
access-question-answers: ${cfg.accessQuestionAnswers}
rpx-domain: ${toString cfg.rpxDomain}
rpx-key: ${toString cfg.rpxKey}
mail-command: ${cfg.mailCommand}
reset-password-message: ${cfg.resetPasswordMessage}
use-feed: ${cfg.useFeed}
base-url: ${toString cfg.baseUrl}
absolute-urls: ${cfg.absoluteUrls}
feed-days: ${toString cfg.feedDays}
feed-refresh-time: ${toString cfg.feedRefreshTime}
pdf-export: ${cfg.pdfExport}
pandoc-user-data: ${toString cfg.pandocUserData}
xss-sanitize: ${cfg.xssSanitize}
'';
in
{
options.services.gitit = gititOptions;
config = mkIf cfg.enable {
users.extraUsers.gitit = {
group = config.users.extraGroups.gitit.name;
description = "Gitit user";
home = homeDir;
createHome = true;
uid = config.ids.uids.gitit;
};
users.extraGroups.gitit.gid = config.ids.gids.gitit;
systemd.services.gitit = let
uid = toString config.ids.uids.gitit;
gid = toString config.ids.gids.gitit;
in {
description = "Git and Pandoc Powered Wiki";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
preStart = with cfg; ''
chown ${uid}:${gid} -R ${homeDir}
for dir in ${repositoryPath} ${staticDir} ${templatesDir} ${cacheDir}
do
if [ ! -d $dir ]
then
mkdir -p $dir
find $dir -type d -exec chmod 0750 {} +
find $dir -type f -exec chmod 0640 {} +
fi
done
cd ${repositoryPath}
if [ ! -d .git ]
then
${pkgs.git}/bin/git init
${pkgs.git}/bin/git config user.email "gitit@${config.networking.hostName}"
${pkgs.git}/bin/git config user.name "gitit"
chown ${uid}:${gid} -R ${repositoryPath}
fi
cd -
'';
serviceConfig = {
User = config.users.extraUsers.gitit.name;
Group = config.users.extraGroups.gitit.name;
ExecStart = with cfg; gititSh haskellPackages extraPackages;
};
};
};
}
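
Most of the gitit defaults above are usable as shipped, so a working wiki only needs the service enabled; the overrides below are illustrative, not required (note that several options such as pdfExport take the strings "yes"/"no" rather than booleans). A minimal sketch:

    services.gitit = {
      enable = true;
      wikiTitle = "Example Wiki";   # illustrative; the module defaults to "Gitit!"
      port = 5001;                  # module default, shown for clarity
      pdfExport = "no";             # yes/no string; pulls in texLiveFull when "yes"
    };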

View file

@ -75,7 +75,7 @@ in
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.gitolite pkgs.git pkgs.perl pkgs.bash pkgs.openssh ];
path = [ pkgs.gitolite pkgs.git pkgs.perl pkgs.bash config.programs.ssh.package ];
script = ''
cd ${cfg.dataDir}
mkdir -p .gitolite/logs

View file

@ -54,7 +54,7 @@ in
};
port = mkOption {
type = types.uniq types.int;
type = types.int;
default = 2947;
description = ''
The port where to listen for TCP connections.
@ -62,7 +62,7 @@ in
};
debugLevel = mkOption {
type = types.uniq types.int;
type = types.int;
default = 0;
description = ''
The debugging level.

View file

@ -49,10 +49,10 @@ let
</server>
<import hidden-files="no">
<scripting script-charset="UTF-8">
<common-script>/nix/store/cngbzn39vidd6jm4wgzxfafqll74ybfa-mediatomb-0.12.1/share/mediatomb/js/common.js</common-script>
<playlist-script>/nix/store/cngbzn39vidd6jm4wgzxfafqll74ybfa-mediatomb-0.12.1/share/mediatomb/js/playlists.js</playlist-script>
<common-script>${pkgs.mediatomb}/share/mediatomb/js/common.js</common-script>
<playlist-script>${pkgs.mediatomb}/share/mediatomb/js/playlists.js</playlist-script>
<virtual-layout type="builtin">
<import-script>/nix/store/cngbzn39vidd6jm4wgzxfafqll74ybfa-mediatomb-0.12.1/share/mediatomb/js/import.js</import-script>
<import-script>${pkgs.mediatomb}/share/mediatomb/js/import.js</import-script>
</virtual-layout>
</scripting>
<mappings>
@ -230,6 +230,13 @@ in {
'';
};
interface = mkOption {
default = "";
description = ''
A specific interface to bind to.
'';
};
uuid = mkOption {
default = "fdfc8a4e-a3ad-4c1d-b43d-a2eedb03a687";
description = ''
@ -256,7 +263,7 @@ in {
after = [ "local-fs.target" "network.target" ];
wantedBy = [ "multi-user.target" ];
path = [ pkgs.mediatomb ];
serviceConfig.ExecStart = "${pkgs.mediatomb}/bin/mediatomb -p ${toString cfg.port} ${if cfg.customCfg then "" else "-c ${mtConf}"} -m ${cfg.dataDir}";
serviceConfig.ExecStart = "${pkgs.mediatomb}/bin/mediatomb -p ${toString cfg.port} ${if cfg.interface!="" then "-e ${cfg.interface}" else ""} ${if cfg.customCfg then "" else "-c ${mtConf}"} -m ${cfg.dataDir}";
serviceConfig.User = "${cfg.user}";
};

View file

@ -13,7 +13,7 @@ in {
enable = mkOption {
description = "Whether to enable the Mesos Master.";
default = false;
type = types.uniq types.bool;
type = types.bool;
};
port = mkOption {
@ -45,7 +45,7 @@ in {
See https://mesos.apache.org/documentation/latest/configuration/
'';
default = [ "" ];
type = types.listOf types.string;
type = types.listOf types.str;
example = [ "--credentials=VALUE" ];
};

View file

@ -12,6 +12,8 @@ let
attribsArg = optionalString (cfg.attributes != {})
"--attributes=${mkAttributes cfg.attributes}";
containerizers = [ "mesos" ] ++ (optional cfg.withDocker "docker");
in {
options.services.mesos = {
@ -19,11 +21,17 @@ in {
enable = mkOption {
description = "Whether to enable the Mesos Slave.";
default = false;
type = types.uniq types.bool;
type = types.bool;
};
ip = mkOption {
description = "IP address to listen on.";
default = "0.0.0.0";
type = types.string;
};
port = mkOption {
description = "Mesos Slave port";
description = "Port to listen on.";
default = 5051;
type = types.int;
};
@ -43,6 +51,12 @@ in {
type = types.bool;
};
withDocker = mkOption {
description = "Enable the docker containerizer.";
default = config.virtualisation.docker.enable;
type = types.bool;
};
workDir = mkOption {
description = "The Mesos work directory.";
default = "/var/lib/mesos/slave";
@ -56,7 +70,7 @@ in {
See https://mesos.apache.org/documentation/latest/configuration/
'';
default = [ "" ];
type = types.listOf types.string;
type = types.listOf types.str;
example = [ "--gc_delay=3days" ];
};
@ -92,17 +106,18 @@ in {
description = "Mesos Slave";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" ];
environment.MESOS_CONTAINERIZERS = "docker,mesos";
environment.MESOS_CONTAINERIZERS = concatStringsSep "," containerizers;
serviceConfig = {
ExecStart = ''
${pkgs.mesos}/bin/mesos-slave \
--ip=${cfg.ip} \
--port=${toString cfg.port} \
--master=${cfg.master} \
${optionalString cfg.withHadoop "--hadoop-home=${pkgs.hadoop}"} \
${attribsArg} \
--work_dir=${cfg.workDir} \
--logging_level=${cfg.logLevel} \
--docker=${pkgs.docker}/libexec/docker/docker \
${attribsArg} \
${optionalString cfg.withHadoop "--hadoop-home=${pkgs.hadoop}"} \
${optionalString cfg.withDocker "--docker=${pkgs.docker}/libexec/docker/docker"} \
${toString cfg.extraCmdLineOptions}
'';
PermissionsStartOnly = true;

View file

@ -0,0 +1,259 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.mwlib;
pypkgs = pkgs.python27Packages;
inherit (pypkgs) python mwlib;
user = mkOption {
default = "nobody";
type = types.str;
description = "User to run as.";
};
in
{
options.services.mwlib = {
nserve = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
Whether to enable nserve. Nserve is an HTTP
server; the Collection extension talks to
that program directly. Nserve uses at least
one qserve instance to distribute
and manage jobs.
'';
}; # nserve.enable
port = mkOption {
default = 8899;
type = types.int;
description = "Specify port to listen on.";
}; # nserve.port
address = mkOption {
default = "127.0.0.1";
type = types.str;
description = "Specify network interface to listen on.";
}; # nserve.address
qserve = mkOption {
default = [ "${cfg.qserve.address}:${toString cfg.qserve.port}" ];
type = types.listOf types.str;
description = "Register qserve instance.";
}; # nserve.qserve
inherit user;
}; # nserve
qserve = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
A job queue server used to distribute and manage
jobs. You should start one qserve instance
for each machine that is supposed to render pdf
files. Unless youre operating the Wikipedia
installation, one machine should suffice.
'';
}; # qserve.enable
port = mkOption {
default = 14311;
type = types.int;
description = "Specify port to listen on.";
}; # qserve.port
address = mkOption {
default = "127.0.0.1";
type = types.str;
description = "Specify network interface to listen on.";
}; # qserve.address
datadir = mkOption {
default = "/var/lib/mwlib-qserve";
type = types.path;
description = "qserve data directory (FIXME: unused?)";
}; # qserve.datadir
allow = mkOption {
default = [ "127.0.0.1" ];
type = types.listOf types.str;
description = "List of allowed client IPs. Empty means any.";
}; # qserve.allow
inherit user;
}; # qserve
nslave = {
enable = mkOption {
default = cfg.qserve.enable;
type = types.bool;
description = ''
Pulls new jobs from exactly one qserve instance
and calls the zip and render programs
in order to download article collections and
convert them to different output formats. Nslave
uses a cache directory to store the generated
documents. Nslave also starts an internal http
server serving the content of the cache directory.
'';
}; # nslave.enable
cachedir = mkOption {
default = "/var/cache/mwlib-nslave";
type = types.path;
description = "Directory to store generated documents.";
}; # nslave.cachedir
numprocs = mkOption {
default = 10;
type = types.int;
description = "Number of parallel jobs to be executed.";
}; # nslave.numprocs
http = mkOption {
default = {};
description = ''
Internal http server serving the content of the cache directory.
You have to enable it, or serve the files yourself
and set the http.url option accordingly.
'';
type = types.submodule ({
options = {
enable = mkOption {
default = true;
type = types.bool;
description = "Enable internal http server.";
}; # nslave.http.enable
port = mkOption {
default = 8898;
type = types.int;
description = "Port to listen to when serving files from cache.";
}; # nslave.http.port
address = mkOption {
default = "127.0.0.1";
type = types.str;
description = "Specify network interface to listen on.";
}; # nslave.http.address
url = mkOption {
default = "http://localhost:${toString cfg.nslave.http.port}/cache";
type = types.str;
description = ''
Specify URL for accessing generated files from cache.
The Collection extension of Mediawiki won't be able to
download files without it.
'';
}; # nslave.http.url
};
}); # types.submodule
}; # nslave.http
inherit user;
}; # nslave
}; # options.services
config = {
systemd.services.mwlib-nserve = mkIf cfg.nserve.enable
{
description = "mwlib network interface";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "mwlib-qserve.service" ];
serviceConfig = {
ExecStart = concatStringsSep " " (
[
"${mwlib}/bin/nserve"
"--port ${toString cfg.nserve.port}"
"--interface ${cfg.nserve.address}"
] ++ cfg.nserve.qserve
);
User = cfg.nserve.user;
};
}; # systemd.services.mwlib-nserve
systemd.services.mwlib-qserve = mkIf cfg.qserve.enable
{
description = "mwlib job queue server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "local-fs.target" ];
preStart = ''
mkdir -pv '${cfg.qserve.datadir}'
chown -Rc ${cfg.qserve.user}:`id -ng ${cfg.qserve.user}` '${cfg.qserve.datadir}'
chmod -Rc u=rwX,go= '${cfg.qserve.datadir}'
'';
serviceConfig = {
ExecStart = concatStringsSep " " (
[
"${mwlib}/bin/mw-qserve"
"-p ${toString cfg.qserve.port}"
"-i ${cfg.qserve.address}"
"-d ${cfg.qserve.datadir}"
] ++ map (a: "-a ${a}") cfg.qserve.allow
);
User = cfg.qserve.user;
PermissionsStartOnly = true;
};
}; # systemd.services.mwlib-qserve
systemd.services.mwlib-nslave = mkIf cfg.nslave.enable
{
description = "mwlib worker";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "local-fs.target" ];
preStart = ''
mkdir -pv '${cfg.nslave.cachedir}'
chown -Rc ${cfg.nslave.user}:`id -ng ${cfg.nslave.user}` '${cfg.nslave.cachedir}'
chmod -Rc u=rwX,go= '${cfg.nslave.cachedir}'
'';
path = with pkgs; [ imagemagick pdftk ];
environment = {
PYTHONPATH = concatMapStringsSep ":"
(m: "${pypkgs.${m}}/lib/${python.libPrefix}/site-packages")
[ "mwlib-rl" "mwlib-ext" "pygments" "pyfribidi" ];
};
serviceConfig = {
ExecStart = concatStringsSep " " (
[
"${mwlib}/bin/nslave"
"--cachedir ${cfg.nslave.cachedir}"
"--numprocs ${toString cfg.nslave.numprocs}"
"--url ${cfg.nslave.http.url}"
] ++ (
if cfg.nslave.http.enable then
[
"--serve-files-port ${toString cfg.nslave.http.port}"
"--serve-files-address ${cfg.nslave.http.address}"
] else
[
"--no-serve-files"
]
));
User = cfg.nslave.user;
PermissionsStartOnly = true;
};
}; # systemd.services.mwlib-nslave
}; # config
}
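
A single-host mwlib deployment just enables nserve and qserve; nslave defaults to following qserve.enable, and its built-in HTTP server then serves the cache on port 8898. A sketch using only the options defined above:

    services.mwlib = {
      nserve.enable = true;
      qserve.enable = true;
      # nslave.enable defaults to the qserve setting, so the worker and its
      # internal cache HTTP server come up automatically.
    };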

View file

@ -47,6 +47,8 @@ let
${optionalString cfg.requireSignedBinaryCaches ''
signed-binary-caches = *
''}
trusted-users = ${toString cfg.trustedUsers}
allowed-users = ${toString cfg.allowedUsers}
$extraOptions
END
'';
@ -141,7 +143,7 @@ in
default = 0;
description = ''
Nix daemon process priority. This priority propagates to build processes.
0 is the default Unix process priority, 20 is the lowest.
0 is the default Unix process priority, 19 is the lowest.
'';
};
@ -277,6 +279,36 @@ in
'';
};
trustedUsers = mkOption {
type = types.listOf types.str;
default = [ "root" ];
example = [ "root" "alice" "@wheel" ];
description = ''
A list of names of users that have additional rights when
connecting to the Nix daemon, such as the ability to specify
additional binary caches, or to import unsigned NARs. You
can also specify groups by prefixing them with
<literal>@</literal>; for instance,
<literal>@wheel</literal> means all users in the wheel
group.
'';
};
allowedUsers = mkOption {
type = types.listOf types.str;
default = [ "*" ];
example = [ "@wheel" "@builders" "alice" "bob" ];
description = ''
A list of names of users (separated by whitespace) that are
allowed to connect to the Nix daemon. As with
<option>nix.trustedUsers</option>, you can specify groups by
prefixing them with <literal>@</literal>. Also, you can
allow all users by specifying <literal>*</literal>. The
default is <literal>*</literal>. Note that trusted users are
always allowed to connect.
'';
};
};
};
@ -296,14 +328,14 @@ in
{ enable = cfg.buildMachines != [];
text =
concatMapStrings (machine:
"${machine.sshUser}@${machine.hostName} "
+ (if machine ? system then machine.system else concatStringsSep "," machine.systems)
+ " ${machine.sshKey} ${toString machine.maxJobs} "
+ (if machine ? speedFactor then toString machine.speedFactor else "1" )
"${if machine ? sshUser then "${machine.sshUser}@" else ""}${machine.hostName} "
+ machine.system or (concatStringsSep "," machine.systems)
+ " ${machine.sshKey or "-"} ${toString machine.maxJobs or 1} "
+ toString (machine.speedFactor or 1)
+ " "
+ (if machine ? supportedFeatures then concatStringsSep "," machine.supportedFeatures else "" )
+ concatStringsSep "," (machine.mandatoryFeatures or [] ++ machine.supportedFeatures or [])
+ " "
+ (if machine ? mandatoryFeatures then concatStringsSep "," machine.mandatoryFeatures else "" )
+ concatStringsSep "," machine.mandatoryFeatures or []
+ "\n"
) cfg.buildMachines;
};
@ -313,7 +345,7 @@ in
systemd.sockets.nix-daemon.wantedBy = [ "sockets.target" ];
systemd.services.nix-daemon =
{ path = [ nix pkgs.openssl pkgs.utillinux pkgs.openssh ]
{ path = [ nix pkgs.openssl pkgs.utillinux config.programs.ssh.package ]
++ optionals cfg.distributedBuilds [ pkgs.gzip ];
environment = cfg.envVars
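
The new trustedUsers and allowedUsers options end up as trusted-users and allowed-users lines in the generated nix.conf. A sketch restricting daemon access to the wheel group (the group names are examples, matching the option examples above):

    nix.trustedUsers = [ "root" "@wheel" ];
    nix.allowedUsers = [ "@wheel" ];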

View file

@ -9,7 +9,7 @@ in
{
options = {
services.plex = {
enable = mkEnableOption "Enable Plex Media Server";
enable = mkEnableOption "Plex Media Server";
# FIXME: In order for this config option to work, symlinks in the Plex
# package in the Nix store have to be changed to point to this directory.

View file

@ -154,7 +154,7 @@ in {
environment.HOME = "${pkgs.redmine}/share/redmine";
environment.REDMINE_LANG = "en";
environment.GEM_HOME = "${pkgs.redmine}/share/redmine/vendor/bundle/ruby/1.9.1";
environment.GEM_PATH = "${bundler}/${bundler.ruby.gemPath}";
environment.GEM_PATH = "${pkgs.bundler}/${pkgs.bundler.ruby.gemPath}";
path = with pkgs; [
imagemagickBig
subversion

View file

@ -35,7 +35,7 @@ let
in {
options = {
services.rippleDataApi = {
enable = mkEnableOption "Whether to enable ripple data api.";
enable = mkEnableOption "ripple data api";
port = mkOption {
description = "Ripple data api port";

Some files were not shown because too many files have changed in this diff.