Merge branch 'master' into kube-apiserver-preferred-address-types

This commit is contained in:
Robin Gloster 2019-04-17 16:40:06 +00:00 committed by GitHub
commit b278cd86e1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2652 changed files with 86427 additions and 51769 deletions

10
.github/CODEOWNERS vendored
View File

@ -58,11 +58,11 @@
/doc/languages-frameworks/python.section.md @FRidh /doc/languages-frameworks/python.section.md @FRidh
# Haskell # Haskell
/pkgs/development/compilers/ghc @peti @basvandijk /pkgs/development/compilers/ghc @basvandijk
/pkgs/development/haskell-modules @peti @basvandijk /pkgs/development/haskell-modules @basvandijk
/pkgs/development/haskell-modules/default.nix @peti @basvandijk /pkgs/development/haskell-modules/default.nix @basvandijk
/pkgs/development/haskell-modules/generic-builder.nix @peti @basvandijk /pkgs/development/haskell-modules/generic-builder.nix @basvandijk
/pkgs/development/haskell-modules/hoogle.nix @peti @basvandijk /pkgs/development/haskell-modules/hoogle.nix @basvandijk
# Perl # Perl
/pkgs/development/interpreters/perl @volth /pkgs/development/interpreters/perl @volth

View File

@ -1,3 +1,4 @@
<!-- Nixpkgs has a lot of new incoming Pull Requests, but not enough people to review this constant stream. Even if you aren't a committer, we would appreciate reviews of other PRs, especially simple ones like package updates. Just testing the relevant package/service and leaving a comment saying what you tested, how you tested it and whether it worked would be great. List of open PRs: <https://github.com/NixOS/nixpkgs/pulls>, for more about reviewing contributions: <https://hydra.nixos.org/job/nixpkgs/trunk/manual/latest/download/1/nixpkgs/manual.html#sec-reviewing-contributions>. Reviewing isn't mandatory, but it would help out a lot and reduce the average time-to-merge for all of us. Thanks a lot if you do! -->
###### Motivation for this change ###### Motivation for this change
@ -11,11 +12,10 @@
- [ ] macOS - [ ] macOS
- [ ] other Linux distributions - [ ] other Linux distributions
- [ ] Tested via one or more NixOS test(s) if existing and applicable for the change (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests)) - [ ] Tested via one or more NixOS test(s) if existing and applicable for the change (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
- [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nox --run "nox-review wip"` - [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nix-review --run "nix-review wip"`
- [ ] Tested execution of all binary files (usually in `./result/bin/`) - [ ] Tested execution of all binary files (usually in `./result/bin/`)
- [ ] Determined the impact on package closure size (by running `nix path-info -S` before and after) - [ ] Determined the impact on package closure size (by running `nix path-info -S` before and after)
- [ ] Assured whether relevant documentation is up to date - [ ] Assured whether relevant documentation is up to date
- [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md). - [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md).
--- ---

View File

@ -1,4 +1,4 @@
Copyright (c) 2003-2018 Eelco Dolstra and the Nixpkgs/NixOS contributors Copyright (c) 2003-2019 Eelco Dolstra and the Nixpkgs/NixOS contributors
Permission is hereby granted, free of charge, to any person obtaining Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the a copy of this software and associated documentation files (the

View File

@ -78,15 +78,14 @@ manual-full.xml: ${MD_TARGETS} .version functions/library/locations.xml function
nix-instantiate --eval \ nix-instantiate --eval \
-E '(import ../lib).version' > .version -E '(import ../lib).version' > .version
function_locations := $(shell nix-build --no-out-link ./lib-function-locations.nix)
functions/library/locations.xml: functions/library/locations.xml:
ln -s $(function_locations) ./functions/library/locations.xml nix-build ./lib-function-locations.nix \
--out-link $@
functions/library/generated: functions/library/generated: functions/library/locations.xml
nix-build ./lib-function-docs.nix \ nix-build ./lib-function-docs.nix \
--arg locationsXml $(function_locations)\ --arg locationsXml $< \
--out-link ./functions/library/generated --out-link $@
%.section.xml: %.section.md %.section.xml: %.section.md
pandoc $^ -w docbook+smart \ pandoc $^ -w docbook+smart \

View File

@ -12,11 +12,12 @@
computing power and memory to compile their own programs. One might think computing power and memory to compile their own programs. One might think
that cross-compilation is a fairly niche concern. However, there are that cross-compilation is a fairly niche concern. However, there are
significant advantages to rigorously distinguishing between build-time and significant advantages to rigorously distinguishing between build-time and
run-time environments! This applies even when one is developing and run-time environments! Significant, because the benefits apply even when one
deploying on the same machine. Nixpkgs is increasingly adopting the opinion is developing and deploying on the same machine. Nixpkgs is increasingly
that packages should be written with cross-compilation in mind, and nixpkgs adopting the opinion that packages should be written with cross-compilation
should evaluate in a similar way (by minimizing cross-compilation-specific in mind, and nixpkgs should evaluate in a similar way (by minimizing
special cases) whether or not one is cross-compiling. cross-compilation-specific special cases) whether or not one is
cross-compiling.
</para> </para>
<para> <para>
@ -30,7 +31,7 @@
<section xml:id="sec-cross-packaging"> <section xml:id="sec-cross-packaging">
<title>Packaging in a cross-friendly manner</title> <title>Packaging in a cross-friendly manner</title>
<section xml:id="sec-cross-platform-parameters"> <section xml:id="ssec-cross-platform-parameters">
<title>Platform parameters</title> <title>Platform parameters</title>
<para> <para>
@ -218,8 +219,20 @@
</variablelist> </variablelist>
</section> </section>
<section xml:id="sec-cross-specifying-dependencies"> <section xml:id="ssec-cross-dependency-categorization">
<title>Specifying Dependencies</title> <title>Theory of dependency categorization</title>
<note>
<para>
This is a rather philosophical description that isn't very
Nixpkgs-specific. For an overview of all the relevant attributes given to
<varname>mkDerivation</varname>, see
<xref
linkend="ssec-stdenv-dependencies"/>. For a description of how
everything is implemented, see
<xref linkend="ssec-cross-dependency-implementation" />.
</para>
</note>
<para> <para>
In this section we explore the relationship between both runtime and In this section we explore the relationship between both runtime and
@ -227,84 +240,98 @@
</para> </para>
<para> <para>
A runtime dependency between 2 packages implies that between them both the A run time dependency between two packages requires that their host
host and target platforms match. This is directly implied by the meaning of platforms match. This is directly implied by the meaning of "host platform"
"host platform" and "runtime dependency": The package dependency exists and "runtime dependency": The package dependency exists while both packages
while both packages are running on a single host platform. are running on a single host platform.
</para> </para>
<para> <para>
A build time dependency, however, implies a shift in platforms between the A build time dependency, however, has a shift in platforms between the
depending package and the depended-on package. The meaning of a build time depending package and the depended-on package. "build time dependency"
dependency is that to build the depending package we need to be able to run means that to build the depending package we need to be able to run the
the depended-on's package. The depending package's build platform is depended-on's package. The depending package's build platform is therefore
therefore equal to the depended-on package's host platform. Analogously, equal to the depended-on package's host platform.
the depending package's host platform is equal to the depended-on package's
target platform.
</para> </para>
<para> <para>
In this manner, given the 3 platforms for one package, we can determine the If both the dependency and depending packages aren't compilers or other
three platforms for all its transitive dependencies. This is the most machine-code-producing tools, we're done. And indeed
important guiding principle behind cross-compilation with Nixpkgs, and will <varname>buildInputs</varname> and <varname>nativeBuildInputs</varname>
be called the <wordasword>sliding window principle</wordasword>. have covered these simpler build-time and run-time (respectively) changes
for many years. But if the dependency does produce machine code, we might
need to worry about its target platform too. In principle, that target
platform might be any of the depending package's build, host, or target
platforms, but we prohibit dependencies from a "later" platform to an
earlier platform to limit confusion because we've never seen a legitimate
use for them.
</para> </para>
<para> <para>
Some examples will make this clearer. If a package is being built with a Finally, if the depending package is a compiler or other
<literal>(build, host, target)</literal> platform triple of <literal>(foo, machine-code-producing tool, it might need dependencies that run at "emit
bar, bar)</literal>, then its build-time dependencies would have a triple time". This is for compilers that (regrettably) insist on being built
of <literal>(foo, foo, bar)</literal>, and <emphasis>those together with their source languages' standard libraries. Assuming build !=
packages'</emphasis> build-time dependencies would have a triple of host != target, a run-time dependency of the standard library cannot be run
<literal>(foo, foo, foo)</literal>. In other words, it should take two at the compiler's build time or run time, but only at the run time of code
"rounds" of following build-time dependency edges before one reaches a emitted by the compiler.
fixed point where, by the sliding window principle, the platform triple no
longer changes. Indeed, this happens with cross-compilation, where only
rounds of native dependencies starting with the second necessarily coincide
with native packages.
</para> </para>
<note>
<para>
The depending package's target platform is unconstrained by the sliding
window principle, which makes sense in that one can in principle build
cross compilers targeting arbitrary platforms.
</para>
</note>
<para> <para>
How does this work in practice? Nixpkgs is now structured so that Putting this all together, that means we have dependencies in the form
build-time dependencies are taken from <varname>buildPackages</varname>, "host → target", in at most the following six combinations:
whereas run-time dependencies are taken from the top level attribute set. <table>
For example, <varname>buildPackages.gcc</varname> should be used at <caption>Possible dependency types</caption>
build-time, while <varname>gcc</varname> should be used at run-time. Now, <thead>
for most of Nixpkgs's history, there was no <tr>
<varname>buildPackages</varname>, and most packages have not been <th>Dependency's host platform</th>
refactored to use it explicitly. Instead, one can use the six <th>Dependency's target platform</th>
(<emphasis>gasp</emphasis>) attributes used for specifying dependencies as </tr>
documented in <xref linkend="ssec-stdenv-dependencies"/>. We "splice" </thead>
together the run-time and build-time package sets with <tbody>
<varname>callPackage</varname>, and then <varname>mkDerivation</varname> <tr>
for each of four attributes pulls the right derivation out. This splicing <td>build</td>
can be skipped when not cross-compiling as the package sets are the same, <td>build</td>
but is a bit slow for cross-compiling. Because of this, a </tr>
best-of-both-worlds solution is in the works with no splicing or explicit <tr>
access of <varname>buildPackages</varname> needed. For now, feel free to <td>build</td>
use either method. <td>host</td>
</tr>
<tr>
<td>build</td>
<td>target</td>
</tr>
<tr>
<td>host</td>
<td>host</td>
</tr>
<tr>
<td>host</td>
<td>target</td>
</tr>
<tr>
<td>target</td>
<td>target</td>
</tr>
</tbody>
</table>
</para> </para>
<note> <para>
<para> Some examples will make this table clearer. Suppose there's some package
There is also a "backlink" <varname>targetPackages</varname>, yielding a that is being built with a <literal>(build, host, target)</literal>
package set whose <varname>buildPackages</varname> is the current package platform triple of <literal>(foo, bar, baz)</literal>. If it has a
set. This is a hack, though, to accommodate compilers with lousy build build-time library dependency, that would be a "host → build" dependency
systems. Please do not use this unless you are absolutely sure you are with a triple of <literal>(foo, foo, *)</literal> (the target platform is
packaging such a compiler and there is no other way. irrelevant). If it needs a compiler to be built, that would be a "build →
</para> host" dependency with a triple of <literal>(foo, foo, *)</literal> (the
</note> target platform is irrelevant). That compiler, would be built with another
compiler, also "build → host" dependency, with a triple of <literal>(foo,
foo, foo)</literal>.
</para>
</section> </section>
<section xml:id="sec-cross-cookbook"> <section xml:id="ssec-cross-cookbook">
<title>Cross packaging cookbook</title> <title>Cross packaging cookbook</title>
<para> <para>
@ -450,21 +477,202 @@ nix-build &lt;nixpkgs&gt; --arg crossSystem '{ config = "&lt;arch&gt;-&lt;os&gt;
<section xml:id="sec-cross-infra"> <section xml:id="sec-cross-infra">
<title>Cross-compilation infrastructure</title> <title>Cross-compilation infrastructure</title>
<para> <section xml:id="ssec-cross-dependency-implementation">
To be written. <title>Implementation of dependencies</title>
</para>
<note>
<para> <para>
If one explores Nixpkgs, they will see derivations with names like The categorizes of dependencies developed in
<literal>gccCross</literal>. Such <literal>*Cross</literal> derivations is <xref
a holdover from before we properly distinguished between the host and linkend="ssec-cross-dependency-categorization"/> are specified as
target platforms—the derivation with "Cross" in the name covered the lists of derivations given to <varname>mkDerivation</varname>, as
<literal>build = host != target</literal> case, while the other covered the documented in <xref linkend="ssec-stdenv-dependencies"/>. In short,
<literal>host = target</literal>, with build platform the same or not based each list of dependencies for "host → target" of "foo → bar" is called
on whether one was using its <literal>.nativeDrv</literal> or <varname>depsFooBar</varname>, with exceptions for backwards
<literal>.crossDrv</literal>. This ugliness will disappear soon. compatibility that <varname>depsBuildHost</varname> is instead called
<varname>nativeBuildInputs</varname> and <varname>depsHostTarget</varname>
is instead called <varname>buildInputs</varname>. Nixpkgs is now structured
so that each <varname>depsFooBar</varname> is automatically taken from
<varname>pkgsFooBar</varname>. (These <varname>pkgsFooBar</varname>s are
quite new, so there is no special case for
<varname>nativeBuildInputs</varname> and <varname>buildInputs</varname>.)
For example, <varname>pkgsBuildHost.gcc</varname> should be used at
build-time, while <varname>pkgsHostTarget.gcc</varname> should be used at
run-time.
</para> </para>
</note>
<para>
Now, for most of Nixpkgs's history, there were no
<varname>pkgsFooBar</varname> attributes, and most packages have not been
refactored to use it explicitly. Prior to those, there were just
<varname>buildPackages</varname>, <varname>pkgs</varname>, and
<varname>targetPackages</varname>. Those are now redefined as aliases to
<varname>pkgsBuildHost</varname>, <varname>pkgsHostTarget</varname>, and
<varname>pkgsTargetTarget</varname>. It is acceptable, even
recommended, to use them for libraries to show that the host platform is
irrelevant.
</para>
<para>
But before that, there was just <varname>pkgs</varname>, even though both
<varname>buildInputs</varname> and <varname>nativeBuildInputs</varname>
existed. [Cross barely worked, and those were implemented with some hacks
on <varname>mkDerivation</varname> to override dependencies.] What this
means is the vast majority of packages do not use any explicit package set
to populate their dependencies, just using whatever
<varname>callPackage</varname> gives them even if they do correctly sort
their dependencies into the multiple lists described above. And indeed,
asking that users both sort their dependencies, <emphasis>and</emphasis>
take them from the right attribute set, is both too onerous and redundant,
so the recommended approach (for now) is to continue just categorizing by
list and not using an explicit package set.
</para>
<para>
To make this work, we "splice" together the six
<varname>pkgsFooBar</varname> package sets and have
<varname>callPackage</varname> actually take its arguments from that. This
is currently implemented in <filename>pkgs/top-level/splice.nix</filename>.
<varname>mkDerivation</varname> then, for each dependency attribute, pulls
the right derivation out from the splice. This splicing can be skipped when
not cross-compiling as the package sets are the same, but still is a bit
slow for cross-compiling. We'd like to do something better, but haven't
come up with anything yet.
</para>
</section>
<section xml:id="ssec-bootstrapping">
<title>Bootstrapping</title>
<para>
Each of the package sets described above comes from a single bootstrapping
stage. While <filename>pkgs/top-level/default.nix</filename> coordinates
the composition of stages at a high level,
<filename>pkgs/top-level/stage.nix</filename> "ties the knot" (creates the
fixed point) of each stage. The package sets are defined per-stage however,
so they can be thought of as edges between stages (the nodes) in a graph.
Compositions like <literal>pkgsBuildTarget.targetPackages</literal> can be
thought of as paths to this graph.
</para>
<para>
While there are many package sets, and thus many edges, the stages can also
be arranged in a linear chain. In other words, many of the edges are
redundant as far as connectivity is concerned. This hinges on the type of
bootstrapping we do. Currently for cross it is:
<orderedlist>
<listitem>
<para>
<literal>(native, native, native)</literal>
</para>
</listitem>
<listitem>
<para>
<literal>(native, native, foreign)</literal>
</para>
</listitem>
<listitem>
<para>
<literal>(native, foreign, foreign)</literal>
</para>
</listitem>
</orderedlist>
In each stage, <varname>pkgsBuildHost</varname> refers to the previous
stage, <varname>pkgsBuildBuild</varname> refers to the one before that, and
<varname>pkgsHostTarget</varname> refers to the current one, and
<varname>pkgsTargetTarget</varname> refers to the next one. When there is
no previous or next stage, they instead refer to the current stage. Note
how all the invariants regarding the mapping between dependency and depending
packages' build host and target platforms are preserved.
<varname>pkgsBuildTarget</varname> and <varname>pkgsHostHost</varname> are
more complex in that the stage fitting the requirements isn't always a
fixed chain of "prevs" and "nexts" away (modulo the "saturating"
self-references at the ends). We just special case each instead. All the primary
edges are implemented in <filename>pkgs/stdenv/booter.nix</filename>,
and secondarily aliases in <filename>pkgs/top-level/stage.nix</filename>.
</para>
<note>
<para>
Note the native stages are bootstrapped in legacy ways that predate the
current cross implementation. This is why the bootstrapping stages
leading up to the final stages are ignored in the previous paragraph.
</para>
</note>
<para>
If one looks at the 3 platform triples, one can see that they overlap such
that one could put them together into a chain like:
<programlisting>
(native, native, native, foreign, foreign)
</programlisting>
If one imagines the saturating self references at the end being replaced
with infinite stages, and then overlays those platform triples, one ends up
with the infinite tuple:
<programlisting>
(native..., native, native, native, foreign, foreign, foreign...)
</programlisting>
One can then imagine any sequence of platforms such that there are bootstrap
stages with their 3 platforms determined by "sliding a window" that is the
3 tuple through the sequence. This was the original model for
bootstrapping. Without a target platform (assume a better world where all
compilers are multi-target and all standard libraries are built in their
own derivation), this is sufficient. Conversely if one wishes to cross
compile "faster", with a "Canadian Cross" bootstrapping stage where
<literal>build != host != target</literal>, more bootstrapping stages are
needed since no sliding window provides the pesky
<varname>pkgsBuildTarget</varname> package set since it skips the Canadian
cross stage's "host".
</para>
<note>
<para>
It is much better to refer to <varname>buildPackages</varname> than
<varname>targetPackages</varname>, or more broadly package sets that do
not mention "target". There are three reasons for this.
</para>
<para>
First, it is because bootstrapping stages do not have a unique
<varname>targetPackages</varname>. For example a <literal>(x86-linux,
x86-linux, arm-linux)</literal> and <literal>(x86-linux, x86-linux,
x86-windows)</literal> package set both have a <literal>(x86-linux,
x86-linux, x86-linux)</literal> package set. Because there is no canonical
<varname>targetPackages</varname> for such a native (<literal>build ==
host == target</literal>) package set, we set their
<varname>targetPackages</varname>
</para>
<para>
Second, it is because this is a frequent source of hard-to-follow
"infinite recursions" / cycles. When only package sets that don't mention
target are used, the package set forms a directed acyclic graph. This
means that all cycles that exist are confined to one stage. This means
they are a lot smaller, and easier to follow in the code or a backtrace. It
also means they are present in native and cross builds alike, and so more
likely to be caught by CI and other users.
</para>
<para>
Thirdly, it is because everything target-mentioning only exists to
accommodate compilers with lousy build systems that insist on the compiler
itself and standard library being built together. Of course that is bad
because bigger derivations means longer rebuilds. It is also problematic because
it tends to make the standard libraries less like other libraries than
they could be, complicating code and build systems alike. Because of the
other problems, and because of these innate disadvantages, compilers ought
to be packaged another way where possible.
</para>
</note>
<note>
<para>
If one explores Nixpkgs, they will see derivations with names like
<literal>gccCross</literal>. Such <literal>*Cross</literal> derivations are
a holdover from before we properly distinguished between the host and
target platforms—the derivation with "Cross" in the name covered the
<literal>build = host != target</literal> case, while the other covered
the <literal>host = target</literal>, with build platform the same or not
based on whether one was using its <literal>.nativeDrv</literal> or
<literal>.crossDrv</literal>. This ugliness will disappear soon.
</para>
</note>
</section>
</section> </section>
</chapter> </chapter>

View File

@ -417,10 +417,11 @@ pkgs.dockerTools.buildLayeredImage {
pullImage { pullImage {
imageName = "nixos/nix"; <co xml:id='ex-dockerTools-pullImage-1' /> imageName = "nixos/nix"; <co xml:id='ex-dockerTools-pullImage-1' />
imageDigest = "sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b"; <co xml:id='ex-dockerTools-pullImage-2' /> imageDigest = "sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b"; <co xml:id='ex-dockerTools-pullImage-2' />
finalImageTag = "1.11"; <co xml:id='ex-dockerTools-pullImage-3' /> finalImageName = "nix"; <co xml:id='ex-dockerTools-pullImage-3' />
sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8"; <co xml:id='ex-dockerTools-pullImage-4' /> finalImageTag = "1.11"; <co xml:id='ex-dockerTools-pullImage-4' />
os = "linux"; <co xml:id='ex-dockerTools-pullImage-5' /> sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8"; <co xml:id='ex-dockerTools-pullImage-5' />
arch = "x86_64"; <co xml:id='ex-dockerTools-pullImage-6' /> os = "linux"; <co xml:id='ex-dockerTools-pullImage-6' />
arch = "x86_64"; <co xml:id='ex-dockerTools-pullImage-7' />
} }
</programlisting> </programlisting>
</example> </example>
@ -436,21 +437,18 @@ pullImage {
<callout arearefs='ex-dockerTools-pullImage-2'> <callout arearefs='ex-dockerTools-pullImage-2'>
<para> <para>
<varname>imageDigest</varname> specifies the digest of the image to be <varname>imageDigest</varname> specifies the digest of the image to be
downloaded. Skopeo can be used to get the digest of an image, with its downloaded. This argument is required.
<varname>inspect</varname> subcommand. Since a given
<varname>imageName</varname> may transparently refer to a manifest list of
images which support multiple architectures and/or operating systems,
supply the `--override-os` and `--override-arch` arguments to specify
exactly which image you want. By default it will match the OS and
architecture of the host the command is run on.
<programlisting>
$ nix-shell --packages skopeo jq --command "skopeo --override-os linux --override-arch x86_64 inspect docker://docker.io/nixos/nix:1.11 | jq -r '.Digest'"
sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b
</programlisting>
This argument is required.
</para> </para>
</callout> </callout>
<callout arearefs='ex-dockerTools-pullImage-3'> <callout arearefs='ex-dockerTools-pullImage-3'>
<para>
<varname>finalImageName</varname>, if specified, this is the name of the
image to be created. Note it is never used to fetch the image since we
prefer to rely on the immutable digest ID. By default it's equal to
<varname>imageName</varname>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-4'>
<para> <para>
<varname>finalImageTag</varname>, if specified, this is the tag of the <varname>finalImageTag</varname>, if specified, this is the tag of the
image to be created. Note it is never used to fetch the image since we image to be created. Note it is never used to fetch the image since we
@ -458,25 +456,53 @@ sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b
<literal>latest</literal>. <literal>latest</literal>.
</para> </para>
</callout> </callout>
<callout arearefs='ex-dockerTools-pullImage-4'> <callout arearefs='ex-dockerTools-pullImage-5'>
<para> <para>
<varname>sha256</varname> is the checksum of the whole fetched image. This <varname>sha256</varname> is the checksum of the whole fetched image. This
argument is required. argument is required.
</para> </para>
</callout> </callout>
<callout arearefs='ex-dockerTools-pullImage-5'> <callout arearefs='ex-dockerTools-pullImage-6'>
<para> <para>
<varname>os</varname>, if specified, is the operating system of the <varname>os</varname>, if specified, is the operating system of the
fetched image. By default it's <literal>linux</literal>. fetched image. By default it's <literal>linux</literal>.
</para> </para>
</callout> </callout>
<callout arearefs='ex-dockerTools-pullImage-6'> <callout arearefs='ex-dockerTools-pullImage-7'>
<para> <para>
<varname>arch</varname>, if specified, is the cpu architecture of the <varname>arch</varname>, if specified, is the cpu architecture of the
fetched image. By default it's <literal>x86_64</literal>. fetched image. By default it's <literal>x86_64</literal>.
</para> </para>
</callout> </callout>
</calloutlist> </calloutlist>
<para>
<literal>nix-prefetch-docker</literal> command can be used to get required
image parameters:
<programlisting>
$ nix run nixpkgs.nix-prefetch-docker -c nix-prefetch-docker --image-name mysql --image-tag 5
</programlisting>
Since a given <varname>imageName</varname> may transparently refer to a
manifest list of images which support multiple architectures and/or
operating systems, you can supply the <option>--os</option> and
<option>--arch</option> arguments to specify exactly which image you want.
By default it will match the OS and architecture of the host the command is
run on.
<programlisting>
$ nix-prefetch-docker --image-name mysql --image-tag 5 --arch x86_64 --os linux
</programlisting>
Desired image name and tag can be set using
<option>--final-image-name</option> and <option>--final-image-tag</option>
arguments:
<programlisting>
$ nix-prefetch-docker --image-name mysql --image-tag 5 --final-image-name eu.gcr.io/my-project/mysql --final-image-tag prod
</programlisting>
</para>
</section> </section>
<section xml:id="ssec-pkgs-dockerTools-exportImage"> <section xml:id="ssec-pkgs-dockerTools-exportImage">

View File

@ -3,12 +3,91 @@
xml:id="sec-language-go"> xml:id="sec-language-go">
<title>Go</title> <title>Go</title>
<para> <section xml:id="ssec-go-modules">
The function <varname>buildGoPackage</varname> builds standard Go programs. <title>Go modules</title>
</para>
<example xml:id='ex-buildGoPackage'> <para>
<title>buildGoPackage</title> The function <varname> buildGoModule </varname> builds Go programs managed
with Go modules. It builds a
<link xlink:href="https://github.com/golang/go/wiki/Modules">Go
modules</link> through a two phase build:
<itemizedlist>
<listitem>
<para>
An intermediate fetcher derivation. This derivation will be used to fetch
all of the dependencies of the Go module.
</para>
</listitem>
<listitem>
<para>
A final derivation will use the output of the intermediate derivation to
build the binaries and produce the final output.
</para>
</listitem>
</itemizedlist>
</para>
<example xml:id='ex-buildGoModule'>
<title>buildGoModule</title>
<programlisting>
pet = buildGoModule rec {
name = "pet-${version}";
version = "0.3.4";
src = fetchFromGitHub {
owner = "knqyf263";
repo = "pet";
rev = "v${version}";
sha256 = "0m2fzpqxk7hrbxsgqplkg7h2p7gv6s1miymv3gvw0cz039skag0s";
};
modSha256 = "1879j77k96684wi554rkjxydrj8g3hpp0kvxz03sd8dmwr3lh83j"; <co xml:id='ex-buildGoModule-1' />
subPackages = [ "." ]; <co xml:id='ex-buildGoModule-2' />
meta = with lib; {
description = "Simple command-line snippet manager, written in Go";
homepage = https://github.com/knqyf263/pet;
license = licenses.mit;
maintainers = with maintainers; [ kalbasit ];
platforms = platforms.linux ++ platforms.darwin;
};
}
</programlisting>
</example>
<para>
<xref linkend='ex-buildGoModule'/> is an example expression using
buildGoModule, the following arguments are of special significance to the
function:
<calloutlist>
<callout arearefs='ex-buildGoModule-1'>
<para>
<varname>modSha256</varname> is the hash of the output of the
intermediate fetcher derivation.
</para>
</callout>
<callout arearefs='ex-buildGoModule-2'>
<para>
<varname>subPackages</varname> limits the builder from building child
packages that have not been listed. If <varname>subPackages</varname> is
not specified, all child packages will be built.
</para>
</callout>
</calloutlist>
</para>
</section>
<section xml:id="ssec-go-legacy">
<title>Go legacy</title>
<para>
The function <varname> buildGoPackage </varname> builds legacy Go programs,
not supporting Go modules.
</para>
<example xml:id='ex-buildGoPackage'>
<title>buildGoPackage</title>
<programlisting> <programlisting>
deis = buildGoPackage rec { deis = buildGoPackage rec {
name = "deis-${version}"; name = "deis-${version}";
@ -29,56 +108,56 @@ deis = buildGoPackage rec {
buildFlags = "--tags release"; <co xml:id='ex-buildGoPackage-4' /> buildFlags = "--tags release"; <co xml:id='ex-buildGoPackage-4' />
} }
</programlisting> </programlisting>
</example> </example>
<para> <para>
<xref linkend='ex-buildGoPackage'/> is an example expression using <xref linkend='ex-buildGoPackage'/> is an example expression using
buildGoPackage, the following arguments are of special significance to the buildGoPackage, the following arguments are of special significance to the
function: function:
<calloutlist> <calloutlist>
<callout arearefs='ex-buildGoPackage-1'> <callout arearefs='ex-buildGoPackage-1'>
<para> <para>
<varname>goPackagePath</varname> specifies the package's canonical Go <varname>goPackagePath</varname> specifies the package's canonical Go
import path. import path.
</para> </para>
</callout> </callout>
<callout arearefs='ex-buildGoPackage-2'> <callout arearefs='ex-buildGoPackage-2'>
<para> <para>
<varname>subPackages</varname> limits the builder from building child <varname>subPackages</varname> limits the builder from building child
packages that have not been listed. If <varname>subPackages</varname> is packages that have not been listed. If <varname>subPackages</varname> is
not specified, all child packages will be built. not specified, all child packages will be built.
</para> </para>
<para> <para>
In this example only <literal>github.com/deis/deis/client</literal> will In this example only <literal>github.com/deis/deis/client</literal> will
be built. be built.
</para> </para>
</callout> </callout>
<callout arearefs='ex-buildGoPackage-3'> <callout arearefs='ex-buildGoPackage-3'>
<para> <para>
<varname>goDeps</varname> is where the Go dependencies of a Go program are <varname>goDeps</varname> is where the Go dependencies of a Go program
listed as a list of package source identified by Go import path. It could are listed as a list of package source identified by Go import path. It
be imported as a separate <varname>deps.nix</varname> file for could be imported as a separate <varname>deps.nix</varname> file for
readability. The dependency data structure is described below. readability. The dependency data structure is described below.
</para> </para>
</callout> </callout>
<callout arearefs='ex-buildGoPackage-4'> <callout arearefs='ex-buildGoPackage-4'>
<para> <para>
<varname>buildFlags</varname> is a list of flags passed to the go build <varname>buildFlags</varname> is a list of flags passed to the go build
command. command.
</para> </para>
</callout> </callout>
</calloutlist> </calloutlist>
</para> </para>
<para> <para>
The <varname>goDeps</varname> attribute can be imported from a separate The <varname>goDeps</varname> attribute can be imported from a separate
<varname>nix</varname> file that defines which Go libraries are needed and <varname>nix</varname> file that defines which Go libraries are needed and
should be included in <varname>GOPATH</varname> for should be included in <varname>GOPATH</varname> for
<varname>buildPhase</varname>. <varname>buildPhase</varname>.
</para> </para>
<example xml:id='ex-goDeps'> <example xml:id='ex-goDeps'>
<title>deps.nix</title> <title>deps.nix</title>
<programlisting> <programlisting>
[ <co xml:id='ex-goDeps-1' /> [ <co xml:id='ex-goDeps-1' />
{ {
@ -101,60 +180,62 @@ deis = buildGoPackage rec {
} }
] ]
</programlisting> </programlisting>
</example> </example>
<para> <para>
<calloutlist> <calloutlist>
<callout arearefs='ex-goDeps-1'> <callout arearefs='ex-goDeps-1'>
<para> <para>
<varname>goDeps</varname> is a list of Go dependencies. <varname>goDeps</varname> is a list of Go dependencies.
</para> </para>
</callout> </callout>
<callout arearefs='ex-goDeps-2'> <callout arearefs='ex-goDeps-2'>
<para> <para>
<varname>goPackagePath</varname> specifies Go package import path. <varname>goPackagePath</varname> specifies Go package import path.
</para> </para>
</callout> </callout>
<callout arearefs='ex-goDeps-3'> <callout arearefs='ex-goDeps-3'>
<para> <para>
<varname>fetch type</varname> that needs to be used to get package source. <varname>fetch type</varname> that needs to be used to get package
If <varname>git</varname> is used there should be <varname>url</varname>, source. If <varname>git</varname> is used there should be
<varname>rev</varname> and <varname>sha256</varname> defined next to it. <varname>url</varname>, <varname>rev</varname> and
</para> <varname>sha256</varname> defined next to it.
</callout> </para>
</calloutlist> </callout>
</para> </calloutlist>
</para>
<para> <para>
To extract dependency information from a Go package in automated way use To extract dependency information from a Go package in automated way use
<link xlink:href="https://github.com/kamilchm/go2nix">go2nix</link>. It can <link xlink:href="https://github.com/kamilchm/go2nix">go2nix</link>. It can
produce complete derivation and <varname>goDeps</varname> file for Go produce complete derivation and <varname>goDeps</varname> file for Go
programs. programs.
</para> </para>
<para> <para>
<varname>buildGoPackage</varname> produces <varname>buildGoPackage</varname> produces
<xref linkend='chap-multiple-output' xrefstyle="select: title" /> where <xref linkend='chap-multiple-output' xrefstyle="select: title" /> where
<varname>bin</varname> includes program binaries. You can test build a Go <varname>bin</varname> includes program binaries. You can test build a Go
binary as follows: binary as follows:
<screen> <screen>
$ nix-build -A deis.bin $ nix-build -A deis.bin
</screen> </screen>
or build all outputs with: or build all outputs with:
<screen> <screen>
$ nix-build -A deis.all $ nix-build -A deis.all
</screen> </screen>
<varname>bin</varname> output will be installed by default with <varname>bin</varname> output will be installed by default with
<varname>nix-env -i</varname> or <varname>systemPackages</varname>. <varname>nix-env -i</varname> or <varname>systemPackages</varname>.
</para> </para>
<para> <para>
You may use Go packages installed into the active Nix profiles by adding the You may use Go packages installed into the active Nix profiles by adding the
following to your ~/.bashrc: following to your ~/.bashrc:
<screen> <screen>
for p in $NIX_PROFILES; do for p in $NIX_PROFILES; do
GOPATH="$p/share/go:$GOPATH" GOPATH="$p/share/go:$GOPATH"
done done
</screen> </screen>
</para> </para>
</section>
</section> </section>

View File

@ -188,25 +188,24 @@ building Python libraries is `buildPythonPackage`. Let's see how we can build th
```nix ```nix
{ lib, buildPythonPackage, fetchPypi }: { lib, buildPythonPackage, fetchPypi }:
toolz = buildPythonPackage rec { buildPythonPackage rec {
pname = "toolz"; pname = "toolz";
version = "0.7.4"; version = "0.7.4";
src = fetchPypi { src = fetchPypi {
inherit pname version; inherit pname version;
sha256 = "43c2c9e5e7a16b6c88ba3088a9bfc82f7db8e13378be7c78d6c14a5f8ed05afd"; sha256 = "43c2c9e5e7a16b6c88ba3088a9bfc82f7db8e13378be7c78d6c14a5f8ed05afd";
};
doCheck = false;
meta = with lib; {
homepage = https://github.com/pytoolz/toolz;
description = "List processing tools and functional utilities";
license = licenses.bsd3;
maintainers = with maintainers; [ fridh ];
};
}; };
}
doCheck = false;
meta = with lib; {
homepage = https://github.com/pytoolz/toolz;
description = "List processing tools and functional utilities";
license = licenses.bsd3;
maintainers = with maintainers; [ fridh ];
};
};
``` ```
What happens here? The function `buildPythonPackage` is called and as argument What happens here? The function `buildPythonPackage` is called and as argument
@ -279,32 +278,31 @@ The following example shows which arguments are given to `buildPythonPackage` in
order to build [`datashape`](https://github.com/blaze/datashape). order to build [`datashape`](https://github.com/blaze/datashape).
```nix ```nix
{ # ... { lib, buildPythonPackage, fetchPypi, numpy, multipledispatch, dateutil, pytest }:
datashape = buildPythonPackage rec { buildPythonPackage rec {
pname = "datashape"; pname = "datashape";
version = "0.4.7"; version = "0.4.7";
src = fetchPypi { src = fetchPypi {
inherit pname version; inherit pname version;
sha256 = "14b2ef766d4c9652ab813182e866f493475e65e558bed0822e38bf07bba1a278"; sha256 = "14b2ef766d4c9652ab813182e866f493475e65e558bed0822e38bf07bba1a278";
}; };
checkInputs = with self; [ pytest ]; checkInputs = [ pytest ];
propagatedBuildInputs = with self; [ numpy multipledispatch dateutil ]; propagatedBuildInputs = [ numpy multipledispatch dateutil ];
meta = with lib; { meta = with lib; {
homepage = https://github.com/ContinuumIO/datashape; homepage = https://github.com/ContinuumIO/datashape;
description = "A data description language"; description = "A data description language";
license = licenses.bsd2; license = licenses.bsd2;
maintainers = with maintainers; [ fridh ]; maintainers = with maintainers; [ fridh ];
};
}; };
} }
``` ```
We can see several runtime dependencies, `numpy`, `multipledispatch`, and We can see several runtime dependencies, `numpy`, `multipledispatch`, and
`dateutil`. Furthermore, we have one `buildInput`, i.e. `pytest`. `pytest` is a `dateutil`. Furthermore, we have one `checkInputs`, i.e. `pytest`. `pytest` is a
test runner and is only used during the `checkPhase` and is therefore not added test runner and is only used during the `checkPhase` and is therefore not added
to `propagatedBuildInputs`. to `propagatedBuildInputs`.
@ -314,25 +312,24 @@ Python bindings to `libxml2` and `libxslt`. These libraries are only required
when building the bindings and are therefore added as `buildInputs`. when building the bindings and are therefore added as `buildInputs`.
```nix ```nix
{ # ... { lib, pkgs, buildPythonPackage, fetchPypi }:
lxml = buildPythonPackage rec { buildPythonPackage rec {
pname = "lxml"; pname = "lxml";
version = "3.4.4"; version = "3.4.4";
src = fetchPypi { src = fetchPypi {
inherit pname version; inherit pname version;
sha256 = "16a0fa97hym9ysdk3rmqz32xdjqmy4w34ld3rm3jf5viqjx65lxk"; sha256 = "16a0fa97hym9ysdk3rmqz32xdjqmy4w34ld3rm3jf5viqjx65lxk";
}; };
buildInputs = with self; [ pkgs.libxml2 pkgs.libxslt ]; buildInputs = [ pkgs.libxml2 pkgs.libxslt ];
meta = with lib; { meta = with lib; {
description = "Pythonic binding for the libxml2 and libxslt libraries"; description = "Pythonic binding for the libxml2 and libxslt libraries";
homepage = https://lxml.de; homepage = https://lxml.de;
license = licenses.bsd3; license = licenses.bsd3;
maintainers = with maintainers; [ sjourdois ]; maintainers = with maintainers; [ sjourdois ];
};
}; };
} }
``` ```
@ -348,35 +345,34 @@ find each of them in a different folder, and therefore we have to set `LDFLAGS`
and `CFLAGS`. and `CFLAGS`.
```nix ```nix
{ # ... { lib, pkgs, buildPythonPackage, fetchPypi, numpy, scipy }:
pyfftw = buildPythonPackage rec { buildPythonPackage rec {
pname = "pyFFTW"; pname = "pyFFTW";
version = "0.9.2"; version = "0.9.2";
src = fetchPypi { src = fetchPypi {
inherit pname version; inherit pname version;
sha256 = "f6bbb6afa93085409ab24885a1a3cdb8909f095a142f4d49e346f2bd1b789074"; sha256 = "f6bbb6afa93085409ab24885a1a3cdb8909f095a142f4d49e346f2bd1b789074";
}; };
buildInputs = [ pkgs.fftw pkgs.fftwFloat pkgs.fftwLongDouble]; buildInputs = [ pkgs.fftw pkgs.fftwFloat pkgs.fftwLongDouble];
propagatedBuildInputs = with self; [ numpy scipy ]; propagatedBuildInputs = [ numpy scipy ];
# Tests cannot import pyfftw. pyfftw works fine though. # Tests cannot import pyfftw. pyfftw works fine though.
doCheck = false; doCheck = false;
preConfigure = '' preConfigure = ''
export LDFLAGS="-L${pkgs.fftw.dev}/lib -L${pkgs.fftwFloat.out}/lib -L${pkgs.fftwLongDouble.out}/lib" export LDFLAGS="-L${pkgs.fftw.dev}/lib -L${pkgs.fftwFloat.out}/lib -L${pkgs.fftwLongDouble.out}/lib"
export CFLAGS="-I${pkgs.fftw.dev}/include -I${pkgs.fftwFloat.dev}/include -I${pkgs.fftwLongDouble.dev}/include" export CFLAGS="-I${pkgs.fftw.dev}/include -I${pkgs.fftwFloat.dev}/include -I${pkgs.fftwLongDouble.dev}/include"
''; '';
meta = with lib; { meta = with lib; {
description = "A pythonic wrapper around FFTW, the FFT library, presenting a unified interface for all the supported transforms"; description = "A pythonic wrapper around FFTW, the FFT library, presenting a unified interface for all the supported transforms";
homepage = http://hgomersall.github.com/pyFFTW; homepage = http://hgomersall.github.com/pyFFTW;
license = with licenses; [ bsd2 bsd3 ]; license = with licenses; [ bsd2 bsd3 ];
maintainers = with maintainers; [ fridh ]; maintainers = with maintainers; [ fridh ];
};
}; };
} }
``` ```
@ -404,7 +400,7 @@ Indeed, we can just add any package we like to have in our environment to `propa
```nix ```nix
with import <nixpkgs> {}; with import <nixpkgs> {};
with pkgs.python35Packages; with python35Packages;
buildPythonPackage rec { buildPythonPackage rec {
name = "mypackage"; name = "mypackage";
@ -437,7 +433,7 @@ Let's split the package definition from the environment definition.
We first create a function that builds `toolz` in `~/path/to/toolz/release.nix` We first create a function that builds `toolz` in `~/path/to/toolz/release.nix`
```nix ```nix
{ lib, pkgs, buildPythonPackage }: { lib, buildPythonPackage }:
buildPythonPackage rec { buildPythonPackage rec {
pname = "toolz"; pname = "toolz";
@ -457,18 +453,17 @@ buildPythonPackage rec {
} }
``` ```
It takes two arguments, `pkgs` and `buildPythonPackage`. It takes an argument `buildPythonPackage`.
We now call this function using `callPackage` in the definition of our environment We now call this function using `callPackage` in the definition of our environment
```nix ```nix
with import <nixpkgs> {}; with import <nixpkgs> {};
( let ( let
toolz = pkgs.callPackage /path/to/toolz/release.nix { toolz = callPackage /path/to/toolz/release.nix {
pkgs = pkgs; buildPythonPackage = python35Packages.buildPythonPackage;
buildPythonPackage = pkgs.python35Packages.buildPythonPackage;
}; };
in pkgs.python35.withPackages (ps: [ ps.numpy toolz ]) in python35.withPackages (ps: [ ps.numpy toolz ])
).env ).env
``` ```
@ -566,7 +561,7 @@ buildPythonPackage rec {
''; '';
checkInputs = [ hypothesis ]; checkInputs = [ hypothesis ];
buildInputs = [ setuptools_scm ]; nativeBuildInputs = [ setuptools_scm ];
propagatedBuildInputs = [ attrs py setuptools six pluggy ]; propagatedBuildInputs = [ attrs py setuptools six pluggy ];
meta = with lib; { meta = with lib; {
@ -586,11 +581,6 @@ The `buildPythonPackage` mainly does four things:
environment variable and add dependent libraries to script's `sys.path`. environment variable and add dependent libraries to script's `sys.path`.
* In the `installCheck` phase, `${python.interpreter} setup.py test` is ran. * In the `installCheck` phase, `${python.interpreter} setup.py test` is ran.
As in Perl, dependencies on other Python packages can be specified in the
`buildInputs` and `propagatedBuildInputs` attributes. If something is
exclusively a build-time dependency, use `buildInputs`; if it is (also) a runtime
dependency, use `propagatedBuildInputs`.
By default tests are run because `doCheck = true`. Test dependencies, like By default tests are run because `doCheck = true`. Test dependencies, like
e.g. the test runner, should be added to `checkInputs`. e.g. the test runner, should be added to `checkInputs`.
@ -734,7 +724,7 @@ Saving the following as `default.nix`
with import <nixpkgs> {}; with import <nixpkgs> {};
python.buildEnv.override { python.buildEnv.override {
extraLibs = [ pkgs.pythonPackages.pyramid ]; extraLibs = [ pythonPackages.pyramid ];
ignoreCollisions = true; ignoreCollisions = true;
} }
``` ```
@ -816,11 +806,12 @@ Given a `default.nix`:
```nix ```nix
with import <nixpkgs> {}; with import <nixpkgs> {};
buildPythonPackage { name = "myproject"; pythonPackages.buildPythonPackage {
name = "myproject";
buildInputs = with pythonPackages; [ pyramid ];
buildInputs = with pkgs.pythonPackages; [ pyramid ]; src = ./.;
}
src = ./.; }
``` ```
Running `nix-shell` with no arguments should give you Running `nix-shell` with no arguments should give you
@ -1006,10 +997,13 @@ Create this `default.nix` file, together with a `requirements.txt` and simply ex
```nix ```nix
with import <nixpkgs> {}; with import <nixpkgs> {};
with pkgs.python27Packages; with python27Packages;
stdenv.mkDerivation { stdenv.mkDerivation {
name = "impurePythonEnv"; name = "impurePythonEnv";
src = null;
buildInputs = [ buildInputs = [
# these packages are required for virtualenv and pip to work: # these packages are required for virtualenv and pip to work:
# #
@ -1029,14 +1023,15 @@ stdenv.mkDerivation {
libxslt libxslt
libzip libzip
stdenv stdenv
zlib ]; zlib
src = null; ];
shellHook = '' shellHook = ''
# set SOURCE_DATE_EPOCH so that we can use python wheels # set SOURCE_DATE_EPOCH so that we can use python wheels
SOURCE_DATE_EPOCH=$(date +%s) SOURCE_DATE_EPOCH=$(date +%s)
virtualenv --no-setuptools venv virtualenv --no-setuptools venv
export PATH=$PWD/venv/bin:$PATH export PATH=$PWD/venv/bin:$PATH
pip install -r requirements.txt pip install -r requirements.txt
''; '';
} }
``` ```

View File

@ -101,6 +101,13 @@
contain <varname>$outputBin</varname> and <varname>$outputLib</varname> are contain <varname>$outputBin</varname> and <varname>$outputLib</varname> are
also added. (See <xref linkend="multiple-output-file-type-groups" />.) also added. (See <xref linkend="multiple-output-file-type-groups" />.)
</para> </para>
<para>
In some cases it may be desirable to combine different outputs under a
single store path. A function <literal>symlinkJoin</literal> can be used to
do this. (Note that it may negate some closure size benefits of using a
multiple-output package.)
</para>
</section> </section>
<section xml:id="sec-multiple-outputs-"> <section xml:id="sec-multiple-outputs-">
<title>Writing a split derivation</title> <title>Writing a split derivation</title>

View File

@ -310,6 +310,10 @@ packageOverrides = pkgs: {
<section xml:id="sec-elm"> <section xml:id="sec-elm">
<title>Elm</title> <title>Elm</title>
<para>
To start a development environment do <command>nix-shell -p elmPackages.elm elmPackages.elm-format</command>
</para>
<para> <para>
To update Elm compiler, see To update Elm compiler, see
<filename>nixpkgs/pkgs/development/compilers/elm/README.md</filename>. <filename>nixpkgs/pkgs/development/compilers/elm/README.md</filename>.
@ -883,6 +887,33 @@ citrix_receiver.override {
On NixOS it can be installed using the following expression: On NixOS it can be installed using the following expression:
<programlisting>{ pkgs, ... }: { <programlisting>{ pkgs, ... }: {
fonts.fonts = with pkgs; [ noto-fonts-emoji ]; fonts.fonts = with pkgs; [ noto-fonts-emoji ];
}</programlisting>
</para>
</section>
</section>
<section xml:id="dlib">
<title>DLib</title>
<para>
<link xlink:href="http://dlib.net/">DLib</link> is a modern, C++-based toolkit which
provides several machine learning algorithms.
</para>
<section xml:id="compiling-without-avx-support">
<title>Compiling without AVX support</title>
<para>
Especially older CPUs don't support
<link xlink:href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">AVX</link>
(<abbrev>Advanced Vector Extensions</abbrev>) instructions that are used by DLib to
optimize their algorithms.
</para>
<para>
On the affected hardware errors like <literal>Illegal instruction</literal> will occur.
In those cases AVX support needs to be disabled:
<programlisting>self: super: {
dlib = super.dlib.override { avxSupport = false; };
}</programlisting> }</programlisting>
</para> </para>
</section> </section>

View File

@ -189,14 +189,15 @@ $ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD <co
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The <link xlink:href="https://github.com/madjar/nox">nox</link> tool can The
be used to review a pull request content in a single command. It doesn't <link xlink:href="https://github.com/Mic92/nix-review">nix-review</link>
rebase on a channel branch so it might trigger multiple source builds. tool can be used to review a pull request content in a single command.
<varname>PRNUMBER</varname> should be replaced by the number at the end <varname>PRNUMBER</varname> should be replaced by the number at the end
of the pull request title. of the pull request title. You can also provide the full github pull
request url.
</para> </para>
<screen> <screen>
$ nix-shell -p nox --run "nox-review -k pr PRNUMBER" $ nix-shell -p nix-review --run "nix-review pr PRNUMBER"
</screen> </screen>
</listitem> </listitem>
</itemizedlist> </itemizedlist>

View File

@ -222,9 +222,10 @@ genericBuild
</footnote> </footnote>
But even if one is not cross compiling, the platforms imply whether or not But even if one is not cross compiling, the platforms imply whether or not
the dependency is needed at run-time or build-time, a concept that makes the dependency is needed at run-time or build-time, a concept that makes
perfect sense outside of cross compilation. For now, the run-time/build-time perfect sense outside of cross compilation. By default, the
distinction is just a hint for mental clarity, but in the future it perhaps run-time/build-time distinction is just a hint for mental clarity, but with
could be enforced. <varname>strictDeps</varname> set it is mostly enforced even in the native
case.
</para> </para>
<para> <para>
@ -348,7 +349,10 @@ let f(h, h + 1, i) = i + h
<para> <para>
Overall, the unifying theme here is that propagation shouldn't be Overall, the unifying theme here is that propagation shouldn't be
introducing transitive dependencies involving platforms the depending introducing transitive dependencies involving platforms the depending
package is unaware of. The offset bounds checking and definition of package is unaware of. [One can imagine the dependending package asking for
dependencies with the platforms it knows about; other platforms it doesn't
know how to ask for. The platform description in that scenario is a kind of
unforagable capability.] The offset bounds checking and definition of
<function>mapOffset</function> together ensure that this is the case. <function>mapOffset</function> together ensure that this is the case.
Discovering a new offset is discovering a new platform, and since those Discovering a new offset is discovering a new platform, and since those
platforms weren't in the derivation "spec" of the needing package, they platforms weren't in the derivation "spec" of the needing package, they
@ -2633,20 +2637,20 @@ addEnvHooks "$hostOffset" myBashFunction
happens. It prevents nix from cleaning up the build environment happens. It prevents nix from cleaning up the build environment
immediately and allows the user to attach to a build environment using immediately and allows the user to attach to a build environment using
the <command>cntr</command> command. Upon build error it will print the <command>cntr</command> command. Upon build error it will print
instructions on how to use <command>cntr</command>. Installing cntr and instructions on how to use <command>cntr</command>, which can be used to
running the command will provide shell access to the build sandbox of enter the environment for debugging. Installing cntr and running the
failed build. At <filename>/var/lib/cntr</filename> the sandboxed command will provide shell access to the build sandbox of failed build.
filesystem is mounted. All commands and files of the system are still At <filename>/var/lib/cntr</filename> the sandboxed filesystem is
accessible within the shell. To execute commands from the sandbox use mounted. All commands and files of the system are still accessible
the cntr exec subcommand. Note that <command>cntr</command> also needs within the shell. To execute commands from the sandbox use the cntr exec
to be executed on the machine that is doing the build, which might not subcommand. Note that <command>cntr</command> also needs to be executed
be the case when remote builders are enabled. <command>cntr</command> is on the machine that is doing the build, which might not be the case when
only supported on Linux-based platforms. To use it first add remote builders are enabled. <command>cntr</command> is only supported
<literal>cntr</literal> to your on Linux-based platforms. To use it first add <literal>cntr</literal> to
<literal>environment.systemPackages</literal> on NixOS or alternatively your <literal>environment.systemPackages</literal> on NixOS or
to the root user on non-NixOS systems. Then in the package that is alternatively to the root user on non-NixOS systems. Then in the package
supposed to be inspected, add <literal>breakpointHook</literal> to that is supposed to be inspected, add <literal>breakpointHook</literal>
<literal>nativeBuildInputs</literal>. to <literal>nativeBuildInputs</literal>.
<programlisting> <programlisting>
nativeBuildInputs = [ breakpointHook ]; nativeBuildInputs = [ breakpointHook ];
</programlisting> </programlisting>
@ -2750,9 +2754,9 @@ addEnvHooks "$hostOffset" myBashFunction
<listitem> <listitem>
<para> <para>
Overrides the configure, build, and install phases. This will run the Overrides the configure, build, and install phases. This will run the
"waf" script used by many projects. If waf doesnt exist, it will copy "waf" script used by many projects. If wafPath (default ./waf) doesnt
the version of waf available in Nixpkgs wafFlags can be used to pass exist, it will copy the version of waf available in Nixpkgs. wafFlags can
flags to the waf script. be used to pass flags to the waf script.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>

View File

@ -351,25 +351,24 @@ Additional information.
</section> </section>
<section xml:id="submitting-changes-tested-compilation"> <section xml:id="submitting-changes-tested-compilation">
<title>Tested compilation of all pkgs that depend on this change using <command>nox-review</command></title> <title>Tested compilation of all pkgs that depend on this change using <command>nix-review</command></title>
<para> <para>
If you are updating a package's version, you can use nox to make sure all If you are updating a package's version, you can use nix-review to make
packages that depend on the updated package still compile correctly. This sure all packages that depend on the updated package still compile
can be done using the nox utility. The <command>nox-review</command> correctly. The <command>nix-review</command> utility can look for and build
utility can look for and build all dependencies either based on uncommited all dependencies either based on uncommited changes with the
changes with the <literal>wip</literal> option or specifying a github pull <literal>wip</literal> option or specifying a github pull request number.
request number.
</para>
<para>
review uncommitted changes:
<screen>nix-shell -p nox --run "nox-review wip"</screen>
</para> </para>
<para> <para>
review changes from pull request number 12345: review changes from pull request number 12345:
<screen>nix-shell -p nox --run "nox-review pr 12345"</screen> <screen>nix-shell -p nix-review --run "nix-review pr 12345"</screen>
</para>
<para>
review uncommitted changes:
<screen>nix-shell -p nix-review --run "nix-review wip"</screen>
</para> </para>
</section> </section>

View File

@ -30,9 +30,12 @@ rec {
# nix-repl> converge (x: x / 2) 16 # nix-repl> converge (x: x / 2) 16
# 0 # 0
converge = f: x: converge = f: x:
if (f x) == x let
then x x' = f x;
else converge f (f x); in
if x' == x
then x
else converge f x';
# Modify the contents of an explicitly recursive attribute set in a way that # Modify the contents of an explicitly recursive attribute set in a way that
# honors `self`-references. This is accomplished with a function # honors `self`-references. This is accomplished with a function

View File

@ -7,7 +7,7 @@ let
in in
rec { rec {
inherit (builtins) head tail length isList elemAt concatLists filter elem genList; inherit (builtins) head tail length isList elemAt concatLists filter elem genList map;
/* Create a list consisting of a single element. `singleton x` is /* Create a list consisting of a single element. `singleton x` is
sometimes more convenient with respect to indentation than `[x]` sometimes more convenient with respect to indentation than `[x]`
@ -633,8 +633,7 @@ rec {
else else
let let
x = head list; x = head list;
xs = unique (drop 1 list); in [x] ++ unique (remove x list);
in [x] ++ remove x xs;
/* Intersects list 'e' and another list. O(nm) complexity. /* Intersects list 'e' and another list. O(nm) complexity.

View File

@ -12,8 +12,8 @@ rec {
# Bring in a path as a source, filtering out all Subversion and CVS # Bring in a path as a source, filtering out all Subversion and CVS
# directories, as well as backup files (*~). # directories, as well as backup files (*~).
cleanSourceFilter = name: type: let baseName = baseNameOf (toString name); in ! ( cleanSourceFilter = name: type: let baseName = baseNameOf (toString name); in ! (
# Filter out Subversion and CVS directories. # Filter out version control software files/directories
(type == "directory" && (baseName == ".git" || baseName == ".svn" || baseName == "CVS" || baseName == ".hg")) || (baseName == ".git" || type == "directory" && (baseName == ".svn" || baseName == "CVS" || baseName == ".hg")) ||
# Filter out editor backup / swap files. # Filter out editor backup / swap files.
lib.hasSuffix "~" baseName || lib.hasSuffix "~" baseName ||
builtins.match "^\\.sw[a-z]$" baseName != null || builtins.match "^\\.sw[a-z]$" baseName != null ||

View File

@ -90,7 +90,7 @@ rec {
/* Same as `concatMapStringsSep`, but the mapping function /* Same as `concatMapStringsSep`, but the mapping function
additionally receives the position of its argument. additionally receives the position of its argument.
Type: concatMapStringsSep :: string -> (int -> string -> string) -> [string] -> string Type: concatIMapStringsSep :: string -> (int -> string -> string) -> [string] -> string
Example: Example:
concatImapStringsSep "-" (pos: x: toString (x / pos)) [ 6 6 6 ] concatImapStringsSep "-" (pos: x: toString (x / pos)) [ 6 6 6 ]

View File

@ -34,7 +34,9 @@ rec {
else if final.isUClibc then "uclibc" else if final.isUClibc then "uclibc"
else if final.isAndroid then "bionic" else if final.isAndroid then "bionic"
else if final.isLinux /* default */ then "glibc" else if final.isLinux /* default */ then "glibc"
else if final.isMsp430 then "newlib"
else if final.isAvr then "avrlibc" else if final.isAvr then "avrlibc"
else if final.isNetBSD then "nblibc"
# TODO(@Ericson2314) think more about other operating systems # TODO(@Ericson2314) think more about other operating systems
else "native/impure"; else "native/impure";
extensions = { extensions = {

View File

@ -47,5 +47,5 @@ in rec {
unix = filterDoubles predicates.isUnix; unix = filterDoubles predicates.isUnix;
windows = filterDoubles predicates.isWindows; windows = filterDoubles predicates.isWindows;
mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "aarch64-linux" "powerpc64le-linux"]; mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "armv7a-linux" "aarch64-linux" "powerpc64le-linux"];
} }

View File

@ -44,14 +44,6 @@ rec {
platform = platforms.aarch64-multiplatform; platform = platforms.aarch64-multiplatform;
}; };
armv5te-android-prebuilt = rec {
config = "armv5tel-unknown-linux-androideabi";
sdkVer = "21";
ndkVer = "18b";
platform = platforms.armv5te-android;
useAndroidPrebuilt = true;
};
armv7a-android-prebuilt = rec { armv7a-android-prebuilt = rec {
config = "armv7a-unknown-linux-androideabi"; config = "armv7a-unknown-linux-androideabi";
sdkVer = "24"; sdkVer = "24";
@ -102,6 +94,11 @@ rec {
riscv64 = riscv "64"; riscv64 = riscv "64";
riscv32 = riscv "32"; riscv32 = riscv "32";
msp430 = {
config = "msp430-elf";
libc = "newlib";
};
avr = { avr = {
config = "avr"; config = "avr";
}; };
@ -212,4 +209,11 @@ rec {
libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain
platform = {}; platform = {};
}; };
# BSDs
amd64-netbsd = {
config = "x86_64-unknown-netbsd";
libc = "nblibc";
};
} }

View File

@ -20,6 +20,7 @@ rec {
isRiscV = { cpu = { family = "riscv"; }; }; isRiscV = { cpu = { family = "riscv"; }; };
isSparc = { cpu = { family = "sparc"; }; }; isSparc = { cpu = { family = "sparc"; }; };
isWasm = { cpu = { family = "wasm"; }; }; isWasm = { cpu = { family = "wasm"; }; };
isMsp430 = { cpu = { family = "msp430"; }; };
isAvr = { cpu = { family = "avr"; }; }; isAvr = { cpu = { family = "avr"; }; };
isAlpha = { cpu = { family = "alpha"; }; }; isAlpha = { cpu = { family = "alpha"; }; };

View File

@ -109,6 +109,7 @@ rec {
alpha = { bits = 64; significantByte = littleEndian; family = "alpha"; }; alpha = { bits = 64; significantByte = littleEndian; family = "alpha"; };
msp430 = { bits = 16; significantByte = littleEndian; family = "msp430"; };
avr = { bits = 8; family = "avr"; }; avr = { bits = 8; family = "avr"; };
}; };

View File

@ -253,22 +253,11 @@ rec {
kernelTarget = "zImage"; kernelTarget = "zImage";
}; };
# https://developer.android.com/ndk/guides/abis#armeabi
armv5te-android = {
name = "armeabi";
gcc = {
arch = "armv5te";
float = "soft";
float-abi = "soft";
};
};
# https://developer.android.com/ndk/guides/abis#v7a # https://developer.android.com/ndk/guides/abis#v7a
armv7a-android = { armv7a-android = {
name = "armeabi-v7a"; name = "armeabi-v7a";
gcc = { gcc = {
arch = "armv7-a"; arch = "armv7-a";
float = "hard";
float-abi = "softfp"; float-abi = "softfp";
fpu = "vfpv3-d16"; fpu = "vfpv3-d16";
}; };

View File

@ -147,6 +147,11 @@
github = "aepsil0n"; github = "aepsil0n";
name = "Eduard Bopp"; name = "Eduard Bopp";
}; };
aerialx = {
email = "aaron+nixos@aaronlindsay.com";
github = "AerialX";
name = "Aaron Lindsay";
};
aespinosa = { aespinosa = {
email = "allan.espinosa@outlook.com"; email = "allan.espinosa@outlook.com";
github = "aespinosa"; github = "aespinosa";
@ -335,6 +340,11 @@
github = "andrew-d"; github = "andrew-d";
name = "Andrew Dunham"; name = "Andrew Dunham";
}; };
andrewchambers = {
email = "ac@acha.ninja";
github = "andrewchambers";
name = "Andrew Chambers";
};
andrewrk = { andrewrk = {
email = "superjoe30@gmail.com"; email = "superjoe30@gmail.com";
github = "andrewrk"; github = "andrewrk";
@ -523,6 +533,11 @@
email = "sivaraman.balaji@gmail.com"; email = "sivaraman.balaji@gmail.com";
name = "Balaji Sivaraman"; name = "Balaji Sivaraman";
}; };
balsoft = {
email = "balsoft75@gmail.com";
github = "balsoft";
name = "Alexander Bantyev";
};
bandresen = { bandresen = {
email = "bandresen@gmail.com"; email = "bandresen@gmail.com";
github = "bandresen"; github = "bandresen";
@ -1204,6 +1219,11 @@
github = "dgonyeo"; github = "dgonyeo";
name = "Derek Gonyeo"; name = "Derek Gonyeo";
}; };
dhkl = {
email = "david@davidslab.com";
github = "dhl";
name = "David Leung";
};
dipinhora = { dipinhora = {
email = "dipinhora+github@gmail.com"; email = "dipinhora+github@gmail.com";
github = "dipinhora"; github = "dipinhora";
@ -1214,6 +1234,11 @@
github = "disassembler"; github = "disassembler";
name = "Samuel Leathers"; name = "Samuel Leathers";
}; };
disserman = {
email = "disserman@gmail.com";
github = "divi255";
name = "Sergei S.";
};
dizfer = { dizfer = {
email = "david@izquierdofernandez.com"; email = "david@izquierdofernandez.com";
github = "dizfer"; github = "dizfer";
@ -1294,7 +1319,7 @@
name = "Tim Dysinger"; name = "Tim Dysinger";
}; };
dywedir = { dywedir = {
email = "dywedir@protonmail.ch"; email = "dywedir@gra.red";
github = "dywedir"; github = "dywedir";
name = "Vladyslav M."; name = "Vladyslav M.";
}; };
@ -1583,6 +1608,11 @@
github = "fdns"; github = "fdns";
name = "Felipe Espinoza"; name = "Felipe Espinoza";
}; };
ffinkdevs = {
email = "fink@h0st.space";
github = "ffinkdevs";
name = "Fabian Fink";
};
fgaz = { fgaz = {
email = "fgaz@fgaz.me"; email = "fgaz@fgaz.me";
github = "fgaz"; github = "fgaz";
@ -1593,6 +1623,11 @@
github = "FireyFly"; github = "FireyFly";
name = "Jonas Höglund"; name = "Jonas Höglund";
}; };
flexw = {
email = "felix.weilbach@t-online.de";
github = "FlexW";
name = "Felix Weilbach";
};
flokli = { flokli = {
email = "flokli@flokli.de"; email = "flokli@flokli.de";
github = "flokli"; github = "flokli";
@ -1765,6 +1800,11 @@
github = "Gerschtli"; github = "Gerschtli";
name = "Tobias Happ"; name = "Tobias Happ";
}; };
ggpeti = {
email = "ggpeti@gmail.com";
github = "ggpeti";
name = "Peter Ferenczy";
};
gilligan = { gilligan = {
email = "tobias.pflug@gmail.com"; email = "tobias.pflug@gmail.com";
github = "gilligan"; github = "gilligan";
@ -2365,6 +2405,11 @@
github = "juliendehos"; github = "juliendehos";
name = "Julien Dehos"; name = "Julien Dehos";
}; };
justinwoo = {
email = "moomoowoo@gmail.com";
github = "justinwoo";
name = "Justin Woo";
};
jwiegley = { jwiegley = {
email = "johnw@newartisans.com"; email = "johnw@newartisans.com";
github = "jwiegley"; github = "jwiegley";
@ -2658,6 +2703,11 @@
github = "limeytexan"; github = "limeytexan";
name = "Michael Brantley"; name = "Michael Brantley";
}; };
linarcx = {
email = "linarcx@gmail.com";
github = "linarcx";
name = "Kaveh Ahangar";
};
linc01n = { linc01n = {
email = "git@lincoln.hk"; email = "git@lincoln.hk";
github = "linc01n"; github = "linc01n";
@ -2698,6 +2748,11 @@
github = "lo1tuma"; github = "lo1tuma";
name = "Mathias Schreck"; name = "Mathias Schreck";
}; };
loewenheim = {
email = "loewenheim@mailbox.org";
github = "loewenheim";
name = "Sebastian Zivota";
};
lopsided98 = { lopsided98 = {
email = "benwolsieffer@gmail.com"; email = "benwolsieffer@gmail.com";
github = "lopsided98"; github = "lopsided98";
@ -2738,6 +2793,11 @@
github = "lucas8"; github = "lucas8";
name = "Luc Chabassier"; name = "Luc Chabassier";
}; };
lucus16 = {
email = "lars.jellema@gmail.com";
github = "Lucus16";
name = "Lars Jellema";
};
ludo = { ludo = {
email = "ludo@gnu.org"; email = "ludo@gnu.org";
github = "civodul"; github = "civodul";
@ -2845,6 +2905,11 @@
email = "markus@wotringer.de"; email = "markus@wotringer.de";
name = "Markus Wotringer"; name = "Markus Wotringer";
}; };
marius851000 = {
email = "mariusdavid@laposte.net";
name = "Marius David";
github = "marius851000";
};
marsam = { marsam = {
email = "marsam@users.noreply.github.com"; email = "marsam@users.noreply.github.com";
github = "marsam"; github = "marsam";
@ -2870,6 +2935,11 @@
github = "mathnerd314"; github = "mathnerd314";
name = "Mathnerd314"; name = "Mathnerd314";
}; };
matklad = {
email = "aleksey.kladov@gmail.com";
github = "matklad";
name = "matklad";
};
matthewbauer = { matthewbauer = {
email = "mjbauer95@gmail.com"; email = "mjbauer95@gmail.com";
github = "matthewbauer"; github = "matthewbauer";
@ -2955,6 +3025,11 @@
github = "meisternu"; github = "meisternu";
name = "Matt Miemiec"; name = "Matt Miemiec";
}; };
melchips = {
email = "truphemus.francois@gmail.com";
github = "melchips";
name = "Francois Truphemus";
};
melsigl = { melsigl = {
email = "melanie.bianca.sigl@gmail.com"; email = "melanie.bianca.sigl@gmail.com";
github = "melsigl"; github = "melsigl";
@ -2969,6 +3044,11 @@
email = "softs@metabarcoding.org"; email = "softs@metabarcoding.org";
name = "Celine Mercier"; name = "Celine Mercier";
}; };
mfossen = {
email = "msfossen@gmail.com";
github = "mfossen";
name = "Mitchell Fossen";
};
mgdelacroix = { mgdelacroix = {
email = "mgdelacroix@gmail.com"; email = "mgdelacroix@gmail.com";
github = "mgdelacroix"; github = "mgdelacroix";
@ -3237,6 +3317,11 @@
github = "mvnetbiz"; github = "mvnetbiz";
name = "Matt Votava"; name = "Matt Votava";
}; };
mwilsoninsight = {
email = "max.wilson@insight.com";
github = "mwilsoninsight";
name = "Max Wilson";
};
myrl = { myrl = {
email = "myrl.0xf@gmail.com"; email = "myrl.0xf@gmail.com";
github = "myrl"; github = "myrl";
@ -3376,6 +3461,11 @@
github = "nocoolnametom"; github = "nocoolnametom";
name = "Tom Doggett"; name = "Tom Doggett";
}; };
nomeata = {
email = "mail@joachim-breitner.de";
github = "nomeata";
name = "Joachim Breitner";
};
noneucat = { noneucat = {
email = "andy@lolc.at"; email = "andy@lolc.at";
github = "noneucat"; github = "noneucat";
@ -3469,6 +3559,11 @@
github = "olynch"; github = "olynch";
name = "Owen Lynch"; name = "Owen Lynch";
}; };
omnipotententity = {
email = "omnipotententity@gmail.com";
github = "omnipotententity";
name = "Michael Reilly";
};
OPNA2608 = { OPNA2608 = {
email = "christoph.neidahl@gmail.com"; email = "christoph.neidahl@gmail.com";
github = "OPNA2608"; github = "OPNA2608";
@ -3722,6 +3817,11 @@
github = "polyrod"; github = "polyrod";
name = "Maurizio Di Pietro"; name = "Maurizio Di Pietro";
}; };
pombeirp = {
email = "nix@endgr.33mail.com";
github = "PombeirP";
name = "Pedro Pombeiro";
};
pradeepchhetri = { pradeepchhetri = {
email = "pradeep.chhetri89@gmail.com"; email = "pradeep.chhetri89@gmail.com";
github = "pradeepchhetri"; github = "pradeepchhetri";
@ -3822,6 +3922,16 @@
fingerprint = "7573 56D7 79BB B888 773E 415E 736C CDF9 EF51 BD97"; fingerprint = "7573 56D7 79BB B888 773E 415E 736C CDF9 EF51 BD97";
}]; }];
}; };
rafaelgg = {
email = "rafael.garcia.gallego@gmail.com";
github = "rafaelgg";
name = "Rafael García";
};
raquelgb = {
email = "raquel.garcia.bautista@gmail.com";
github = "raquelgb";
name = "Raquel García";
};
ragge = { ragge = {
email = "r.dahlen@gmail.com"; email = "r.dahlen@gmail.com";
github = "ragnard"; github = "ragnard";
@ -3961,6 +4071,11 @@
github = "rittelle"; github = "rittelle";
name = "Lennart Rittel"; name = "Lennart Rittel";
}; };
rixed = {
email = "rixed-github@happyleptic.org";
github = "rixed";
name = "Cedric Cellier";
};
rkoe = { rkoe = {
email = "rk@simple-is-better.org"; email = "rk@simple-is-better.org";
github = "rkoe"; github = "rkoe";
@ -4041,6 +4156,11 @@
github = "rprospero"; github = "rprospero";
name = "Adam Washington"; name = "Adam Washington";
}; };
rps = {
email = "robbpseaton@gmail.com";
github = "robertseaton";
name = "Robert P. Seaton";
};
rszibele = { rszibele = {
email = "richard@szibele.com"; email = "richard@szibele.com";
github = "rszibele"; github = "rszibele";
@ -4469,6 +4589,11 @@
github = "stesie"; github = "stesie";
name = "Stefan Siegl"; name = "Stefan Siegl";
}; };
steve-chavez = {
email = "stevechavezast@gmail.com";
github = "steve-chavez";
name = "Steve Chávez";
};
steveej = { steveej = {
email = "mail@stefanjunker.de"; email = "mail@stefanjunker.de";
github = "steveej"; github = "steveej";
@ -4494,6 +4619,11 @@
github = "stumoss"; github = "stumoss";
name = "Stuart Moss"; name = "Stuart Moss";
}; };
suhr = {
email = "suhr@i2pmail.org";
github = "suhr";
name = "Сухарик";
};
SuprDewd = { SuprDewd = {
email = "suprdewd@gmail.com"; email = "suprdewd@gmail.com";
github = "SuprDewd"; github = "SuprDewd";
@ -4653,6 +4783,11 @@
github = "teozkr"; github = "teozkr";
name = "Teo Klestrup Röijezon"; name = "Teo Klestrup Röijezon";
}; };
terlar = {
email = "terlar@gmail.com";
github = "terlar";
name = "Terje Larsen";
};
teto = { teto = {
email = "mcoudron@hotmail.com"; email = "mcoudron@hotmail.com";
github = "teto"; github = "teto";
@ -5142,6 +5277,11 @@
github = "xnwdd"; github = "xnwdd";
name = "Guillermo NWDD"; name = "Guillermo NWDD";
}; };
xrelkd = {
email = "46590321+xrelkd@users.noreply.github.com";
github = "xrelkd";
name = "xrelkd";
};
xurei = { xurei = {
email = "olivier.bourdoux@gmail.com"; email = "olivier.bourdoux@gmail.com";
github = "xurei"; github = "xurei";
@ -5316,4 +5456,19 @@
github = "minijackson"; github = "minijackson";
name = "Rémi Nicole"; name = "Rémi Nicole";
}; };
shazow = {
email = "andrey.petrov@shazow.net";
github = "shazow";
name = "Andrey Petrov";
};
freezeboy = {
email = "freezeboy@users.noreply.github.com";
github = "freezeboy";
name = "freezeboy";
};
tesq0 = {
email = "mikolaj.galkowski@gmail.com";
github = "tesq0";
name = "Mikolaj Galkowski";
};
} }

View File

@ -14,12 +14,13 @@ fi
tmp=$(mktemp -d) tmp=$(mktemp -d)
pushd $tmp >/dev/null pushd $tmp >/dev/null
wget -nH -r -c --no-parent "${WGET_ARGS[@]}" >/dev/null wget -nH -r -c --no-parent "${WGET_ARGS[@]}" -A '*.tar.xz.sha256' -A '*.mirrorlist' >/dev/null
find -type f -name '*.mirrorlist' -delete
csv=$(mktemp) csv=$(mktemp)
find . -type f | while read src; do find . -type f | while read src; do
# Sanitize file name # Sanitize file name
filename=$(basename "$src" | tr '@' '_') filename=$(gawk '{ print $2 }' "$src" | tr '@' '_')
nameVersion="${filename%.tar.*}" nameVersion="${filename%.tar.*}"
name=$(echo "$nameVersion" | sed -e 's,-[[:digit:]].*,,' | sed -e 's,-opensource-src$,,' | sed -e 's,-everywhere-src$,,') name=$(echo "$nameVersion" | sed -e 's,-[[:digit:]].*,,' | sed -e 's,-opensource-src$,,' | sed -e 's,-everywhere-src$,,')
version=$(echo "$nameVersion" | sed -e 's,^\([[:alpha:]][[:alnum:]]*-\)\+,,') version=$(echo "$nameVersion" | sed -e 's,^\([[:alpha:]][[:alnum:]]*-\)\+,,')
@ -38,8 +39,8 @@ gawk -F , "{ print \$1 }" $csv | sort | uniq | while read name; do
latestVersion=$(echo "$versions" | sort -rV | head -n 1) latestVersion=$(echo "$versions" | sort -rV | head -n 1)
src=$(gawk -F , "/^$name,$latestVersion,/ { print \$3 }" $csv) src=$(gawk -F , "/^$name,$latestVersion,/ { print \$3 }" $csv)
filename=$(gawk -F , "/^$name,$latestVersion,/ { print \$4 }" $csv) filename=$(gawk -F , "/^$name,$latestVersion,/ { print \$4 }" $csv)
url="${src:2}" url="$(dirname "${src:2}")/$filename"
sha256=$(nix-hash --type sha256 --base32 --flat "$src") sha256=$(gawk '{ print $1 }' "$src")
cat >>"$SRCS" <<EOF cat >>"$SRCS" <<EOF
$name = { $name = {
version = "$latestVersion"; version = "$latestVersion";

View File

@ -6,13 +6,14 @@ debug: generated manual-combined.xml
manual-combined.xml: generated *.xml **/*.xml manual-combined.xml: generated *.xml **/*.xml
rm -f ./manual-combined.xml rm -f ./manual-combined.xml
nix-shell --packages xmloscopy \ nix-shell --pure -Q --packages xmloscopy \
--run "xmloscopy --docbook5 ./manual.xml ./manual-combined.xml" --run "xmloscopy --docbook5 ./manual.xml ./manual-combined.xml"
.PHONY: format .PHONY: format
format: format:
find ../../ -iname '*.xml' -type f -print0 | xargs -0 -I{} -n1 \ nix-shell --pure -Q --packages xmlformat \
xmlformat --config-file "../xmlformat.conf" -i {} --run "find ../../ -iname '*.xml' -type f -print0 | xargs -0 -I{} -n1 \
xmlformat --config-file '../xmlformat.conf' -i {}"
.PHONY: fix-misc-xml .PHONY: fix-misc-xml
fix-misc-xml: fix-misc-xml:

View File

@ -200,8 +200,9 @@ swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_10; <xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_10;
</programlisting> </programlisting>
The latter option definition changes the default PostgreSQL package used The latter option definition changes the default PostgreSQL package used
by NixOSs PostgreSQL service to 10.x. For more information on packages, by NixOSs PostgreSQL service to 10.x. For more information on
including how to add new ones, see <xref linkend="sec-custom-packages"/>. packages, including how to add new ones, see
<xref linkend="sec-custom-packages"/>.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>

View File

@ -21,6 +21,7 @@
<xi:include href="xfce.xml" /> <xi:include href="xfce.xml" />
<xi:include href="networking.xml" /> <xi:include href="networking.xml" />
<xi:include href="linux-kernel.xml" /> <xi:include href="linux-kernel.xml" />
<xi:include href="matrix.xml" />
<xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" /> <xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
<xi:include href="profiles.xml" /> <xi:include href="profiles.xml" />
<xi:include href="kubernetes.xml" /> <xi:include href="kubernetes.xml" />

View File

@ -27,8 +27,13 @@ nixos.firefox firefox-23.0 Mozilla Firefox - the browser, reloaded
<replaceable>...</replaceable> <replaceable>...</replaceable>
</screen> </screen>
The first column in the output is the <emphasis>attribute name</emphasis>, The first column in the output is the <emphasis>attribute name</emphasis>,
such as <literal>nixos.thunderbird</literal>. (The <literal>nixos</literal> such as <literal>nixos.thunderbird</literal>.
prefix allows distinguishing between different channels that you might have.) </para>
<para>
Note: the <literal>nixos</literal> prefix tells us that we want to get the
package from the <literal>nixos</literal> channel and works only in CLI tools.
In declarative configuration use <literal>pkgs</literal> prefix (variable).
</para> </para>
<para> <para>

View File

@ -4,15 +4,13 @@
version="5.0" version="5.0"
xml:id="sec-kubernetes"> xml:id="sec-kubernetes">
<title>Kubernetes</title> <title>Kubernetes</title>
<para> <para>
The NixOS Kubernetes module is a collective term for a handful of The NixOS Kubernetes module is a collective term for a handful of individual
individual submodules implementing the Kubernetes cluster components. submodules implementing the Kubernetes cluster components.
</para> </para>
<para> <para>
There are generally two ways of enabling Kubernetes on NixOS. There are generally two ways of enabling Kubernetes on NixOS. One way is to
One way is to enable and configure cluster components appropriately by hand: enable and configure cluster components appropriately by hand:
<programlisting> <programlisting>
services.kubernetes = { services.kubernetes = {
apiserver.enable = true; apiserver.enable = true;
@ -33,95 +31,82 @@ services.kubernetes = {
<programlisting> <programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "node" ]; <xref linkend="opt-services.kubernetes.roles"/> = [ "node" ];
</programlisting> </programlisting>
Assigning both the master and node roles is usable if you want a single Assigning both the master and node roles is usable if you want a single node
node Kubernetes cluster for dev or testing purposes: Kubernetes cluster for dev or testing purposes:
<programlisting> <programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "master" "node" ]; <xref linkend="opt-services.kubernetes.roles"/> = [ "master" "node" ];
</programlisting> </programlisting>
Note: Assigning either role will also default both Note: Assigning either role will also default both
<xref linkend="opt-services.kubernetes.flannel.enable"/> and <xref linkend="opt-services.kubernetes.flannel.enable"/> and
<xref linkend="opt-services.kubernetes.easyCerts"/> to true. <xref linkend="opt-services.kubernetes.easyCerts"/> to true. This sets up
This sets up flannel as CNI and activates automatic PKI bootstrapping. flannel as CNI and activates automatic PKI bootstrapping.
</para> </para>
<para> <para>
As of kubernetes 1.10.X it has been deprecated to open As of kubernetes 1.10.X it has been deprecated to open non-tls-enabled ports
non-tls-enabled ports on kubernetes components. Thus, from NixOS 19.03 all on kubernetes components. Thus, from NixOS 19.03 all plain HTTP ports have
plain HTTP ports have been disabled by default. been disabled by default. While opening insecure ports is still possible, it
While opening insecure ports is still possible, it is recommended not to is recommended not to bind these to other interfaces than loopback. To
bind these to other interfaces than loopback. re-enable the insecure port on the apiserver, see options:
<xref linkend="opt-services.kubernetes.apiserver.insecurePort"/> and
To re-enable the insecure port on the apiserver, see options: <xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress"/>
<xref linkend="opt-services.kubernetes.apiserver.insecurePort"/>
and
<xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress"/>
</para> </para>
<note> <note>
<para> <para>
As of NixOS 19.03, it is mandatory to configure: As of NixOS 19.03, it is mandatory to configure:
<xref linkend="opt-services.kubernetes.masterAddress"/>. <xref linkend="opt-services.kubernetes.masterAddress"/>. The masterAddress
The masterAddress must be resolveable and routeable by all cluster nodes. must be resolveable and routeable by all cluster nodes. In single node
In single node clusters, this can be set to <literal>localhost</literal>. clusters, this can be set to <literal>localhost</literal>.
</para> </para>
</note> </note>
<para> <para>
Role-based access control (RBAC) authorization mode is enabled by default. Role-based access control (RBAC) authorization mode is enabled by default.
This means that anonymous requests to the apiserver secure port will This means that anonymous requests to the apiserver secure port will
expectedly cause a permission denied error. All cluster components must expectedly cause a permission denied error. All cluster components must
therefore be configured with x509 certificates for two-way tls communication. therefore be configured with x509 certificates for two-way tls communication.
The x509 certificate subject section determines the roles and permissions The x509 certificate subject section determines the roles and permissions
granted by the apiserver to perform clusterwide or namespaced operations. granted by the apiserver to perform clusterwide or namespaced operations. See
See also: also:
<link <link
xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/rbac/"> xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/rbac/">
Using RBAC Authorization</link>. Using RBAC Authorization</link>.
</para> </para>
<para>
The NixOS kubernetes module provides an option for automatic certificate
bootstrapping and configuration,
<xref linkend="opt-services.kubernetes.easyCerts"/>.
The PKI bootstrapping process involves setting up a certificate authority
(CA) daemon (cfssl) on the kubernetes master node. cfssl generates a CA-cert
for the cluster, and uses the CA-cert for signing subordinate certs issued to
each of the cluster components. Subsequently, the certmgr daemon monitors
active certificates and renews them when needed. For single node Kubernetes
clusters, setting <xref linkend="opt-services.kubernetes.easyCerts"/> = true
is sufficient and no further action is required. For joining extra node
machines to an existing cluster on the other hand, establishing initial trust
is mandatory.
</para>
<para> <para>
To add new nodes to the cluster: The NixOS kubernetes module provides an option for automatic certificate
On any (non-master) cluster node where bootstrapping and configuration,
<xref linkend="opt-services.kubernetes.easyCerts"/> is enabled, the helper <xref linkend="opt-services.kubernetes.easyCerts"/>. The PKI bootstrapping
script <literal>nixos-kubernetes-node-join</literal> is available on PATH. process involves setting up a certificate authority (CA) daemon (cfssl) on
Given a token on stdin, it will copy the token to the kubernetes the kubernetes master node. cfssl generates a CA-cert for the cluster, and
secrets directory and restart the certmgr service. As requested uses the CA-cert for signing subordinate certs issued to each of the cluster
certificates are issued, the script will restart kubernetes cluster components. Subsequently, the certmgr daemon monitors active certificates and
components as needed for them to pick up new keypairs. renews them when needed. For single node Kubernetes clusters, setting
<xref linkend="opt-services.kubernetes.easyCerts"/> = true is sufficient and
no further action is required. For joining extra node machines to an existing
cluster on the other hand, establishing initial trust is mandatory.
</para>
<para>
To add new nodes to the cluster: On any (non-master) cluster node where
<xref linkend="opt-services.kubernetes.easyCerts"/> is enabled, the helper
script <literal>nixos-kubernetes-node-join</literal> is available on PATH.
Given a token on stdin, it will copy the token to the kubernetes secrets
directory and restart the certmgr service. As requested certificates are
issued, the script will restart kubernetes cluster components as needed for
them to pick up new keypairs.
</para> </para>
<note> <note>
<para> <para>
Multi-master (HA) clusters are not supported by the easyCerts module. Multi-master (HA) clusters are not supported by the easyCerts module.
</para> </para>
</note> </note>
<para> <para>
In order to interact with an RBAC-enabled cluster as an administrator, one In order to interact with an RBAC-enabled cluster as an administrator, one
needs to have cluster-admin privileges. By default, when easyCerts is needs to have cluster-admin privileges. By default, when easyCerts is
enabled, a cluster-admin kubeconfig file is generated and linked into enabled, a cluster-admin kubeconfig file is generated and linked into
<literal>/etc/kubernetes/cluster-admin.kubeconfig</literal> as determined by <literal>/etc/kubernetes/cluster-admin.kubeconfig</literal> as determined by
<xref linkend="opt-services.kubernetes.pki.etcClusterAdminKubeconfig"/>. <xref linkend="opt-services.kubernetes.pki.etcClusterAdminKubeconfig"/>.
<literal>export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig</literal> <literal>export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig</literal>
will make kubectl use this kubeconfig to access and authenticate the cluster. will make kubectl use this kubeconfig to access and authenticate the cluster.
The cluster-admin kubeconfig references an auto-generated keypair owned by The cluster-admin kubeconfig references an auto-generated keypair owned by
root. Thus, only root on the kubernetes master may obtain cluster-admin root. Thus, only root on the kubernetes master may obtain cluster-admin
rights by means of this file. rights by means of this file.
</para> </para>
</chapter> </chapter>

View File

@ -0,0 +1,203 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-services-matrix">
<title>Matrix</title>
<para>
<link xlink:href="https://matrix.org/">Matrix</link> is an open standard for
interoperable, decentralised, real-time communication over IP. It can be used
to power Instant Messaging, VoIP/WebRTC signalling, Internet of Things
communication - or anywhere you need a standard HTTP API for publishing and
subscribing to data whilst tracking the conversation history.
</para>
<para>
This chapter will show you how to set up your own, self-hosted Matrix
homeserver using the Synapse reference homeserver, and how to serve your own
copy of the Riot web client. See the
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> overview page for links to Riot Apps for Android and iOS,
desktop clients, as well as bridges to other networks and other projects
around Matrix.
</para>
<section xml:id="module-services-matrix-synapse">
<title>Synapse Homeserver</title>
<para>
<link xlink:href="https://github.com/matrix-org/synapse">Synapse</link> is
the reference homeserver implementation of Matrix from the core development
team at matrix.org. The following configuration example will set up a
synapse server for the <literal>example.org</literal> domain, served from
the host <literal>myhostname.example.org</literal>. For more information,
please refer to the
<link xlink:href="https://github.com/matrix-org/synapse#synapse-installation">
installation instructions of Synapse </link>.
<programlisting>
let
fqdn =
let
join = hostName: domain: hostName + optionalString (domain != null) ".${domain}";
in join config.networking.hostName config.networking.domain;
in {
networking = {
hostName = "myhostname";
domain = "example.org";
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
services.nginx = {
enable = true;
# only recommendedProxySettings and recommendedGzipSettings are strictly required,
# but the rest make sense as well
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
virtualHosts = {
# This host section can be placed on a different host than the rest,
# i.e. to delegate from the host being accessible as ${config.networking.domain}
# to another host actually running the Matrix homeserver.
"${config.networking.domain}" = {
locations."= /.well-known/matrix/server".extraConfig =
let
# use 443 instead of the default 8448 port to unite
# the client-server and server-server port for simplicity
server = { "m.server" = "${fqdn}:443"; };
in ''
add_header Content-Type application/json;
return 200 '${builtins.toJSON server}';
'';
locations."= /.well-known/matrix/client".extraConfig =
let
client = {
"m.homeserver" = { "base_url" = "https://${fqdn}"; };
"m.identity_server" = { "base_url" = "https://vector.im"; };
};
# ACAO required to allow riot-web on any URL to request this json file
in ''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '${builtins.toJSON client}';
'';
};
# Reverse proxy for Matrix client-server and server-server communication
${fqdn} = {
enableACME = true;
forceSSL = true;
# Or do a redirect instead of the 404, or whatever is appropriate for you.
# But do not put a Matrix Web client here! See the Riot Web section below.
locations."/".extraConfig = ''
return 404;
'';
# forward all Matrix API calls to the synapse Matrix homeserver
locations."/_matrix" = {
proxyPass = "http://[::1]:8008";
};
};
};
};
services.matrix-synapse = {
enable = true;
server_name = config.networking.domain;
listeners = [
{
port = 8008;
bind_address = "::1";
type = "http";
tls = false;
x_forwarded = true;
resources = [
{ names = [ "client" "federation" ]; compress = false; }
];
}
];
};
};
</programlisting>
</para>
<para>
If the <code>A</code> and <code>AAAA</code> DNS records on
<literal>example.org</literal> do not point on the same host as the records
for <code>myhostname.example.org</code>, you can easily move the
<code>/.well-known</code> virtualHost section of the code to the host that
is serving <literal>example.org</literal>, while the rest stays on
<literal>myhostname.example.org</literal> with no other changes required.
This pattern also allows to seamlessly move the homeserver from
<literal>myhostname.example.org</literal> to
<literal>myotherhost.example.org</literal> by only changing the
<code>/.well-known</code> redirection target.
</para>
<para>
If you want to run a server with public registration by anybody, you can
then enable <option>services.matrix-synapse.enable_registration =
true;</option>. Otherwise, or you can generate a registration secret with
<command>pwgen -s 64 1</command> and set it with
<option>services.matrix-synapse.registration_shared_secret</option>. To
create a new user or admin, run the following after you have set the secret
and have rebuilt NixOS:
<programlisting>
$ nix run nixpkgs.matrix-synapse
$ register_new_matrix_user -k &lt;your-registration-shared-secret&gt; http://localhost:8008
New user localpart: &lt;your-username&gt;
Password:
Confirm password:
Make admin [no]:
Success!
</programlisting>
In the example, this would create a user with the Matrix Identifier
<literal>@your-username:example.org</literal>. Note that the registration
secret ends up in the nix store and therefore is world-readable by any user
on your machine, so it makes sense to only temporarily activate the
<option>registration_shared_secret</option> option until a better solution
for NixOS is in place.
</para>
</section>
<section xml:id="module-services-matrix-riot-web">
<title>Riot Web Client</title>
<para>
<link xlink:href="https://github.com/vector-im/riot-web/">Riot Web</link> is
the reference web client for Matrix and developed by the core team at
matrix.org. The following snippet can be optionally added to the code before
to complete the synapse installation with a web client served at
<code>https://riot.myhostname.example.org</code> and
<code>https://riot.example.org</code>. Alternatively, you can use the hosted
copy at <link xlink:href="https://riot.im/app">https://riot.im/app</link>,
or use other web clients or native client applications. Due to the
<literal>/.well-known</literal> urls set up done above, many clients should
fill in the required connection details automatically when you enter your
Matrix Identifier. See
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> for a list of existing clients and their supported
featureset.
<programlisting>
services.nginx.virtualHosts."riot.${fqdn}" = {
enableACME = true;
forceSSL = true;
serverAliases = [
"riot.${config.networking.domain}"
];
root = pkgs.riot-web;
};
</programlisting>
</para>
<para>
Note that the Riot developers do not recommend running Riot and your Matrix
homeserver on the same fully-qualified domain name for security reasons. In
the example, this means that you should not reuse the
<literal>myhostname.example.org</literal> virtualHost to also serve Riot,
but instead serve it on a different subdomain, like
<literal>riot.example.org</literal> in the example. See the
<link xlink:href="https://github.com/vector-im/riot-web#important-security-note">Riot
Important Security Notes</link> for more information on this subject.
</para>
</section>
</chapter>

View File

@ -112,9 +112,8 @@ true
$ nixos-option <xref linkend="opt-boot.kernelModules"/> $ nixos-option <xref linkend="opt-boot.kernelModules"/>
[ "tun" "ipv6" "loop" <replaceable>...</replaceable> ] [ "tun" "ipv6" "loop" <replaceable>...</replaceable> ]
</screen> </screen>
Interactive exploration of the configuration is possible using Interactive exploration of the configuration is possible using <command>nix
<command>nix repl</command>, a read-eval-print loop for Nix expressions. repl</command>, a read-eval-print loop for Nix expressions. A typical use:
A typical use:
<screen> <screen>
$ nix repl '&lt;nixpkgs/nixos>' $ nix repl '&lt;nixpkgs/nixos>'
@ -127,11 +126,10 @@ nix-repl> map (x: x.hostName) config.<xref linkend="opt-services.httpd.virtualHo
</para> </para>
<para> <para>
While abstracting your configuration, you may find it useful to generate While abstracting your configuration, you may find it useful to generate
modules using code, instead of writing files. The example modules using code, instead of writing files. The example below would have
below would have the same effect as importing a file which sets those the same effect as importing a file which sets those options.
options. <screen>
<screen>
{ config, pkgs, ... }: { config, pkgs, ... }:
let netConfig = { hostName }: { let netConfig = { hostName }: {
@ -143,5 +141,5 @@ nix-repl> map (x: x.hostName) config.<xref linkend="opt-services.httpd.virtualHo
{ imports = [ (netConfig "nixos.localdomain") ]; } { imports = [ (netConfig "nixos.localdomain") ]; }
</screen> </screen>
</para> </para>
</section> </section>

View File

@ -12,14 +12,14 @@
That is to say, expected usage is to add them to the imports list of your That is to say, expected usage is to add them to the imports list of your
<filename>/etc/configuration.nix</filename> as such: <filename>/etc/configuration.nix</filename> as such:
</para> </para>
<programlisting> <programlisting>
imports = [ imports = [
&lt;nixpkgs/nixos/modules/profiles/profile-name.nix&gt; &lt;nixpkgs/nixos/modules/profiles/profile-name.nix&gt;
]; ];
</programlisting> </programlisting>
<para> <para>
Even if some of these profiles seem only useful in the context of Even if some of these profiles seem only useful in the context of install
install media, many are actually intended to be used in real installs. media, many are actually intended to be used in real installs.
</para> </para>
<para> <para>
What follows is a brief explanation on the purpose and use-case for each What follows is a brief explanation on the purpose and use-case for each

View File

@ -1,15 +1,16 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-all-hardware"> xml:id="sec-profile-all-hardware">
<title>All Hardware</title> <title>All Hardware</title>
<para> <para>
Enables all hardware supported by NixOS: i.e., all firmware is Enables all hardware supported by NixOS: i.e., all firmware is included, and
included, and all devices from which one may boot are enabled in the initrd. all devices from which one may boot are enabled in the initrd. Its primary
Its primary use is in the NixOS installation CDs. use is in the NixOS installation CDs.
</para> </para>
<para> <para>
The enabled kernel modules include support for SATA and PATA, SCSI The enabled kernel modules include support for SATA and PATA, SCSI
(partially), USB, Firewire (untested), Virtio (QEMU, KVM, etc.), VMware, and (partially), USB, Firewire (untested), Virtio (QEMU, KVM, etc.), VMware, and

View File

@ -1,15 +1,15 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-base"> xml:id="sec-profile-base">
<title>Base</title> <title>Base</title>
<para> <para>
Defines the software packages included in the "minimal" Defines the software packages included in the "minimal" installation CD. It
installation CD. It installs several utilities useful in a simple recovery or installs several utilities useful in a simple recovery or install media, such
install media, such as a text-mode web browser, and tools for manipulating as a text-mode web browser, and tools for manipulating block devices,
block devices, networking, hardware diagnostics, and filesystems (with their networking, hardware diagnostics, and filesystems (with their respective
respective kernel modules). kernel modules).
</para> </para>
</section> </section>

View File

@ -1,14 +1,14 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-clone-config"> xml:id="sec-profile-clone-config">
<title>Clone Config</title> <title>Clone Config</title>
<para> <para>
This profile is used in installer images. This profile is used in installer images. It provides an editable
It provides an editable configuration.nix that imports all the modules that configuration.nix that imports all the modules that were also used when
were also used when creating the image in the first place. creating the image in the first place. As a result it allows users to edit
As a result it allows users to edit and rebuild the live-system. and rebuild the live-system.
</para> </para>
</section> </section>

View File

@ -1,13 +1,15 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-demo"> xml:id="sec-profile-demo">
<title>Demo</title> <title>Demo</title>
<para> <para>
This profile just enables a <systemitem class="username">demo</systemitem> user, with password <literal>demo</literal>, uid <literal>1000</literal>, <systemitem class="groupname">wheel</systemitem> This profile just enables a <systemitem class="username">demo</systemitem>
group and <link linkend="opt-services.xserver.displayManager.sddm.autoLogin"> user, with password <literal>demo</literal>, uid <literal>1000</literal>,
autologin in the SDDM display manager</link>. <systemitem class="groupname">wheel</systemitem> group and
<link linkend="opt-services.xserver.displayManager.sddm.autoLogin"> autologin
in the SDDM display manager</link>.
</para> </para>
</section> </section>

View File

@ -1,15 +1,16 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-docker-container"> xml:id="sec-profile-docker-container">
<title>Docker Container</title> <title>Docker Container</title>
<para> <para>
This is the profile from which the Docker images are generated. It prepares a This is the profile from which the Docker images are generated. It prepares a
working system by importing the <link linkend="sec-profile-minimal">Minimal</link> and working system by importing the
<link linkend="sec-profile-clone-config">Clone Config</link> profiles, and setting appropriate <link linkend="sec-profile-minimal">Minimal</link> and
configuration options that are useful inside a container context, like <link linkend="sec-profile-clone-config">Clone Config</link> profiles, and
<xref linkend="opt-boot.isContainer"/>. setting appropriate configuration options that are useful inside a container
context, like <xref linkend="opt-boot.isContainer"/>.
</para> </para>
</section> </section>

View File

@ -1,20 +1,21 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-graphical"> xml:id="sec-profile-graphical">
<title>Graphical</title> <title>Graphical</title>
<para> <para>
Defines a NixOS configuration with the Plasma 5 desktop. It's used by the Defines a NixOS configuration with the Plasma 5 desktop. It's used by the
graphical installation CD. graphical installation CD.
</para> </para>
<para> <para>
It sets <xref linkend="opt-services.xserver.enable"/>, It sets <xref linkend="opt-services.xserver.enable"/>,
<xref linkend="opt-services.xserver.displayManager.sddm.enable"/>, <xref linkend="opt-services.xserver.displayManager.sddm.enable"/>,
<xref linkend="opt-services.xserver.desktopManager.plasma5.enable"/> ( <xref linkend="opt-services.xserver.desktopManager.plasma5.enable"/> (
<link linkend="opt-services.xserver.desktopManager.plasma5.enableQt4Support"> <link linkend="opt-services.xserver.desktopManager.plasma5.enableQt4Support">
without Qt4 Support</link>), and without Qt4 Support</link>), and
<xref linkend="opt-services.xserver.libinput.enable"/> to true. It also <xref linkend="opt-services.xserver.libinput.enable"/> to true. It also
includes glxinfo and firefox in the system packages list. includes glxinfo and firefox in the system packages list.
</para> </para>

View File

@ -1,22 +1,24 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-hardened"> xml:id="sec-profile-hardened">
<title>Hardened</title> <title>Hardened</title>
<para> <para>
A profile with most (vanilla) hardening options enabled by default, A profile with most (vanilla) hardening options enabled by default,
potentially at the cost of features and performance. potentially at the cost of features and performance.
</para> </para>
<para> <para>
This includes a hardened kernel, and limiting the system information This includes a hardened kernel, and limiting the system information
available to processes through the <filename>/sys</filename> and available to processes through the <filename>/sys</filename> and
<filename>/proc</filename> filesystems. It also disables the User Namespaces <filename>/proc</filename> filesystems. It also disables the User Namespaces
feature of the kernel, which stops Nix from being able to build anything feature of the kernel, which stops Nix from being able to build anything
    (this particular setting can be overridden via	 (this particular setting can be overridden via
<xref linkend="opt-security.allowUserNamespaces"/>). See the <literal <xref linkend="opt-security.allowUserNamespaces"/>). See the
<literal
xlink:href="https://github.com/nixos/nixpkgs/tree/master/nixos/modules/profiles/hardened.nix"> xlink:href="https://github.com/nixos/nixpkgs/tree/master/nixos/modules/profiles/hardened.nix">
profile source</literal> for further detail on which settings are altered. profile source</literal> for further detail on which settings are altered.
</para> </para>
</section> </section>

View File

@ -1,18 +1,19 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-headless"> xml:id="sec-profile-headless">
<title>Headless</title> <title>Headless</title>
<para> <para>
Common configuration for headless machines (e.g., Amazon EC2 instances). Common configuration for headless machines (e.g., Amazon EC2 instances).
</para> </para>
<para> <para>
Disables <link linkend="opt-sound.enable">sound</link>, Disables <link linkend="opt-sound.enable">sound</link>,
<link linkend="opt-boot.vesa">vesa</link>, serial consoles, <link linkend="opt-boot.vesa">vesa</link>, serial consoles,
<link linkend="opt-systemd.enableEmergencyMode">emergency mode</link>, <link linkend="opt-systemd.enableEmergencyMode">emergency mode</link>,
<link linkend="opt-boot.loader.grub.splashImage">grub splash images</link> and <link linkend="opt-boot.loader.grub.splashImage">grub splash images</link>
configures the kernel to reboot automatically on panic. and configures the kernel to reboot automatically on panic.
</para> </para>
</section> </section>

View File

@ -1,31 +1,34 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-installation-device"> xml:id="sec-profile-installation-device">
<title>Installation Device</title> <title>Installation Device</title>
<para> <para>
Provides a basic configuration for installation devices like CDs. This means Provides a basic configuration for installation devices like CDs. This means
enabling hardware scans, using the <link linkend="sec-profile-clone-config"> enabling hardware scans, using the <link linkend="sec-profile-clone-config">
Clone Config profile</link> to guarantee Clone Config profile</link> to guarantee
<filename>/etc/nixos/configuration.nix</filename> exists (for <filename>/etc/nixos/configuration.nix</filename> exists (for
<command>nixos-rebuild</command> to work), a copy of the Nixpkgs channel <command>nixos-rebuild</command> to work), a copy of the Nixpkgs channel
snapshot used to create the install media. snapshot used to create the install media.
</para> </para>
<para> <para>
Additionally, documentation for <link linkend="opt-documentation.enable"> Additionally, documentation for <link linkend="opt-documentation.enable">
Nixpkgs</link> and <link linkend="opt-documentation.nixos.enable">NixOS Nixpkgs</link> and <link linkend="opt-documentation.nixos.enable">NixOS
</link> are forcefully enabled (to override the </link> are forcefully enabled (to override the
<link linkend="sec-profile-minimal">Minimal profile</link> preference); the <link linkend="sec-profile-minimal">Minimal profile</link> preference); the
NixOS manual is shown automatically on TTY 8, sudo and udisks are disabled. NixOS manual is shown automatically on TTY 8, sudo and udisks are disabled.
Autologin is enabled as root. Autologin is enabled as root.
</para> </para>
<para> <para>
A message is shown to the user to start a display manager if needed, A message is shown to the user to start a display manager if needed, ssh with
ssh with <xref linkend="opt-services.openssh.permitRootLogin"/> are enabled (but <xref linkend="opt-services.openssh.permitRootLogin"/> are enabled (but
doesn't autostart). WPA Supplicant is also enabled without autostart. doesn't autostart). WPA Supplicant is also enabled without autostart.
</para> </para>
<para> <para>
Finally, vim is installed, root is set to not have a password, the kernel is Finally, vim is installed, root is set to not have a password, the kernel is
made more silent for remote public IP installs, and several settings are made more silent for remote public IP installs, and several settings are

View File

@ -1,16 +1,16 @@
<section xmlns="http://docbook.org/ns/docbook" <section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0" version="5.0"
xml:id="sec-profile-minimal"> xml:id="sec-profile-minimal">
<title>Minimal</title> <title>Minimal</title>
<para> <para>
This profile defines a small NixOS configuration. It does not contain any This profile defines a small NixOS configuration. It does not contain any
graphical stuff. It's a very short file that enables graphical stuff. It's a very short file that enables
<link linkend="opt-environment.noXlibs">noXlibs</link>, sets <link linkend="opt-environment.noXlibs">noXlibs</link>, sets
<link linkend="opt-i18n.supportedLocales">i18n.supportedLocales</link> <link linkend="opt-i18n.supportedLocales">i18n.supportedLocales</link> to
to only support the user-selected locale, only support the user-selected locale,
<link linkend="opt-documentation.enable">disables packages' documentation <link linkend="opt-documentation.enable">disables packages' documentation
</link>, and <link linkend="opt-sound.enable">disables sound</link>. </link>, and <link linkend="opt-sound.enable">disables sound</link>.
</para> </para>

View File

@ -4,10 +4,12 @@
version="5.0" version="5.0"
xml:id="sec-profile-qemu-guest"> xml:id="sec-profile-qemu-guest">
<title>QEMU Guest</title> <title>QEMU Guest</title>
<para> <para>
This profile contains common configuration for virtual machines running under This profile contains common configuration for virtual machines running under
QEMU (using virtio). QEMU (using virtio).
</para> </para>
<para> <para>
It makes virtio modules available on the initrd, sets the system time from It makes virtio modules available on the initrd, sets the system time from
the hardware clock to work around a bug in qemu-kvm, and the hardware clock to work around a bug in qemu-kvm, and

View File

@ -23,16 +23,14 @@
psk = "abcdefgh"; psk = "abcdefgh";
}; };
"free.wifi" = {}; "free.wifi" = {};
} };
</programlisting> </programlisting>
Be aware that keys will be written to the nix store in plaintext! When no Be aware that keys will be written to the nix store in plaintext! When no
networks are set, it will default to using a configuration file at networks are set, it will default to using a configuration file at
<literal>/etc/wpa_supplicant.conf</literal>. You should edit this file <literal>/etc/wpa_supplicant.conf</literal>. You should edit this file
yourself to define wireless networks, WPA keys and so on (see yourself to define wireless networks, WPA keys and so on (see <citerefentry>
<citerefentry> <refentrytitle>wpa_supplicant.conf</refentrytitle>
<refentrytitle>wpa_supplicant.conf</refentrytitle> <manvolnum>5</manvolnum> </citerefentry>).
<manvolnum>5</manvolnum>
</citerefentry>).
</para> </para>
<para> <para>

View File

@ -35,8 +35,8 @@
</para> </para>
<para> <para>
NixOSs default <emphasis>display manager</emphasis> (the program that NixOSs default <emphasis>display manager</emphasis> (the program that
provides a graphical login prompt and manages the X server) is LightDM. You can provides a graphical login prompt and manages the X server) is LightDM. You
select an alternative one by picking one of the following lines: can select an alternative one by picking one of the following lines:
<programlisting> <programlisting>
<xref linkend="opt-services.xserver.displayManager.sddm.enable"/> = true; <xref linkend="opt-services.xserver.displayManager.sddm.enable"/> = true;
<xref linkend="opt-services.xserver.displayManager.slim.enable"/> = true; <xref linkend="opt-services.xserver.displayManager.slim.enable"/> = true;
@ -59,9 +59,16 @@
<screen> <screen>
# systemctl start display-manager.service # systemctl start display-manager.service
</screen> </screen>
</para>
<para>
On 64-bit systems, if you want OpenGL for 32-bit programs such as in Wine,
you should also set the following:
<programlisting>
<xref linkend="opt-hardware.opengl.driSupport32Bit"/> = true;
</programlisting>
</para> </para>
<simplesect xml:id="sec-x11-graphics-cards-nvidia"> <simplesect xml:id="sec-x11-graphics-cards-nvidia">
<title>NVIDIA Graphics Cards</title> <title>Proprietary NVIDIA drivers</title>
<para> <para>
NVIDIA provides a proprietary driver for its graphics cards that has better NVIDIA provides a proprietary driver for its graphics cards that has better
3D performance than the X.org drivers. It is not enabled by default because 3D performance than the X.org drivers. It is not enabled by default because
@ -71,6 +78,7 @@
</programlisting> </programlisting>
Or if you have an older card, you may have to use one of the legacy drivers: Or if you have an older card, you may have to use one of the legacy drivers:
<programlisting> <programlisting>
<xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy390" ];
<xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy340" ]; <xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy340" ];
<xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy304" ]; <xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy304" ];
<xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy173" ]; <xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy173" ];
@ -78,16 +86,9 @@
You may need to reboot after enabling this driver to prevent a clash with You may need to reboot after enabling this driver to prevent a clash with
other kernel modules. other kernel modules.
</para> </para>
<para>
On 64-bit systems, if you want full acceleration for 32-bit programs such as
Wine, you should also set the following:
<programlisting>
<xref linkend="opt-hardware.opengl.driSupport32Bit"/> = true;
</programlisting>
</para>
</simplesect> </simplesect>
<simplesect xml:id="sec-x11--graphics-cards-amd"> <simplesect xml:id="sec-x11--graphics-cards-amd">
<title>AMD Graphics Cards</title> <title>Proprietary AMD drivers</title>
<para> <para>
AMD provides a proprietary driver for its graphics cards that has better 3D AMD provides a proprietary driver for its graphics cards that has better 3D
performance than the X.org drivers. It is not enabled by default because performance than the X.org drivers. It is not enabled by default because
@ -99,11 +100,8 @@
other kernel modules. other kernel modules.
</para> </para>
<para> <para>
On 64-bit systems, if you want full acceleration for 32-bit programs such as Note: for recent AMD GPUs you most likely want to keep either the defaults
Wine, you should also set the following: or <literal>"amdgpu"</literal> (both free).
<programlisting>
<xref linkend="opt-hardware.opengl.driSupport32Bit"/> = true;
</programlisting>
</para> </para>
</simplesect> </simplesect>
<simplesect xml:id="sec-x11-touchpads"> <simplesect xml:id="sec-x11-touchpads">

View File

@ -175,6 +175,12 @@
<literal>git tag -s -a -m &quot;Release 15.09&quot; 15.09</literal> <literal>git tag -s -a -m &quot;Release 15.09&quot; 15.09</literal>
</para> </para>
</listitem> </listitem>
<listitem>
<para>
Update "Chapter 4. Upgrading NixOS" section of the manual to match
new stable release version.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
Update http://nixos.org/nixos/download.html and Update http://nixos.org/nixos/download.html and

View File

@ -77,10 +77,10 @@
Shared folders can be given a name and a path in the host system in the Shared folders can be given a name and a path in the host system in the
VirtualBox settings (Machine / Settings / Shared Folders, then click on the VirtualBox settings (Machine / Settings / Shared Folders, then click on the
"Add" icon). Add the following to the "Add" icon). Add the following to the
<literal>/etc/nixos/configuration.nix</literal> to auto-mount them. If you <literal>/etc/nixos/configuration.nix</literal> to auto-mount them. If you do
do not add <literal>"nofail"</literal>, the system will no boot properly. not add <literal>"nofail"</literal>, the system will no boot properly. The
The same goes for disabling <literal>rngd</literal> which is normally used same goes for disabling <literal>rngd</literal> which is normally used to get
to get randomness but this does not work in virtual machines. randomness but this does not work in virtual machines.
</para> </para>
<programlisting> <programlisting>

View File

@ -54,7 +54,7 @@
<para> <para>
To manually configure the network on the graphical installer, first disable To manually configure the network on the graphical installer, first disable
network-manager with <command>systemctl stop network-manager</command>. network-manager with <command>systemctl stop NetworkManager</command>.
</para> </para>
<para> <para>

View File

@ -14,11 +14,11 @@
<para> <para>
<emphasis>Stable channels</emphasis>, such as <emphasis>Stable channels</emphasis>, such as
<literal <literal
xlink:href="https://nixos.org/channels/nixos-17.03">nixos-17.03</literal>. xlink:href="https://nixos.org/channels/nixos-19.03">nixos-19.03</literal>.
These only get conservative bug fixes and package upgrades. For instance, These only get conservative bug fixes and package upgrades. For instance,
a channel update may cause the Linux kernel on your system to be upgraded a channel update may cause the Linux kernel on your system to be upgraded
from 4.9.16 to 4.9.17 (a minor bug fix), but not from from 4.19.34 to 4.19.38 (a minor bug fix), but not from
4.9.<replaceable>x</replaceable> to 4.11.<replaceable>x</replaceable> (a 4.19.<replaceable>x</replaceable> to 4.20.<replaceable>x</replaceable> (a
major change that has the potential to break things). Stable channels are major change that has the potential to break things). Stable channels are
generally maintained until the next stable branch is created. generally maintained until the next stable branch is created.
</para> </para>
@ -38,7 +38,7 @@
<para> <para>
<emphasis>Small channels</emphasis>, such as <emphasis>Small channels</emphasis>, such as
<literal <literal
xlink:href="https://nixos.org/channels/nixos-17.03-small">nixos-17.03-small</literal> xlink:href="https://nixos.org/channels/nixos-19.03-small">nixos-19.03-small</literal>
or or
<literal <literal
xlink:href="https://nixos.org/channels/nixos-unstable-small">nixos-unstable-small</literal>. xlink:href="https://nixos.org/channels/nixos-unstable-small">nixos-unstable-small</literal>.
@ -63,8 +63,8 @@
<para> <para>
When you first install NixOS, youre automatically subscribed to the NixOS When you first install NixOS, youre automatically subscribed to the NixOS
channel that corresponds to your installation source. For instance, if you channel that corresponds to your installation source. For instance, if you
installed from a 17.03 ISO, you will be subscribed to the installed from a 19.03 ISO, you will be subscribed to the
<literal>nixos-17.03</literal> channel. To see which NixOS channel youre <literal>nixos-19.03</literal> channel. To see which NixOS channel youre
subscribed to, run the following as root: subscribed to, run the following as root:
<screen> <screen>
# nix-channel --list | grep nixos # nix-channel --list | grep nixos
@ -75,13 +75,13 @@ nixos https://nixos.org/channels/nixos-unstable
# nix-channel --add https://nixos.org/channels/<replaceable>channel-name</replaceable> nixos # nix-channel --add https://nixos.org/channels/<replaceable>channel-name</replaceable> nixos
</screen> </screen>
(Be sure to include the <literal>nixos</literal> parameter at the end.) For (Be sure to include the <literal>nixos</literal> parameter at the end.) For
instance, to use the NixOS 17.03 stable channel: instance, to use the NixOS 19.03 stable channel:
<screen> <screen>
# nix-channel --add https://nixos.org/channels/nixos-17.03 nixos # nix-channel --add https://nixos.org/channels/nixos-19.03 nixos
</screen> </screen>
If you have a server, you may want to use the “small” channel instead: If you have a server, you may want to use the “small” channel instead:
<screen> <screen>
# nix-channel --add https://nixos.org/channels/nixos-17.03-small nixos # nix-channel --add https://nixos.org/channels/nixos-19.03-small nixos
</screen> </screen>
And if you want to live on the bleeding edge: And if you want to live on the bleeding edge:
<screen> <screen>
@ -127,7 +127,7 @@ nixos https://nixos.org/channels/nixos-unstable
current channel. (To see when the service runs, see <command>systemctl current channel. (To see when the service runs, see <command>systemctl
list-timers</command>.) You can also specify a channel explicitly, e.g. list-timers</command>.) You can also specify a channel explicitly, e.g.
<programlisting> <programlisting>
<xref linkend="opt-system.autoUpgrade.channel"/> = https://nixos.org/channels/nixos-17.03; <xref linkend="opt-system.autoUpgrade.channel"/> = https://nixos.org/channels/nixos-19.03;
</programlisting> </programlisting>
</para> </para>
</section> </section>

View File

@ -24,8 +24,14 @@
<arg> <arg>
<option>--help</option> <option>--help</option>
</arg> </arg>
<arg>
<option>--option</option>
<replaceable>name</replaceable>
<replaceable>value</replaceable>
</arg>
<arg choice="plain"> <arg choice="plain">
<replaceable>network.nix</replaceable> <replaceable>network.nix</replaceable>
</arg> </arg>
@ -115,6 +121,18 @@
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term>
<option>--option</option> <replaceable>name</replaceable> <replaceable>value</replaceable>
</term>
<listitem>
<para>Set the Nix configuration option
<replaceable>name</replaceable> to <replaceable>value</replaceable>.
This overrides settings in the Nix configuration file (see
<citerefentry><refentrytitle>nix.conf</refentrytitle><manvolnum>5</manvolnum></citerefentry>).
</para>
</listitem>
</varlistentry>
</variablelist> </variablelist>
</refsection> </refsection>
</refentry> </refentry>

View File

@ -13,18 +13,18 @@
</refnamediv> </refnamediv>
<refsynopsisdiv> <refsynopsisdiv>
<cmdsynopsis> <cmdsynopsis>
<command>nixos-generate-config</command> <command>nixos-generate-config</command>
<arg> <arg>
<option>--force</option> <option>--force</option>
</arg> </arg>
<arg> <arg>
<arg choice='plain'> <arg choice='plain'>
<option>--root</option> <option>--root</option>
</arg> </arg>
<replaceable>root</replaceable> <replaceable>root</replaceable>
</arg> </arg>
<arg> <arg>
<arg choice='plain'> <arg choice='plain'>
<option>--dir</option> <option>--dir</option>

View File

@ -13,39 +13,39 @@
</refnamediv> </refnamediv>
<refsynopsisdiv> <refsynopsisdiv>
<cmdsynopsis> <cmdsynopsis>
<command>nixos-rebuild</command><group choice='req'> <command>nixos-rebuild</command><group choice='req'>
<arg choice='plain'> <arg choice='plain'>
<option>switch</option> <option>switch</option>
</arg> </arg>
<arg choice='plain'> <arg choice='plain'>
<option>boot</option> <option>boot</option>
</arg> </arg>
<arg choice='plain'> <arg choice='plain'>
<option>test</option> <option>test</option>
</arg> </arg>
<arg choice='plain'> <arg choice='plain'>
<option>build</option> <option>build</option>
</arg> </arg>
<arg choice='plain'> <arg choice='plain'>
<option>dry-build</option> <option>dry-build</option>
</arg> </arg>
<arg choice='plain'> <arg choice='plain'>
<option>dry-activate</option> <option>dry-activate</option>
</arg> </arg>
<arg choice='plain'> <arg choice='plain'>
<option>edit</option> <option>edit</option>
</arg> </arg>
<arg choice='plain'> <arg choice='plain'>
<option>build-vm</option> <option>build-vm</option>
</arg> </arg>
<arg choice='plain'> <arg choice='plain'>
<option>build-vm-with-bootloader</option> <option>build-vm-with-bootloader</option>
</arg> </arg>
@ -54,33 +54,33 @@
<arg> <arg>
<option>--upgrade</option> <option>--upgrade</option>
</arg> </arg>
<arg> <arg>
<option>--install-bootloader</option> <option>--install-bootloader</option>
</arg> </arg>
<arg> <arg>
<option>--no-build-nix</option> <option>--no-build-nix</option>
</arg> </arg>
<arg> <arg>
<option>--fast</option> <option>--fast</option>
</arg> </arg>
<arg> <arg>
<option>--rollback</option> <option>--rollback</option>
</arg> </arg>
<arg> <arg>
<option>--builders</option> <option>--builders</option> <replaceable>builder-spec</replaceable>
<replaceable>builder-spec</replaceable>
</arg> </arg>
<sbr /> <sbr />
<arg> <arg>
<group choice='req'> <group choice='req'>
<arg choice='plain'> <arg choice='plain'>
<option>--profile-name</option> <option>--profile-name</option>
</arg> </arg>
<arg choice='plain'> <arg choice='plain'>
<option>-p</option> <option>-p</option>
</arg> </arg>
@ -198,7 +198,7 @@ $ nix-build /path/to/nixpkgs/nixos -A system
</term> </term>
<listitem> <listitem>
<para> <para>
Opens <filename>configuration.nix</filename> in the default editor. Opens <filename>configuration.nix</filename> in the default editor.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -334,25 +334,23 @@ $ ./result/bin/run-*-vm
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry> <varlistentry>
<term> <term>
<option>--builders</option> <option>--builders</option> <replaceable>builder-spec</replaceable>
<replaceable>builder-spec</replaceable> </term>
</term> <listitem>
<listitem> <para>
<para> Allow ad-hoc remote builders for building the new system. This requires
Allow ad-hoc remote builders for building the new system. the user executing <command>nixos-rebuild</command> (usually root) to be
This requires the user executing <command>nixos-rebuild</command> (usually configured as a trusted user in the Nix daemon. This can be achieved by
root) to be configured as a trusted user in the Nix daemon. This can be using the <literal>nix.trustedUsers</literal> NixOS option. Examples
achieved by using the <literal>nix.trustedUsers</literal> NixOS option. Example
Example values for that option are described in the chapter</literal> in the Nix manual, (i.e. <command>--builders
<literal>Remote builds chapter</literal> in the Nix manual, "ssh://bigbrother x86_64-linux"</command>). By specifying an empty string
(i.e. <command>--builders "ssh://bigbrother x86_64-linux"</command>). existing builders specified in <filename>/etc/nix/machines</filename> can
By specifying an empty string existing builders specified in be ignored: <command>--builders ""</command> for example when they are
<filename>/etc/nix/machines</filename> can be ignored: not reachable due to network connectivity.
<command>--builders ""</command> for example when they are not </para>
reachable due to network connectivity. </listitem>
</para>
</listitem>
</varlistentry> </varlistentry>
<varlistentry> <varlistentry>
<term> <term>

View File

@ -639,7 +639,8 @@ $ nix-instantiate -E '(import &lt;nixpkgsunstable&gt; {}).gitFull'
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Groups <literal>kvm</literal> and <literal>render</literal> are introduced now, as systemd requires them. Groups <literal>kvm</literal> and <literal>render</literal> are introduced
now, as systemd requires them.
</para> </para>
</listitem> </listitem>
</itemizedlist> </itemizedlist>

View File

@ -20,48 +20,52 @@
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
The default Python 3 interpreter is now CPython 3.7 instead of CPython 3.6. The default Python 3 interpreter is now CPython 3.7 instead of CPython
3.6.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Added the Pantheon desktop environment. Added the Pantheon desktop environment. It can be enabled through
It can be enabled through <varname>services.xserver.desktopManager.pantheon.enable</varname>. <varname>services.xserver.desktopManager.pantheon.enable</varname>.
</para> </para>
<note> <note>
<para> <para>
<varname>services.xserver.desktopManager.pantheon</varname> default enables lightdm By default, <varname>services.xserver.desktopManager.pantheon</varname>
as a display manager and using Pantheon's greeter. enables LightDM as a display manager, as pantheon's screen locking
implementation relies on it.
</para> </para>
<para> <para>
This is because of limitations with the screenlocking implementation, whereas the Because of that it is recommended to leave LightDM enabled. If you'd like
screenlocker would be non-functional without it. to disable it anyway, set
<option>services.xserver.displayManager.lightdm.enable</option> to
<literal>false</literal> and enable your preferred display manager.
</para> </para>
<para>
Because of that it is recommended to retain this precaution, however if you'd like to change this set:
</para>
<itemizedlist>
<listitem>
<para>
<option>services.xserver.displayManager.lightdm.enable</option>
</para>
</listitem>
<listitem>
<para>
<option>services.xserver.displayManager.lightdm.greeters.pantheon.enable</option>
</para>
</listitem>
</itemizedlist>
<para>to <literal>false</literal> and enable your preferred display manager.</para>
</note> </note>
<para>
Also note that Pantheon's LightDM greeter is not enabled by default,
because it has numerous issues in NixOS and isn't optimal for use here
yet.
</para>
</listitem>
<listitem>
<para>
A major refactoring of the Kubernetes module has been completed.
Refactorings primarily focus on decoupling components and enhancing
security. Two-way TLS and RBAC has been enabled by default for all
components, which slightly changes the way the module is configured. See:
<xref linkend="sec-kubernetes"/> for details.
</para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
A major refactoring of the Kubernetes module has been completed. There is now a set of <option>confinement</option> options for
Refactorings primarily focus on decoupling components and enhancing <option>systemd.services</option>, which allows to restrict services
security. Two-way TLS and RBAC has been enabled by default for all into a <citerefentry>
components, which slightly changes the way the module is configured. <refentrytitle>chroot</refentrytitle>
See: <xref linkend="sec-kubernetes"/> for details. <manvolnum>2</manvolnum>
</citerefentry>ed environment that only contains the store paths from
the runtime closure of the service.
</para> </para>
</listitem> </listitem>
</itemizedlist> </itemizedlist>
@ -87,10 +91,11 @@
<listitem> <listitem>
<para> <para>
There is a new <varname>security.googleOsLogin</varname> module for using There is a new <varname>security.googleOsLogin</varname> module for using
<link xlink:href="https://cloud.google.com/compute/docs/instances/managing-instance-access">OS Login</link> <link xlink:href="https://cloud.google.com/compute/docs/instances/managing-instance-access">OS
to manage SSH access to Google Compute Engine instances, which supersedes Login</link> to manage SSH access to Google Compute Engine instances,
the imperative and broken <literal>google-accounts-daemon</literal> used which supersedes the imperative and broken
in <literal>nixos/modules/virtualisation/google-compute-config.nix</literal>. <literal>google-accounts-daemon</literal> used in
<literal>nixos/modules/virtualisation/google-compute-config.nix</literal>.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
@ -101,8 +106,9 @@
<listitem> <listitem>
<para> <para>
There is a new <varname>services.cockroachdb</varname> module for running There is a new <varname>services.cockroachdb</varname> module for running
CockroachDB databases. NixOS now ships with CockroachDB 2.1.x as well, available CockroachDB databases. NixOS now ships with CockroachDB 2.1.x as well,
on <literal>x86_64-linux</literal> and <literal>aarch64-linux</literal>. available on <literal>x86_64-linux</literal> and
<literal>aarch64-linux</literal>.
</para> </para>
</listitem> </listitem>
</itemizedlist> </itemizedlist>
@ -110,15 +116,15 @@
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
<literal>./security/duosec.nix</literal> <literal>./security/duosec.nix</literal>
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The <link xlink:href="https://duo.com/docs/duounix">PAM module for Duo The <link xlink:href="https://duo.com/docs/duounix">PAM module for Duo
Security</link> has been enabled for use. One can configure it using Security</link> has been enabled for use. One can configure it using the
the <option>security.duosec</option> options along with the <option>security.duosec</option> options along with the corresponding PAM
corresponding PAM option in option in
<option>security.pam.services.&lt;name?&gt;.duoSecurity.enable</option>. <option>security.pam.services.&lt;name?&gt;.duoSecurity.enable</option>.
</para> </para>
</listitem> </listitem>
@ -184,36 +190,37 @@
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The <varname>buildPythonPackage</varname> function now sets <varname>strictDeps = true</varname> The <varname>buildPythonPackage</varname> function now sets
to help distinguish between native and non-native dependencies in order to <varname>strictDeps = true</varname> to help distinguish between native
improve cross-compilation compatibility. Note however that this may break and non-native dependencies in order to improve cross-compilation
user expressions. compatibility. Note however that this may break user expressions.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The <varname>buildPythonPackage</varname> function now sets <varname>LANG = C.UTF-8</varname> The <varname>buildPythonPackage</varname> function now sets <varname>LANG
to enable Unicode support. The <varname>glibcLocales</varname> package is no longer needed as a build input. = C.UTF-8</varname> to enable Unicode support. The
<varname>glibcLocales</varname> package is no longer needed as a build
input.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The Syncthing state and configuration data has been moved from The Syncthing state and configuration data has been moved from
<varname>services.syncthing.dataDir</varname> to the newly defined <varname>services.syncthing.dataDir</varname> to the newly defined
<varname>services.syncthing.configDir</varname>, which defaults to <varname>services.syncthing.configDir</varname>, which defaults to
<literal>/var/lib/syncthing/.config/syncthing</literal>. <literal>/var/lib/syncthing/.config/syncthing</literal>. This change makes
This change makes possible to share synced directories using ACLs possible to share synced directories using ACLs without Syncthing
without Syncthing resetting the permission on every start. resetting the permission on every start.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The <literal>ntp</literal> module now has sane default restrictions. The <literal>ntp</literal> module now has sane default restrictions. If
If you're relying on the previous defaults, which permitted all queries you're relying on the previous defaults, which permitted all queries and
and commands from all firewall-permitted sources, you can set commands from all firewall-permitted sources, you can set
<varname>services.ntp.restrictDefault</varname> and <varname>services.ntp.restrictDefault</varname> and
<varname>services.ntp.restrictSource</varname> to <varname>services.ntp.restrictSource</varname> to <literal>[]</literal>.
<literal>[]</literal>.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
@ -241,17 +248,21 @@
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Options Options
<literal>services.znc.confOptions.networks.<replaceable>name</replaceable>.userName</literal> and <literal>services.znc.confOptions.networks.<replaceable>name</replaceable>.userName</literal>
<literal>services.znc.confOptions.networks.<replaceable>name</replaceable>.modulePackages</literal> and
were removed. They were never used for anything and can therefore safely be removed. <literal>services.znc.confOptions.networks.<replaceable>name</replaceable>.modulePackages</literal>
were removed. They were never used for anything and can therefore safely
be removed.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Package <literal>wasm</literal> has been renamed <literal>proglodyte-wasm</literal>. The package Package <literal>wasm</literal> has been renamed
<literal>wasm</literal> will be pointed to <literal>ocamlPackages.wasm</literal> in 19.09, so <literal>proglodyte-wasm</literal>. The package <literal>wasm</literal>
make sure to update your configuration if you want to keep <literal>proglodyte-wasm</literal> will be pointed to <literal>ocamlPackages.wasm</literal> in 19.09, so make
sure to update your configuration if you want to keep
<literal>proglodyte-wasm</literal>
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
@ -279,37 +290,41 @@
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Package <literal>consul-ui</literal> and passthrough <literal>consul.ui</literal> have been removed. Package <literal>consul-ui</literal> and passthrough
The package <literal>consul</literal> now uses upstream releases that vendor the UI into the binary. <literal>consul.ui</literal> have been removed. The package
See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/48714#issuecomment-433454834">#48714</link> <literal>consul</literal> now uses upstream releases that vendor the UI
into the binary. See
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/48714#issuecomment-433454834">#48714</link>
for details. for details.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Slurm introduces the new option Slurm introduces the new option
<literal>services.slurm.stateSaveLocation</literal>, <literal>services.slurm.stateSaveLocation</literal>, which is now set to
which is now set to <literal>/var/spool/slurm</literal> by default <literal>/var/spool/slurm</literal> by default (instead of
(instead of <literal>/var/spool</literal>). <literal>/var/spool</literal>). Make sure to move all files to the new
Make sure to move all files to the new directory or to set the option accordingly. directory or to set the option accordingly.
</para> </para>
<para> <para>
The slurmctld now runs as user <literal>slurm</literal> instead of <literal>root</literal>. The slurmctld now runs as user <literal>slurm</literal> instead of
If you want to keep slurmctld running as <literal>root</literal>, set <literal>root</literal>. If you want to keep slurmctld running as
<literal>services.slurm.user = root</literal>. <literal>root</literal>, set <literal>services.slurm.user =
root</literal>.
</para> </para>
<para> <para>
The options <literal>services.slurm.nodeName</literal> and The options <literal>services.slurm.nodeName</literal> and
<literal>services.slurm.partitionName</literal> are now sets of <literal>services.slurm.partitionName</literal> are now sets of strings to
strings to correctly reflect the fact that each of these correctly reflect the fact that each of these options can occur more
options can occur more than once in the configuration. than once in the configuration.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The <literal>solr</literal> package has been upgraded from 4.10.3 to 7.5.0 and has undergone The <literal>solr</literal> package has been upgraded from 4.10.3 to 7.5.0
some major changes. The <literal>services.solr</literal> module has been updated to reflect and has undergone some major changes. The <literal>services.solr</literal>
these changes. Please review http://lucene.apache.org/solr/ carefully before upgrading. module has been updated to reflect these changes. Please review
http://lucene.apache.org/solr/ carefully before upgrading.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
@ -321,46 +336,49 @@
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The option <literal>services.xserver.displayManager.job.logToFile</literal> which was The option
<literal>services.xserver.displayManager.job.logToFile</literal> which was
previously set to <literal>true</literal> when using the display managers previously set to <literal>true</literal> when using the display managers
<literal>lightdm</literal>, <literal>sddm</literal> or <literal>xpra</literal> has been <literal>lightdm</literal>, <literal>sddm</literal> or
reset to the default value (<literal>false</literal>). <literal>xpra</literal> has been reset to the default value
(<literal>false</literal>).
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Network interface indiscriminate NixOS firewall options Network interface indiscriminate NixOS firewall options
(<literal>networking.firewall.allow*</literal>) are now preserved when also (<literal>networking.firewall.allow*</literal>) are now preserved when
setting interface specific rules such as <literal>networking.firewall.interfaces.en0.allow*</literal>. also setting interface specific rules such as
These rules continue to use the pseudo device "default" <literal>networking.firewall.interfaces.en0.allow*</literal>. These rules
(<literal>networking.firewall.interfaces.default.*</literal>), and assigning continue to use the pseudo device "default"
to this pseudo device will override the (<literal>networking.firewall.allow*</literal>) (<literal>networking.firewall.interfaces.default.*</literal>), and
options. assigning to this pseudo device will override the
</para> (<literal>networking.firewall.allow*</literal>) options.
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
<para>
The <literal>nscd</literal> service now disables all caching of The <literal>nscd</literal> service now disables all caching of
<literal>passwd</literal> and <literal>group</literal> databases by <literal>passwd</literal> and <literal>group</literal> databases by
default. This was interfering with the correct functioning of the default. This was interfering with the correct functioning of the
<literal>libnss_systemd.so</literal> module which is used by <literal>libnss_systemd.so</literal> module which is used by
<literal>systemd</literal> to manage uids and usernames in the presence of <literal>systemd</literal> to manage uids and usernames in the presence of
<literal>DynamicUser=</literal> in systemd services. This was already the <literal>DynamicUser=</literal> in systemd services. This was already the
default behaviour in presence of <literal>services.sssd.enable = default behaviour in presence of <literal>services.sssd.enable =
true</literal> because nscd caching would interfere with true</literal> because nscd caching would interfere with
<literal>sssd</literal> in unpredictable ways as well. Because we're <literal>sssd</literal> in unpredictable ways as well. Because we're using
using nscd not for caching, but for convincing glibc to find NSS modules nscd not for caching, but for convincing glibc to find NSS modules in the
in the nix store instead of an absolute path, we have decided to disable nix store instead of an absolute path, we have decided to disable caching
caching globally now, as it's usually not the behaviour the user wants and globally now, as it's usually not the behaviour the user wants and can
can lead to surprising behaviour. Furthermore, negative caching of host lead to surprising behaviour. Furthermore, negative caching of host
lookups is also disabled now by default. This should fix the issue of dns lookups is also disabled now by default. This should fix the issue of dns
lookups failing in the presence of an unreliable network. lookups failing in the presence of an unreliable network.
</para> </para>
<para> <para>
If the old behaviour is desired, this can be restored by setting If the old behaviour is desired, this can be restored by setting the
the <literal>services.nscd.config</literal> option <literal>services.nscd.config</literal> option with the desired caching
with the desired caching parameters. parameters.
<programlisting> <programlisting>
services.nscd.config = services.nscd.config =
'' ''
server-user nscd server-user nscd
@ -393,97 +411,125 @@
shared hosts yes shared hosts yes
''; '';
</programlisting> </programlisting>
See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/50316">#50316</link> See
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/50316">#50316</link>
for details. for details.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
GitLab Shell previously used the nix store paths for the GitLab Shell previously used the nix store paths for the
<literal>gitlab-shell</literal> command in its <literal>gitlab-shell</literal> command in its
<literal>authorized_keys</literal> file, which might stop working after <literal>authorized_keys</literal> file, which might stop working after
garbage collection. To circumvent that, we regenerated that file on each garbage collection. To circumvent that, we regenerated that file on each
startup. As <literal>gitlab-shell</literal> has now been changed to use startup. As <literal>gitlab-shell</literal> has now been changed to use
<literal>/var/run/current-system/sw/bin/gitlab-shell</literal>, this is <literal>/var/run/current-system/sw/bin/gitlab-shell</literal>, this is
not necessary anymore, but there might be leftover lines with a nix store not necessary anymore, but there might be leftover lines with a nix store
path. Regenerate the <literal>authorized_keys</literal> file via path. Regenerate the <literal>authorized_keys</literal> file via
<command>sudo -u git -H gitlab-rake gitlab:shell:setup</command> in that <command>sudo -u git -H gitlab-rake gitlab:shell:setup</command> in that
case. case.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The <literal>pam_unix</literal> account module is now loaded with its The <literal>pam_unix</literal> account module is now loaded with its
control field set to <literal>required</literal> instead of control field set to <literal>required</literal> instead of
<literal>sufficient</literal>, so that later PAM account modules that <literal>sufficient</literal>, so that later PAM account modules that
might do more extensive checks are being executed. might do more extensive checks are being executed. Previously, the whole
Previously, the whole account module verification was exited prematurely account module verification was exited prematurely in case a nss module
in case a nss module provided the account name to provided the account name to <literal>pam_unix</literal>. The LDAP and
<literal>pam_unix</literal>. SSSD NixOS modules already add their NSS modules when enabled. In case
The LDAP and SSSD NixOS modules already add their NSS modules when your setup breaks due to some later PAM account module previously
enabled. In case your setup breaks due to some later PAM account module shadowed, or failing NSS lookups, please file a bug. You can get back the
previously shadowed, or failing NSS lookups, please file a bug. You can old behaviour by manually setting <literal>
get back the old behaviour by manually setting <![CDATA[security.pam.services.<name?>.text]]>
<literal><![CDATA[security.pam.services.<name?>.text]]></literal>. </literal>.
</para>
</listitem>
<listitem>
<para>
The <literal>pam_unix</literal> password module is now loaded with its
control field set to <literal>sufficient</literal> instead of
<literal>required</literal>, so that password managed only
by later PAM password modules are being executed.
Previously, for example, changing an LDAP account's password through PAM
was not possible: the whole password module verification
was exited prematurely by <literal>pam_unix</literal>,
preventing <literal>pam_ldap</literal> to manage the password as it should.
</para>
</listitem>
<listitem>
<para>
<literal>fish</literal> has been upgraded to 3.0.
It comes with a number of improvements and backwards incompatible changes.
See the <literal>fish</literal> <link xlink:href="https://github.com/fish-shell/fish-shell/releases/tag/3.0.0">release notes</link> for more information.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The ibus-table input method has had a change in config format, which The <literal>pam_unix</literal> password module is now loaded with its
causes all previous settings to be lost. See control field set to <literal>sufficient</literal> instead of
<link xlink:href="https://github.com/mike-fabian/ibus-table/commit/f9195f877c5212fef0dfa446acb328c45ba5852b">this commit message</link> <literal>required</literal>, so that password managed only by later PAM
for details. password modules are being executed. Previously, for example, changing an
LDAP account's password through PAM was not possible: the whole password
module verification was exited prematurely by <literal>pam_unix</literal>,
preventing <literal>pam_ldap</literal> to manage the password as it
should.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
NixOS module system type <literal>types.optionSet</literal> and <literal>fish</literal> has been upgraded to 3.0. It comes with a number
<literal>lib.mkOption</literal> argument <literal>options</literal> are deprecated. of improvements and backwards incompatible changes. See the
Use <literal>types.submodule</literal> instead. <literal>fish</literal>
(<link xlink:href="https://github.com/NixOS/nixpkgs/pull/54637">#54637</link>) <link xlink:href="https://github.com/fish-shell/fish-shell/releases/tag/3.0.0">release
</para> notes</link> for more information.
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
<literal>matrix-synapse</literal> has been updated to version 0.99. It will <para>
<link xlink:href="https://github.com/matrix-org/synapse/pull/4509">no longer generate a self-signed certificate on first launch</link> The ibus-table input method has had a change in config format, which
and will be <link xlink:href="https://matrix.org/blog/2019/02/05/synapse-0-99-0/">the last version to accept self-signed certificates</link>. causes all previous settings to be lost. See
As such, it is now recommended to use a proper certificate verified by a <link xlink:href="https://github.com/mike-fabian/ibus-table/commit/f9195f877c5212fef0dfa446acb328c45ba5852b">this
root CA (for example Let's Encrypt). commit message</link> for details.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
NixOS module system type <literal>types.optionSet</literal> and
<literal>lib.mkOption</literal> argument <literal>options</literal> are
deprecated. Use <literal>types.submodule</literal> instead.
(<link xlink:href="https://github.com/NixOS/nixpkgs/pull/54637">#54637</link>)
</para>
</listitem>
<listitem>
<para>
<literal>matrix-synapse</literal> has been updated to version 0.99. It
will <link xlink:href="https://github.com/matrix-org/synapse/pull/4509">no
longer generate a self-signed certificate on first launch</link> and will
be
<link xlink:href="https://matrix.org/blog/2019/02/05/synapse-0-99-0/">the
last version to accept self-signed certificates</link>. As such, it is now
recommended to use a proper certificate verified by a root CA (for example
Let's Encrypt). The new <link linkend="module-services-matrix">manual
chapter on Matrix</link> contains a working example of using nginx as a
reverse proxy in front of <literal>matrix-synapse</literal>, using Let's
Encrypt certificates.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
<literal>mailutils</literal> now works by default when <literal>mailutils</literal> now works by default when
<literal>sendmail</literal> is not in a setuid wrapper. As a consequence, <literal>sendmail</literal> is not in a setuid wrapper. As a consequence,
the <literal>sendmailPath</literal> argument, having lost its main use, has the <literal>sendmailPath</literal> argument, having lost its main use,
been removed. has been removed.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
<literal>graylog</literal> has been upgraded from version 2.* to 3.*. Some setups making use of extraConfig (especially those exposing Graylog via reverse proxies) need to be updated as upstream removed/replaced some settings. See <link xlink:href="http://docs.graylog.org/en/3.0/pages/upgrade/graylog-3.0.html#simplified-http-interface-configuration">Upgrading Graylog</link> for details. <literal>graylog</literal> has been upgraded from version 2.* to 3.*. Some
setups making use of extraConfig (especially those exposing Graylog via
reverse proxies) need to be updated as upstream removed/replaced some
settings. See
<link xlink:href="http://docs.graylog.org/en/3.0/pages/upgrade/graylog-3.0.html#simplified-http-interface-configuration">Upgrading
Graylog</link> for details.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The option <literal>users.ldap.bind.password</literal> was renamed to <literal>users.ldap.bind.passwordFile</literal>,
and needs to be readable by the <literal>nslcd</literal> user.
Same applies to the new <literal>users.ldap.daemon.rootpwmodpwFile</literal> option.
</para>
</listitem>
<listitem>
<para>
<literal>nodejs-6_x</literal> is end-of-life.
<literal>nodejs-6_x</literal>, <literal>nodejs-slim-6_x</literal> and
<literal>nodePackages_6_x</literal> are removed.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
@ -498,197 +544,219 @@
<listitem> <listitem>
<para> <para>
The <option>services.matomo</option> module gained the option The <option>services.matomo</option> module gained the option
<option>services.matomo.package</option> which determines the used <option>services.matomo.package</option> which determines the used Matomo
Matomo version. version.
</para> </para>
<para> <para>
The Matomo module now also comes with the systemd service <literal>matomo-archive-processing.service</literal> The Matomo module now also comes with the systemd service
and a timer that automatically triggers archive processing every hour. <literal>matomo-archive-processing.service</literal> and a timer that
This means that you can safely automatically triggers archive processing every hour. This means that you
can safely
<link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour"> <link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">
disable browser triggers for Matomo archiving disable browser triggers for Matomo archiving </link> at
</link> at <literal>Administration > System > General Settings</literal>. <literal>Administration > System > General Settings</literal>.
</para> </para>
<para> <para>
Additionally, you can enable to Additionally, you can enable to
<link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs"> <link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">
delete old visitor logs delete old visitor logs </link> at <literal>Administration > System >
</link> at <literal>Administration > System > Privacy</literal>, Privacy</literal>, but make sure that you run <literal>systemctl start
but make sure that you run <literal>systemctl start matomo-archive-processing.service</literal> matomo-archive-processing.service</literal> at least once without errors
at least once without errors if you have already collected data before, if you have already collected data before, so that the reports get
so that the reports get archived before the source data gets deleted. archived before the source data gets deleted.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
<literal>composableDerivation</literal> along with supporting library functions <literal>composableDerivation</literal> along with supporting library
has been removed. functions has been removed.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The deprecated <literal>truecrypt</literal> package has been removed The deprecated <literal>truecrypt</literal> package has been removed and
and <literal>truecrypt</literal> attribute is now an alias for <literal>truecrypt</literal> attribute is now an alias for
<literal>veracrypt</literal>. VeraCrypt is backward-compatible with <literal>veracrypt</literal>. VeraCrypt is backward-compatible with
TrueCrypt volumes. Note that <literal>cryptsetup</literal> also TrueCrypt volumes. Note that <literal>cryptsetup</literal> also supports
supports loading TrueCrypt volumes. loading TrueCrypt volumes.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The Kubernetes DNS addons, kube-dns, has been replaced with CoreDNS. The Kubernetes DNS addons, kube-dns, has been replaced with CoreDNS. This
This change is made in accordance with Kubernetes making CoreDNS the official default change is made in accordance with Kubernetes making CoreDNS the official
starting from default starting from
<link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#sig-cluster-lifecycle">Kubernetes v1.11</link>. <link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#sig-cluster-lifecycle">Kubernetes
Please beware that upgrading DNS-addon on existing clusters might induce v1.11</link>. Please beware that upgrading DNS-addon on existing clusters
minor downtime while the DNS-addon terminates and re-initializes. might induce minor downtime while the DNS-addon terminates and
Also note that the DNS-service now runs with 2 pod replicas by default. re-initializes. Also note that the DNS-service now runs with 2 pod
The desired number of replicas can be configured using: replicas by default. The desired number of replicas can be configured
<option>services.kubernetes.addons.dns.replicas</option>. using: <option>services.kubernetes.addons.dns.replicas</option>.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The quassel-webserver package and module was removed from nixpkgs due to the lack The quassel-webserver package and module was removed from nixpkgs due to
of maintainers. the lack of maintainers.
</para> </para>
</listitem>
<listitem>
<para>
The astah-community package was removed from nixpkgs due to it being discontinued and the downloads not being available anymore.
</para>
</listitem>
<listitem>
<para>
The httpd service now saves log files with a .log file extension by default for
easier integration with the logrotate service.
</para>
</listitem>
<listitem>
<para>
The owncloud server packages and httpd subservice module were removed
from nixpkgs due to the lack of maintainers.
</para>
</listitem>
<listitem>
<para>
It is possible now to uze ZRAM devices as general purpose ephemeral block devices,
not only as swap. Using more than 1 device as ZRAM swap is no longer recommended,
but is still possible by setting <literal>zramSwap.swapDevices</literal> explicitly.
</para>
<para>
ZRAM algorithm can be changed now.
</para>
<para>
Changes to ZRAM algorithm are applied during <literal>nixos-rebuild switch</literal>,
so make sure you have enough swap space on disk to survive ZRAM device rebuild. Alternatively,
use <literal>nixos-rebuild boot; reboot</literal>.
</para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Flat volumes are now disabled by default in <literal>hardware.pulseaudio</literal>. The manual gained a <link linkend="module-services-matrix"> new chapter on
This has been done to prevent applications, which are unaware of this feature, setting self-hosting <literal>matrix-synapse</literal> and
their volumes to 100% on startup causing harm to your audio hardware and potentially your ears. <literal>riot-web</literal> </link>, the most prevalent server and client
implementations for the
<link xlink:href="https://matrix.org/">Matrix</link> federated
communication network.
</para>
</listitem>
<listitem>
<para>
The astah-community package was removed from nixpkgs due to it being
discontinued and the downloads not being available anymore.
</para>
</listitem>
<listitem>
<para>
The httpd service now saves log files with a .log file extension by
default for easier integration with the logrotate service.
</para>
</listitem>
<listitem>
<para>
The owncloud server packages and httpd subservice module were removed from
nixpkgs due to the lack of maintainers.
</para>
</listitem>
<listitem>
<para>
It is possible now to uze ZRAM devices as general purpose ephemeral block
devices, not only as swap. Using more than 1 device as ZRAM swap is no
longer recommended, but is still possible by setting
<literal>zramSwap.swapDevices</literal> explicitly.
</para>
<para>
ZRAM algorithm can be changed now.
</para>
<para>
Changes to ZRAM algorithm are applied during <literal>nixos-rebuild
switch</literal>, so make sure you have enough swap space on disk to
survive ZRAM device rebuild. Alternatively, use <literal>nixos-rebuild
boot; reboot</literal>.
</para>
</listitem>
<listitem>
<para>
Flat volumes are now disabled by default in
<literal>hardware.pulseaudio</literal>. This has been done to prevent
applications, which are unaware of this feature, setting their volumes to
100% on startup causing harm to your audio hardware and potentially your
ears.
</para> </para>
<note> <note>
<para> <para>
With this change application specific volumes are relative to the master volume which can be With this change application specific volumes are relative to the master
adjusted independently, whereas before they were absolute; meaning that in effect, it scaled the volume which can be adjusted independently, whereas before they were
device-volume with the volume of the loudest application. absolute; meaning that in effect, it scaled the device-volume with the
volume of the loudest application.
</para> </para>
</note> </note>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The <link xlink:href="https://github.com/DanielAdolfsson/ndppd"><literal>ndppd</literal></link> module The
now supports <link linkend="opt-services.ndppd.enable">all config options</link> provided by the current <link xlink:href="https://github.com/DanielAdolfsson/ndppd"><literal>ndppd</literal></link>
upstream version as service options. Additionally the <literal>ndppd</literal> package doesn't contain module now supports <link linkend="opt-services.ndppd.enable">all config
the systemd unit configuration from upstream anymore, the unit is completely configured by the NixOS module now. options</link> provided by the current upstream version as service
options. Additionally the <literal>ndppd</literal> package doesn't contain
the systemd unit configuration from upstream anymore, the unit is
completely configured by the NixOS module now.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
New installs of NixOS will default to the Redmine 4.x series unless otherwise specified in New installs of NixOS will default to the Redmine 4.x series unless
<literal>services.redmine.package</literal> while existing installs of NixOS will default to otherwise specified in <literal>services.redmine.package</literal> while
the Redmine 3.x series. existing installs of NixOS will default to the Redmine 3.x series.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
The <link linkend="opt-services.grafana.enable">Grafana module</link> now supports declarative The <link linkend="opt-services.grafana.enable">Grafana module</link> now
<link xlink:href="http://docs.grafana.org/administration/provisioning/">datasource and dashboard</link> supports declarative
provisioning. <link xlink:href="http://docs.grafana.org/administration/provisioning/">datasource
and dashboard</link> provisioning.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para>
The use of insecure ports on kubernetes has been deprecated.
Thus options:
<varname>services.kubernetes.apiserver.port</varname> and
<varname>services.kubernetes.controllerManager.port</varname>
has been renamed to <varname>.insecurePort</varname>,
and default of both options has changed to 0 (disabled).
</para>
</listitem>
<listitem>
<para>
Note that the default value of
<varname>services.kubernetes.apiserver.bindAddress</varname>
has changed from 127.0.0.1 to 0.0.0.0, allowing the apiserver to be
accessible from outside the master node itself.
If the apiserver insecurePort is enabled,
it is strongly recommended to only bind on the loopback interface. See:
<varname>services.kubernetes.apiserver.insecurebindAddress</varname>.
</para>
</listitem>
<listitem>
<para>
The option <varname>services.kubernetes.apiserver.allowPrivileged</varname>
and <varname>services.kubernetes.kubelet.allowPrivileged</varname> now
defaults to false. Disallowing privileged containers on the cluster.
</para>
</listitem>
<listitem>
<para>
The kubernetes module does no longer add the kubernetes package to
<varname>environment.systemPackages</varname> implicitly.
</para>
</listitem>
<listitem>
<para>
The <literal>intel</literal> driver has been removed from the default list of
<link linkend="opt-services.xserver.videoDrivers">X.org video drivers</link>.
The <literal>modesetting</literal> driver should take over automatically,
it is better maintained upstream and has less problems with advanced X11 features.
This can lead to a change in the output names used by <literal>xrandr</literal>.
Some performance regressions on some GPU models might happen.
Some OpenCL and VA-API applications might also break
(Beignet seems to provide OpenCL support with
<literal>modesetting</literal> driver, too).
Kernel mode setting API does not support backlight control,
so <literal>xbacklight</literal> tool will not work;
backlight level can be controlled directly via <literal>/sys/</literal>
or with <literal>brightnessctl</literal>.
Users who need this functionality more than multi-output XRandR are advised
to add `intel` to `videoDrivers` and report an issue (or provide additional
details in an existing one)
</para>
</listitem>
<listitem>
<para>
Openmpi has been updated to version 4.0.0, which removes some deprecated MPI-1 symbols.
This may break some older applications that still rely on those symbols.
An upgrade guide can be found <link xlink:href="https://www.open-mpi.org/faq/?category=mpi-removed">here</link>.
</para>
<para> <para>
The nginx package now relies on OpenSSL 1.1 and supports TLS 1.3 by default. You can set the protocols used by the nginx service using <xref linkend="opt-services.nginx.sslProtocols"/>. The use of insecure ports on kubernetes has been deprecated. Thus options:
<varname>services.kubernetes.apiserver.port</varname> and
<varname>services.kubernetes.controllerManager.port</varname> has been
renamed to <varname>.insecurePort</varname>, and default of both options
has changed to 0 (disabled).
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
A new subcommand <command>nixos-rebuild edit</command> was added. Note that the default value of
</para> <varname>services.kubernetes.apiserver.bindAddress</varname> has changed
from 127.0.0.1 to 0.0.0.0, allowing the apiserver to be accessible from
outside the master node itself. If the apiserver insecurePort is enabled,
it is strongly recommended to only bind on the loopback interface. See:
<varname>services.kubernetes.apiserver.insecurebindAddress</varname>.
</para>
</listitem>
<listitem>
<para>
The option
<varname>services.kubernetes.apiserver.allowPrivileged</varname> and
<varname>services.kubernetes.kubelet.allowPrivileged</varname> now
defaults to false. Disallowing privileged containers on the cluster.
</para>
</listitem>
<listitem>
<para>
The kubernetes module does no longer add the kubernetes package to
<varname>environment.systemPackages</varname> implicitly.
</para>
</listitem>
<listitem>
<para>
The <literal>intel</literal> driver has been removed from the default list
of <link linkend="opt-services.xserver.videoDrivers">X.org video
drivers</link>. The <literal>modesetting</literal> driver should take over
automatically, it is better maintained upstream and has less problems with
advanced X11 features. This can lead to a change in the output names used
by <literal>xrandr</literal>. Some performance regressions on some GPU
models might happen. Some OpenCL and VA-API applications might also break
(Beignet seems to provide OpenCL support with
<literal>modesetting</literal> driver, too). Kernel mode setting API does
not support backlight control, so <literal>xbacklight</literal> tool will
not work; backlight level can be controlled directly via
<literal>/sys/</literal> or with <literal>brightnessctl</literal>. Users
who need this functionality more than multi-output XRandR are advised to
add `intel` to `videoDrivers` and report an issue (or provide additional
details in an existing one)
</para>
</listitem>
<listitem>
<para>
Openmpi has been updated to version 4.0.0, which removes some deprecated
MPI-1 symbols. This may break some older applications that still rely on
those symbols. An upgrade guide can be found
<link xlink:href="https://www.open-mpi.org/faq/?category=mpi-removed">here</link>.
</para>
<para>
The nginx package now relies on OpenSSL 1.1 and supports TLS 1.3 by
default. You can set the protocols used by the nginx service using
<xref linkend="opt-services.nginx.sslProtocols"/>.
</para>
</listitem>
<listitem>
<para>
A new subcommand <command>nixos-rebuild edit</command> was added.
</para>
</listitem> </listitem>
</itemizedlist> </itemizedlist>
</section> </section>

View File

@ -35,9 +35,53 @@
The following new services were added since the last release: The following new services were added since the last release:
</para> </para>
</section>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.09-incompatibilities">
<title>Backward Incompatibilities</title>
<para>
When upgrading from a previous release, please be aware of the following
incompatible changes:
</para>
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para /> <para>
Buildbot no longer supports Python 2, as support was dropped upstream in
version 2.0.0. Configurations may need to be modified to make them
compatible with Python 3.
</para>
</listitem>
<listitem>
<para>
PostgreSQL now uses
<filename class="directory">/run/postgresql</filename> as its socket
directory instead of <filename class="directory">/tmp</filename>. So
if you run an application like eg. Nextcloud, where you need to use
the Unix socket path as the database host name, you need to change it
accordingly.
</para>
</listitem>
<listitem>
<para>
The options <option>services.prometheus.alertmanager.user</option> and
<option>services.prometheus.alertmanager.group</option> have been removed
because the alertmanager service is now using systemd's <link
xlink:href="http://0pointer.net/blog/dynamic-users-with-systemd.html">
DynamicUser mechanism</link> which obviates these options.
</para>
</listitem>
<listitem>
<para>
The NetworkManager systemd unit was renamed back from network-manager.service to
NetworkManager.service for better compatibility with other applications expecting this name.
The same applies to ModemManager where modem-manager.service is now called ModemManager.service again.
</para>
</listitem> </listitem>
</itemizedlist> </itemizedlist>
</section> </section>
@ -53,16 +97,44 @@
<listitem> <listitem>
<para> <para>
The <option>documentation</option> module gained an option named The <option>documentation</option> module gained an option named
<option>documentation.nixos.includeAllModules</option> which makes the generated <option>documentation.nixos.includeAllModules</option> which makes the
<citerefentry><refentrytitle>configuration.nix</refentrytitle> generated <citerefentry>
<manvolnum>5</manvolnum></citerefentry> manual page include all options from all NixOS modules <refentrytitle>configuration.nix</refentrytitle>
included in a given <literal>configuration.nix</literal> configuration file. Currently, it is <manvolnum>5</manvolnum></citerefentry> manual page include all options
set to <literal>false</literal> by default as enabling it frequently prevents evaluation. But from all NixOS modules included in a given
the plan is to eventually have it set to <literal>true</literal> by default. Please set it to <literal>configuration.nix</literal> configuration file. Currently, it is
<literal>true</literal> now in your <literal>configuration.nix</literal> and fix all the bugs set to <literal>false</literal> by default as enabling it frequently
it uncovers. prevents evaluation. But the plan is to eventually have it set to
<literal>true</literal> by default. Please set it to
<literal>true</literal> now in your <literal>configuration.nix</literal>
and fix all the bugs it uncovers.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The <literal>vlc</literal> package gained support for Chromecast
streaming, enabled by default. TCP port 8010 must be open for it to work,
so something like <literal>networking.firewall.allowedTCPPorts = [ 8010
];</literal> may be required in your configuration. Also consider enabling
<link xlink:href="https://nixos.wiki/wiki/Accelerated_Video_Playback">
Accelerated Video Playback</link> for better transcoding performance.
</para>
</listitem>
<listitem>
<para>
The following changes apply if the <literal>stateVersion</literal> is
changed to 19.09 or higher. For <literal>stateVersion = "19.03"</literal>
or lower the old behavior is preserved.
</para>
<itemizedlist>
<listitem>
<para>
<literal>solr.package</literal> defaults to
<literal>pkgs.solr_8</literal>.
</para>
</listitem>
</itemizedlist>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
</section> </section>

View File

@ -53,6 +53,7 @@ in {
pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
partitionTableType = if config.ec2.hvm then "legacy" else "none"; partitionTableType = if config.ec2.hvm then "legacy" else "none";
diskSize = cfg.sizeMB; diskSize = cfg.sizeMB;
fsType = "ext4";
configFile = pkgs.writeText "configuration.nix" configFile = pkgs.writeText "configuration.nix"
'' ''
{ {

View File

@ -27,25 +27,29 @@ let
''; '';
}; };
nslcdConfig = { nslcdConfig = writeText "nslcd.conf" ''
target = "nslcd.conf"; uid nslcd
source = writeText "nslcd.conf" '' gid nslcd
uid nslcd uri ${cfg.server}
gid nslcd base ${cfg.base}
uri ${cfg.server} timelimit ${toString cfg.timeLimit}
base ${cfg.base} bind_timelimit ${toString cfg.bind.timeLimit}
timelimit ${toString cfg.timeLimit} ${optionalString (cfg.bind.distinguishedName != "")
bind_timelimit ${toString cfg.bind.timeLimit} "binddn ${cfg.bind.distinguishedName}" }
${optionalString (cfg.bind.distinguishedName != "") ${optionalString (cfg.daemon.rootpwmoddn != "")
"binddn ${cfg.bind.distinguishedName}" } "rootpwmoddn ${cfg.daemon.rootpwmoddn}" }
${optionalString (cfg.daemon.rootpwmoddn != "") ${optionalString (cfg.daemon.extraConfig != "") cfg.daemon.extraConfig }
"rootpwmoddn ${cfg.daemon.rootpwmoddn}" } '';
${optionalString (cfg.daemon.extraConfig != "") cfg.daemon.extraConfig }
'';
};
insertLdapPassword = !config.users.ldap.daemon.enable && # nslcd normally reads configuration from /etc/nslcd.conf.
config.users.ldap.bind.distinguishedName != ""; # this file might contain secrets. We append those at runtime,
# so redirect its location to something more temporary.
nslcdWrapped = runCommandNoCC "nslcd-wrapped" { nativeBuildInputs = [ makeWrapper ]; } ''
mkdir -p $out/bin
makeWrapper ${nss_pam_ldapd}/sbin/nslcd $out/bin/nslcd \
--set LD_PRELOAD "${pkgs.libredirect}/lib/libredirect.so" \
--set NIX_REDIRECTS "/etc/nslcd.conf=/run/nslcd/nslcd.conf"
'';
in in
@ -139,13 +143,13 @@ in
''; '';
}; };
rootpwmodpw = mkOption { rootpwmodpwFile = mkOption {
default = ""; default = "";
example = "/run/keys/nslcd.rootpwmodpw"; example = "/run/keys/nslcd.rootpwmodpw";
type = types.str; type = types.str;
description = '' description = ''
The path to a file containing the credentials with which The path to a file containing the credentials with which to bind to
to bind to the LDAP server if the root user tries to change a user's password the LDAP server if the root user tries to change a user's password.
''; '';
}; };
}; };
@ -161,7 +165,7 @@ in
''; '';
}; };
password = mkOption { passwordFile = mkOption {
default = "/etc/ldap/bind.password"; default = "/etc/ldap/bind.password";
type = types.str; type = types.str;
description = '' description = ''
@ -220,14 +224,14 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
environment.etc = if cfg.daemon.enable then [nslcdConfig] else [ldapConfig]; environment.etc = optional (!cfg.daemon.enable) ldapConfig;
system.activationScripts = mkIf insertLdapPassword { system.activationScripts = mkIf (!cfg.daemon.enable) {
ldap = stringAfter [ "etc" "groups" "users" ] '' ldap = stringAfter [ "etc" "groups" "users" ] ''
if test -f "${cfg.bind.password}" ; then if test -f "${cfg.bind.passwordFile}" ; then
umask 0077 umask 0077
conf="$(mktemp)" conf="$(mktemp)"
printf 'bindpw %s\n' "$(cat ${cfg.bind.password})" | printf 'bindpw %s\n' "$(cat ${cfg.bind.passwordFile})" |
cat ${ldapConfig.source} - >"$conf" cat ${ldapConfig.source} - >"$conf"
mv -fT "$conf" /etc/ldap.conf mv -fT "$conf" /etc/ldap.conf
fi fi
@ -251,7 +255,6 @@ in
}; };
systemd.services = mkIf cfg.daemon.enable { systemd.services = mkIf cfg.daemon.enable {
nslcd = { nslcd = {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
@ -259,32 +262,32 @@ in
umask 0077 umask 0077
conf="$(mktemp)" conf="$(mktemp)"
{ {
cat ${nslcdConfig.source} cat ${nslcdConfig}
test -z '${cfg.bind.distinguishedName}' -o ! -f '${cfg.bind.password}' || test -z '${cfg.bind.distinguishedName}' -o ! -f '${cfg.bind.passwordFile}' ||
printf 'bindpw %s\n' "$(cat '${cfg.bind.password}')" printf 'bindpw %s\n' "$(cat '${cfg.bind.passwordFile}')"
test -z '${cfg.daemon.rootpwmoddn}' -o ! -f '${cfg.daemon.rootpwmodpw}' || test -z '${cfg.daemon.rootpwmoddn}' -o ! -f '${cfg.daemon.rootpwmodpwFile}' ||
printf 'rootpwmodpw %s\n' "$(cat '${cfg.daemon.rootpwmodpw}')" printf 'rootpwmodpw %s\n' "$(cat '${cfg.daemon.rootpwmodpwFile}')"
} >"$conf" } >"$conf"
mv -fT "$conf" /etc/nslcd.conf mv -fT "$conf" /run/nslcd/nslcd.conf
''; '';
restartTriggers = [ "/run/nslcd/nslcd.conf" ];
# NOTE: because one cannot pass a custom config path to `nslcd`
# (which is only able to use `/etc/nslcd.conf`)
# changes in `nslcdConfig` won't change `serviceConfig`,
# and thus won't restart `nslcd`.
# Therefore `restartTriggers` is used on `/etc/nslcd.conf`.
restartTriggers = [ nslcdConfig.source ];
serviceConfig = { serviceConfig = {
ExecStart = "${nss_pam_ldapd}/sbin/nslcd"; ExecStart = "${nslcdWrapped}/bin/nslcd";
Type = "forking"; Type = "forking";
PIDFile = "/run/nslcd/nslcd.pid";
Restart = "always"; Restart = "always";
User = "nslcd";
Group = "nslcd";
RuntimeDirectory = [ "nslcd" ]; RuntimeDirectory = [ "nslcd" ];
PIDFile = "/run/nslcd/nslcd.pid";
}; };
}; };
}; };
}; };
imports =
[ (mkRenamedOptionModule [ "users" "ldap" "bind" "password"] [ "users" "ldap" "bind" "passwordFile"])
];
} }

View File

@ -34,7 +34,7 @@ with lib;
networkmanager-openvpn = super.networkmanager-openvpn.override { withGnome = false; }; networkmanager-openvpn = super.networkmanager-openvpn.override { withGnome = false; };
networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; }; networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; }; networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
pinentry = super.pinentry.override { gtk2 = null; qt = null; }; pinentry = super.pinentry.override { gtk2 = null; gcr = null; qt = null; };
gobject-introspection = super.gobject-introspection.override { x11Support = false; }; gobject-introspection = super.gobject-introspection.override { x11Support = false; };
})); }));
}; };

View File

@ -7,7 +7,7 @@ with lib;
let let
requiredPackages = map lib.lowPrio requiredPackages = map (pkg: setPrio ((pkg.meta.priority or 5) + 3) pkg)
[ config.nix.package [ config.nix.package
pkgs.acl pkgs.acl
pkgs.attr pkgs.attr

View File

@ -38,6 +38,12 @@ in {
firmwareLinuxNonfree firmwareLinuxNonfree
intel2200BGFirmware intel2200BGFirmware
rtl8192su-firmware rtl8192su-firmware
rt5677-firmware
rtl8723bs-firmware
rtlwifi_new-firmware
zd1211fw
alsa-firmware
openelec-dvb-firmware
] ++ optional (pkgs.stdenv.hostPlatform.isAarch32 || pkgs.stdenv.hostPlatform.isAarch64) raspberrypiWirelessFirmware ] ++ optional (pkgs.stdenv.hostPlatform.isAarch32 || pkgs.stdenv.hostPlatform.isAarch64) raspberrypiWirelessFirmware
++ optionals (versionOlder config.boot.kernelPackages.kernel.version "4.13") [ ++ optionals (versionOlder config.boot.kernelPackages.kernel.version "4.13") [
rtl8723bs-firmware rtl8723bs-firmware
@ -54,6 +60,10 @@ in {
}]; }];
hardware.firmware = with pkgs; [ hardware.firmware = with pkgs; [
broadcom-bt-firmware broadcom-bt-firmware
b43Firmware_5_1_138
b43Firmware_6_30_163_46
b43FirmwareCutter
facetimehd-firmware
]; ];
}) })
]; ];

View File

@ -0,0 +1,28 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.hardware.logitech;
in {
options.hardware.logitech = {
enable = mkEnableOption "Logitech Devices";
enableGraphical = mkOption {
type = types.bool;
default = false;
description = "Enable graphical support applications.";
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [
pkgs.ltunify
] ++ lib.optional cfg.enableGraphical pkgs.solaar;
# ltunifi and solaar both provide udev rules but the most up-to-date have been split
# out into a dedicated derivation
services.udev.packages = with pkgs; [ logitech-udev-rules ];
};
}

View File

@ -88,7 +88,7 @@ let
# result in incorrect boot entries. # result in incorrect boot entries.
baseIsolinuxCfg = '' baseIsolinuxCfg = ''
SERIAL 0 38400 SERIAL 0 115200
TIMEOUT ${builtins.toString syslinuxTimeout} TIMEOUT ${builtins.toString syslinuxTimeout}
UI vesamenu.c32 UI vesamenu.c32
MENU TITLE NixOS MENU TITLE NixOS
@ -165,6 +165,8 @@ let
else else
"# No refind for ${targetArch}" "# No refind for ${targetArch}"
; ;
grubPkgs = if config.boot.loader.grub.forcei686 then pkgs.pkgsi686Linux else pkgs;
grubMenuCfg = '' grubMenuCfg = ''
# #
@ -241,7 +243,7 @@ let
# Modules that may or may not be available per-platform. # Modules that may or may not be available per-platform.
echo "Adding additional modules:" echo "Adding additional modules:"
for mod in efi_uga; do for mod in efi_uga; do
if [ -f ${pkgs.grub2_efi}/lib/grub/${pkgs.grub2_efi.grubTarget}/$mod.mod ]; then if [ -f ${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget}/$mod.mod ]; then
echo " - $mod" echo " - $mod"
MODULES+=" $mod" MODULES+=" $mod"
fi fi
@ -249,9 +251,9 @@ let
# Make our own efi program, we can't rely on "grub-install" since it seems to # Make our own efi program, we can't rely on "grub-install" since it seems to
# probe for devices, even with --skip-fs-probe. # probe for devices, even with --skip-fs-probe.
${pkgs.grub2_efi}/bin/grub-mkimage -o $out/EFI/boot/boot${targetArch}.efi -p /EFI/boot -O ${pkgs.grub2_efi.grubTarget} \ ${grubPkgs.grub2_efi}/bin/grub-mkimage -o $out/EFI/boot/boot${targetArch}.efi -p /EFI/boot -O ${grubPkgs.grub2_efi.grubTarget} \
$MODULES $MODULES
cp ${pkgs.grub2_efi}/share/grub/unicode.pf2 $out/EFI/boot/ cp ${grubPkgs.grub2_efi}/share/grub/unicode.pf2 $out/EFI/boot/
cat <<EOF > $out/EFI/boot/grub.cfg cat <<EOF > $out/EFI/boot/grub.cfg
@ -362,7 +364,7 @@ let
# Name used by UEFI for architectures. # Name used by UEFI for architectures.
targetArch = targetArch =
if pkgs.stdenv.isi686 then if pkgs.stdenv.isi686 || config.boot.loader.grub.forcei686 then
"ia32" "ia32"
else if pkgs.stdenv.isx86_64 then else if pkgs.stdenv.isx86_64 then
"x64" "x64"
@ -506,7 +508,7 @@ in
# here and it causes a cyclic dependency. # here and it causes a cyclic dependency.
boot.loader.grub.enable = false; boot.loader.grub.enable = false;
environment.systemPackages = [ pkgs.grub2 pkgs.grub2_efi ] environment.systemPackages = [ grubPkgs.grub2 grubPkgs.grub2_efi ]
++ optional canx86BiosBoot pkgs.syslinux ++ optional canx86BiosBoot pkgs.syslinux
; ;

View File

@ -1,6 +1,6 @@
{ {
x86_64-linux = "/nix/store/pid1yakjasch4pwl63nzbj22z9zf0q26-nix-2.2"; x86_64-linux = "/nix/store/hbhdjn5ik3byg642d1m11k3k3s0kn3py-nix-2.2.2";
i686-linux = "/nix/store/qpkl0cxy0xh4h432lv2qsjrmhvx5x2vy-nix-2.2"; i686-linux = "/nix/store/fz5cikwvj3n0a6zl44h6l2z3cin64mda-nix-2.2.2";
aarch64-linux = "/nix/store/0jg7h94x986d8cskg6gcfza9x67spdbp-nix-2.2"; aarch64-linux = "/nix/store/2gba4cyl4wvxzfbhmli90jy4n5aj0kjj-nix-2.2.2";
x86_64-darwin = "/nix/store/a48whqkmxnsfhwbk6nay74iyc1cf0lr2-nix-2.2"; x86_64-darwin = "/nix/store/87i4fp46jfw9yl8c7i9gx75m5yph7irl-nix-2.2.2";
} }

View File

@ -9,49 +9,44 @@ showUsage() {
# Parse valid argument options # Parse valid argument options
PARAMS=`getopt -n $0 -o h -l no-out-link,show-trace,help -- "$@"` nixBuildArgs=()
networkExpr=
if [ $? != 0 ] while [ $# -gt 0 ]; do
then
showUsage
exit 1
fi
eval set -- "$PARAMS"
# Evaluate valid options
while [ "$1" != "--" ]
do
case "$1" in case "$1" in
--no-out-link) --no-out-link)
noOutLinkArg="--no-out-link" nixBuildArgs+=("--no-out-link")
;; ;;
--show-trace) --show-trace)
showTraceArg="--show-trace" nixBuildArgs+=("--show-trace")
;; ;;
-h|--help) -h|--help)
showUsage showUsage
exit 0 exit 0
;; ;;
--option)
shift
nixBuildArgs+=("--option" "$1" "$2"); shift
;;
*)
if [ ! -z "$networkExpr" ]; then
echo "Network expression already set!"
showUsage
exit 1
fi
networkExpr="$(readlink -f $1)"
;;
esac esac
shift shift
done done
shift if [ -z "$networkExpr" ]
# Validate the given options
if [ "$1" = "" ]
then then
echo "ERROR: A network expression must be specified!" >&2 echo "ERROR: A network expression must be specified!" >&2
exit 1 exit 1
else
networkExpr=$(readlink -f $1)
fi fi
# Build a network of VMs # Build a network of VMs
nix-build '<nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix>' \ nix-build '<nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix>' \
--argstr networkExpr $networkExpr $noOutLinkArg $showTraceArg --argstr networkExpr $networkExpr "${nixBuildArgs[@]}"

View File

@ -35,6 +35,7 @@
./config/users-groups.nix ./config/users-groups.nix
./config/vpnc.nix ./config/vpnc.nix
./config/zram.nix ./config/zram.nix
./hardware/acpilight.nix
./hardware/all-firmware.nix ./hardware/all-firmware.nix
./hardware/bladeRF.nix ./hardware/bladeRF.nix
./hardware/brightnessctl.nix ./hardware/brightnessctl.nix
@ -45,6 +46,7 @@
./hardware/sensor/iio.nix ./hardware/sensor/iio.nix
./hardware/ksm.nix ./hardware/ksm.nix
./hardware/ledger.nix ./hardware/ledger.nix
./hardware/logitech.nix
./hardware/mcelog.nix ./hardware/mcelog.nix
./hardware/network/b43.nix ./hardware/network/b43.nix
./hardware/nitrokey.nix ./hardware/nitrokey.nix
@ -129,7 +131,6 @@
./programs/sysdig.nix ./programs/sysdig.nix
./programs/systemtap.nix ./programs/systemtap.nix
./programs/sway.nix ./programs/sway.nix
./programs/sway-beta.nix
./programs/thefuck.nix ./programs/thefuck.nix
./programs/tmux.nix ./programs/tmux.nix
./programs/udevil.nix ./programs/udevil.nix
@ -137,6 +138,7 @@
./programs/vim.nix ./programs/vim.nix
./programs/wavemon.nix ./programs/wavemon.nix
./programs/way-cooler.nix ./programs/way-cooler.nix
./programs/waybar.nix
./programs/wireshark.nix ./programs/wireshark.nix
./programs/xfs_quota.nix ./programs/xfs_quota.nix
./programs/xonsh.nix ./programs/xonsh.nix
@ -171,6 +173,7 @@
./security/rtkit.nix ./security/rtkit.nix
./security/wrappers/default.nix ./security/wrappers/default.nix
./security/sudo.nix ./security/sudo.nix
./security/systemd-confinement.nix
./services/admin/oxidized.nix ./services/admin/oxidized.nix
./services/admin/salt/master.nix ./services/admin/salt/master.nix
./services/admin/salt/minion.nix ./services/admin/salt/minion.nix
@ -252,6 +255,8 @@
./services/databases/virtuoso.nix ./services/databases/virtuoso.nix
./services/desktops/accountsservice.nix ./services/desktops/accountsservice.nix
./services/desktops/bamf.nix ./services/desktops/bamf.nix
./services/desktops/deepin/dde-daemon.nix
./services/desktops/deepin/deepin-menu.nix
./services/desktops/dleyna-renderer.nix ./services/desktops/dleyna-renderer.nix
./services/desktops/dleyna-server.nix ./services/desktops/dleyna-server.nix
./services/desktops/pantheon/contractor.nix ./services/desktops/pantheon/contractor.nix
@ -262,8 +267,10 @@
./services/desktops/pipewire.nix ./services/desktops/pipewire.nix
./services/desktops/gnome3/at-spi2-core.nix ./services/desktops/gnome3/at-spi2-core.nix
./services/desktops/gnome3/chrome-gnome-shell.nix ./services/desktops/gnome3/chrome-gnome-shell.nix
./services/desktops/gnome3/evince.nix
./services/desktops/gnome3/evolution-data-server.nix ./services/desktops/gnome3/evolution-data-server.nix
./services/desktops/gnome3/file-roller.nix ./services/desktops/gnome3/file-roller.nix
./services/desktops/gnome3/glib-networking.nix
./services/desktops/gnome3/gnome-disks.nix ./services/desktops/gnome3/gnome-disks.nix
./services/desktops/gnome3/gnome-documents.nix ./services/desktops/gnome3/gnome-documents.nix
./services/desktops/gnome3/gnome-keyring.nix ./services/desktops/gnome3/gnome-keyring.nix
@ -311,6 +318,7 @@
./services/hardware/ratbagd.nix ./services/hardware/ratbagd.nix
./services/hardware/sane.nix ./services/hardware/sane.nix
./services/hardware/sane_extra_backends/brscan4.nix ./services/hardware/sane_extra_backends/brscan4.nix
./services/hardware/sane_extra_backends/dsseries.nix
./services/hardware/tcsd.nix ./services/hardware/tcsd.nix
./services/hardware/tlp.nix ./services/hardware/tlp.nix
./services/hardware/thinkfan.nix ./services/hardware/thinkfan.nix
@ -347,6 +355,7 @@
./services/mail/exim.nix ./services/mail/exim.nix
./services/mail/freepops.nix ./services/mail/freepops.nix
./services/mail/mail.nix ./services/mail/mail.nix
./services/mail/mailcatcher.nix
./services/mail/mailhog.nix ./services/mail/mailhog.nix
./services/mail/mlmmj.nix ./services/mail/mlmmj.nix
./services/mail/offlineimap.nix ./services/mail/offlineimap.nix
@ -482,6 +491,7 @@
./services/monitoring/prometheus/default.nix ./services/monitoring/prometheus/default.nix
./services/monitoring/prometheus/alertmanager.nix ./services/monitoring/prometheus/alertmanager.nix
./services/monitoring/prometheus/exporters.nix ./services/monitoring/prometheus/exporters.nix
./services/monitoring/prometheus/pushgateway.nix
./services/monitoring/riemann.nix ./services/monitoring/riemann.nix
./services/monitoring/riemann-dash.nix ./services/monitoring/riemann-dash.nix
./services/monitoring/riemann-tools.nix ./services/monitoring/riemann-tools.nix
@ -578,6 +588,7 @@
./services/networking/keepalived/default.nix ./services/networking/keepalived/default.nix
./services/networking/keybase.nix ./services/networking/keybase.nix
./services/networking/kippo.nix ./services/networking/kippo.nix
./services/networking/knot.nix
./services/networking/kresd.nix ./services/networking/kresd.nix
./services/networking/lambdabot.nix ./services/networking/lambdabot.nix
./services/networking/libreswan.nix ./services/networking/libreswan.nix
@ -627,6 +638,7 @@
./services/networking/prosody.nix ./services/networking/prosody.nix
./services/networking/quagga.nix ./services/networking/quagga.nix
./services/networking/quassel.nix ./services/networking/quassel.nix
./services/networking/quicktun.nix
./services/networking/racoon.nix ./services/networking/racoon.nix
./services/networking/radicale.nix ./services/networking/radicale.nix
./services/networking/radvd.nix ./services/networking/radvd.nix
@ -664,6 +676,7 @@
./services/networking/tinydns.nix ./services/networking/tinydns.nix
./services/networking/tftpd.nix ./services/networking/tftpd.nix
./services/networking/tox-bootstrapd.nix ./services/networking/tox-bootstrapd.nix
./services/networking/tox-node.nix
./services/networking/toxvpn.nix ./services/networking/toxvpn.nix
./services/networking/tvheadend.nix ./services/networking/tvheadend.nix
./services/networking/unbound.nix ./services/networking/unbound.nix
@ -736,10 +749,12 @@
./services/web-apps/atlassian/crowd.nix ./services/web-apps/atlassian/crowd.nix
./services/web-apps/atlassian/jira.nix ./services/web-apps/atlassian/jira.nix
./services/web-apps/codimd.nix ./services/web-apps/codimd.nix
./services/web-apps/documize.nix
./services/web-apps/frab.nix ./services/web-apps/frab.nix
./services/web-apps/icingaweb2/icingaweb2.nix ./services/web-apps/icingaweb2/icingaweb2.nix
./services/web-apps/icingaweb2/module-monitoring.nix ./services/web-apps/icingaweb2/module-monitoring.nix
./services/web-apps/mattermost.nix ./services/web-apps/mattermost.nix
./services/web-apps/miniflux.nix
./services/web-apps/nextcloud.nix ./services/web-apps/nextcloud.nix
./services/web-apps/nexus.nix ./services/web-apps/nexus.nix
./services/web-apps/pgpkeyserver-lite.nix ./services/web-apps/pgpkeyserver-lite.nix
@ -765,6 +780,7 @@
./services/web-servers/nginx/default.nix ./services/web-servers/nginx/default.nix
./services/web-servers/nginx/gitweb.nix ./services/web-servers/nginx/gitweb.nix
./services/web-servers/phpfpm/default.nix ./services/web-servers/phpfpm/default.nix
./services/web-servers/unit/default.nix
./services/web-servers/shellinabox.nix ./services/web-servers/shellinabox.nix
./services/web-servers/tomcat.nix ./services/web-servers/tomcat.nix
./services/web-servers/traefik.nix ./services/web-servers/traefik.nix
@ -872,9 +888,11 @@
./tasks/trackpoint.nix ./tasks/trackpoint.nix
./tasks/powertop.nix ./tasks/powertop.nix
./testing/service-runner.nix ./testing/service-runner.nix
./virtualisation/anbox.nix
./virtualisation/container-config.nix ./virtualisation/container-config.nix
./virtualisation/containers.nix ./virtualisation/containers.nix
./virtualisation/docker.nix ./virtualisation/docker.nix
./virtualisation/docker-containers.nix
./virtualisation/ecs-agent.nix ./virtualisation/ecs-agent.nix
./virtualisation/libvirtd.nix ./virtualisation/libvirtd.nix
./virtualisation/lxc.nix ./virtualisation/lxc.nix

View File

@ -226,9 +226,7 @@ in
environment.shells = environment.shells =
[ "/run/current-system/sw/bin/bash" [ "/run/current-system/sw/bin/bash"
"/var/run/current-system/sw/bin/bash"
"/run/current-system/sw/bin/sh" "/run/current-system/sw/bin/sh"
"/var/run/current-system/sw/bin/sh"
"${pkgs.bashInteractive}/bin/bash" "${pkgs.bashInteractive}/bin/bash"
"${pkgs.bashInteractive}/bin/sh" "${pkgs.bashInteractive}/bin/sh"
]; ];

View File

@ -4,15 +4,34 @@ with lib;
{ {
###### interface options.programs.browserpass.enable = mkEnableOption "Browserpass native messaging host";
options = {
programs.browserpass.enable = mkEnableOption "the NativeMessaging configuration for Chromium, Chrome, and Vivaldi.";
};
###### implementation
config = mkIf config.programs.browserpass.enable { config = mkIf config.programs.browserpass.enable {
environment.systemPackages = [ pkgs.browserpass ]; environment.etc = let
environment.etc = { appId = "com.github.browserpass.native.json";
source = part: "${pkgs.browserpass}/lib/browserpass/${part}/${appId}";
in {
# chromium
"chromium/native-messaging-hosts/${appId}".source = source "hosts/chromium";
"chromium/policies/managed/${appId}".source = source "policies/chromium";
# chrome
"opt/chrome/native-messaging-hosts/${appId}".source = source "hosts/chromium";
"opt/chrome/policies/managed/${appId}".source = source "policies/chromium";
# vivaldi
"opt/vivaldi/native-messaging-hosts/${appId}".source = source "hosts/chromium";
"opt/vivaldi/policies/managed/${appId}".source = source "policies/chromium";
# brave
"opt/brave/native-messaging-hosts/${appId}".source = source "hosts/chromium";
"opt/brave/policies/managed/${appId}".source = source "policies/chromium";
}
# As with the v2 backwards compatibility in the pkgs.browserpass
# declaration, this part can be removed once the browser extension
# auto-updates to v3 (planned 2019-04-13, see
# https://github.com/browserpass/browserpass-native/issues/31)
// {
"chromium/native-messaging-hosts/com.dannyvankooten.browserpass.json".source = "${pkgs.browserpass}/etc/chrome-host.json"; "chromium/native-messaging-hosts/com.dannyvankooten.browserpass.json".source = "${pkgs.browserpass}/etc/chrome-host.json";
"chromium/policies/managed/com.dannyvankooten.browserpass.json".source = "${pkgs.browserpass}/etc/chrome-policy.json"; "chromium/policies/managed/com.dannyvankooten.browserpass.json".source = "${pkgs.browserpass}/etc/chrome-policy.json";
"opt/chrome/native-messaging-hosts/com.dannyvankooten.browserpass.json".source = "${pkgs.browserpass}/etc/chrome-host.json"; "opt/chrome/native-messaging-hosts/com.dannyvankooten.browserpass.json".source = "${pkgs.browserpass}/etc/chrome-host.json";

View File

@ -232,7 +232,6 @@ in
environment.shells = [ environment.shells = [
"/run/current-system/sw/bin/fish" "/run/current-system/sw/bin/fish"
"/var/run/current-system/sw/bin/fish"
"${pkgs.fish}/bin/fish" "${pkgs.fish}/bin/fish"
]; ];

View File

@ -11,6 +11,15 @@ in
{ {
options.programs.gnupg = { options.programs.gnupg = {
package = mkOption {
type = types.package;
default = pkgs.gnupg;
defaultText = "pkgs.gnupg";
description = ''
The gpg package that should be used.
'';
};
agent.enable = mkOption { agent.enable = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
@ -75,7 +84,7 @@ in
wantedBy = [ "sockets.target" ]; wantedBy = [ "sockets.target" ];
}; };
systemd.packages = [ pkgs.gnupg ]; systemd.packages = [ cfg.package ];
environment.interactiveShellInit = '' environment.interactiveShellInit = ''
# Bind gpg-agent to this TTY if gpg commands are used. # Bind gpg-agent to this TTY if gpg commands are used.
@ -84,12 +93,12 @@ in
'' + (optionalString cfg.agent.enableSSHSupport '' '' + (optionalString cfg.agent.enableSSHSupport ''
# SSH agent protocol doesn't support changing TTYs, so bind the agent # SSH agent protocol doesn't support changing TTYs, so bind the agent
# to every new TTY. # to every new TTY.
${pkgs.gnupg}/bin/gpg-connect-agent --quiet updatestartuptty /bye > /dev/null ${cfg.package}/bin/gpg-connect-agent --quiet updatestartuptty /bye > /dev/null
''); '');
environment.extraInit = mkIf cfg.agent.enableSSHSupport '' environment.extraInit = mkIf cfg.agent.enableSSHSupport ''
if [ -z "$SSH_AUTH_SOCK" ]; then if [ -z "$SSH_AUTH_SOCK" ]; then
export SSH_AUTH_SOCK=$(${pkgs.gnupg}/bin/gpgconf --list-dirs agent-ssh-socket) export SSH_AUTH_SOCK=$(${cfg.package}/bin/gpgconf --list-dirs agent-ssh-socket)
fi fi
''; '';

View File

@ -1,91 +0,0 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.programs.sway-beta;
swayPackage = cfg.package;
swayWrapped = pkgs.writeShellScriptBin "sway" ''
set -o errexit
if [ ! "$_SWAY_WRAPPER_ALREADY_EXECUTED" ]; then
export _SWAY_WRAPPER_ALREADY_EXECUTED=1
${cfg.extraSessionCommands}
fi
if [ "$DBUS_SESSION_BUS_ADDRESS" ]; then
export DBUS_SESSION_BUS_ADDRESS
exec ${swayPackage}/bin/sway "$@"
else
exec ${pkgs.dbus}/bin/dbus-run-session ${swayPackage}/bin/sway "$@"
fi
'';
swayJoined = pkgs.symlinkJoin {
name = "sway-joined";
paths = [ swayWrapped swayPackage ];
};
in {
options.programs.sway-beta = {
enable = mkEnableOption ''
Sway, the i3-compatible tiling Wayland compositor. This module will be removed after the final release of Sway 1.0
'';
package = mkOption {
type = types.package;
default = pkgs.sway-beta;
defaultText = "pkgs.sway-beta";
description = ''
The package to be used for `sway`.
'';
};
extraSessionCommands = mkOption {
type = types.lines;
default = "";
example = ''
export SDL_VIDEODRIVER=wayland
# needs qt5.qtwayland in systemPackages
export QT_QPA_PLATFORM=wayland
export QT_WAYLAND_DISABLE_WINDOWDECORATION="1"
# Fix for some Java AWT applications (e.g. Android Studio),
# use this if they aren't displayed properly:
export _JAVA_AWT_WM_NONREPARENTING=1
'';
description = ''
Shell commands executed just before Sway is started.
'';
};
extraPackages = mkOption {
type = with types; listOf package;
default = with pkgs; [
swaylock swayidle
xwayland rxvt_unicode dmenu
];
defaultText = literalExample ''
with pkgs; [ swaylock swayidle xwayland rxvt_unicode dmenu ];
'';
example = literalExample ''
with pkgs; [
xwayland
i3status i3status-rust
termite rofi light
]
'';
description = ''
Extra packages to be installed system wide.
'';
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ swayJoined ] ++ cfg.extraPackages;
security.pam.services.swaylock = {};
hardware.opengl.enable = mkDefault true;
fonts.enableDefaultFonts = mkDefault true;
programs.dconf.enable = mkDefault true;
};
meta.maintainers = with lib.maintainers; [ gnidorah primeos colemickens ];
}

View File

@ -16,9 +16,9 @@ let
if [ "$DBUS_SESSION_BUS_ADDRESS" ]; then if [ "$DBUS_SESSION_BUS_ADDRESS" ]; then
export DBUS_SESSION_BUS_ADDRESS export DBUS_SESSION_BUS_ADDRESS
exec sway-setcap "$@" exec ${swayPackage}/bin/sway "$@"
else else
exec ${pkgs.dbus}/bin/dbus-run-session sway-setcap "$@" exec ${pkgs.dbus}/bin/dbus-run-session ${swayPackage}/bin/sway "$@"
fi fi
''; '';
swayJoined = pkgs.symlinkJoin { swayJoined = pkgs.symlinkJoin {
@ -28,22 +28,24 @@ let
in { in {
options.programs.sway = { options.programs.sway = {
enable = mkEnableOption '' enable = mkEnableOption ''
the tiling Wayland compositor Sway. After adding yourself to the "sway" Sway, the i3-compatible tiling Wayland compositor. You can manually launch
group you can manually launch Sway by executing "sway" from a terminal. Sway by executing "exec sway" on a TTY. Copy /etc/sway/config to
If you call "sway" with any parameters the extraSessionCommands won't be ~/.config/sway/config to modify the default configuration. See
executed and Sway won't be launched with dbus-launch''; https://github.com/swaywm/sway/wiki and "man 5 sway" for more information.
Please have a look at the "extraSessionCommands" example for running
programs natively under Wayland'';
extraSessionCommands = mkOption { extraSessionCommands = mkOption {
type = types.lines; type = types.lines;
default = ""; default = "";
example = '' example = ''
# Define a keymap (US QWERTY is the default) export SDL_VIDEODRIVER=wayland
export XKB_DEFAULT_LAYOUT=de,us # needs qt5.qtwayland in systemPackages
export XKB_DEFAULT_VARIANT=nodeadkeys export QT_QPA_PLATFORM=wayland
export XKB_DEFAULT_OPTIONS=grp:alt_shift_toggle,caps:escape export QT_WAYLAND_DISABLE_WINDOWDECORATION="1"
# Change the Keyboard repeat delay and rate # Fix for some Java AWT applications (e.g. Android Studio),
export WLC_REPEAT_DELAY=660 # use this if they aren't displayed properly:
export WLC_REPEAT_RATE=25 export _JAVA_AWT_WM_NONREPARENTING=1
''; '';
description = '' description = ''
Shell commands executed just before Sway is started. Shell commands executed just before Sway is started.
@ -53,14 +55,17 @@ in {
extraPackages = mkOption { extraPackages = mkOption {
type = with types; listOf package; type = with types; listOf package;
default = with pkgs; [ default = with pkgs; [
i3status xwayland rxvt_unicode dmenu swaylock swayidle
xwayland rxvt_unicode dmenu
]; ];
defaultText = literalExample '' defaultText = literalExample ''
with pkgs; [ i3status xwayland rxvt_unicode dmenu ]; with pkgs; [ swaylock swayidle xwayland rxvt_unicode dmenu ];
''; '';
example = literalExample '' example = literalExample ''
with pkgs; [ with pkgs; [
i3lock light termite xwayland
i3status i3status-rust
termite rofi light
] ]
''; '';
description = '' description = ''
@ -70,23 +75,19 @@ in {
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
environment.systemPackages = [ swayJoined ] ++ cfg.extraPackages; environment = {
security.wrappers.sway = { systemPackages = [ swayJoined ] ++ cfg.extraPackages;
program = "sway-setcap"; etc = {
source = "${swayPackage}/bin/sway"; "sway/config".source = "${swayPackage}/etc/sway/config";
capabilities = "cap_sys_ptrace,cap_sys_tty_config=eip"; #"sway/security.d".source = "${swayPackage}/etc/sway/security.d/";
owner = "root"; #"sway/config.d".source = "${swayPackage}/etc/sway/config.d/";
group = "sway"; };
permissions = "u+rx,g+rx";
}; };
users.groups.sway = {};
security.pam.services.swaylock = {}; security.pam.services.swaylock = {};
hardware.opengl.enable = mkDefault true; hardware.opengl.enable = mkDefault true;
fonts.enableDefaultFonts = mkDefault true; fonts.enableDefaultFonts = mkDefault true;
programs.dconf.enable = mkDefault true; programs.dconf.enable = mkDefault true;
}; };
meta.maintainers = with lib.maintainers; [ gnidorah primeos ]; meta.maintainers = with lib.maintainers; [ gnidorah primeos colemickens ];
} }

View File

@ -0,0 +1,20 @@
{ lib, pkgs, config, ... }:
with lib;
{
options.programs.waybar = {
enable = mkEnableOption "waybar";
};
config = mkIf config.programs.waybar.enable {
systemd.user.services.waybar = {
description = "Waybar as systemd service";
wantedBy = [ "graphical-session.target" ];
partOf = [ "graphical-session.target" ];
script = "${pkgs.waybar}/bin/waybar";
};
};
meta.maintainers = [ maintainers.FlorianFranzen ];
}

View File

@ -50,7 +50,6 @@ in
environment.shells = environment.shells =
[ "/run/current-system/sw/bin/xonsh" [ "/run/current-system/sw/bin/xonsh"
"/var/run/current-system/sw/bin/xonsh"
"${pkgs.xonsh}/bin/xonsh" "${pkgs.xonsh}/bin/xonsh"
]; ];

View File

@ -87,9 +87,9 @@
<para> <para>
<emphasis>Please keep in mind that this is not compatible with <emphasis>Please keep in mind that this is not compatible with
<literal>programs.zsh.ohMyZsh.custom</literal> as it requires an immutable store <literal>programs.zsh.ohMyZsh.custom</literal> as it requires an immutable
path while <literal>custom</literal> shall remain mutable! An evaluation store path while <literal>custom</literal> shall remain mutable! An
failure will be thrown if both <literal>custom</literal> and evaluation failure will be thrown if both <literal>custom</literal> and
<literal>customPkgs</literal> are set.</emphasis> <literal>customPkgs</literal> are set.</emphasis>
</para> </para>
</section> </section>

View File

@ -79,6 +79,33 @@ in
type = types.lines; type = types.lines;
}; };
histSize = mkOption {
default = 2000;
description = ''
Change history size.
'';
type = types.int;
};
histFile = mkOption {
default = "$HOME/.zsh_history";
description = ''
Change history file.
'';
type = types.str;
};
setOptions = mkOption {
type = types.listOf types.str;
default = [
"HIST_IGNORE_DUPS" "SHARE_HISTORY" "HIST_FCNTL_LOCK"
];
example = [ "EXTENDED_HISTORY" "RM_STAR_WAIT" ];
description = ''
Configure zsh options.
'';
};
enableCompletion = mkOption { enableCompletion = mkOption {
default = true; default = true;
description = '' description = ''
@ -162,12 +189,12 @@ in
. /etc/zinputrc . /etc/zinputrc
# history defaults # Don't export these, otherwise other shells (bash) will try to use same histfile
SAVEHIST=2000 SAVEHIST=${toString cfg.histSize}
HISTSIZE=2000 HISTSIZE=${toString cfg.histSize}
HISTFILE=$HOME/.zsh_history HISTFILE=${cfg.histFile}
setopt HIST_IGNORE_DUPS SHARE_HISTORY HIST_FCNTL_LOCK ${optionalString (cfg.setOptions != []) "setopt ${concatStringsSep " " cfg.setOptions}"}
HELPDIR="${pkgs.zsh}/share/zsh/$ZSH_VERSION/help" HELPDIR="${pkgs.zsh}/share/zsh/$ZSH_VERSION/help"
@ -203,7 +230,6 @@ in
environment.shells = environment.shells =
[ "/run/current-system/sw/bin/zsh" [ "/run/current-system/sw/bin/zsh"
"/var/run/current-system/sw/bin/zsh"
"${pkgs.zsh}/bin/zsh" "${pkgs.zsh}/bin/zsh"
]; ];

View File

@ -4,26 +4,12 @@ with lib;
{ {
imports = [ imports = [
# !!! These were renamed the other way, but got reverted later.
# !!! Drop these before 18.09 is released.
(mkRenamedOptionModule [ "system" "nixos" "stateVersion" ] [ "system" "stateVersion" ])
(mkRenamedOptionModule [ "system" "nixos" "defaultChannel" ] [ "system" "defaultChannel" ])
(mkRenamedOptionModule [ "environment" "x11Packages" ] [ "environment" "systemPackages" ])
(mkRenamedOptionModule [ "environment" "enableBashCompletion" ] [ "programs" "bash" "enableCompletion" ])
(mkRenamedOptionModule [ "environment" "nix" ] [ "nix" "package" ])
(mkRenamedOptionModule [ "fonts" "enableFontConfig" ] [ "fonts" "fontconfig" "enable" ])
(mkRenamedOptionModule [ "fonts" "extraFonts" ] [ "fonts" "fonts" ])
(mkRenamedOptionModule [ "networking" "enableWLAN" ] [ "networking" "wireless" "enable" ])
(mkRenamedOptionModule [ "networking" "enableRT73Firmware" ] [ "hardware" "enableRedistributableFirmware" ]) (mkRenamedOptionModule [ "networking" "enableRT73Firmware" ] [ "hardware" "enableRedistributableFirmware" ])
(mkRenamedOptionModule [ "networking" "enableIntel3945ABGFirmware" ] [ "hardware" "enableRedistributableFirmware" ]) (mkRenamedOptionModule [ "networking" "enableIntel3945ABGFirmware" ] [ "hardware" "enableRedistributableFirmware" ])
(mkRenamedOptionModule [ "networking" "enableIntel2100BGFirmware" ] [ "hardware" "enableRedistributableFirmware" ]) (mkRenamedOptionModule [ "networking" "enableIntel2100BGFirmware" ] [ "hardware" "enableRedistributableFirmware" ])
(mkRenamedOptionModule [ "networking" "enableRalinkFirmware" ] [ "hardware" "enableRedistributableFirmware" ]) (mkRenamedOptionModule [ "networking" "enableRalinkFirmware" ] [ "hardware" "enableRedistributableFirmware" ])
(mkRenamedOptionModule [ "networking" "enableRTL8192cFirmware" ] [ "hardware" "enableRedistributableFirmware" ]) (mkRenamedOptionModule [ "networking" "enableRTL8192cFirmware" ] [ "hardware" "enableRedistributableFirmware" ])
(mkRenamedOptionModule [ "networking" "networkmanager" "useDnsmasq" ] [ "networking" "networkmanager" "dns" ]) (mkRenamedOptionModule [ "networking" "networkmanager" "useDnsmasq" ] [ "networking" "networkmanager" "dns" ])
(mkRenamedOptionModule [ "services" "cadvisor" "host" ] [ "services" "cadvisor" "listenAddress" ])
(mkChangedOptionModule [ "services" "printing" "gutenprint" ] [ "services" "printing" "drivers" ] (mkChangedOptionModule [ "services" "printing" "gutenprint" ] [ "services" "printing" "drivers" ]
(config: (config:
let enabled = getAttrFromPath [ "services" "printing" "gutenprint" ] config; let enabled = getAttrFromPath [ "services" "printing" "gutenprint" ] config;
@ -33,11 +19,7 @@ with lib;
let value = getAttrFromPath [ "services" "ddclient" "domain" ] config; let value = getAttrFromPath [ "services" "ddclient" "domain" ] config;
in if value != "" then [ value ] else [])) in if value != "" then [ value ] else []))
(mkRemovedOptionModule [ "services" "ddclient" "homeDir" ] "") (mkRemovedOptionModule [ "services" "ddclient" "homeDir" ] "")
(mkRenamedOptionModule [ "services" "elasticsearch" "host" ] [ "services" "elasticsearch" "listenAddress" ])
(mkRenamedOptionModule [ "services" "graphite" "api" "host" ] [ "services" "graphite" "api" "listenAddress" ])
(mkRenamedOptionModule [ "services" "graphite" "web" "host" ] [ "services" "graphite" "web" "listenAddress" ])
(mkRenamedOptionModule [ "services" "i2pd" "extIp" ] [ "services" "i2pd" "address" ]) (mkRenamedOptionModule [ "services" "i2pd" "extIp" ] [ "services" "i2pd" "address" ])
(mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ])
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ]) (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"]) (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "port" ] ["services" "kubernetes" "apiserver" "insecurePort"]) (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "port" ] ["services" "kubernetes" "apiserver" "insecurePort"])
@ -54,7 +36,6 @@ with lib;
(mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"]) (mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
(mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "") (mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
(mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ]) (mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
(mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ])
(mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "defaultListenAddress" ]) (mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "defaultListenAddress" ])
(mkRenamedOptionModule [ "services" "neo4j" "listenAddress" ] [ "services" "neo4j" "defaultListenAddress" ]) (mkRenamedOptionModule [ "services" "neo4j" "listenAddress" ] [ "services" "neo4j" "defaultListenAddress" ])
(mkRenamedOptionModule [ "services" "neo4j" "enableBolt" ] [ "services" "neo4j" "bolt" "enable" ]) (mkRenamedOptionModule [ "services" "neo4j" "enableBolt" ] [ "services" "neo4j" "bolt" "enable" ])
@ -64,10 +45,8 @@ with lib;
(mkRemovedOptionModule [ "services" "neo4j" "port" ] "Use services.neo4j.http.listenAddress instead.") (mkRemovedOptionModule [ "services" "neo4j" "port" ] "Use services.neo4j.http.listenAddress instead.")
(mkRemovedOptionModule [ "services" "neo4j" "boltPort" ] "Use services.neo4j.bolt.listenAddress instead.") (mkRemovedOptionModule [ "services" "neo4j" "boltPort" ] "Use services.neo4j.bolt.listenAddress instead.")
(mkRemovedOptionModule [ "services" "neo4j" "httpsPort" ] "Use services.neo4j.https.listenAddress instead.") (mkRemovedOptionModule [ "services" "neo4j" "httpsPort" ] "Use services.neo4j.https.listenAddress instead.")
(mkRenamedOptionModule [ "services" "shout" "host" ] [ "services" "shout" "listenAddress" ]) (mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "user" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a user setting.")
(mkRenamedOptionModule [ "services" "sslh" "host" ] [ "services" "sslh" "listenAddress" ]) (mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "group" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a group setting.")
(mkRenamedOptionModule [ "services" "statsd" "host" ] [ "services" "statsd" "listenAddress" ])
(mkRenamedOptionModule [ "services" "subsonic" "host" ] [ "services" "subsonic" "listenAddress" ])
(mkRenamedOptionModule [ "services" "tor" "relay" "portSpec" ] [ "services" "tor" "relay" "port" ]) (mkRenamedOptionModule [ "services" "tor" "relay" "portSpec" ] [ "services" "tor" "relay" "port" ])
(mkRenamedOptionModule [ "services" "vmwareGuest" ] [ "virtualisation" "vmware" "guest" ]) (mkRenamedOptionModule [ "services" "vmwareGuest" ] [ "virtualisation" "vmware" "guest" ])
(mkRenamedOptionModule [ "jobs" ] [ "systemd" "services" ]) (mkRenamedOptionModule [ "jobs" ] [ "systemd" "services" ])
@ -91,82 +70,31 @@ with lib;
(mkRenamedOptionModule [ "services" "xserver" "displayManager" "logToJournal" ] [ "services" "xserver" "displayManager" "job" "logToJournal" ]) (mkRenamedOptionModule [ "services" "xserver" "displayManager" "logToJournal" ] [ "services" "xserver" "displayManager" "job" "logToJournal" ])
# Old Grub-related options. # Old Grub-related options.
(mkRenamedOptionModule [ "boot" "initrd" "extraKernelModules" ] [ "boot" "initrd" "kernelModules" ])
(mkRenamedOptionModule [ "boot" "extraKernelParams" ] [ "boot" "kernelParams" ])
(mkRenamedOptionModule [ "boot" "loader" "grub" "timeout" ] [ "boot" "loader" "timeout" ]) (mkRenamedOptionModule [ "boot" "loader" "grub" "timeout" ] [ "boot" "loader" "timeout" ])
(mkRenamedOptionModule [ "boot" "loader" "gummiboot" "timeout" ] [ "boot" "loader" "timeout" ]) (mkRenamedOptionModule [ "boot" "loader" "gummiboot" "timeout" ] [ "boot" "loader" "timeout" ])
# smartd
(mkRenamedOptionModule [ "services" "smartd" "deviceOpts" ] [ "services" "smartd" "defaults" "monitored" ])
# OpenSSH # OpenSSH
(mkRenamedOptionModule [ "services" "sshd" "ports" ] [ "services" "openssh" "ports" ])
(mkAliasOptionModule [ "services" "sshd" "enable" ] [ "services" "openssh" "enable" ]) (mkAliasOptionModule [ "services" "sshd" "enable" ] [ "services" "openssh" "enable" ])
(mkRenamedOptionModule [ "services" "sshd" "allowSFTP" ] [ "services" "openssh" "allowSFTP" ])
(mkRenamedOptionModule [ "services" "sshd" "forwardX11" ] [ "services" "openssh" "forwardX11" ])
(mkRenamedOptionModule [ "services" "sshd" "gatewayPorts" ] [ "services" "openssh" "gatewayPorts" ])
(mkRenamedOptionModule [ "services" "sshd" "permitRootLogin" ] [ "services" "openssh" "permitRootLogin" ])
(mkRenamedOptionModule [ "services" "xserver" "startSSHAgent" ] [ "services" "xserver" "startOpenSSHAgent" ])
(mkRenamedOptionModule [ "services" "xserver" "startOpenSSHAgent" ] [ "programs" "ssh" "startAgent" ])
(mkAliasOptionModule [ "services" "openssh" "knownHosts" ] [ "programs" "ssh" "knownHosts" ]) (mkAliasOptionModule [ "services" "openssh" "knownHosts" ] [ "programs" "ssh" "knownHosts" ])
# VirtualBox
(mkRenamedOptionModule [ "services" "virtualbox" "enable" ] [ "virtualisation" "virtualbox" "guest" "enable" ])
(mkRenamedOptionModule [ "services" "virtualboxGuest" "enable" ] [ "virtualisation" "virtualbox" "guest" "enable" ])
(mkRenamedOptionModule [ "programs" "virtualbox" "enable" ] [ "virtualisation" "virtualbox" "host" "enable" ])
(mkRenamedOptionModule [ "programs" "virtualbox" "addNetworkInterface" ] [ "virtualisation" "virtualbox" "host" "addNetworkInterface" ])
(mkRenamedOptionModule [ "programs" "virtualbox" "enableHardening" ] [ "virtualisation" "virtualbox" "host" "enableHardening" ])
(mkRenamedOptionModule [ "services" "virtualboxHost" "enable" ] [ "virtualisation" "virtualbox" "host" "enable" ])
(mkRenamedOptionModule [ "services" "virtualboxHost" "addNetworkInterface" ] [ "virtualisation" "virtualbox" "host" "addNetworkInterface" ])
(mkRenamedOptionModule [ "services" "virtualboxHost" "enableHardening" ] [ "virtualisation" "virtualbox" "host" "enableHardening" ])
# libvirtd # libvirtd
(mkRemovedOptionModule [ "virtualisation" "libvirtd" "enableKVM" ] (mkRemovedOptionModule [ "virtualisation" "libvirtd" "enableKVM" ]
"Set the option `virtualisation.libvirtd.qemuPackage' instead.") "Set the option `virtualisation.libvirtd.qemuPackage' instead.")
# Tarsnap
(mkRenamedOptionModule [ "services" "tarsnap" "config" ] [ "services" "tarsnap" "archives" ])
# ibus # ibus
(mkRenamedOptionModule [ "programs" "ibus" "plugins" ] [ "i18n" "inputMethod" "ibus" "engines" ]) (mkRenamedOptionModule [ "programs" "ibus" "plugins" ] [ "i18n" "inputMethod" "ibus" "engines" ])
# proxy
(mkRenamedOptionModule [ "nix" "proxy" ] [ "networking" "proxy" "default" ])
# sandboxing # sandboxing
(mkRenamedOptionModule [ "nix" "useChroot" ] [ "nix" "useSandbox" ]) (mkRenamedOptionModule [ "nix" "useChroot" ] [ "nix" "useSandbox" ])
(mkRenamedOptionModule [ "nix" "chrootDirs" ] [ "nix" "sandboxPaths" ]) (mkRenamedOptionModule [ "nix" "chrootDirs" ] [ "nix" "sandboxPaths" ])
# KDE
(mkRenamedOptionModule [ "kde" "extraPackages" ] [ "environment" "systemPackages" ])
(mkRenamedOptionModule [ "environment" "kdePackages" ] [ "environment" "systemPackages" ])
# Multiple efi bootloaders now
(mkRenamedOptionModule [ "boot" "loader" "efi" "efibootmgr" "enable" ] [ "boot" "loader" "efi" "canTouchEfiVariables" ])
# NixOS environment changes
# !!! this hardcodes bash, could we detect from config which shell is actually used?
(mkRenamedOptionModule [ "environment" "promptInit" ] [ "programs" "bash" "promptInit" ])
(mkRenamedOptionModule [ "services" "xserver" "driSupport" ] [ "hardware" "opengl" "driSupport" ])
(mkRenamedOptionModule [ "services" "xserver" "driSupport32Bit" ] [ "hardware" "opengl" "driSupport32Bit" ])
(mkRenamedOptionModule [ "services" "xserver" "s3tcSupport" ] [ "hardware" "opengl" "s3tcSupport" ])
(mkRenamedOptionModule [ "hardware" "opengl" "videoDrivers" ] [ "services" "xserver" "videoDrivers" ])
(mkRenamedOptionModule [ "services" "xserver" "vaapiDrivers" ] [ "hardware" "opengl" "extraPackages" ]) (mkRenamedOptionModule [ "services" "xserver" "vaapiDrivers" ] [ "hardware" "opengl" "extraPackages" ])
(mkRenamedOptionModule [ "services" "mysql55" ] [ "services" "mysql" ])
(mkAliasOptionModule [ "environment" "checkConfigurationOptions" ] [ "_module" "check" ]) (mkAliasOptionModule [ "environment" "checkConfigurationOptions" ] [ "_module" "check" ])
# opendkim # opendkim
(mkRenamedOptionModule [ "services" "opendkim" "keyFile" ] [ "services" "opendkim" "keyPath" ]) (mkRenamedOptionModule [ "services" "opendkim" "keyFile" ] [ "services" "opendkim" "keyPath" ])
# XBMC
(mkRenamedOptionModule [ "services" "xserver" "windowManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ])
(mkRenamedOptionModule [ "services" "xserver" "desktopManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ])
(mkRenamedOptionModule [ "services" "hostapd" "extraCfg" ] [ "services" "hostapd" "extraConfig" ])
# Enlightenment # Enlightenment
(mkRenamedOptionModule [ "services" "xserver" "desktopManager" "e19" "enable" ] [ "services" "xserver" "desktopManager" "enlightenment" "enable" ]) (mkRenamedOptionModule [ "services" "xserver" "desktopManager" "e19" "enable" ] [ "services" "xserver" "desktopManager" "enlightenment" "enable" ])
@ -208,7 +136,7 @@ with lib;
inetPort = [ "services" "postgrey" "inetPort" ]; inetPort = [ "services" "postgrey" "inetPort" ];
in in
if value inetAddr == null if value inetAddr == null
then { path = "/var/run/postgrey.sock"; } then { path = "/run/postgrey.sock"; }
else { addr = value inetAddr; port = value inetPort; } else { addr = value inetAddr; port = value inetPort; }
)) ))

View File

@ -76,7 +76,7 @@ in
}; };
failmode = mkOption { failmode = mkOption {
type = types.enum [ "safe" "enum" ]; type = types.enum [ "safe" "secure" ];
default = "safe"; default = "safe";
description = '' description = ''
On service or configuration errors that prevent Duo On service or configuration errors that prevent Duo

View File

@ -48,6 +48,16 @@ let
''; '';
}; };
yubicoAuth = mkOption {
default = config.security.pam.yubico.enable;
type = types.bool;
description = ''
If set, users listed in
<filename>~/.yubico/authorized_yubikeys</filename>
are able to log in with the asociated Yubikey tokens.
'';
};
googleAuthenticator = { googleAuthenticator = {
enable = mkOption { enable = mkOption {
default = false; default = false;
@ -340,6 +350,8 @@ let
"auth sufficient ${pkgs.pam_usb}/lib/security/pam_usb.so"} "auth sufficient ${pkgs.pam_usb}/lib/security/pam_usb.so"}
${let oath = config.security.pam.oath; in optionalString cfg.oathAuth ${let oath = config.security.pam.oath; in optionalString cfg.oathAuth
"auth requisite ${pkgs.oathToolkit}/lib/security/pam_oath.so window=${toString oath.window} usersfile=${toString oath.usersFile} digits=${toString oath.digits}"} "auth requisite ${pkgs.oathToolkit}/lib/security/pam_oath.so window=${toString oath.window} usersfile=${toString oath.usersFile} digits=${toString oath.digits}"}
${let yubi = config.security.pam.yubico; in optionalString cfg.yubicoAuth
"auth ${yubi.control} ${pkgs.yubico-pam}/lib/security/pam_yubico.so id=${toString yubi.id} ${optionalString yubi.debug "debug"}"}
'' + '' +
# Modules in this block require having the password set in PAM_AUTHTOK. # Modules in this block require having the password set in PAM_AUTHTOK.
# pam_unix is marked as 'sufficient' on NixOS which means nothing will run # pam_unix is marked as 'sufficient' on NixOS which means nothing will run
@ -398,6 +410,8 @@ let
"password sufficient ${pam_krb5}/lib/security/pam_krb5.so use_first_pass"} "password sufficient ${pam_krb5}/lib/security/pam_krb5.so use_first_pass"}
${optionalString config.services.samba.syncPasswordsByPam ${optionalString config.services.samba.syncPasswordsByPam
"password optional ${pkgs.samba}/lib/security/pam_smbpass.so nullok use_authtok try_first_pass"} "password optional ${pkgs.samba}/lib/security/pam_smbpass.so nullok use_authtok try_first_pass"}
${optionalString cfg.enableGnomeKeyring
"password optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so use_authtok"}
# Session management. # Session management.
${optionalString cfg.setEnvironment '' ${optionalString cfg.setEnvironment ''
@ -636,6 +650,54 @@ in
}; };
}; };
security.pam.yubico = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
Enables Yubico PAM (<literal>yubico-pam</literal>) module.
If set, users listed in
<filename>~/.yubico/authorized_yubikeys</filename>
are able to log in with the associated Yubikey tokens.
The file must have only one line:
<literal>username:yubikey_token_id1:yubikey_token_id2</literal>
More information can be found <link
xlink:href="https://developers.yubico.com/yubico-pam/">here</link>.
'';
};
control = mkOption {
default = "sufficient";
type = types.enum [ "required" "requisite" "sufficient" "optional" ];
description = ''
This option sets pam "control".
If you want to have multi factor authentication, use "required".
If you want to use Yubikey instead of regular password, use "sufficient".
Read
<citerefentry>
<refentrytitle>pam.conf</refentrytitle>
<manvolnum>5</manvolnum>
</citerefentry>
for better understanding of this option.
'';
};
id = mkOption {
example = "42";
type = types.string;
description = "client id";
};
debug = mkOption {
default = false;
type = types.bool;
description = ''
Debug output to stderr.
'';
};
};
security.pam.enableEcryptfs = mkOption { security.pam.enableEcryptfs = mkOption {
default = false; default = false;
description = '' description = ''

View File

@ -0,0 +1,199 @@
{ config, pkgs, lib, ... }:
let
toplevelConfig = config;
inherit (lib) types;
inherit (import ../system/boot/systemd-lib.nix {
inherit config pkgs lib;
}) mkPathSafeName;
in {
options.systemd.services = lib.mkOption {
type = types.attrsOf (types.submodule ({ name, config, ... }: {
options.confinement.enable = lib.mkOption {
type = types.bool;
default = false;
description = ''
If set, all the required runtime store paths for this service are
bind-mounted into a <literal>tmpfs</literal>-based <citerefentry>
<refentrytitle>chroot</refentrytitle>
<manvolnum>2</manvolnum>
</citerefentry>.
'';
};
options.confinement.fullUnit = lib.mkOption {
type = types.bool;
default = false;
description = ''
Whether to include the full closure of the systemd unit file into the
chroot, instead of just the dependencies for the executables.
<warning><para>While it may be tempting to just enable this option to
make things work quickly, please be aware that this might add paths
to the closure of the chroot that you didn't anticipate. It's better
to use <option>confinement.packages</option> to <emphasis
role="strong">explicitly</emphasis> add additional store paths to the
chroot.</para></warning>
'';
};
options.confinement.packages = lib.mkOption {
type = types.listOf (types.either types.str types.package);
default = [];
description = let
mkScOption = optName: "<option>serviceConfig.${optName}</option>";
in ''
Additional packages or strings with context to add to the closure of
the chroot. By default, this includes all the packages from the
${lib.concatMapStringsSep ", " mkScOption [
"ExecReload" "ExecStartPost" "ExecStartPre" "ExecStop"
"ExecStopPost"
]} and ${mkScOption "ExecStart"} options. If you want to have all the
dependencies of this systemd unit, you can use
<option>confinement.fullUnit</option>.
<note><para>The store paths listed in <option>path</option> are
<emphasis role="strong">not</emphasis> included in the closure as
well as paths from other options except those listed
above.</para></note>
'';
};
options.confinement.binSh = lib.mkOption {
type = types.nullOr types.path;
default = toplevelConfig.environment.binsh;
defaultText = "config.environment.binsh";
example = lib.literalExample "\${pkgs.dash}/bin/dash";
description = ''
The program to make available as <filename>/bin/sh</filename> inside
the chroot. If this is set to <literal>null</literal>, no
<filename>/bin/sh</filename> is provided at all.
This is useful for some applications, which for example use the
<citerefentry>
<refentrytitle>system</refentrytitle>
<manvolnum>3</manvolnum>
</citerefentry> library function to execute commands.
'';
};
options.confinement.mode = lib.mkOption {
type = types.enum [ "full-apivfs" "chroot-only" ];
default = "full-apivfs";
description = ''
The value <literal>full-apivfs</literal> (the default) sets up
private <filename class="directory">/dev</filename>, <filename
class="directory">/proc</filename>, <filename
class="directory">/sys</filename> and <filename
class="directory">/tmp</filename> file systems in a separate user
name space.
If this is set to <literal>chroot-only</literal>, only the file
system name space is set up along with the call to <citerefentry>
<refentrytitle>chroot</refentrytitle>
<manvolnum>2</manvolnum>
</citerefentry>.
<note><para>This doesn't cover network namespaces and is solely for
file system level isolation.</para></note>
'';
};
config = let
rootName = "${mkPathSafeName name}-chroot";
inherit (config.confinement) binSh fullUnit;
wantsAPIVFS = lib.mkDefault (config.confinement.mode == "full-apivfs");
in lib.mkIf config.confinement.enable {
serviceConfig = {
RootDirectory = pkgs.runCommand rootName {} "mkdir \"$out\"";
TemporaryFileSystem = "/";
PrivateMounts = lib.mkDefault true;
# https://github.com/NixOS/nixpkgs/issues/14645 is a future attempt
# to change some of these to default to true.
#
# If we run in chroot-only mode, having something like PrivateDevices
# set to true by default will mount /dev within the chroot, whereas
# with "chroot-only" it's expected that there are no /dev, /proc and
# /sys file systems available.
#
# However, if this suddenly becomes true, the attack surface will
# increase, so let's explicitly set these options to true/false
# depending on the mode.
MountAPIVFS = wantsAPIVFS;
PrivateDevices = wantsAPIVFS;
PrivateTmp = wantsAPIVFS;
PrivateUsers = wantsAPIVFS;
ProtectControlGroups = wantsAPIVFS;
ProtectKernelModules = wantsAPIVFS;
ProtectKernelTunables = wantsAPIVFS;
};
confinement.packages = let
execOpts = [
"ExecReload" "ExecStart" "ExecStartPost" "ExecStartPre" "ExecStop"
"ExecStopPost"
];
execPkgs = lib.concatMap (opt: let
isSet = config.serviceConfig ? ${opt};
in lib.optional isSet config.serviceConfig.${opt}) execOpts;
unitAttrs = toplevelConfig.systemd.units."${name}.service";
allPkgs = lib.singleton (builtins.toJSON unitAttrs);
unitPkgs = if fullUnit then allPkgs else execPkgs;
in unitPkgs ++ lib.optional (binSh != null) binSh;
};
}));
};
config.assertions = lib.concatLists (lib.mapAttrsToList (name: cfg: let
whatOpt = optName: "The 'serviceConfig' option '${optName}' for"
+ " service '${name}' is enabled in conjunction with"
+ " 'confinement.enable'";
in lib.optionals cfg.confinement.enable [
{ assertion = !cfg.serviceConfig.RootDirectoryStartOnly or false;
message = "${whatOpt "RootDirectoryStartOnly"}, but right now systemd"
+ " doesn't support restricting bind-mounts to 'ExecStart'."
+ " Please either define a separate service or find a way to run"
+ " commands other than ExecStart within the chroot.";
}
{ assertion = !cfg.serviceConfig.DynamicUser or false;
message = "${whatOpt "DynamicUser"}. Please create a dedicated user via"
+ " the 'users.users' option instead as this combination is"
+ " currently not supported.";
}
]) config.systemd.services);
config.systemd.packages = lib.concatLists (lib.mapAttrsToList (name: cfg: let
rootPaths = let
contents = lib.concatStringsSep "\n" cfg.confinement.packages;
in pkgs.writeText "${mkPathSafeName name}-string-contexts.txt" contents;
chrootPaths = pkgs.runCommand "${mkPathSafeName name}-chroot-paths" {
closureInfo = pkgs.closureInfo { inherit rootPaths; };
serviceName = "${name}.service";
excludedPath = rootPaths;
} ''
mkdir -p "$out/lib/systemd/system"
serviceFile="$out/lib/systemd/system/$serviceName"
echo '[Service]' > "$serviceFile"
# /bin/sh is special here, because the option value could contain a
# symlink and we need to properly resolve it.
${lib.optionalString (cfg.confinement.binSh != null) ''
binsh=${lib.escapeShellArg cfg.confinement.binSh}
realprog="$(readlink -e "$binsh")"
echo "BindReadOnlyPaths=$realprog:/bin/sh" >> "$serviceFile"
''}
while read storePath; do
if [ -L "$storePath" ]; then
# Currently, systemd can't cope with symlinks in Bind(ReadOnly)Paths,
# so let's just bind-mount the target to that location.
echo "BindReadOnlyPaths=$(readlink -e "$storePath"):$storePath"
elif [ "$storePath" != "$excludedPath" ]; then
echo "BindReadOnlyPaths=$storePath"
fi
done < "$closureInfo/store-paths" >> "$serviceFile"
'';
in lib.optional cfg.confinement.enable chrootPaths) config.systemd.services);
}

View File

@ -15,7 +15,7 @@ in {
enable = mkEnableOption "ympd, the MPD Web GUI"; enable = mkEnableOption "ympd, the MPD Web GUI";
webPort = mkOption { webPort = mkOption {
type = types.string; type = types.either types.str types.port; # string for backwards compat
default = "8080"; default = "8080";
description = "The port where ympd's web interface will be available."; description = "The port where ympd's web interface will be available.";
example = "ssl://8080:/path/to/ssl-private-key.pem"; example = "ssl://8080:/path/to/ssl-private-key.pem";
@ -49,7 +49,7 @@ in {
systemd.services.ympd = { systemd.services.ympd = {
description = "Standalone MPD Web GUI written in C"; description = "Standalone MPD Web GUI written in C";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
serviceConfig.ExecStart = "${pkgs.ympd}/bin/ympd --host ${cfg.mpd.host} --port ${toString cfg.mpd.port} --webport ${cfg.webPort} --user nobody"; serviceConfig.ExecStart = "${pkgs.ympd}/bin/ympd --host ${cfg.mpd.host} --port ${toString cfg.mpd.port} --webport ${toString cfg.webPort} --user nobody";
}; };
}; };

View File

@ -15,7 +15,7 @@ let
Name = "${fd_cfg.name}"; Name = "${fd_cfg.name}";
FDPort = ${toString fd_cfg.port}; FDPort = ${toString fd_cfg.port};
WorkingDirectory = "${libDir}"; WorkingDirectory = "${libDir}";
Pid Directory = "/var/run"; Pid Directory = "/run";
${fd_cfg.extraClientConfig} ${fd_cfg.extraClientConfig}
} }
@ -41,7 +41,7 @@ let
Name = "${sd_cfg.name}"; Name = "${sd_cfg.name}";
SDPort = ${toString sd_cfg.port}; SDPort = ${toString sd_cfg.port};
WorkingDirectory = "${libDir}"; WorkingDirectory = "${libDir}";
Pid Directory = "/var/run"; Pid Directory = "/run";
${sd_cfg.extraStorageConfig} ${sd_cfg.extraStorageConfig}
} }
@ -77,7 +77,7 @@ let
Password = "${dir_cfg.password}"; Password = "${dir_cfg.password}";
DirPort = ${toString dir_cfg.port}; DirPort = ${toString dir_cfg.port};
Working Directory = "${libDir}"; Working Directory = "${libDir}";
Pid Directory = "/var/run/"; Pid Directory = "/run/";
QueryFile = "${pkgs.bacula}/etc/query.sql"; QueryFile = "${pkgs.bacula}/etc/query.sql";
${dir_cfg.extraDirectorConfig} ${dir_cfg.extraDirectorConfig}
} }

View File

@ -63,18 +63,49 @@ in
}; };
enable = mkEnableOption "Whether to enable Kubernetes addon manager."; enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager";
bootstrapAddonsKubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager bootstrap";
}; };
###### implementation ###### implementation
config = mkIf cfg.enable { config = let
addonManagerPaths = filter (a: a != null) [
cfg.kubeconfig.caFile
cfg.kubeconfig.certFile
cfg.kubeconfig.keyFile
];
bootstrapAddonsPaths = filter (a: a != null) [
cfg.bootstrapAddonsKubeconfig.caFile
cfg.bootstrapAddonsKubeconfig.certFile
cfg.bootstrapAddonsKubeconfig.keyFile
];
in mkIf cfg.enable {
environment.etc."kubernetes/addons".source = "${addons}/"; environment.etc."kubernetes/addons".source = "${addons}/";
#TODO: Get rid of kube-addon-manager in the future for the following reasons
# - it is basically just a shell script wrapped around kubectl
# - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
# - it is designed to be used with k8s system components only
# - it would be better with a more Nix-oriented way of managing addons
systemd.services.kube-addon-manager = { systemd.services.kube-addon-manager = {
description = "Kubernetes addon manager"; description = "Kubernetes addon manager";
wantedBy = [ "kubernetes.target" ]; wantedBy = [ "kubernetes.target" ];
after = [ "kube-apiserver.service" ]; after = [ "kube-node-online.target" ];
environment.ADDON_PATH = "/etc/kubernetes/addons/"; before = [ "kubernetes.target" ];
path = [ pkgs.gawk ]; environment = {
ADDON_PATH = "/etc/kubernetes/addons/";
KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager" cfg.kubeconfig;
};
path = with pkgs; [ gawk kubectl ];
preStart = ''
until kubectl -n kube-system get serviceaccounts/default 2>/dev/null; do
echo kubectl -n kube-system get serviceaccounts/default: exit status $?
sleep 2
done
'';
serviceConfig = { serviceConfig = {
Slice = "kubernetes.slice"; Slice = "kubernetes.slice";
ExecStart = "${top.package}/bin/kube-addons"; ExecStart = "${top.package}/bin/kube-addons";
@ -84,8 +115,52 @@ in
Restart = "on-failure"; Restart = "on-failure";
RestartSec = 10; RestartSec = 10;
}; };
unitConfig.ConditionPathExists = addonManagerPaths;
}; };
systemd.paths.kube-addon-manager = {
wantedBy = [ "kube-addon-manager.service" ];
pathConfig = {
PathExists = addonManagerPaths;
PathChanged = addonManagerPaths;
};
};
services.kubernetes.addonManager.kubeconfig.server = mkDefault top.apiserverAddress;
systemd.services.kube-addon-manager-bootstrap = mkIf (top.apiserver.enable && top.addonManager.bootstrapAddons != {}) {
wantedBy = [ "kube-control-plane-online.target" ];
after = [ "kube-apiserver.service" ];
before = [ "kube-control-plane-online.target" ];
path = [ pkgs.kubectl ];
environment = {
KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager-bootstrap" cfg.bootstrapAddonsKubeconfig;
};
preStart = with pkgs; let
files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
cfg.bootstrapAddons;
in ''
until kubectl auth can-i '*' '*' -q 2>/dev/null; do
echo kubectl auth can-i '*' '*': exit status $?
sleep 2
done
kubectl apply -f ${concatStringsSep " \\\n -f " files}
'';
script = "echo Ok";
unitConfig.ConditionPathExists = bootstrapAddonsPaths;
};
systemd.paths.kube-addon-manager-bootstrap = {
wantedBy = [ "kube-addon-manager-bootstrap.service" ];
pathConfig = {
PathExists = bootstrapAddonsPaths;
PathChanged = bootstrapAddonsPaths;
};
};
services.kubernetes.addonManager.bootstrapAddonsKubeconfig.server = mkDefault top.apiserverAddress;
services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
(let (let
name = system:kube-addon-manager; name = system:kube-addon-manager;

View File

@ -169,6 +169,23 @@ in {
}; };
}; };
kubernetes-dashboard-cm = {
apiVersion = "v1";
kind = "ConfigMap";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-settings";
namespace = "kube-system";
};
};
};
services.kubernetes.addonManager.bootstrapAddons = mkMerge [{
kubernetes-dashboard-sa = { kubernetes-dashboard-sa = {
apiVersion = "v1"; apiVersion = "v1";
kind = "ServiceAccount"; kind = "ServiceAccount";
@ -210,20 +227,9 @@ in {
}; };
type = "Opaque"; type = "Opaque";
}; };
kubernetes-dashboard-cm = { }
apiVersion = "v1";
kind = "ConfigMap"; (optionalAttrs cfg.rbac.enable
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-settings";
namespace = "kube-system";
};
};
} // (optionalAttrs cfg.rbac.enable
(let (let
subjects = [{ subjects = [{
kind = "ServiceAccount"; kind = "ServiceAccount";
@ -323,6 +329,6 @@ in {
inherit subjects; inherit subjects;
}; };
}) })
)); ))];
}; };
} }

View File

@ -190,6 +190,18 @@ in
default = null; default = null;
}; };
proxyClientCertFile = mkOption {
description = "Client certificate to use for connections to proxy.";
default = null;
type = nullOr path;
};
proxyClientKeyFile = mkOption {
description = "Key to use for connections to proxy.";
default = null;
type = nullOr path;
};
runtimeConfig = mkOption { runtimeConfig = mkOption {
description = '' description = ''
Api runtime configuration. See Api runtime configuration. See
@ -278,11 +290,32 @@ in
###### implementation ###### implementation
config = mkMerge [ config = mkMerge [
(mkIf cfg.enable { (let
apiserverPaths = filter (a: a != null) [
cfg.clientCaFile
cfg.etcd.caFile
cfg.etcd.certFile
cfg.etcd.keyFile
cfg.kubeletClientCaFile
cfg.kubeletClientCertFile
cfg.kubeletClientKeyFile
cfg.serviceAccountKeyFile
cfg.tlsCertFile
cfg.tlsKeyFile
];
etcdPaths = filter (a: a != null) [
config.services.etcd.trustedCaFile
config.services.etcd.certFile
config.services.etcd.keyFile
];
in mkIf cfg.enable {
systemd.services.kube-apiserver = { systemd.services.kube-apiserver = {
description = "Kubernetes APIServer Service"; description = "Kubernetes APIServer Service";
wantedBy = [ "kubernetes.target" ]; wantedBy = [ "kube-control-plane-online.target" ];
after = [ "network.target" ]; after = [ "certmgr.service" ];
before = [ "kube-control-plane-online.target" ];
serviceConfig = { serviceConfig = {
Slice = "kubernetes.slice"; Slice = "kubernetes.slice";
ExecStart = ''${top.package}/bin/kube-apiserver \ ExecStart = ''${top.package}/bin/kube-apiserver \
@ -324,6 +357,10 @@ in
"--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \ "--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
${optionalString (cfg.preferredAddressTypes != null) ${optionalString (cfg.preferredAddressTypes != null)
"--kubelet-preferred-address-types=${cfg.preferredAddressTypes}"} \ "--kubelet-preferred-address-types=${cfg.preferredAddressTypes}"} \
${optionalString (cfg.proxyClientCertFile != null)
"--proxy-client-cert-file=${cfg.proxyClientCertFile}"} \
${optionalString (cfg.proxyClientKeyFile != null)
"--proxy-client-key-file=${cfg.proxyClientKeyFile}"} \
--insecure-bind-address=${cfg.insecureBindAddress} \ --insecure-bind-address=${cfg.insecureBindAddress} \
--insecure-port=${toString cfg.insecurePort} \ --insecure-port=${toString cfg.insecurePort} \
${optionalString (cfg.runtimeConfig != "") ${optionalString (cfg.runtimeConfig != "")
@ -349,6 +386,15 @@ in
Restart = "on-failure"; Restart = "on-failure";
RestartSec = 5; RestartSec = 5;
}; };
unitConfig.ConditionPathExists = apiserverPaths;
};
systemd.paths.kube-apiserver = mkIf top.apiserver.enable {
wantedBy = [ "kube-apiserver.service" ];
pathConfig = {
PathExists = apiserverPaths;
PathChanged = apiserverPaths;
};
}; };
services.etcd = { services.etcd = {
@ -362,6 +408,18 @@ in
initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"]; initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
}; };
systemd.services.etcd = {
unitConfig.ConditionPathExists = etcdPaths;
};
systemd.paths.etcd = {
wantedBy = [ "etcd.service" ];
pathConfig = {
PathExists = etcdPaths;
PathChanged = etcdPaths;
};
};
services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled { services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
apiserver-kubelet-api-admin-crb = { apiserver-kubelet-api-admin-crb = {
@ -397,6 +455,11 @@ in
] ++ cfg.extraSANs; ] ++ cfg.extraSANs;
action = "systemctl restart kube-apiserver.service"; action = "systemctl restart kube-apiserver.service";
}; };
apiserverProxyClient = mkCert {
name = "kube-apiserver-proxy-client";
CN = "front-proxy-client";
action = "systemctl restart kube-apiserver.service";
};
apiserverKubeletClient = mkCert { apiserverKubeletClient = mkCert {
name = "kube-apiserver-kubelet-client"; name = "kube-apiserver-kubelet-client";
CN = "system:kube-apiserver"; CN = "system:kube-apiserver";

View File

@ -104,11 +104,31 @@ in
}; };
###### implementation ###### implementation
config = mkIf cfg.enable { config = let
systemd.services.kube-controller-manager = {
controllerManagerPaths = filter (a: a != null) [
cfg.kubeconfig.caFile
cfg.kubeconfig.certFile
cfg.kubeconfig.keyFile
cfg.rootCaFile
cfg.serviceAccountKeyFile
cfg.tlsCertFile
cfg.tlsKeyFile
];
in mkIf cfg.enable {
systemd.services.kube-controller-manager = rec {
description = "Kubernetes Controller Manager Service"; description = "Kubernetes Controller Manager Service";
wantedBy = [ "kubernetes.target" ]; wantedBy = [ "kube-control-plane-online.target" ];
after = [ "kube-apiserver.service" ]; after = [ "kube-apiserver.service" ];
before = [ "kube-control-plane-online.target" ];
environment.KUBECONFIG = top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig;
preStart = ''
until kubectl auth can-i get /api -q 2>/dev/null; do
echo kubectl auth can-i get /api: exit status $?
sleep 2
done
'';
serviceConfig = { serviceConfig = {
RestartSec = "30s"; RestartSec = "30s";
Restart = "on-failure"; Restart = "on-failure";
@ -120,7 +140,7 @@ in
"--cluster-cidr=${cfg.clusterCidr}"} \ "--cluster-cidr=${cfg.clusterCidr}"} \
${optionalString (cfg.featureGates != []) ${optionalString (cfg.featureGates != [])
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \ "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
--kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \ --kubeconfig=${environment.KUBECONFIG} \
--leader-elect=${boolToString cfg.leaderElect} \ --leader-elect=${boolToString cfg.leaderElect} \
${optionalString (cfg.rootCaFile!=null) ${optionalString (cfg.rootCaFile!=null)
"--root-ca-file=${cfg.rootCaFile}"} \ "--root-ca-file=${cfg.rootCaFile}"} \
@ -141,7 +161,16 @@ in
User = "kubernetes"; User = "kubernetes";
Group = "kubernetes"; Group = "kubernetes";
}; };
path = top.path; path = top.path ++ [ pkgs.kubectl ];
unitConfig.ConditionPathExists = controllerManagerPaths;
};
systemd.paths.kube-controller-manager = {
wantedBy = [ "kube-controller-manager.service" ];
pathConfig = {
PathExists = controllerManagerPaths;
PathChanged = controllerManagerPaths;
};
}; };
services.kubernetes.pki.certs = with top.lib; { services.kubernetes.pki.certs = with top.lib; {

View File

@ -263,6 +263,30 @@ in {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
}; };
systemd.targets.kube-control-plane-online = {
wantedBy = [ "kubernetes.target" ];
before = [ "kubernetes.target" ];
};
systemd.services.kube-control-plane-online = rec {
description = "Kubernetes control plane is online";
wantedBy = [ "kube-control-plane-online.target" ];
after = [ "kube-scheduler.service" "kube-controller-manager.service" ];
before = [ "kube-control-plane-online.target" ];
environment.KUBECONFIG = cfg.lib.mkKubeConfig "default" cfg.kubeconfig;
path = [ pkgs.kubectl ];
preStart = ''
until kubectl get --raw=/healthz 2>/dev/null; do
echo kubectl get --raw=/healthz: exit status $?
sleep 3
done
'';
script = "echo Ok";
serviceConfig = {
TimeoutSec = "500";
};
};
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [
"d /opt/cni/bin 0755 root root -" "d /opt/cni/bin 0755 root root -"
"d /run/kubernetes 0755 kubernetes kubernetes -" "d /run/kubernetes 0755 kubernetes kubernetes -"
@ -286,6 +310,8 @@ in {
services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
then cfg.apiserver.advertiseAddress then cfg.apiserver.advertiseAddress
else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}"); else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
services.kubernetes.kubeconfig.server = mkDefault cfg.apiserverAddress;
}) })
]; ];
} }

View File

@ -24,16 +24,26 @@ in
###### interface ###### interface
options.services.kubernetes.flannel = { options.services.kubernetes.flannel = {
enable = mkEnableOption "enable flannel networking"; enable = mkEnableOption "enable flannel networking";
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes flannel";
}; };
###### implementation ###### implementation
config = mkIf cfg.enable { config = let
flannelPaths = filter (a: a != null) [
cfg.kubeconfig.caFile
cfg.kubeconfig.certFile
cfg.kubeconfig.keyFile
];
kubeconfig = top.lib.mkKubeConfig "flannel" cfg.kubeconfig;
in mkIf cfg.enable {
services.flannel = { services.flannel = {
enable = mkDefault true; enable = mkDefault true;
network = mkDefault top.clusterCidr; network = mkDefault top.clusterCidr;
inherit storageBackend; inherit storageBackend kubeconfig;
nodeName = config.services.kubernetes.kubelet.hostname; nodeName = top.kubelet.hostname;
}; };
services.kubernetes.kubelet = { services.kubernetes.kubelet = {
@ -48,24 +58,66 @@ in
}]; }];
}; };
systemd.services."mk-docker-opts" = { systemd.services.mk-docker-opts = {
description = "Pre-Docker Actions"; description = "Pre-Docker Actions";
wantedBy = [ "flannel.target" ];
before = [ "flannel.target" ];
path = with pkgs; [ gawk gnugrep ]; path = with pkgs; [ gawk gnugrep ];
script = '' script = ''
${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
systemctl restart docker systemctl restart docker
''; '';
unitConfig.ConditionPathExists = [ "/run/flannel/subnet.env" ];
serviceConfig.Type = "oneshot"; serviceConfig.Type = "oneshot";
}; };
systemd.paths."flannel-subnet-env" = { systemd.paths.flannel-subnet-env = {
wantedBy = [ "flannel.service" ]; wantedBy = [ "mk-docker-opts.service" ];
pathConfig = { pathConfig = {
PathModified = "/run/flannel/subnet.env"; PathExists = [ "/run/flannel/subnet.env" ];
PathChanged = [ "/run/flannel/subnet.env" ];
Unit = "mk-docker-opts.service"; Unit = "mk-docker-opts.service";
}; };
}; };
systemd.targets.flannel = {
wantedBy = [ "kube-node-online.target" ];
before = [ "kube-node-online.target" ];
};
systemd.services.flannel = {
wantedBy = [ "flannel.target" ];
after = [ "kubelet.target" ];
before = [ "flannel.target" ];
path = with pkgs; [ iptables kubectl ];
environment.KUBECONFIG = kubeconfig;
preStart = let
args = [
"--selector=kubernetes.io/hostname=${top.kubelet.hostname}"
# flannel exits if node is not registered yet, before that there is no podCIDR
"--output=jsonpath={.items[0].spec.podCIDR}"
# if jsonpath cannot be resolved exit with status 1
"--allow-missing-template-keys=false"
];
in ''
until kubectl get nodes ${concatStringsSep " " args} 2>/dev/null; do
echo Waiting for ${top.kubelet.hostname} to be RegisteredNode
sleep 1
done
'';
unitConfig.ConditionPathExists = flannelPaths;
};
systemd.paths.flannel = {
wantedBy = [ "flannel.service" ];
pathConfig = {
PathExists = flannelPaths;
PathChanged = flannelPaths;
};
};
services.kubernetes.flannel.kubeconfig.server = mkDefault top.apiserverAddress;
systemd.services.docker = { systemd.services.docker = {
environment.DOCKER_OPTS = "-b none"; environment.DOCKER_OPTS = "-b none";
serviceConfig.EnvironmentFile = "-/run/flannel/docker"; serviceConfig.EnvironmentFile = "-/run/flannel/docker";
@ -92,7 +144,6 @@ in
# give flannel som kubernetes rbac permissions if applicable # give flannel som kubernetes rbac permissions if applicable
services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) { services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
flannel-cr = { flannel-cr = {
apiVersion = "rbac.authorization.k8s.io/v1beta1"; apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "ClusterRole"; kind = "ClusterRole";
@ -128,7 +179,6 @@ in
name = "flannel-client"; name = "flannel-client";
}]; }];
}; };
}; };
}; };
} }

View File

@ -241,21 +241,28 @@ in
###### implementation ###### implementation
config = mkMerge [ config = mkMerge [
(mkIf cfg.enable { (let
kubeletPaths = filter (a: a != null) [
cfg.kubeconfig.caFile
cfg.kubeconfig.certFile
cfg.kubeconfig.keyFile
cfg.clientCaFile
cfg.tlsCertFile
cfg.tlsKeyFile
];
in mkIf cfg.enable {
services.kubernetes.kubelet.seedDockerImages = [infraContainer]; services.kubernetes.kubelet.seedDockerImages = [infraContainer];
systemd.services.kubelet = { systemd.services.kubelet = {
description = "Kubernetes Kubelet Service"; description = "Kubernetes Kubelet Service";
wantedBy = [ "kubernetes.target" ]; wantedBy = [ "kubelet.target" ];
after = [ "network.target" "docker.service" "kube-apiserver.service" ]; after = [ "kube-control-plane-online.target" ];
before = [ "kubelet.target" ];
path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path; path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
preStart = '' preStart = ''
${concatMapStrings (img: '' rm -f /opt/cni/bin/* || true
echo "Seeding docker image: ${img}"
docker load <${img}
'') cfg.seedDockerImages}
rm /opt/cni/bin/* || true
${concatMapStrings (package: '' ${concatMapStrings (package: ''
echo "Linking cni package: ${package}" echo "Linking cni package: ${package}"
ln -fs ${package}/bin/* /opt/cni/bin ln -fs ${package}/bin/* /opt/cni/bin
@ -308,6 +315,56 @@ in
''; '';
WorkingDirectory = top.dataDir; WorkingDirectory = top.dataDir;
}; };
unitConfig.ConditionPathExists = kubeletPaths;
};
systemd.paths.kubelet = {
wantedBy = [ "kubelet.service" ];
pathConfig = {
PathExists = kubeletPaths;
PathChanged = kubeletPaths;
};
};
systemd.services.docker.before = [ "kubelet.service" ];
systemd.services.docker-seed-images = {
wantedBy = [ "docker.service" ];
after = [ "docker.service" ];
before = [ "kubelet.service" ];
path = with pkgs; [ docker ];
preStart = ''
${concatMapStrings (img: ''
echo "Seeding docker image: ${img}"
docker load <${img}
'') cfg.seedDockerImages}
'';
script = "echo Ok";
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
serviceConfig.Slice = "kubernetes.slice";
};
systemd.services.kubelet-online = {
wantedBy = [ "kube-node-online.target" ];
after = [ "flannel.target" "kubelet.target" ];
before = [ "kube-node-online.target" ];
# it is complicated. flannel needs kubelet to run the pause container before
# it discusses the node CIDR with apiserver and afterwards configures and restarts
# dockerd. Until then prevent creating any pods because they have to be recreated anyway
# because the network of docker0 has been changed by flannel.
script = let
docker-env = "/run/flannel/docker";
flannel-date = "stat --print=%Y ${docker-env}";
docker-date = "systemctl show --property=ActiveEnterTimestamp --value docker";
in ''
until test -f ${docker-env} ; do sleep 1 ; done
while test `${flannel-date}` -gt `date +%s --date="$(${docker-date})"` ; do
sleep 1
done
'';
serviceConfig.Type = "oneshot";
serviceConfig.Slice = "kubernetes.slice";
}; };
# Allways include cni plugins # Allways include cni plugins
@ -354,5 +411,16 @@ in
}; };
}) })
{
systemd.targets.kubelet = {
wantedBy = [ "kube-node-online.target" ];
before = [ "kube-node-online.target" ];
};
systemd.targets.kube-node-online = {
wantedBy = [ "kubernetes.target" ];
before = [ "kubernetes.target" ];
};
}
]; ];
} }

View File

@ -27,12 +27,11 @@ let
certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}"; certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
cfsslAPITokenLength = 32; cfsslAPITokenLength = 32;
clusterAdminKubeconfig = with cfg.certs.clusterAdmin; clusterAdminKubeconfig = with cfg.certs.clusterAdmin; {
top.lib.mkKubeConfig "cluster-admin" { server = top.apiserverAddress;
server = top.apiserverAddress; certFile = cert;
certFile = cert; keyFile = key;
keyFile = key; };
};
remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}"; remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
in in
@ -119,6 +118,12 @@ in
cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl"; cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
cfsslCert = "${cfsslCertPathPrefix}.pem"; cfsslCert = "${cfsslCertPathPrefix}.pem";
cfsslKey = "${cfsslCertPathPrefix}-key.pem"; cfsslKey = "${cfsslCertPathPrefix}-key.pem";
cfsslPort = toString config.services.cfssl.port;
certmgrPaths = [
top.caFile
certmgrAPITokenPath
];
in in
{ {
@ -168,13 +173,40 @@ in
chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}" chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
'')]); '')]);
systemd.targets.cfssl-online = {
wantedBy = [ "network-online.target" ];
after = [ "cfssl.service" "network-online.target" "cfssl-online.service" ];
};
systemd.services.cfssl-online = {
description = "Wait for ${remote} to be reachable.";
wantedBy = [ "cfssl-online.target" ];
before = [ "cfssl-online.target" ];
path = [ pkgs.curl ];
preStart = ''
until curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o /dev/null; do
echo curl ${remote}/api/v1/cfssl/info: exit status $?
sleep 2
done
'';
script = "echo Ok";
serviceConfig = {
TimeoutSec = "300";
};
};
systemd.services.kube-certmgr-bootstrap = { systemd.services.kube-certmgr-bootstrap = {
description = "Kubernetes certmgr bootstrapper"; description = "Kubernetes certmgr bootstrapper";
wantedBy = [ "certmgr.service" ]; wantedBy = [ "cfssl-online.target" ];
after = [ "cfssl.target" ]; after = [ "cfssl-online.target" ];
before = [ "certmgr.service" ];
path = with pkgs; [ curl cfssl ];
script = concatStringsSep "\n" ['' script = concatStringsSep "\n" [''
set -e set -e
mkdir -p $(dirname ${certmgrAPITokenPath})
mkdir -p $(dirname ${top.caFile})
# If there's a cfssl (cert issuer) running locally, then don't rely on user to # If there's a cfssl (cert issuer) running locally, then don't rely on user to
# manually paste it in place. Just symlink. # manually paste it in place. Just symlink.
# otherwise, create the target file, ready for users to insert the token # otherwise, create the target file, ready for users to insert the token
@ -186,15 +218,18 @@ in
fi fi
'' ''
(optionalString (cfg.pkiTrustOnBootstrap) '' (optionalString (cfg.pkiTrustOnBootstrap) ''
if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then if [ ! -s "${top.caFile}" ]; then
${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \ until test -s ${top.caFile}.json; do
${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile} sleep 2
curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o ${top.caFile}.json
done
cfssljson -f ${top.caFile}.json -stdout >${top.caFile}
rm ${top.caFile}.json
fi fi
'') '')
]; ];
serviceConfig = { serviceConfig = {
RestartSec = "10s"; TimeoutSec = "500";
Restart = "on-failure";
}; };
}; };
@ -230,35 +265,28 @@ in
mapAttrs mkSpec cfg.certs; mapAttrs mkSpec cfg.certs;
}; };
#TODO: Get rid of kube-addon-manager in the future for the following reasons systemd.services.certmgr = {
# - it is basically just a shell script wrapped around kubectl wantedBy = [ "cfssl-online.target" ];
# - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount after = [ "cfssl-online.target" "kube-certmgr-bootstrap.service" ];
# - it is designed to be used with k8s system components only preStart = ''
# - it would be better with a more Nix-oriented way of managing addons while ! test -s ${certmgrAPITokenPath} ; do
systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{ sleep 1
environment.KUBECONFIG = with cfg.certs.addonManager; echo Waiting for ${certmgrAPITokenPath}
top.lib.mkKubeConfig "addon-manager" { done
server = top.apiserverAddress; '';
certFile = cert; unitConfig.ConditionPathExists = certmgrPaths;
keyFile = key; };
};
}
(optionalAttrs (top.addonManager.bootstrapAddons != {}) { systemd.paths.certmgr = {
serviceConfig.PermissionsStartOnly = true; wantedBy = [ "certmgr.service" ];
preStart = with pkgs; pathConfig = {
let PathExists = certmgrPaths;
files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v)) PathChanged = certmgrPaths;
top.addonManager.bootstrapAddons; };
in };
''
export KUBECONFIG=${clusterAdminKubeconfig}
${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
'';
})]);
environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig) environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
clusterAdminKubeconfig; (top.lib.mkKubeConfig "cluster-admin" clusterAdminKubeconfig);
environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [ environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
(pkgs.writeScriptBin "nixos-kubernetes-node-join" '' (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
@ -284,38 +312,22 @@ in
exit 1 exit 1
fi fi
do_restart=$(test -s ${certmgrAPITokenPath} && echo -n y || echo -n n)
echo $token > ${certmgrAPITokenPath} echo $token > ${certmgrAPITokenPath}
chmod 600 ${certmgrAPITokenPath} chmod 600 ${certmgrAPITokenPath}
echo "Restarting certmgr..." >&1 if [ y = $do_restart ]; then
systemctl restart certmgr echo "Restarting certmgr..." >&1
systemctl restart certmgr
fi
echo "Waiting for certs to appear..." >&1 echo "Node joined succesfully" >&1
${optionalString top.kubelet.enable ''
while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
echo "Restarting kubelet..." >&1
systemctl restart kubelet
''}
${optionalString top.proxy.enable ''
while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
echo "Restarting kube-proxy..." >&1
systemctl restart kube-proxy
''}
${optionalString top.flannel.enable ''
while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
echo "Restarting flannel..." >&1
systemctl restart flannel
''}
echo "Node joined succesfully"
'')]; '')];
# isolate etcd on loopback at the master node # isolate etcd on loopback at the master node
# easyCerts doesn't support multimaster clusters anyway atm. # easyCerts doesn't support multimaster clusters anyway atm.
services.etcd = with cfg.certs.etcd; { services.etcd = mkIf top.apiserver.enable (with cfg.certs.etcd; {
listenClientUrls = ["https://127.0.0.1:2379"]; listenClientUrls = ["https://127.0.0.1:2379"];
listenPeerUrls = ["https://127.0.0.1:2380"]; listenPeerUrls = ["https://127.0.0.1:2380"];
advertiseClientUrls = ["https://etcd.local:2379"]; advertiseClientUrls = ["https://etcd.local:2379"];
@ -324,19 +336,11 @@ in
certFile = mkDefault cert; certFile = mkDefault cert;
keyFile = mkDefault key; keyFile = mkDefault key;
trustedCaFile = mkDefault caCert; trustedCaFile = mkDefault caCert;
}; });
networking.extraHosts = mkIf (config.services.etcd.enable) '' networking.extraHosts = mkIf (config.services.etcd.enable) ''
127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local 127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
''; '';
services.flannel = with cfg.certs.flannelClient; {
kubeconfig = top.lib.mkKubeConfig "flannel" {
server = top.apiserverAddress;
certFile = cert;
keyFile = key;
};
};
services.kubernetes = { services.kubernetes = {
apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; { apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
@ -353,7 +357,16 @@ in
kubeletClientCaFile = mkDefault caCert; kubeletClientCaFile = mkDefault caCert;
kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert; kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key; kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
proxyClientCertFile = mkDefault cfg.certs.apiserverProxyClient.cert;
proxyClientKeyFile = mkDefault cfg.certs.apiserverProxyClient.key;
}); });
addonManager = mkIf top.addonManager.enable {
kubeconfig = with cfg.certs.addonManager; {
certFile = mkDefault cert;
keyFile = mkDefault key;
};
bootstrapAddonsKubeconfig = clusterAdminKubeconfig;
};
controllerManager = mkIf top.controllerManager.enable { controllerManager = mkIf top.controllerManager.enable {
serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key; serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
rootCaFile = cfg.certs.controllerManagerClient.caCert; rootCaFile = cfg.certs.controllerManagerClient.caCert;
@ -362,6 +375,12 @@ in
keyFile = mkDefault key; keyFile = mkDefault key;
}; };
}; };
flannel = mkIf top.flannel.enable {
kubeconfig = with cfg.certs.flannelClient; {
certFile = cert;
keyFile = key;
};
};
scheduler = mkIf top.scheduler.enable { scheduler = mkIf top.scheduler.enable {
kubeconfig = with cfg.certs.schedulerClient; { kubeconfig = with cfg.certs.schedulerClient; {
certFile = mkDefault cert; certFile = mkDefault cert;

View File

@ -45,12 +45,28 @@ in
}; };
###### implementation ###### implementation
config = mkIf cfg.enable { config = let
systemd.services.kube-proxy = {
proxyPaths = filter (a: a != null) [
cfg.kubeconfig.caFile
cfg.kubeconfig.certFile
cfg.kubeconfig.keyFile
];
in mkIf cfg.enable {
systemd.services.kube-proxy = rec {
description = "Kubernetes Proxy Service"; description = "Kubernetes Proxy Service";
wantedBy = [ "kubernetes.target" ]; wantedBy = [ "kube-node-online.target" ];
after = [ "kube-apiserver.service" ]; after = [ "kubelet-online.service" ];
path = with pkgs; [ iptables conntrack_tools ]; before = [ "kube-node-online.target" ];
environment.KUBECONFIG = top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig;
path = with pkgs; [ iptables conntrack_tools kubectl ];
preStart = ''
until kubectl auth can-i get nodes/${top.kubelet.hostname} -q 2>/dev/null; do
echo kubectl auth can-i get nodes/${top.kubelet.hostname}: exit status $?
sleep 2
done
'';
serviceConfig = { serviceConfig = {
Slice = "kubernetes.slice"; Slice = "kubernetes.slice";
ExecStart = ''${top.package}/bin/kube-proxy \ ExecStart = ''${top.package}/bin/kube-proxy \
@ -59,7 +75,7 @@ in
"--cluster-cidr=${top.clusterCidr}"} \ "--cluster-cidr=${top.clusterCidr}"} \
${optionalString (cfg.featureGates != []) ${optionalString (cfg.featureGates != [])
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \ "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
--kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \ --kubeconfig=${environment.KUBECONFIG} \
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \ ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
${cfg.extraOpts} ${cfg.extraOpts}
''; '';
@ -67,6 +83,15 @@ in
Restart = "on-failure"; Restart = "on-failure";
RestartSec = 5; RestartSec = 5;
}; };
unitConfig.ConditionPathExists = proxyPaths;
};
systemd.paths.kube-proxy = {
wantedBy = [ "kube-proxy.service" ];
pathConfig = {
PathExists = proxyPaths;
PathChanged = proxyPaths;
};
}; };
services.kubernetes.pki.certs = { services.kubernetes.pki.certs = {

View File

@ -56,18 +56,35 @@ in
}; };
###### implementation ###### implementation
config = mkIf cfg.enable { config = let
systemd.services.kube-scheduler = {
schedulerPaths = filter (a: a != null) [
cfg.kubeconfig.caFile
cfg.kubeconfig.certFile
cfg.kubeconfig.keyFile
];
in mkIf cfg.enable {
systemd.services.kube-scheduler = rec {
description = "Kubernetes Scheduler Service"; description = "Kubernetes Scheduler Service";
wantedBy = [ "kubernetes.target" ]; wantedBy = [ "kube-control-plane-online.target" ];
after = [ "kube-apiserver.service" ]; after = [ "kube-apiserver.service" ];
before = [ "kube-control-plane-online.target" ];
environment.KUBECONFIG = top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig;
path = [ pkgs.kubectl ];
preStart = ''
until kubectl auth can-i get /api -q 2>/dev/null; do
echo kubectl auth can-i get /api: exit status $?
sleep 2
done
'';
serviceConfig = { serviceConfig = {
Slice = "kubernetes.slice"; Slice = "kubernetes.slice";
ExecStart = ''${top.package}/bin/kube-scheduler \ ExecStart = ''${top.package}/bin/kube-scheduler \
--address=${cfg.address} \ --address=${cfg.address} \
${optionalString (cfg.featureGates != []) ${optionalString (cfg.featureGates != [])
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \ "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
--kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \ --kubeconfig=${environment.KUBECONFIG} \
--leader-elect=${boolToString cfg.leaderElect} \ --leader-elect=${boolToString cfg.leaderElect} \
--port=${toString cfg.port} \ --port=${toString cfg.port} \
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \ ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
@ -79,6 +96,15 @@ in
Restart = "on-failure"; Restart = "on-failure";
RestartSec = 5; RestartSec = 5;
}; };
unitConfig.ConditionPathExists = schedulerPaths;
};
systemd.paths.kube-scheduler = {
wantedBy = [ "kube-scheduler.service" ];
pathConfig = {
PathExists = schedulerPaths;
PathChanged = schedulerPaths;
};
}; };
services.kubernetes.pki.certs = { services.kubernetes.pki.certs = {

View File

@ -199,10 +199,10 @@ in {
package = mkOption { package = mkOption {
type = types.package; type = types.package;
default = pkgs.pythonPackages.buildbot-full; default = pkgs.python3Packages.buildbot-full;
defaultText = "pkgs.pythonPackages.buildbot-full"; defaultText = "pkgs.python3Packages.buildbot-full";
description = "Package to use for buildbot."; description = "Package to use for buildbot.";
example = literalExample "pkgs.python3Packages.buildbot-full"; example = literalExample "pkgs.python3Packages.buildbot";
}; };
packages = mkOption { packages = mkOption {

View File

@ -118,10 +118,10 @@ in {
package = mkOption { package = mkOption {
type = types.package; type = types.package;
default = pkgs.pythonPackages.buildbot-worker; default = pkgs.python3Packages.buildbot-worker;
defaultText = "pkgs.pythonPackages.buildbot-worker"; defaultText = "pkgs.python3Packages.buildbot-worker";
description = "Package to use for buildbot worker."; description = "Package to use for buildbot worker.";
example = literalExample "pkgs.python3Packages.buildbot-worker"; example = literalExample "pkgs.python2Packages.buildbot-worker";
}; };
packages = mkOption { packages = mkOption {

View File

@ -85,7 +85,7 @@ in {
uriFile = mkOption { uriFile = mkOption {
type = types.path; type = types.path;
default = "/var/run/couchdb/couchdb.uri"; default = "/run/couchdb/couchdb.uri";
description = '' description = ''
This file contains the full URI that can be used to access this This file contains the full URI that can be used to access this
instance of CouchDB. It is used to help discover the port CouchDB is instance of CouchDB. It is used to help discover the port CouchDB is

View File

@ -65,7 +65,7 @@ in
}; };
pidFile = mkOption { pidFile = mkOption {
default = "/var/run/mongodb.pid"; default = "/run/mongodb.pid";
description = "Location of MongoDB pid file"; description = "Location of MongoDB pid file";
}; };

View File

@ -103,6 +103,24 @@ in
}; };
initialDatabases = mkOption { initialDatabases = mkOption {
type = types.listOf (types.submodule {
options = {
name = mkOption {
type = types.str;
description = ''
The name of the database to create.
'';
};
schema = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
The initial schema of the database; if null (the default),
an empty database is created.
'';
};
};
});
default = []; default = [];
description = '' description = ''
List of database names and their initial schemas that should be used to create databases on the first startup List of database names and their initial schemas that should be used to create databases on the first startup
@ -115,11 +133,13 @@ in
}; };
initialScript = mkOption { initialScript = mkOption {
type = types.nullOr types.lines;
default = null; default = null;
description = "A file containing SQL statements to be executed on the first startup. Can be used for granting certain permissions on the database"; description = "A file containing SQL statements to be executed on the first startup. Can be used for granting certain permissions on the database";
}; };
ensureDatabases = mkOption { ensureDatabases = mkOption {
type = types.listOf types.str;
default = []; default = [];
description = '' description = ''
Ensures that the specified databases exist. Ensures that the specified databases exist.
@ -134,6 +154,38 @@ in
}; };
ensureUsers = mkOption { ensureUsers = mkOption {
type = types.listOf (types.submodule {
options = {
name = mkOption {
type = types.str;
description = ''
Name of the user to ensure.
'';
};
ensurePermissions = mkOption {
type = types.attrsOf types.str;
default = {};
description = ''
Permissions to ensure for the user, specified as attribute set.
The attribute names specify the database and tables to grant the permissions for,
separated by a dot. You may use wildcards here.
The attribute values specfiy the permissions to grant.
You may specify one or multiple comma-separated SQL privileges here.
For more information on how to specify the target
and on which privileges exist, see the
<link xlink:href="https://mariadb.com/kb/en/library/grant/">GRANT syntax</link>.
The attributes are used as <code>GRANT ''${attrName} ON ''${attrValue}</code>.
'';
example = literalExample ''
{
"database.*" = "ALL PRIVILEGES";
"*.*" = "SELECT, LOCK TABLES";
}
'';
};
};
});
default = []; default = [];
description = '' description = ''
Ensures that the specified users exist and have at least the ensured permissions. Ensures that the specified users exist and have at least the ensured permissions.
@ -143,20 +195,22 @@ in
option is changed. This means that users created and permissions assigned once through this option or option is changed. This means that users created and permissions assigned once through this option or
otherwise have to be removed manually. otherwise have to be removed manually.
''; '';
example = literalExample ''[ example = literalExample ''
{ [
name = "nextcloud"; {
ensurePermissions = { name = "nextcloud";
"nextcloud.*" = "ALL PRIVILEGES"; ensurePermissions = {
}; "nextcloud.*" = "ALL PRIVILEGES";
} };
{ }
name = "backup"; {
ensurePermissions = { name = "backup";
"*.*" = "SELECT, LOCK TABLES"; ensurePermissions = {
}; "*.*" = "SELECT, LOCK TABLES";
} };
]''; }
]
'';
}; };
# FIXME: remove this option; it's a really bad idea. # FIXME: remove this option; it's a really bad idea.

View File

@ -8,7 +8,20 @@ let
openldap = pkgs.openldap; openldap = pkgs.openldap;
dataFile = pkgs.writeText "ldap-contents.ldif" cfg.declarativeContents; dataFile = pkgs.writeText "ldap-contents.ldif" cfg.declarativeContents;
configFile = pkgs.writeText "slapd.conf" cfg.extraConfig; configFile = pkgs.writeText "slapd.conf" ((optionalString cfg.defaultSchemas ''
include ${pkgs.openldap.out}/etc/schema/core.schema
include ${pkgs.openldap.out}/etc/schema/cosine.schema
include ${pkgs.openldap.out}/etc/schema/inetorgperson.schema
include ${pkgs.openldap.out}/etc/schema/nis.schema
'') + ''
${cfg.extraConfig}
database ${cfg.database}
suffix ${cfg.suffix}
rootdn ${cfg.rootdn}
rootpw ${cfg.rootpw}
directory ${cfg.dataDir}
${cfg.extraDatabaseConfig}
'');
configOpts = if cfg.configDir == null then "-f ${configFile}" configOpts = if cfg.configDir == null then "-f ${configFile}"
else "-F ${cfg.configDir}"; else "-F ${cfg.configDir}";
in in
@ -54,6 +67,52 @@ in
description = "The database directory."; description = "The database directory.";
}; };
defaultSchemas = mkOption {
type = types.bool;
default = true;
description = ''
Include the default schemas core, cosine, inetorgperson and nis.
This setting will be ignored if configDir is set.
'';
};
database = mkOption {
type = types.str;
default = "mdb";
description = ''
Database type to use for the LDAP.
This setting will be ignored if configDir is set.
'';
};
suffix = mkOption {
type = types.str;
example = "dc=example,dc=org";
description = ''
Specify the DN suffix of queries that will be passed to this backend
database.
This setting will be ignored if configDir is set.
'';
};
rootdn = mkOption {
type = types.str;
example = "cn=admin,dc=example,dc=org";
description = ''
Specify the distinguished name that is not subject to access control
or administrative limit restrictions for operations on this database.
This setting will be ignored if configDir is set.
'';
};
rootpw = mkOption {
type = types.str;
description = ''
Password for the root user.
This setting will be ignored if configDir is set.
'';
};
logLevel = mkOption { logLevel = mkOption {
type = types.str; type = types.str;
default = "0"; default = "0";
@ -118,6 +177,39 @@ in
# ... # ...
''; '';
}; };
extraDatabaseConfig = mkOption {
type = types.lines;
default = "";
description = ''
slapd.conf configuration after the database option.
This setting will be ignored if configDir is set.
'';
example = ''
# Indices to maintain for this directory
# unique id so equality match only
index uid eq
# allows general searching on commonname, givenname and email
index cn,gn,mail eq,sub
# allows multiple variants on surname searching
index sn eq,sub
# sub above includes subintial,subany,subfinal
# optimise department searches
index ou eq
# if searches will include objectClass uncomment following
# index objectClass eq
# shows use of default index parameter
index default eq,sub
# indices missing - uses default eq,sub
index telephonenumber
# other database parameters
# read more in slapd.conf reference section
cachesize 10000
checkpoint 128 15
'';
};
}; };
}; };
@ -134,8 +226,8 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "network.target" ]; after = [ "network.target" ];
preStart = '' preStart = ''
mkdir -p /var/run/slapd mkdir -p /run/slapd
chown -R "${cfg.user}:${cfg.group}" /var/run/slapd chown -R "${cfg.user}:${cfg.group}" /run/slapd
${optionalString (cfg.declarativeContents != null) '' ${optionalString (cfg.declarativeContents != null) ''
rm -Rf "${cfg.dataDir}" rm -Rf "${cfg.dataDir}"
''} ''}

View File

@ -238,6 +238,7 @@ in
User = "postgres"; User = "postgres";
Group = "postgres"; Group = "postgres";
PermissionsStartOnly = true; PermissionsStartOnly = true;
RuntimeDirectory = "postgresql";
Type = if lib.versionAtLeast cfg.package.version "9.6" Type = if lib.versionAtLeast cfg.package.version "9.6"
then "notify" then "notify"
else "simple"; else "simple";

View File

@ -95,7 +95,7 @@ in
type = with types; nullOr path; type = with types; nullOr path;
default = null; default = null;
description = "The path to the socket to bind to."; description = "The path to the socket to bind to.";
example = "/var/run/redis.sock"; example = "/run/redis.sock";
}; };
logLevel = mkOption { logLevel = mkOption {

Some files were not shown because too many files have changed in this diff Show More