Merge remote-tracking branch 'upstream/master' into override-unstable-nix

John Ericson 2019-03-10 13:53:59 -04:00
commit 4dfe9f9eb8
2092 changed files with 35394 additions and 14399 deletions

.github/CODEOWNERS

@@ -58,11 +58,11 @@
 /doc/languages-frameworks/python.section.md @FRidh
 # Haskell
-/pkgs/development/compilers/ghc @peti @ryantm @basvandijk
-/pkgs/development/haskell-modules @peti @ryantm @basvandijk
-/pkgs/development/haskell-modules/default.nix @peti @ryantm @basvandijk
-/pkgs/development/haskell-modules/generic-builder.nix @peti @ryantm @basvandijk
-/pkgs/development/haskell-modules/hoogle.nix @peti @ryantm @basvandijk
+/pkgs/development/compilers/ghc @peti @basvandijk
+/pkgs/development/haskell-modules @peti @basvandijk
+/pkgs/development/haskell-modules/default.nix @peti @basvandijk
+/pkgs/development/haskell-modules/generic-builder.nix @peti @basvandijk
+/pkgs/development/haskell-modules/hoogle.nix @peti @basvandijk
 # Perl
 /pkgs/development/interpreters/perl @volth

@@ -1 +1 @@
-19.03
+19.09

doc/.gitignore

@@ -1,7 +1,8 @@
 *.chapter.xml
 *.section.xml
 .version
-out
-manual-full.xml
-highlightjs
+functions/library/generated
 functions/library/locations.xml
+highlightjs
+manual-full.xml
+out

@@ -197,20 +197,14 @@ args.stdenv.mkDerivation (args // {
 <title>Package naming</title>
 <para>
-The key words
-<emphasis>must</emphasis>,
-<emphasis>must not</emphasis>,
-<emphasis>required</emphasis>,
-<emphasis>shall</emphasis>,
-<emphasis>shall not</emphasis>,
-<emphasis>should</emphasis>,
-<emphasis>should not</emphasis>,
-<emphasis>recommended</emphasis>,
-<emphasis>may</emphasis>,
-and <emphasis>optional</emphasis> in this section
-are to be interpreted as described in
-<link xlink:href="https://tools.ietf.org/html/rfc2119">RFC 2119</link>.
-Only <emphasis>emphasized</emphasis> words are to be interpreted in this way.
+The key words <emphasis>must</emphasis>, <emphasis>must not</emphasis>,
+<emphasis>required</emphasis>, <emphasis>shall</emphasis>, <emphasis>shall
+not</emphasis>, <emphasis>should</emphasis>, <emphasis>should
+not</emphasis>, <emphasis>recommended</emphasis>, <emphasis>may</emphasis>,
+and <emphasis>optional</emphasis> in this section are to be interpreted as
+described in <link xlink:href="https://tools.ietf.org/html/rfc2119">RFC
+2119</link>. Only <emphasis>emphasized</emphasis> words are to be
+interpreted in this way.
 </para>
 <para>

@@ -253,8 +247,8 @@ args.stdenv.mkDerivation (args // {
 <itemizedlist>
 <listitem>
 <para>
-The <literal>name</literal> attribute <emphasis>should</emphasis>
-be identical to the upstream package name.
+The <literal>name</literal> attribute <emphasis>should</emphasis> be
+identical to the upstream package name.
 </para>
 </listitem>
 <listitem>

@@ -275,28 +269,29 @@ args.stdenv.mkDerivation (args // {
 <para>
 If a package is not a release but a commit from a repository, then the
 version part of the name <emphasis>must</emphasis> be the date of that
-(fetched) commit. The date <emphasis>must</emphasis> be in <literal>"YYYY-MM-DD"</literal>
-format. Also append <literal>"unstable"</literal> to the name - e.g.,
+(fetched) commit. The date <emphasis>must</emphasis> be in
+<literal>"YYYY-MM-DD"</literal> format. Also append
+<literal>"unstable"</literal> to the name - e.g.,
 <literal>"pkgname-unstable-2014-09-23"</literal>.
 </para>
 </listitem>
 <listitem>
 <para>
-Dashes in the package name <emphasis>should</emphasis> be preserved in new variable names,
-rather than converted to underscores or camel cased — e.g.,
-<varname>http-parser</varname> instead of <varname>http_parser</varname>
-or <varname>httpParser</varname>. The hyphenated style is preferred in
-all three package names.
+Dashes in the package name <emphasis>should</emphasis> be preserved in
+new variable names, rather than converted to underscores or camel cased
+— e.g., <varname>http-parser</varname> instead of
+<varname>http_parser</varname> or <varname>httpParser</varname>. The
+hyphenated style is preferred in all three package names.
 </para>
 </listitem>
 <listitem>
 <para>
-If there are multiple versions of a package, this <emphasis>should</emphasis> be reflected in
-the variable names in <filename>all-packages.nix</filename>, e.g.
-<varname>json-c-0-9</varname> and <varname>json-c-0-11</varname>. If
-there is an obvious “default” version, make an attribute like
-<literal>json-c = json-c-0-9;</literal>. See also
-<xref linkend="sec-versioning" />
+If there are multiple versions of a package, this
+<emphasis>should</emphasis> be reflected in the variable names in
+<filename>all-packages.nix</filename>, e.g. <varname>json-c-0-9</varname>
+and <varname>json-c-0-11</varname>. If there is an obvious “default”
+version, make an attribute like <literal>json-c = json-c-0-9;</literal>.
+See also <xref linkend="sec-versioning" />
 </para>
 </listitem>
 </itemizedlist>
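
For illustration, a minimal Nix sketch of the multiple-versions convention the hunk above describes; only the attribute names come from the text, and the file paths are hypothetical:

  # In all-packages.nix (paths are illustrative):
  json-c-0-9  = callPackage ../development/libraries/json-c/0.9.nix { };
  json-c-0-11 = callPackage ../development/libraries/json-c/0.11.nix { };

  # An obvious "default" version gets a plain alias:
  json-c = json-c-0-9;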
@@ -814,8 +809,8 @@ args.stdenv.mkDerivation (args // {
 <para>
 There are multiple ways to fetch a package source in nixpkgs. The general
-guideline is that you should package reproducible sources with a high degree of
-availability. Right now there is only one fetcher which has mirroring
+guideline is that you should package reproducible sources with a high degree
+of availability. Right now there is only one fetcher which has mirroring
 support and that is <literal>fetchurl</literal>. Note that you should also
 prefer protocols which have a corresponding proxy environment variable.
 </para>

@@ -869,8 +864,10 @@ src = fetchFromGitHub {
 }
 </programlisting>
 Find the value to put as <literal>sha256</literal> by running
-<literal>nix run -f '&lt;nixpkgs&gt;' nix-prefetch-github -c nix-prefetch-github --rev 1f795f9f44607cc5bec70d1300150bfefcef2aae NixOS nix</literal>
-or <literal>nix-prefetch-url --unpack https://github.com/NixOS/nix/archive/1f795f9f44607cc5bec70d1300150bfefcef2aae.tar.gz</literal>.
+<literal>nix run -f '&lt;nixpkgs&gt;' nix-prefetch-github -c
+nix-prefetch-github --rev 1f795f9f44607cc5bec70d1300150bfefcef2aae NixOS
+nix</literal> or <literal>nix-prefetch-url --unpack
+https://github.com/NixOS/nix/archive/1f795f9f44607cc5bec70d1300150bfefcef2aae.tar.gz</literal>.
 </para>
 </listitem>
 </itemizedlist>

@@ -953,17 +950,23 @@ $ nix-hash --type sha256 --to-base32 <replaceable>HASH</replaceable>
 would be replace hash with a fake one and rebuild. Nix build will fail and
 error message will contain desired hash.
 </para>
-<warning><para>This method has security problems. Check below for details.</para></warning>
+<warning>
+<para>
+This method has security problems. Check below for details.
+</para>
+</warning>
 </listitem>
 </orderedlist>
 <section xml:id="sec-source-hashes-security">
 <title>Obtaining hashes securely</title>
 <para>
-Let's say Man-in-the-Middle (MITM) sits close to your network. Then instead of fetching
-source you can fetch malware, and instead of source hash you get hash of malware. Here are
-security considerations for this scenario:
+Let's say Man-in-the-Middle (MITM) sits close to your network. Then instead
+of fetching source you can fetch malware, and instead of source hash you
+get hash of malware. Here are security considerations for this scenario:
 </para>
 <itemizedlist>
 <listitem>
 <para>

@@ -972,7 +975,8 @@ $ nix-hash --type sha256 --to-base32 <replaceable>HASH</replaceable>
 </listitem>
 <listitem>
 <para>
-hashes from upstream (in method 3) should be obtained via secure protocol;
+hashes from upstream (in method 3) should be obtained via secure
+protocol;
 </para>
 </listitem>
 <listitem>

@@ -982,12 +986,12 @@ $ nix-hash --type sha256 --to-base32 <replaceable>HASH</replaceable>
 </listitem>
 <listitem>
 <para>
-<literal>https://</literal> URLs are not secure in method 5. When obtaining hashes
-with fake hash method, TLS checks are disabled. So
-refetch source hash from several different networks to exclude MITM scenario.
-Alternatively, use fake hash method to make Nix error, but instead of extracting
-hash from error, extract <literal>https://</literal> URL and prefetch it
-with method 1.
+<literal>https://</literal> URLs are not secure in method 5. When
+obtaining hashes with fake hash method, TLS checks are disabled. So
+refetch source hash from several different networks to exclude MITM
+scenario. Alternatively, use fake hash method to make Nix error, but
+instead of extracting hash from error, extract
+<literal>https://</literal> URL and prefetch it with method 1.
 </para>
 </listitem>
 </itemizedlist>
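
For context, a minimal sketch of the fake-hash method the hunks above describe, assuming lib.fakeSha256 (an all-zero placeholder string in nixpkgs' lib) is in scope:

  src = fetchurl {
    url = "mirror://gnu/hello/hello-2.10.tar.gz";
    # Deliberately wrong placeholder hash:
    sha256 = lib.fakeSha256;
  };
  # nix-build then fails with a hash mismatch, and the error message
  # contains the actual hash to paste back in. Per the warning above,
  # refetch from several different networks before trusting it.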

@@ -132,13 +132,13 @@
 </itemizedlist>
 <para>
-The difference between a package being unsupported on some system and
-being broken is admittedly a bit fuzzy. If a program
-<emphasis>ought</emphasis> to work on a certain platform, but doesn't, the
-platform should be included in <literal>meta.platforms</literal>, but marked
-as broken with e.g. <literal>meta.broken =
-!hostPlatform.isWindows</literal>. Of course, this begs the question of what
-"ought" means exactly. That is left to the package maintainer.
+The difference between a package being unsupported on some system and being
+broken is admittedly a bit fuzzy. If a program <emphasis>ought</emphasis> to
+work on a certain platform, but doesn't, the platform should be included in
+<literal>meta.platforms</literal>, but marked as broken with e.g.
+<literal>meta.broken = !hostPlatform.isWindows</literal>. Of course, this
+begs the question of what "ought" means exactly. That is left to the package
+maintainer.
 </para>
 </section>
 <section xml:id="sec-allow-unfree">

@@ -175,9 +175,8 @@
 </programlisting>
 </para>
 <para>
-For a more useful example, try the following. This configuration
-only allows unfree packages named flash player and visual studio
-code:
+For a more useful example, try the following. This configuration only
+allows unfree packages named flash player and visual studio code:
 <programlisting>
 {
 allowUnfreePredicate = (pkg: builtins.elem

@@ -6,17 +6,17 @@
 <title>Introduction</title>
 <para>
-"Cross-compilation" means compiling a program on one machine for another type
-of machine. For example, a typical use of cross-compilation is to compile
-programs for embedded devices. These devices often don't have the computing
-power and memory to compile their own programs. One might think that
-cross-compilation is a fairly niche concern. However, there are significant
-advantages to rigorously distinguishing between build-time and run-time
-environments! This applies even when one is developing and deploying on the
-same machine. Nixpkgs is increasingly adopting the opinion that packages
-should be written with cross-compilation in mind, and nixpkgs should evaluate
-in a similar way (by minimizing cross-compilation-specific special cases)
-whether or not one is cross-compiling.
+"Cross-compilation" means compiling a program on one machine for another
+type of machine. For example, a typical use of cross-compilation is to
+compile programs for embedded devices. These devices often don't have the
+computing power and memory to compile their own programs. One might think
+that cross-compilation is a fairly niche concern. However, there are
+significant advantages to rigorously distinguishing between build-time and
+run-time environments! This applies even when one is developing and
+deploying on the same machine. Nixpkgs is increasingly adopting the opinion
+that packages should be written with cross-compilation in mind, and nixpkgs
+should evaluate in a similar way (by minimizing cross-compilation-specific
+special cases) whether or not one is cross-compiling.
 </para>
 <para>

@@ -34,7 +34,8 @@
 <title>Platform parameters</title>
 <para>
-Nixpkgs follows the <link
+Nixpkgs follows the
+<link
 xlink:href="https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html">conventions
 of GNU autoconf</link>. We distinguish between 3 types of platforms when
 building a derivation: <wordasword>build</wordasword>,

@@ -95,10 +96,10 @@
 The build process of certain compilers is written in such a way that the
 compiler resulting from a single build can itself only produce binaries
 for a single platform. The task of specifying this single "target
-platform" is thus pushed to build time of the compiler. The root cause of
-this that the compiler (which will be run on the host) and the standard
-library/runtime (which will be run on the target) are built by a single
-build process.
+platform" is thus pushed to build time of the compiler. The root cause
+of this is that the compiler (which will be run on the host) and the
+standard library/runtime (which will be run on the target) are built by
+a single build process.
 </para>
 <para>
 There is no fundamental need to think about a single target ahead of

@@ -136,9 +137,9 @@
 This is a two-component shorthand for the platform. Examples of this
 would be "x86_64-darwin" and "i686-linux"; see
 <literal>lib.systems.doubles</literal> for more. The first component
-corresponds to the CPU architecture of the platform and the second to the
-operating system of the platform (<literal>[cpu]-[os]</literal>). This
-format has built-in support in Nix, such as the
+corresponds to the CPU architecture of the platform and the second to
+the operating system of the platform (<literal>[cpu]-[os]</literal>).
+This format has built-in support in Nix, such as the
 <varname>builtins.currentSystem</varname> impure string.
 </para>
 </listitem>

@@ -149,14 +150,14 @@
 </term>
 <listitem>
 <para>
-This is a 3- or 4- component shorthand for the platform. Examples of this
-would be <literal>x86_64-unknown-linux-gnu</literal> and
+This is a 3- or 4- component shorthand for the platform. Examples of
+this would be <literal>x86_64-unknown-linux-gnu</literal> and
 <literal>aarch64-apple-darwin14</literal>. This is a standard format
 called the "LLVM target triple", as they are pioneered by LLVM. In the
 4-part form, this corresponds to
 <literal>[cpu]-[vendor]-[os]-[abi]</literal>. This format is strictly
-more informative than the "Nix host double", as the previous format could
-analogously be termed. This needs a better name than
+more informative than the "Nix host double", as the previous format
+could analogously be termed. This needs a better name than
 <varname>config</varname>!
 </para>
 </listitem>

@@ -167,11 +168,10 @@
 </term>
 <listitem>
 <para>
-This is a Nix representation of a parsed LLVM target triple
-with white-listed components. This can be specified directly,
-or actually parsed from the <varname>config</varname>. See
-<literal>lib.systems.parse</literal> for the exact
-representation.
+This is a Nix representation of a parsed LLVM target triple with
+white-listed components. This can be specified directly, or actually
+parsed from the <varname>config</varname>. See
+<literal>lib.systems.parse</literal> for the exact representation.
 </para>
 </listitem>
 </varlistentry>
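
To get a feel for the parsed representation just described, a quick sketch, assuming the mkSystemFromString helper exposed by lib.systems.parse (output shape abbreviated):

  $ nix repl '<nixpkgs>'
  nix-repl> lib.systems.parse.mkSystemFromString "x86_64-unknown-linux-gnu"
  { abi = { ... }; cpu = { ... }; kernel = { ... }; vendor = { ... }; }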
@@ -253,15 +253,15 @@
 <para>
 Some examples will make this clearer. If a package is being built with a
 <literal>(build, host, target)</literal> platform triple of <literal>(foo,
-bar, bar)</literal>, then its build-time dependencies would have a triple of
-<literal>(foo, foo, bar)</literal>, and <emphasis>those packages'</emphasis>
-build-time dependencies would have a triple of <literal>(foo, foo,
-foo)</literal>. In other words, it should take two "rounds" of following
-build-time dependency edges before one reaches a fixed point where, by the
-sliding window principle, the platform triple no longer changes. Indeed,
-this happens with cross-compilation, where only rounds of native
-dependencies starting with the second necessarily coincide with native
-packages.
+bar, bar)</literal>, then its build-time dependencies would have a triple
+of <literal>(foo, foo, bar)</literal>, and <emphasis>those
+packages'</emphasis> build-time dependencies would have a triple of
+<literal>(foo, foo, foo)</literal>. In other words, it should take two
+"rounds" of following build-time dependency edges before one reaches a
+fixed point where, by the sliding window principle, the platform triple no
+longer changes. Indeed, this happens with cross-compilation, where only
+rounds of native dependencies starting with the second necessarily coincide
+with native packages.
 </para>
 <note>

@@ -273,23 +273,24 @@
 </note>
 <para>
-How does this work in practice? Nixpkgs is now structured so that build-time
-dependencies are taken from <varname>buildPackages</varname>, whereas
-run-time dependencies are taken from the top level attribute set. For
-example, <varname>buildPackages.gcc</varname> should be used at build-time,
-while <varname>gcc</varname> should be used at run-time. Now, for most of
-Nixpkgs's history, there was no <varname>buildPackages</varname>, and most
-packages have not been refactored to use it explicitly. Instead, one can use
-the six (<emphasis>gasp</emphasis>) attributes used for specifying
-dependencies as documented in <xref linkend="ssec-stdenv-dependencies"/>. We
-"splice" together the run-time and build-time package sets with
-<varname>callPackage</varname>, and then <varname>mkDerivation</varname> for
-each of four attributes pulls the right derivation out. This splicing can be
-skipped when not cross-compiling as the package sets are the same, but is a
-bit slow for cross-compiling. Because of this, a best-of-both-worlds
-solution is in the works with no splicing or explicit access of
-<varname>buildPackages</varname> needed. For now, feel free to use either
-method.
+How does this work in practice? Nixpkgs is now structured so that
+build-time dependencies are taken from <varname>buildPackages</varname>,
+whereas run-time dependencies are taken from the top level attribute set.
+For example, <varname>buildPackages.gcc</varname> should be used at
+build-time, while <varname>gcc</varname> should be used at run-time. Now,
+for most of Nixpkgs's history, there was no
+<varname>buildPackages</varname>, and most packages have not been
+refactored to use it explicitly. Instead, one can use the six
+(<emphasis>gasp</emphasis>) attributes used for specifying dependencies as
+documented in <xref linkend="ssec-stdenv-dependencies"/>. We "splice"
+together the run-time and build-time package sets with
+<varname>callPackage</varname>, and then <varname>mkDerivation</varname>
+for each of four attributes pulls the right derivation out. This splicing
+can be skipped when not cross-compiling as the package sets are the same,
+but is a bit slow for cross-compiling. Because of this, a
+best-of-both-worlds solution is in the works with no splicing or explicit
+access of <varname>buildPackages</varname> needed. For now, feel free to
+use either method.
 </para>
 <note>

@@ -311,8 +312,8 @@
 should be answered here. Ideally, the information above is exhaustive, so
 this section cannot provide any new information, but it is ludicrous and
 cruel to expect everyone to spend effort working through the interaction of
-many features just to figure out the same answer to the same common problem.
-Feel free to add to this list!
+many features just to figure out the same answer to the same common
+problem. Feel free to add to this list!
 </para>
 <qandaset>

@@ -434,14 +435,15 @@ nix-build &lt;nixpkgs&gt; --arg crossSystem '{ config = "&lt;arch&gt;-&lt;os&gt;
 build plan or package set. A simple "build vs deploy" dichotomy is adequate:
 the sliding window principle described in the previous section shows how to
 interpolate between the these two "end points" to get the 3 platform triple
-for each bootstrapping stage. That means for any package a given package set,
-even those not bound on the top level but only reachable via dependencies or
-<varname>buildPackages</varname>, the three platforms will be defined as one
-of <varname>localSystem</varname> or <varname>crossSystem</varname>, with the
-former replacing the latter as one traverses build-time dependencies. A last
-simple difference is that <varname>crossSystem</varname> should be null when
-one doesn't want to cross-compile, while the <varname>*Platform</varname>s
-are always non-null. <varname>localSystem</varname> is always non-null.
+for each bootstrapping stage. That means for any package a given package
+set, even those not bound on the top level but only reachable via
+dependencies or <varname>buildPackages</varname>, the three platforms will
+be defined as one of <varname>localSystem</varname> or
+<varname>crossSystem</varname>, with the former replacing the latter as one
+traverses build-time dependencies. A last simple difference is that
+<varname>crossSystem</varname> should be null when one doesn't want to
+cross-compile, while the <varname>*Platform</varname>s are always non-null.
+<varname>localSystem</varname> is always non-null.
 </para>
 </section>
 <!--============================================================-->
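
A minimal sketch of the localSystem/crossSystem distinction just described; the target triple is illustrative:

  import <nixpkgs> {
    # null (the default) means "not cross-compiling":
    crossSystem = { config = "aarch64-unknown-linux-gnu"; };
  }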
@@ -455,13 +457,13 @@ nix-build &lt;nixpkgs&gt; --arg crossSystem '{ config = "&lt;arch&gt;-&lt;os&gt;
 <note>
 <para>
 If one explores Nixpkgs, they will see derivations with names like
-<literal>gccCross</literal>. Such <literal>*Cross</literal> derivations is a
-holdover from before we properly distinguished between the host and target
-platforms—the derivation with "Cross" in the name covered the <literal>build
-= host != target</literal> case, while the other covered the <literal>host =
-target</literal>, with build platform the same or not based on whether one
-was using its <literal>.nativeDrv</literal> or <literal>.crossDrv</literal>.
-This ugliness will disappear soon.
+<literal>gccCross</literal>. Such <literal>*Cross</literal> derivations is
+a holdover from before we properly distinguished between the host and
+target platforms—the derivation with "Cross" in the name covered the
+<literal>build = host != target</literal> case, while the other covered the
+<literal>host = target</literal>, with build platform the same or not based
+on whether one was using its <literal>.nativeDrv</literal> or
+<literal>.crossDrv</literal>. This ugliness will disappear soon.
 </para>
 </note>
 </section>
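
To tie the splicing discussion earlier in this file to concrete code, a hedged sketch of a package that separates build-time from run-time dependencies; the attribute names are the standard mkDerivation ones, the package choices are illustrative:

  stdenv.mkDerivation {
    name = "example";
    # Runs on the build platform; splicing resolves this to buildPackages.*:
    nativeBuildInputs = [ pkgconfig ];
    # Linked into the output, i.e. present on the host platform at run time:
    buildInputs = [ openssl ];
  }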

@@ -16,6 +16,7 @@
 <xi:include href="functions/fhs-environments.xml" />
 <xi:include href="functions/shell.xml" />
 <xi:include href="functions/dockertools.xml" />
+<xi:include href="functions/appimagetools.xml" />
 <xi:include href="functions/prefer-remote-fetch.xml" />
 <xi:include href="functions/nix-gitignore.xml" />
 </chapter>

doc/functions/appimagetools.xml (new file)

@@ -0,0 +1,118 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-pkgs-appimageTools">
<title>pkgs.appimageTools</title>
<para>
<varname>pkgs.appimageTools</varname> is a set of functions for extracting
and wrapping <link xlink:href="https://appimage.org/">AppImage</link> files.
They are meant to be used if traditional packaging from source is infeasible,
or it would take too long. To quickly run an AppImage file,
<literal>pkgs.appimage-run</literal> can be used as well.
</para>
<warning>
<para>
The <varname>appimageTools</varname> API is unstable and may be subject to
backwards-incompatible changes in the future.
</para>
</warning>
<section xml:id="ssec-pkgs-appimageTools-formats">
<title>AppImage formats</title>
<para>
There are different formats for AppImages, see
<link xlink:href="https://github.com/AppImage/AppImageSpec/blob/74ad9ca2f94bf864a4a0dac1f369dd4f00bd1c28/draft.md#image-format">the
specification</link> for details.
</para>
<itemizedlist>
<listitem>
<para>
Type 1 images are ISO 9660 files that are also ELF executables.
</para>
</listitem>
<listitem>
<para>
Type 2 images are ELF executables with an appended filesystem.
</para>
</listitem>
</itemizedlist>
<para>
They can be told apart with <command>file -k</command>:
</para>
<screen>
<prompt>$ </prompt>file -k type1.AppImage
type1.AppImage: ELF 64-bit LSB executable, x86-64, version 1 (SYSV) ISO 9660 CD-ROM filesystem data 'AppImage' (Lepton 3.x), scale 0-0,
spot sensor temperature 0.000000, unit celsius, color scheme 0, calibration: offset 0.000000, slope 0.000000, dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 2.6.18, BuildID[sha1]=d629f6099d2344ad82818172add1d38c5e11bc6d, stripped\012- data
<prompt>$ </prompt>file -k type2.AppImage
type2.AppImage: ELF 64-bit LSB executable, x86-64, version 1 (SYSV) (Lepton 3.x), scale 232-60668, spot sensor temperature -4.187500, color scheme 15, show scale bar, calibration: offset -0.000000, slope 0.000000 (Lepton 2.x), scale 4111-45000, spot sensor temperature 412442.250000, color scheme 3, minimum point enabled, calibration: offset -75402534979642766821519867692934234112.000000, slope 5815371847733706829839455140374904832.000000, dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 2.6.18, BuildID[sha1]=79dcc4e55a61c293c5e19edbd8d65b202842579f, stripped\012- data
</screen>
<para>
Note how the type 1 AppImage is described as an <literal>ISO 9660 CD-ROM
filesystem</literal>, and the type 2 AppImage is not.
</para>
</section>
<section xml:id="ssec-pkgs-appimageTools-wrapping">
<title>Wrapping</title>
<para>
Depending on the type of AppImage you're wrapping, you'll have to use
<varname>wrapType1</varname> or <varname>wrapType2</varname>.
</para>
<programlisting>
appimageTools.wrapType2 { # or wrapType1
name = "patchwork"; <co xml:id='ex-appimageTools-wrapping-1' />
src = fetchurl { <co xml:id='ex-appimageTools-wrapping-2' />
url = https://github.com/ssbc/patchwork/releases/download/v3.11.4/Patchwork-3.11.4-linux-x86_64.AppImage;
sha256 = "1blsprpkvm0ws9b96gb36f0rbf8f5jgmw4x6dsb1kswr4ysf591s";
};
extraPkgs = pkgs: with pkgs; [ ]; <co xml:id='ex-appimageTools-wrapping-3' />
}</programlisting>
<calloutlist>
<callout arearefs='ex-appimageTools-wrapping-1'>
<para>
<varname>name</varname> specifies the name of the resulting image.
</para>
</callout>
<callout arearefs='ex-appimageTools-wrapping-2'>
<para>
<varname>src</varname> specifies the AppImage file to extract.
</para>
</callout>
<callout arearefs='ex-appimageTools-wrapping-3'>
<para>
<varname>extraPkgs</varname> allows you to pass a function to include
additional packages inside the FHS environment your AppImage is going to
run in. There are a few ways to learn which dependencies an application
needs:
<itemizedlist>
<listitem>
<para>
Looking through the extracted AppImage files, reading its scripts and
running <command>patchelf</command> and <command>ldd</command> on its
executables. This can also be done in <command>appimage-run</command>,
by setting <command>APPIMAGE_DEBUG_EXEC=bash</command>.
</para>
</listitem>
<listitem>
<para>
Running <command>strace -vfefile</command> on the wrapped executable,
looking for libraries that can't be found.
</para>
</listitem>
</itemizedlist>
</para>
</callout>
</calloutlist>
</section>
</section>

@@ -24,9 +24,9 @@
 <para>
 This function is analogous to the <command>docker build</command> command,
-in that it can be used to build a Docker-compatible repository tarball containing
-a single image with one or multiple layers. As such, the result is suitable
-for being loaded in Docker with <command>docker load</command>.
+in that it can be used to build a Docker-compatible repository tarball
+containing a single image with one or multiple layers. As such, the result
+is suitable for being loaded in Docker with <command>docker load</command>.
 </para>
 <para>

@@ -47,7 +47,7 @@ buildImage {
 contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
 runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
-#!${stdenv.shell}
+#!${pkgs.runtimeShell}
 mkdir -p /data
 '';

@@ -190,8 +190,8 @@ buildImage {
 By default <function>buildImage</function> will use a static date of one
 second past the UNIX Epoch. This allows <function>buildImage</function> to
 produce binary reproducible images. When listing images with
-<command>docker images</command>, the newly created images will be
-listed like this:
+<command>docker images</command>, the newly created images will be listed
+like this:
 </para>
 <screen><![CDATA[
 $ docker images

@@ -402,9 +402,9 @@ pkgs.dockerTools.buildLayeredImage {
 <para>
 This function is analogous to the <command>docker pull</command> command, in
-that it can be used to pull a Docker image from a Docker registry. By default
-<link xlink:href="https://hub.docker.com/">Docker Hub</link> is used to pull
-images.
+that it can be used to pull a Docker image from a Docker registry. By
+default <link xlink:href="https://hub.docker.com/">Docker Hub</link> is used
+to pull images.
 </para>
 <para>

@@ -484,10 +484,10 @@ sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b
 <para>
 This function is analogous to the <command>docker export</command> command,
-in that it can be used to flatten a Docker image that contains multiple layers. It
-is in fact the result of the merge of all the layers of the image. As such,
-the result is suitable for being imported in Docker with <command>docker
-import</command>.
+in that it can be used to flatten a Docker image that contains multiple
+layers. It is in fact the result of the merge of all the layers of the
+image. As such, the result is suitable for being imported in Docker with
+<command>docker import</command>.
 </para>
 <note>

@@ -544,7 +544,7 @@ buildImage {
 name = "shadow-basic";
 runAsRoot = ''
-#!${stdenv.shell}
+#!${pkgs.runtimeShell}
 ${shadowSetup}
 groupadd -r redis
 useradd -r -g redis redis

@@ -5,21 +5,18 @@
 <title>Fetcher functions</title>
 <para>
-When using Nix, you will frequently need to download source code
-and other files from the internet. Nixpkgs comes with a few helper
-functions that allow you to fetch fixed-output derivations in a
-structured way.
+When using Nix, you will frequently need to download source code and other
+files from the internet. Nixpkgs comes with a few helper functions that allow
+you to fetch fixed-output derivations in a structured way.
 </para>
 <para>
 The two fetcher primitives are <function>fetchurl</function> and
-<function>fetchzip</function>. Both of these have two required
-arguments, a URL and a hash. The hash is typically
-<literal>sha256</literal>, although many more hash algorithms are
-supported. Nixpkgs contributors are currently recommended to use
-<literal>sha256</literal>. This hash will be used by Nix to
-identify your source. A typical usage of fetchurl is provided
-below.
+<function>fetchzip</function>. Both of these have two required arguments, a
+URL and a hash. The hash is typically <literal>sha256</literal>, although
+many more hash algorithms are supported. Nixpkgs contributors are currently
+recommended to use <literal>sha256</literal>. This hash will be used by Nix
+to identify your source. A typical usage of fetchurl is provided below.
 </para>
 <programlisting><![CDATA[

@@ -37,30 +34,28 @@ stdenv.mkDerivation {
 <para>
 The main difference between <function>fetchurl</function> and
 <function>fetchzip</function> is in how they store the contents.
-<function>fetchurl</function> will store the unaltered contents of
-the URL within the Nix store. <function>fetchzip</function> on the
-other hand will decompress the archive for you, making files and
-directories directly accessible in the future.
-<function>fetchzip</function> can only be used with archives.
-Despite the name, <function>fetchzip</function> is not limited to
+<function>fetchurl</function> will store the unaltered contents of the URL
+within the Nix store. <function>fetchzip</function> on the other hand will
+decompress the archive for you, making files and directories directly
+accessible in the future. <function>fetchzip</function> can only be used with
+archives. Despite the name, <function>fetchzip</function> is not limited to
 .zip files and can also be used with any tarball.
 </para>
 <para>
 <function>fetchpatch</function> works very similarly to
-<function>fetchurl</function> with the same arguments expected. It
-expects patch files as a source and and performs normalization on
-them before computing the checksum. For example it will remove
-comments or other unstable parts that are sometimes added by
-version control systems and can change over time.
+<function>fetchurl</function> with the same arguments expected. It expects
+patch files as a source and and performs normalization on them before
+computing the checksum. For example it will remove comments or other unstable
+parts that are sometimes added by version control systems and can change over
+time.
 </para>
 <para>
-Other fetcher functions allow you to add source code directly from
-a VCS such as subversion or git. These are mostly straightforward
-names based on the name of the command used with the VCS system.
-Because they give you a working repository, they act most like
-<function>fetchzip</function>.
+Other fetcher functions allow you to add source code directly from a VCS such
+as subversion or git. These are mostly straightforward names based on the
+name of the command used with the VCS system. Because they give you a working
+repository, they act most like <function>fetchzip</function>.
 </para>
 <variablelist>

@@ -70,9 +65,8 @@ stdenv.mkDerivation {
 </term>
 <listitem>
 <para>
-Used with Subversion. Expects <literal>url</literal> to a
-Subversion directory, <literal>rev</literal>, and
-<literal>sha256</literal>.
+Used with Subversion. Expects <literal>url</literal> to a Subversion
+directory, <literal>rev</literal>, and <literal>sha256</literal>.
 </para>
 </listitem>
 </varlistentry>

@@ -84,9 +78,8 @@ stdenv.mkDerivation {
 <para>
 Used with Git. Expects <literal>url</literal> to a Git repo,
 <literal>rev</literal>, and <literal>sha256</literal>.
-<literal>rev</literal> in this case can be full the git commit
-id (SHA1 hash) or a tag name like
-<literal>refs/tags/v1.0</literal>.
+<literal>rev</literal> in this case can be full the git commit id (SHA1
+hash) or a tag name like <literal>refs/tags/v1.0</literal>.
 </para>
 </listitem>
 </varlistentry>
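
A minimal fetchgit sketch using just the arguments listed above; the URL and hash are placeholders:

  src = fetchgit {
    url = "https://example.org/project.git";
    rev = "refs/tags/v1.0";  # or a full SHA1 commit id
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };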
@@ -96,8 +89,8 @@ stdenv.mkDerivation {
 </term>
 <listitem>
 <para>
-Used with Fossil. Expects <literal>url</literal> to a Fossil
-archive, <literal>rev</literal>, and <literal>sha256</literal>.
+Used with Fossil. Expects <literal>url</literal> to a Fossil archive,
+<literal>rev</literal>, and <literal>sha256</literal>.
 </para>
 </listitem>
 </varlistentry>

@@ -107,8 +100,8 @@ stdenv.mkDerivation {
 </term>
 <listitem>
 <para>
-Used with CVS. Expects <literal>cvsRoot</literal>,
-<literal>tag</literal>, and <literal>sha256</literal>.
+Used with CVS. Expects <literal>cvsRoot</literal>, <literal>tag</literal>,
+and <literal>sha256</literal>.
 </para>
 </listitem>
 </varlistentry>

@@ -126,11 +119,10 @@ stdenv.mkDerivation {
 </variablelist>
 <para>
-A number of fetcher functions wrap part of
-<function>fetchurl</function> and <function>fetchzip</function>.
-They are mainly convenience functions intended for commonly used
-destinations of source code in Nixpkgs. These wrapper fetchers are
-listed below.
+A number of fetcher functions wrap part of <function>fetchurl</function> and
+<function>fetchzip</function>. They are mainly convenience functions intended
+for commonly used destinations of source code in Nixpkgs. These wrapper
+fetchers are listed below.
 </para>
 <variablelist>

@@ -141,17 +133,15 @@ stdenv.mkDerivation {
 <listitem>
 <para>
 <function>fetchFromGitHub</function> expects four arguments.
-<literal>owner</literal> is a string corresponding to the
-GitHub user or organization that controls this repository.
-<literal>repo</literal> corresponds to the name of the
-software repository. These are located at the top of every
-GitHub HTML page as
-<literal>owner</literal>/<literal>repo</literal>.
-<literal>rev</literal> corresponds to the Git commit hash or
-tag (e.g <literal>v1.0</literal>) that will be downloaded from
-Git. Finally, <literal>sha256</literal> corresponds to the
-hash of the extracted directory. Again, other hash algorithms
-are also available but <literal>sha256</literal> is currently
+<literal>owner</literal> is a string corresponding to the GitHub user or
+organization that controls this repository. <literal>repo</literal>
+corresponds to the name of the software repository. These are located at
+the top of every GitHub HTML page as
+<literal>owner</literal>/<literal>repo</literal>. <literal>rev</literal>
+corresponds to the Git commit hash or tag (e.g <literal>v1.0</literal>)
+that will be downloaded from Git. Finally, <literal>sha256</literal>
+corresponds to the hash of the extracted directory. Again, other hash
+algorithms are also available but <literal>sha256</literal> is currently
 preferred.
 </para>
 </listitem>
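
Putting the four arguments together, a sketch reusing the NixOS/nix revision quoted earlier in this diff; the hash is a placeholder:

  src = fetchFromGitHub {
    owner = "NixOS";
    repo = "nix";
    rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };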
@@ -162,8 +152,8 @@ stdenv.mkDerivation {
 </term>
 <listitem>
 <para>
-This is used with GitLab repositories. The arguments expected
-are very similar to fetchFromGitHub above.
+This is used with GitLab repositories. The arguments expected are very
+similar to fetchFromGitHub above.
 </para>
 </listitem>
 </varlistentry>

@@ -173,8 +163,8 @@ stdenv.mkDerivation {
 </term>
 <listitem>
 <para>
-This is used with BitBucket repositories. The arguments expected
-are very similar to fetchFromGitHub above.
+This is used with BitBucket repositories. The arguments expected are very
+similar to fetchFromGitHub above.
 </para>
 </listitem>
 </varlistentry>

@@ -184,8 +174,8 @@ stdenv.mkDerivation {
 </term>
 <listitem>
 <para>
-This is used with Savannah repositories. The arguments expected
-are very similar to fetchFromGitHub above.
+This is used with Savannah repositories. The arguments expected are very
+similar to fetchFromGitHub above.
 </para>
 </listitem>
 </varlistentry>

@@ -195,12 +185,10 @@ stdenv.mkDerivation {
 </term>
 <listitem>
 <para>
-This is used with repo.or.cz repositories. The arguments
-expected are very similar to fetchFromGitHub above.
+This is used with repo.or.cz repositories. The arguments expected are very
+similar to fetchFromGitHub above.
 </para>
 </listitem>
 </varlistentry>
 </variablelist>
 </section>

@@ -16,9 +16,14 @@
 <!-- These docs are generated via nixdoc. To add another generated
 library function file to this list, the file
 `lib-function-docs.nix` must also be updated. -->
 <xi:include href="./library/generated/strings.xml" />
 <xi:include href="./library/generated/trivial.xml" />
 <xi:include href="./library/generated/lists.xml" />
 <xi:include href="./library/generated/debug.xml" />
 <xi:include href="./library/generated/options.xml" />
 </section>

@@ -17,9 +17,9 @@
 <literal>pkgs.nix-gitignore</literal> exports a number of functions, but
 you'll most likely need either <literal>gitignoreSource</literal> or
 <literal>gitignoreSourcePure</literal>. As their first argument, they both
-accept either 1. a file with gitignore lines or 2. a string
-with gitignore lines, or 3. a list of either of the two. They will be
-concatenated into a single big string.
+accept either 1. a file with gitignore lines or 2. a string with gitignore
+lines, or 3. a list of either of the two. They will be concatenated into a
+single big string.
 </para>
 <programlisting><![CDATA[

@@ -40,8 +40,8 @@
 ]]></programlisting>
 <para>
-These functions are derived from the <literal>Filter</literal> functions
-by setting the first filter argument to <literal>(_: _: true)</literal>:
+These functions are derived from the <literal>Filter</literal> functions by
+setting the first filter argument to <literal>(_: _: true)</literal>:
 </para>
 <programlisting><![CDATA[

@@ -50,7 +50,12 @@ gitignoreSource = gitignoreFilterSource (_: _: true);
 ]]></programlisting>
 <para>
-Those filter functions accept the same arguments the <literal>builtins.filterSource</literal> function would pass to its filters, thus <literal>fn: gitignoreFilterSourcePure fn ""</literal> should be extensionally equivalent to <literal>filterSource</literal>. The file is blacklisted iff it's blacklisted by either your filter or the gitignoreFilter.
+Those filter functions accept the same arguments the
+<literal>builtins.filterSource</literal> function would pass to its filters,
+thus <literal>fn: gitignoreFilterSourcePure fn ""</literal> should be
+extensionally equivalent to <literal>filterSource</literal>. The file is
+blacklisted iff it's blacklisted by either your filter or the
+gitignoreFilter.
 </para>
 <para>

@@ -66,7 +71,8 @@ gitignoreFilter = ign: root: filterPattern (gitignoreToPatterns ign) root;
 <title>gitignore files in subdirectories</title>
 <para>
-If you wish to use a filter that would search for .gitignore files in subdirectories, just like git does by default, use this function:
+If you wish to use a filter that would search for .gitignore files in
+subdirectories, just like git does by default, use this function:
 </para>
 <programlisting><![CDATA[

@@ -7,16 +7,14 @@
 <para>
 <function>prefer-remote-fetch</function> is an overlay that download sources
 on remote builder. This is useful when the evaluating machine has a slow
-upload while the builder can fetch faster directly from the source.
-To use it, put the following snippet as a new overlay:
+upload while the builder can fetch faster directly from the source. To use
+it, put the following snippet as a new overlay:
 <programlisting>
 self: super:
 (super.prefer-remote-fetch self super)
 </programlisting>
-A full configuration example for that sets the overlay up for your own account,
-could look like this
+A full configuration example for that sets the overlay up for your own
+account, could look like this
 <programlisting>
 $ mkdir ~/.config/nixpkgs/overlays/
 $ cat &gt; ~/.config/nixpkgs/overlays/prefer-remote-fetch.nix &lt;&lt;EOF

@@ -5,12 +5,11 @@
 <title>Trivial builders</title>
 <para>
-Nixpkgs provides a couple of functions that help with building
-derivations. The most important one,
-<function>stdenv.mkDerivation</function>, has already been
-documented above. The following functions wrap
-<function>stdenv.mkDerivation</function>, making it easier to use
-in certain cases.
+Nixpkgs provides a couple of functions that help with building derivations.
+The most important one, <function>stdenv.mkDerivation</function>, has already
+been documented above. The following functions wrap
+<function>stdenv.mkDerivation</function>, making it easier to use in certain
+cases.
 </para>
 <variablelist>

@@ -22,21 +21,18 @@
 <para>
 This takes three arguments, <literal>name</literal>,
 <literal>env</literal>, and <literal>buildCommand</literal>.
-<literal>name</literal> is just the name that Nix will append
-to the store path in the same way that
-<literal>stdenv.mkDerivation</literal> uses its
-<literal>name</literal> attribute. <literal>env</literal> is an
-attribute set specifying environment variables that will be set
-for this derivation. These attributes are then passed to the
-wrapped <literal>stdenv.mkDerivation</literal>.
-<literal>buildCommand</literal> specifies the commands that
-will be run to create this derivation. Note that you will need
-to create <literal>$out</literal> for Nix to register the
-command as successful.
+<literal>name</literal> is just the name that Nix will append to the store
+path in the same way that <literal>stdenv.mkDerivation</literal> uses its
+<literal>name</literal> attribute. <literal>env</literal> is an attribute
+set specifying environment variables that will be set for this derivation.
+These attributes are then passed to the wrapped
+<literal>stdenv.mkDerivation</literal>. <literal>buildCommand</literal>
+specifies the commands that will be run to create this derivation. Note
+that you will need to create <literal>$out</literal> for Nix to register
+the command as successful.
 </para>
 <para>
-An example of using <literal>runCommand</literal> is provided
-below.
+An example of using <literal>runCommand</literal> is provided below.
 </para>
 <programlisting>
 (import &lt;nixpkgs&gt; {}).runCommand "my-example" {} ''

@@ -66,39 +62,33 @@
 </term>
 <listitem>
 <para>
-This works just like <literal>runCommand</literal>. The only
-difference is that it also provides a C compiler in
-<literal>buildCommand</literal>s environment. To minimize your
-dependencies, you should only use this if you are sure you will
-need a C compiler as part of running your command.
+This works just like <literal>runCommand</literal>. The only difference is
+that it also provides a C compiler in <literal>buildCommand</literal>s
+environment. To minimize your dependencies, you should only use this if
+you are sure you will need a C compiler as part of running your command.
 </para>
 </listitem>
 </varlistentry>
 <varlistentry>
 <term>
-<literal>writeTextFile</literal>, <literal>writeText</literal>,
-<literal>writeTextDir</literal>, <literal>writeScript</literal>,
-<literal>writeScriptBin</literal>
+<literal>writeTextFile</literal>, <literal>writeText</literal>, <literal>writeTextDir</literal>, <literal>writeScript</literal>, <literal>writeScriptBin</literal>
 </term>
 <listitem>
 <para>
-These functions write <literal>text</literal> to the Nix store.
-This is useful for creating scripts from Nix expressions.
-<literal>writeTextFile</literal> takes an attribute set and
-expects two arguments, <literal>name</literal> and
-<literal>text</literal>. <literal>name</literal> corresponds to
-the name used in the Nix store path. <literal>text</literal>
-will be the contents of the file. You can also set
-<literal>executable</literal> to true to make this file have
-the executable bit set.
+These functions write <literal>text</literal> to the Nix store. This is
+useful for creating scripts from Nix expressions.
+<literal>writeTextFile</literal> takes an attribute set and expects two
+arguments, <literal>name</literal> and <literal>text</literal>.
+<literal>name</literal> corresponds to the name used in the Nix store
+path. <literal>text</literal> will be the contents of the file. You can
+also set <literal>executable</literal> to true to make this file have the
+executable bit set.
 </para>
 <para>
-Many more commands wrap <literal>writeTextFile</literal>
-including <literal>writeText</literal>,
-<literal>writeTextDir</literal>,
-<literal>writeScript</literal>, and
-<literal>writeScriptBin</literal>. These are convenience
-functions over <literal>writeTextFile</literal>.
+Many more commands wrap <literal>writeTextFile</literal> including
+<literal>writeText</literal>, <literal>writeTextDir</literal>,
+<literal>writeScript</literal>, and <literal>writeScriptBin</literal>.
+These are convenience functions over <literal>writeTextFile</literal>.
 </para>
 </listitem>
 </varlistentry>

@@ -109,16 +99,15 @@
 <listitem>
 <para>
 This can be used to put many derivations into the same directory
-structure. It works by creating a new derivation and adding
-symlinks to each of the paths listed. It expects two arguments,
+structure. It works by creating a new derivation and adding symlinks to
+each of the paths listed. It expects two arguments,
 <literal>name</literal>, and <literal>paths</literal>.
-<literal>name</literal> is the name used in the Nix store path
-for the created derivation. <literal>paths</literal> is a list of
-paths that will be symlinked. These paths can be to Nix store
-derivations or any other subdirectory contained within.
+<literal>name</literal> is the name used in the Nix store path for the
+created derivation. <literal>paths</literal> is a list of paths that will
+be symlinked. These paths can be to Nix store derivations or any other
+subdirectory contained within.
 </para>
 </listitem>
 </varlistentry>
 </variablelist>
 </section>
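
A hedged sketch combining two of the helpers documented above; the names and contents are illustrative, and pkgs.runtimeShell is assumed (it appears elsewhere in this diff):

  let
    hello = pkgs.writeScriptBin "hello" ''
      #!${pkgs.runtimeShell}
      echo hello
    '';
  in pkgs.symlinkJoin {
    name = "my-tools";            # name used in the Nix store path
    paths = [ hello pkgs.curl ];  # store paths whose contents get symlinked
  }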

@@ -7,33 +7,32 @@
 OCaml libraries should be installed in
 <literal>$(out)/lib/ocaml/${ocaml.version}/site-lib/</literal>. Such
 directories are automatically added to the <literal>$OCAMLPATH</literal>
-environment variable when building another package that depends on them
-or when opening a <literal>nix-shell</literal>.
+environment variable when building another package that depends on them or
+when opening a <literal>nix-shell</literal>.
 </para>
 <para>
-Given that most of the OCaml ecosystem is now built with dune,
-nixpkgs includes a convenience build support function called
-<literal>buildDunePackage</literal> that will build an OCaml package
-using dune, OCaml and findlib and any additional dependencies provided
-as <literal>buildInputs</literal> or <literal>propagatedBuildInputs</literal>.
+Given that most of the OCaml ecosystem is now built with dune, nixpkgs
+includes a convenience build support function called
+<literal>buildDunePackage</literal> that will build an OCaml package using
+dune, OCaml and findlib and any additional dependencies provided as
+<literal>buildInputs</literal> or <literal>propagatedBuildInputs</literal>.
 </para>
 <para>
 Here is a simple package example. It defines an (optional) attribute
 <literal>minimumOCamlVersion</literal> that will be used to throw a
-descriptive evaluation error if building with an older OCaml is attempted.
-It uses the <literal>fetchFromGitHub</literal> fetcher to get its source.
-It sets the <literal>doCheck</literal> (optional) attribute to
-<literal>true</literal> which means that tests will be run with
-<literal>dune runtest -p angstrom</literal> after the build
-(<literal>dune build -p angstrom</literal>) is complete.
-It uses <literal>alcotest</literal> as a build input (because it is needed
-to run the tests) and <literal>bigstringaf</literal> and
-<literal>result</literal> as propagated build inputs (thus they will also
-be available to libraries depending on this library).
-The library will be installed using the <literal>angstrom.install</literal>
-file that dune generates.
+descriptive evaluation error if building with an older OCaml is attempted. It
+uses the <literal>fetchFromGitHub</literal> fetcher to get its source. It
+sets the <literal>doCheck</literal> (optional) attribute to
+<literal>true</literal> which means that tests will be run with <literal>dune
+runtest -p angstrom</literal> after the build (<literal>dune build -p
+angstrom</literal>) is complete. It uses <literal>alcotest</literal> as a
+build input (because it is needed to run the tests) and
+<literal>bigstringaf</literal> and <literal>result</literal> as propagated
+build inputs (thus they will also be available to libraries depending on this
+library). The library will be installed using the
+<literal>angstrom.install</literal> file that dune generates.
 </para>
 <programlisting>

@@ -69,8 +68,8 @@ buildDunePackage rec {
 Here is a second example, this time using a source archive generated with
 <literal>dune-release</literal>. It is a good idea to use this archive when
 it is available as it will usually contain substituted variables such as a
-<literal>%%VERSION%%</literal> field. This library does not depend
-on any other OCaml library and no tests are run after building it.
+<literal>%%VERSION%%</literal> field. This library does not depend on any
+other OCaml library and no tests are run after building it.
 </para>
 <programlisting>

@@ -95,5 +94,4 @@ buildDunePackage rec {
 };
 }
 </programlisting>
 </section>
View File
@ -602,11 +602,10 @@ as the interpreter unless overridden otherwise.
All parameters from the `stdenv.mkDerivation` function are still supported. The following are specific to `buildPythonPackage`: All parameters from the `stdenv.mkDerivation` function are still supported. The following are specific to `buildPythonPackage`:
* `catchConflicts ? true`: If `true`, abort the package build if a package name appears more than once in the dependency tree. Default is `true`. * `catchConflicts ? true`: If `true`, abort the package build if a package name appears more than once in the dependency tree. Default is `true`.
* `checkInputs ? []`: Dependencies needed for running the `checkPhase`. These are added to `buildInputs` when `doCheck = true`.
* `disabled ? false`: If `true`, package is not built for the particular Python interpreter version. * `disabled ? false`: If `true`, package is not built for the particular Python interpreter version.
* `dontWrapPythonPrograms ? false`: Skip wrapping of python programs. * `dontWrapPythonPrograms ? false`: Skip wrapping of python programs.
* `installFlags ? []`: A list of strings. Arguments to be passed to `pip install`. To pass options to `python setup.py install`, use `--install-option`. E.g., `installFlags=["--install-option='--cpp_implementation'"]. * `installFlags ? []`: A list of strings. Arguments to be passed to `pip install`. To pass options to `python setup.py install`, use `--install-option`. E.g., `installFlags=["--install-option='--cpp_implementation'"]`.
* `format ? "setuptools"`: Format of the source. Valid options are `"setuptools"`, `"flit"`, `"wheel"`, and `"other"`. `"setuptools"` is for when the source has a `setup.py` and `setuptools` is used to build a wheel, `flit`, in case `flit` should be used to build a wheel, and `wheel` in case a wheel is provided. Use `other` when a custom `buildPhase` and/or `installPhase` is needed. * `format ? "setuptools"`: Format of the source. Valid options are `"setuptools"`, `"pyproject"`, `"flit"`, `"wheel"`, and `"other"`. `"setuptools"` is for when the source has a `setup.py` and `setuptools` is used to build a wheel, `flit`, in case `flit` should be used to build a wheel, and `wheel` in case a wheel is provided. Use `other` when a custom `buildPhase` and/or `installPhase` is needed.
* `makeWrapperArgs ? []`: A list of strings. Arguments to be passed to `makeWrapper`, which wraps generated binaries. By default, the arguments to `makeWrapper` set `PATH` and `PYTHONPATH` environment variables before calling the binary. Additional arguments here can allow a developer to set environment variables which will be available when the binary is run. For example, `makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]`. * `makeWrapperArgs ? []`: A list of strings. Arguments to be passed to `makeWrapper`, which wraps generated binaries. By default, the arguments to `makeWrapper` set `PATH` and `PYTHONPATH` environment variables before calling the binary. Additional arguments here can allow a developer to set environment variables which will be available when the binary is run. For example, `makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]`.
* `namePrefix`: Prepends text to `${name}` parameter. In case of libraries, this defaults to `"python3.5-"` for Python 3.5, etc., and in case of applications to `""`. * `namePrefix`: Prepends text to `${name}` parameter. In case of libraries, this defaults to `"python3.5-"` for Python 3.5, etc., and in case of applications to `""`.
* `pythonPath ? []`: List of packages to be added into `$PYTHONPATH`. Packages in `pythonPath` are not propagated (contrary to `propagatedBuildInputs`). * `pythonPath ? []`: List of packages to be added into `$PYTHONPATH`. Packages in `pythonPath` are not propagated (contrary to `propagatedBuildInputs`).
@ -615,6 +614,14 @@ All parameters from `stdenv.mkDerivation` function are still supported. The foll
* `removeBinByteCode ? true`: Remove bytecode from `/bin`. Bytecode is only created when the filenames end with `.py`. * `removeBinByteCode ? true`: Remove bytecode from `/bin`. Bytecode is only created when the filenames end with `.py`.
* `setupPyBuildFlags ? []`: List of flags passed to `setup.py build_ext` command. * `setupPyBuildFlags ? []`: List of flags passed to `setup.py build_ext` command.
The `stdenv.mkDerivation` function accepts various parameters for describing build inputs (see "Specifying dependencies"). The following are of special
interest for Python packages, either because these are primarily used, or because their behaviour is different:
* `nativeBuildInputs ? []`: Build-time only dependencies. Typically executables as well as the items listed in `setup_requires`.
* `buildInputs ? []`: Build and/or run-time dependencies that need to be compiled for the host machine. Typically non-Python libraries which are being linked.
* `checkInputs ? []`: Dependencies needed for running the `checkPhase`. These are added to `nativeBuildInputs` when `doCheck = true`. Items listed in `tests_require` go here.
* `propagatedBuildInputs ? []`: Aside from propagating dependencies, `buildPythonPackage` also injects code into and wraps executables with the paths included in this list. Items listed in `install_requires` go here.
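As a sketch of how these map in practice (the package name, hash, and dependencies below are placeholders):

```
{ buildPythonPackage, fetchPypi, setuptools_scm, requests, pytest }:

buildPythonPackage rec {
  pname = "example";  # hypothetical package
  version = "1.0.0";

  src = fetchPypi {
    inherit pname version;
    sha256 = "0000000000000000000000000000000000000000000000000000";  # placeholder
  };

  nativeBuildInputs = [ setuptools_scm ];  # setup_requires
  propagatedBuildInputs = [ requests ];    # install_requires
  checkInputs = [ pytest ];                # tests_require
}
```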
##### Overriding Python packages ##### Overriding Python packages
The `buildPythonPackage` function has a `overridePythonAttrs` method that The `buildPythonPackage` function has a `overridePythonAttrs` method that
@ -874,7 +881,6 @@ example of such a situation is when `py.test` is used.
''; '';
} }
``` ```
- Unicode issues can typically be fixed by including `glibcLocales` in `buildInputs` and exporting `LC_ALL=en_US.utf-8`.
- Tests that attempt to access `$HOME` can be fixed by using the following work-around before running tests (e.g. `preCheck`): `export HOME=$(mktemp -d)` - Tests that attempt to access `$HOME` can be fixed by using the following work-around before running tests (e.g. `preCheck`): `export HOME=$(mktemp -d)`
## FAQ ## FAQ
@ -1123,6 +1129,14 @@ LLVM implementation. To use that one instead, Intel recommends users set it with
Note that `mkl` is only available on `x86_64-{linux,darwin}` platforms; Note that `mkl` is only available on `x86_64-{linux,darwin}` platforms;
moreover, Hydra is not building and distributing pre-compiled binaries using it. moreover, Hydra is not building and distributing pre-compiled binaries using it.
### What inputs do `setup_requires`, `install_requires` and `tests_require` map to?
In a `setup.py` or `setup.cfg` it is common to declare dependencies:
* `setup_requires` corresponds to `nativeBuildInputs`
* `install_requires` corresponds to `propagatedBuildInputs`
* `tests_require` corresponds to `checkInputs`
## Contributing ## Contributing
### Contributing guidelines ### Contributing guidelines
View File
@ -307,19 +307,20 @@ packageOverrides = pkgs: {
</screen> </screen>
</para> </para>
</section> </section>
<section xml:id="sec-elm"> <section xml:id="sec-elm">
<title>Elm</title> <title>Elm</title>
<para> <para>
To update Elm compiler, see <filename>nixpkgs/pkgs/development/compilers/elm/README.md</filename>. To update Elm compiler, see
<filename>nixpkgs/pkgs/development/compilers/elm/README.md</filename>.
</para> </para>
<para> <para>
To package Elm applications, <link xlink:href="https://github.com/hercules-ci/elm2nix#elm2nix">read about elm2nix</link>. To package Elm applications,
<link xlink:href="https://github.com/hercules-ci/elm2nix#elm2nix">read about
elm2nix</link>.
</para> </para>
</section> </section>
<section xml:id="sec-shell-helpers"> <section xml:id="sec-shell-helpers">
<title>Interactive shell helpers</title> <title>Interactive shell helpers</title>
View File
@ -96,8 +96,8 @@
</programlisting> </programlisting>
<para> <para>
The package <literal>xcbuild</literal> can be used to build projects that The package <literal>xcbuild</literal> can be used to build projects that
really depend on Xcode. However, this replacement is not 100% really depend on Xcode. However, this replacement is not 100% compatible
compatible with Xcode and can occasionally cause issues. with Xcode and can occasionally cause issues.
</para> </para>
</listitem> </listitem>
</itemizedlist> </itemizedlist>
View File
@ -148,8 +148,8 @@ $ git add pkgs/development/libraries/libfoo/default.nix</screen>
<listitem> <listitem>
<para> <para>
You can use <command>nix-prefetch-url</command> You can use <command>nix-prefetch-url</command>
<replaceable>url</replaceable> to get the <replaceable>url</replaceable> to get the SHA-256 hash of source
SHA-256 hash of source distributions. There are similar commands as distributions. There are similar commands as
<command>nix-prefetch-git</command> and <command>nix-prefetch-git</command> and
<command>nix-prefetch-hg</command> available in <command>nix-prefetch-hg</command> available in
<literal>nix-prefetch-scripts</literal> package. <literal>nix-prefetch-scripts</literal> package.
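<para>
  For example, with a hypothetical URL (the hash is printed on standard
  output):
<screen>
$ nix-prefetch-url https://example.org/foo-1.0.tar.gz</screen>
</para>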
View File
@ -24,11 +24,13 @@
<para> <para>
The high change rate of Nixpkgs makes any pull request that remains open for The high change rate of Nixpkgs makes any pull request that remains open for
too long subject to conflicts that will require extra work from the submitter too long subject to conflicts that will require extra work from the submitter
or the merger. Reviewing pull requests in a timely manner and being responsive or the merger. Reviewing pull requests in a timely manner and being
to the comments is the key to avoiding this issue. GitHub provides sort filters responsive to the comments is the key to avoiding this issue. GitHub provides
that can be used to see the <link sort filters that can be used to see the
<link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most
recently</link> and the <link recently</link> and the
<link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least
recently</link> updated pull requests. We highly encourage looking at recently</link> updated pull requests. We highly encourage looking at
<link xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+review%3Anone+status%3Asuccess+-label%3A%222.status%3A+work-in-progress%22+no%3Aproject+no%3Aassignee+no%3Amilestone"> <link xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+review%3Anone+status%3Asuccess+-label%3A%222.status%3A+work-in-progress%22+no%3Aproject+no%3Aassignee+no%3Amilestone">
@ -609,8 +611,8 @@ policy.
create an issue or post on create an issue or post on
<link <link
xlink:href="https://discourse.nixos.org">Discourse</link> with xlink:href="https://discourse.nixos.org">Discourse</link> with
references of packages and modules they maintain so the maintainership can be references of packages and modules they maintain so the maintainership can
taken over by other contributors. be taken over by other contributors.
</para> </para>
</section> </section>
</chapter> </chapter>
View File
@ -228,18 +228,17 @@ genericBuild
</para> </para>
<para> <para>
The extension of <envar>PATH</envar> with dependencies, alluded to The extension of <envar>PATH</envar> with dependencies, alluded to above,
above, proceeds according to the relative platforms alone. The proceeds according to the relative platforms alone. The process is carried
process is carried out only for dependencies whose host platform out only for dependencies whose host platform matches the new derivation's
matches the new derivation's build platform i.e. dependencies which build platform i.e. dependencies which run on the platform where the new
run on the platform where the new derivation will be built. derivation will be built.
<footnote xml:id="footnote-stdenv-native-dependencies-in-path"> <footnote xml:id="footnote-stdenv-native-dependencies-in-path">
<para> <para>
Currently, this means for native builds all dependencies are put Currently, this means for native builds all dependencies are put on the
on the <envar>PATH</envar>. But in the future that may not be the <envar>PATH</envar>. But in the future that may not be the case for sake
case for sake of matching cross: the platforms would be assumed of matching cross: the platforms would be assumed to be unique for native
to be unique for native and cross builds alike, so only the and cross builds alike, so only the <varname>depsBuild*</varname> and
<varname>depsBuild*</varname> and
<varname>nativeBuildInputs</varname> would be added to the <varname>nativeBuildInputs</varname> would be added to the
<envar>PATH</envar>. <envar>PATH</envar>.
</para> </para>
@ -252,9 +251,10 @@ genericBuild
<para> <para>
The dependency is propagated when it forces some of its other-transitive The dependency is propagated when it forces some of its other-transitive
(non-immediate) downstream dependencies to also take it on as an immediate (non-immediate) downstream dependencies to also take it on as an immediate
dependency. Nix itself already takes a package's transitive dependencies into dependency. Nix itself already takes a package's transitive dependencies
account, but this propagation ensures nixpkgs-specific infrastructure like into account, but this propagation ensures nixpkgs-specific infrastructure
setup hooks (mentioned above) also are run as if the propagated dependency like setup hooks (mentioned above) also are run as if the propagated
were an immediate one. dependency were an immediate one.
</para> </para>
<para> <para>
@ -270,9 +270,9 @@ genericBuild
described by the current dependency's platform offsets. This results in sort of described by the current dependency's platform offsets. This results in sort of
a transitive closure of the dependency relation, with the offsets being a transitive closure of the dependency relation, with the offsets being
approximately summed when two dependency links are combined. We also prune approximately summed when two dependency links are combined. We also prune
transitive dependencies whose combined offsets go out-of-bounds, which can be transitive dependencies whose combined offsets go out-of-bounds, which can
viewed as a filter over that transitive closure removing dependencies that be viewed as a filter over that transitive closure removing dependencies
are blatantly absurd. that are blatantly absurd.
</para> </para>
<para> <para>
@ -287,8 +287,8 @@ genericBuild
propagation logic. propagation logic.
</para> </para>
</footnote> </footnote>
They're confusing in very different ways so... hopefully if something doesn't They're confusing in very different ways so... hopefully if something
make sense in one presentation, it will in the other! doesn't make sense in one presentation, it will in the other!
<programlisting> <programlisting>
let mapOffset(h, t, i) = i + (if i &lt;= 0 then h else t - 1) let mapOffset(h, t, i) = i + (if i &lt;= 0 then h else t - 1)
@ -324,31 +324,31 @@ let f(h, h + 1, i) = i + (if i &lt;= 0 then h else (h + 1) - 1)
let f(h, h + 1, i) = i + (if i &lt;= 0 then h else h) let f(h, h + 1, i) = i + (if i &lt;= 0 then h else h)
let f(h, h + 1, i) = i + h let f(h, h + 1, i) = i + h
</programlisting> </programlisting>
This is where "sum-like" comes in from above: We can just sum all of the host This is where "sum-like" comes in from above: We can just sum all of the
offsets to get the host offset of the transitive dependency. The target host offsets to get the host offset of the transitive dependency. The target
offset of the transitive dependency is simply the host offset + 1, just as it offset of the transitive dependency is simply the host offset + 1, just as
was with the dependencies composed to make this transitive one; it can be it was with the dependencies composed to make this transitive one; it can be
ignored as it doesn't add any new information. ignored as it doesn't add any new information.
</para> </para>
<para> <para>
Because of the bounds checks, the uncommon cases are <literal>h = t</literal> Because of the bounds checks, the uncommon cases are <literal>h =
and <literal>h + 2 = t</literal>. In the former case, the motivation for t</literal> and <literal>h + 2 = t</literal>. In the former case, the
<function>mapOffset</function> is that since its host and target platforms motivation for <function>mapOffset</function> is that since its host and
are the same, no transitive dependency of it should be able to "discover" an target platforms are the same, no transitive dependency of it should be able
offset greater than its reduced target offsets. to "discover" an offset greater than its reduced target offsets.
<function>mapOffset</function> effectively "squashes" all its transitive <function>mapOffset</function> effectively "squashes" all its transitive
dependencies' offsets so that none will ever be greater than the target dependencies' offsets so that none will ever be greater than the target
offset of the original <literal>h = t</literal> package. In the other case, offset of the original <literal>h = t</literal> package. In the other case,
<literal>h + 1</literal> is skipped over between the host and target offsets. <literal>h + 1</literal> is skipped over between the host and target
Instead of squashing the offsets, we need to "rip" them apart so no offsets. Instead of squashing the offsets, we need to "rip" them apart so no
transitive dependencies' offset is that one. transitive dependencies' offset is that one.
</para> </para>
<para> <para>
Overall, the unifying theme here is that propagation shouldn't be introducing Overall, the unifying theme here is that propagation shouldn't be
transitive dependencies involving platforms the depending package is unaware introducing transitive dependencies involving platforms the depending
of. The offset bounds checking and definition of package is unaware of. The offset bounds checking and definition of
<function>mapOffset</function> together ensure that this is the case. <function>mapOffset</function> together ensure that this is the case.
Discovering a new offset is discovering a new platform, and since those Discovering a new offset is discovering a new platform, and since those
platforms weren't in the derivation "spec" of the needing package, they platforms weren't in the derivation "spec" of the needing package, they
@ -381,8 +381,8 @@ let f(h, h + 1, i) = i + h
Since these packages are able to be run at build-time, they are always Since these packages are able to be run at build-time, they are always
added to the <envar>PATH</envar>, as described above. But since these added to the <envar>PATH</envar>, as described above. But since these
packages are only guaranteed to be able to run then, they shouldn't packages are only guaranteed to be able to run then, they shouldn't
persist as run-time dependencies. This isn't currently enforced, but could persist as run-time dependencies. This isn't currently enforced, but
be in the future. could be in the future.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -396,10 +396,10 @@ let f(h, h + 1, i) = i + h
platform, and target platform is the new derivation's host platform. This platform, and target platform is the new derivation's host platform. This
means a <literal>-1</literal> host offset and <literal>0</literal> target means a <literal>-1</literal> host offset and <literal>0</literal> target
offset from the new derivation's platforms. These are programs and offset from the new derivation's platforms. These are programs and
libraries used at build-time that, if they are a compiler or similar tool, libraries used at build-time that, if they are a compiler or similar
produce code to run at run-time—i.e. tools used to build the new tool, produce code to run at run-time—i.e. tools used to build the new
derivation. If the dependency doesn't care about the target platform (i.e. derivation. If the dependency doesn't care about the target platform
isn't a compiler or similar tool), put it here, rather than in (i.e. isn't a compiler or similar tool), put it here, rather than in
<varname>depsBuildBuild</varname> or <varname>depsBuildTarget</varname>. <varname>depsBuildBuild</varname> or <varname>depsBuildTarget</varname>.
This could be called <varname>depsBuildHost</varname> but This could be called <varname>depsBuildHost</varname> but
<varname>nativeBuildInputs</varname> is used for historical continuity. <varname>nativeBuildInputs</varname> is used for historical continuity.
@ -407,8 +407,9 @@ let f(h, h + 1, i) = i + h
<para> <para>
Since these packages are able to be run at build-time, they are added to Since these packages are able to be run at build-time, they are added to
the <envar>PATH</envar>, as described above. But since these packages are the <envar>PATH</envar>, as described above. But since these packages are
only guaranteed to be able to run then, they shouldn't persist as run-time only guaranteed to be able to run then, they shouldn't persist as
dependencies. This isn't currently enforced, but could be in the future. run-time dependencies. This isn't currently enforced, but could be in the
future.
</para> </para>
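<para>
  As a sketch, a package needing a build-time-only tool alongside an
  ordinary library dependency might declare (names illustrative):
<programlisting>
nativeBuildInputs = [ pkgconfig ];
buildInputs = [ openssl ];
</programlisting>
</para>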
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -421,33 +422,36 @@ let f(h, h + 1, i) = i + h
A list of dependencies whose host platform is the new derivation's build A list of dependencies whose host platform is the new derivation's build
platform, and target platform is the new derivation's target platform. platform, and target platform is the new derivation's target platform.
This means a <literal>-1</literal> host offset and <literal>1</literal> This means a <literal>-1</literal> host offset and <literal>1</literal>
target offset from the new derivation's platforms. These are programs used target offset from the new derivation's platforms. These are programs
at build time that produce code to run with code produced by the depending used at build time that produce code to run with code produced by the
package. Most commonly, these are tools used to build the runtime or depending package. Most commonly, these are tools used to build the
standard library that the currently-being-built compiler will inject into runtime or standard library that the currently-being-built compiler will
any code it compiles. In many cases, the currently-being-built-compiler is inject into any code it compiles. In many cases, the
itself employed for that task, but when that compiler won't run (i.e. its currently-being-built-compiler is itself employed for that task, but when
build and host platform differ) this is not possible. Other times, the that compiler won't run (i.e. its build and host platform differ) this is
compiler relies on some other tool, like binutils, that is always built not possible. Other times, the compiler relies on some other tool, like
separately so that the dependency is unconditional. binutils, that is always built separately so that the dependency is
unconditional.
</para> </para>
<para> <para>
This is a somewhat confusing concept to wrap one's head around, and for This is a somewhat confusing concept to wrap one's head around, and for
good reason. As the only dependency type where the platform offsets are good reason. As the only dependency type where the platform offsets are
not adjacent integers, it requires thinking of a bootstrapping stage not adjacent integers, it requires thinking of a bootstrapping stage
<emphasis>two</emphasis> away from the current one. It and its use-case go <emphasis>two</emphasis> away from the current one. It and its use-case
hand in hand and are both considered poor form: try to not need this sort go hand in hand and are both considered poor form: try to not need this
of dependency, and try to avoid building standard libraries and runtimes sort of dependency, and try to avoid building standard libraries and
in the same derivation as the compiler produces code using them. Instead runtimes in the same derivation as the compiler produces code using them.
strive to build those like a normal library, using the newly-built Instead strive to build those like a normal library, using the
compiler just as a normal library would. In short, do not use this newly-built compiler just as a normal library would. In short, do not use
attribute unless you are packaging a compiler and are sure it is needed. this attribute unless you are packaging a compiler and are sure it is
needed.
</para> </para>
<para> <para>
Since these packages are able to run at build time, they are added to the Since these packages are able to run at build time, they are added to the
<envar>PATH</envar>, as described above. But since these packages are only <envar>PATH</envar>, as described above. But since these packages are
guaranteed to be able to run then, they shouldn't persist as run-time only guaranteed to be able to run then, they shouldn't persist as
dependencies. This isn't currently enforced, but could be in the future. run-time dependencies. This isn't currently enforced, but could be in the
future.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -462,11 +466,11 @@ let f(h, h + 1, i) = i + h
and <literal>0</literal> target offset from the new derivation's host and <literal>0</literal> target offset from the new derivation's host
platform. These are packages used at run-time to generate code also used platform. These are packages used at run-time to generate code also used
at run-time. In practice, this would usually be tools used by compilers at run-time. In practice, this would usually be tools used by compilers
for macros or a metaprogramming system, or libraries used by the macros or for macros or a metaprogramming system, or libraries used by the macros
metaprogramming code itself. It's always preferable to use a or metaprogramming code itself. It's always preferable to use a
<varname>depsBuildBuild</varname> dependency in the derivation being built <varname>depsBuildBuild</varname> dependency in the derivation being
over a <varname>depsHostHost</varname> on the tool doing the building for built over a <varname>depsHostHost</varname> on the tool doing the
this purpose. building for this purpose.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -481,8 +485,8 @@ let f(h, h + 1, i) = i + h
<literal>1</literal> target offset from the new derivation's host <literal>1</literal> target offset from the new derivation's host
platform. This would be called <varname>depsHostTarget</varname> but for platform. This would be called <varname>depsHostTarget</varname> but for
historical continuity. If the dependency doesn't care about the target historical continuity. If the dependency doesn't care about the target
platform (i.e. isn't a compiler or similar tool), put it here, rather than platform (i.e. isn't a compiler or similar tool), put it here, rather
in <varname>depsBuildBuild</varname>. than in <varname>depsBuildBuild</varname>.
</para> </para>
<para> <para>
These are often programs and libraries used by the new derivation at These are often programs and libraries used by the new derivation at
@ -664,10 +668,11 @@ passthru = {
<literal>hello.baz.value1</literal>. We don't specify any usage or schema <literal>hello.baz.value1</literal>. We don't specify any usage or schema
of <literal>passthru</literal> - it is meant for values that would be of <literal>passthru</literal> - it is meant for values that would be
useful outside the derivation in other parts of a Nix expression (e.g. in useful outside the derivation in other parts of a Nix expression (e.g. in
other derivations). An example would be to convey some specific dependency other derivations). An example would be to convey some specific
of your derivation which contains a program with plugins support. Later, dependency of your derivation which contains a program with plugins
others who make derivations with plugins can use the passed-through support. Later, others who make derivations with plugins can use the
dependency to ensure that their plugin would be binary-compatible with passed-through dependency to ensure that their plugin would be
the built program. binary-compatible with the built program.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -677,9 +682,9 @@ passthru = {
</term> </term>
<listitem> <listitem>
<para> <para>
A script to be run by <filename>maintainers/scripts/update.nix</filename> when A script to be run by <filename>maintainers/scripts/update.nix</filename>
the package is matched. It needs to be an executable file, either on the file when the package is matched. It needs to be an executable file, either on
system: the file system:
<programlisting> <programlisting>
passthru.updateScript = ./update.sh; passthru.updateScript = ./update.sh;
</programlisting> </programlisting>
@ -695,16 +700,24 @@ passthru.updateScript = writeScript "update-zoom-us" ''
update-source-version zoom-us "$version" update-source-version zoom-us "$version"
''; '';
</programlisting> </programlisting>
The attribute can also contain a list, a script followed by arguments to be passed to it: The attribute can also contain a list, a script followed by arguments to
be passed to it:
<programlisting> <programlisting>
passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]; passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ];
</programlisting> </programlisting>
Note that the update scripts will be run in parallel by default; you should avoid running <command>git commit</command> or any other commands that cannot handle that. Note that the update scripts will be run in parallel by default; you
should avoid running <command>git commit</command> or any other commands
that cannot handle that.
</para> </para>
<para> <para>
For information about how to run the updates, execute For information about how to run the updates, execute
<cmdsynopsis><command>nix-shell</command> <arg>maintainers/scripts/update.nix</arg></cmdsynopsis>. <cmdsynopsis>
<command>nix-shell</command>
<arg>
maintainers/scripts/update.nix
</arg>
</cmdsynopsis>
.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -1178,8 +1191,8 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
By default, when cross compiling, the configure script has By default, when cross compiling, the configure script has
<option>--build=...</option> and <option>--host=...</option> passed. <option>--build=...</option> and <option>--host=...</option> passed.
Packages can instead pass <literal>[ "build" "host" "target" ]</literal> Packages can instead pass <literal>[ "build" "host" "target" ]</literal>
or a subset to control exactly which platform flags are passed. Compilers or a subset to control exactly which platform flags are passed.
and other tools can use this to also pass the target platform. Compilers and other tools can use this to also pass the target platform.
<footnote xml:id="footnote-stdenv-build-time-guessing-impurity"> <footnote xml:id="footnote-stdenv-build-time-guessing-impurity">
<para> <para>
Eventually these will be passed building natively as well, to improve Eventually these will be passed building natively as well, to improve
@ -1694,10 +1707,11 @@ installTargets = "install-bin install-doc";</programlisting>
</term> </term>
<listitem> <listitem>
<para> <para>
A package can export a <link linkend="ssec-setup-hooks">setup hook</link> A package can export a <link linkend="ssec-setup-hooks">setup
by setting this variable. The setup hook, if defined, is copied to hook</link> by setting this variable. The setup hook, if defined, is
<filename>$out/nix-support/setup-hook</filename>. Environment variables copied to <filename>$out/nix-support/setup-hook</filename>. Environment
are then substituted in it using <function variables are then substituted in it using
<function
linkend="fun-substituteAll">substituteAll</function>. linkend="fun-substituteAll">substituteAll</function>.
</para> </para>
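<para>
  A minimal sketch (the hook file name is illustrative):
<programlisting>
setupHook = ./setup-hook.sh;
</programlisting>
</para>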
</listitem> </listitem>
@ -1812,8 +1826,8 @@ set debug-file-directory ~/.nix-profile/lib/debug
<listitem> <listitem>
<para> <para>
A list of dependencies used by the phase. This gets included in A list of dependencies used by the phase. This gets included in
<varname>nativeBuildInputs</varname> when <varname>doInstallCheck</varname> is <varname>nativeBuildInputs</varname> when
set. <varname>doInstallCheck</varname> is set.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2160,10 +2174,11 @@ someVar=$(stripHash $name)
dependency derivation is already built just the same—depending is just dependency derivation is already built just the same—depending is just
needing something to exist, and needing is idempotent. However, a dependency needing something to exist, and needing is idempotent. However, a dependency
specified twice will have its setup hook run twice, and that could easily specified twice will have its setup hook run twice, and that could easily
change the build environment (though a well-written setup hook will therefore change the build environment (though a well-written setup hook will
strive to be idempotent so this is in fact not observable). More broadly, therefore strive to be idempotent so this is in fact not observable). More
setup hooks are anti-modular in that multiple dependencies, whether the same broadly, setup hooks are anti-modular in that multiple dependencies, whether
or different, should not interfere and yet their setup hooks may well do so. the same or different, should not interfere and yet their setup hooks may
well do so.
</para> </para>
<para> <para>
@ -2185,11 +2200,12 @@ someVar=$(stripHash $name)
Returning to the C compiler wrapper example, if the wrapper itself is an Returning to the C compiler wrapper example, if the wrapper itself is an
<literal>n</literal> dependency, then it only wants to accumulate flags from <literal>n</literal> dependency, then it only wants to accumulate flags from
<literal>n + 1</literal> dependencies, as only those ones match the <literal>n + 1</literal> dependencies, as only those ones match the
compiler's target platform. The <envar>hostOffset</envar> variable is defined compiler's target platform. The <envar>hostOffset</envar> variable is
with the current dependency's host offset <envar>targetOffset</envar> with defined with the current dependency's host offset
its target offset, before its setup hook is sourced. Additionally, since most <envar>targetOffset</envar> with its target offset, before its setup hook is
environment hooks don't care about the target platform, that means the setup sourced. Additionally, since most environment hooks don't care about the
hook can append to the right bash array by doing something like target platform, that means the setup hook can append to the right bash
array by doing something like
<programlisting language="bash"> <programlisting language="bash">
addEnvHooks "$hostOffset" myBashFunction addEnvHooks "$hostOffset" myBashFunction
</programlisting> </programlisting>
@ -2204,11 +2220,10 @@ addEnvHooks "$hostOffset" myBashFunction
</para> </para>
<para> <para>
First, let's cover some setup hooks that are part of Nixpkgs First, let's cover some setup hooks that are part of Nixpkgs default
default stdenv. This means that they are run for every package stdenv. This means that they are run for every package built using
built using <function>stdenv.mkDerivation</function>. Some of <function>stdenv.mkDerivation</function>. Some of these are platform
these are platform specific, so they may run on Linux but not specific, so they may run on Linux but not Darwin or vice-versa.
Darwin or vice-versa.
<variablelist> <variablelist>
<varlistentry> <varlistentry>
<term> <term>
@ -2217,10 +2232,9 @@ addEnvHooks "$hostOffset" myBashFunction
<listitem> <listitem>
<para> <para>
This setup hook moves any installed documentation to the This setup hook moves any installed documentation to the
<literal>/share</literal> subdirectory. This includes <literal>/share</literal> subdirectory. This includes the man,
the man, doc and info directories. This is needed for legacy doc and info directories. This is needed for legacy programs that do not
programs that do not know how to use the know how to use the <literal>share</literal> subdirectory.
<literal>share</literal> subdirectory.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2230,9 +2244,9 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
This setup hook compresses any man pages that have been This setup hook compresses any man pages that have been installed. The
installed. The compression is done using the gzip program. This compression is done using the gzip program. This helps to reduce the
helps to reduce the installed size of packages. installed size of packages.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2242,10 +2256,9 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
This runs the strip command on installed binaries and This runs the strip command on installed binaries and libraries. This
libraries. This removes unnecessary information like debug removes unnecessary information like debug symbols when they are not
symbols when they are not needed. This also helps to reduce the needed. This also helps to reduce the installed size of packages.
installed size of packages.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2255,13 +2268,12 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
This setup hook patches installed scripts to use the full path This setup hook patches installed scripts to use the full path to the
to the shebang interpreter. A shebang interpreter is the first shebang interpreter. A shebang interpreter is the first commented line
commented line of a script telling the operating system which of a script telling the operating system which program will run the
program will run the script (e.g. <literal>#!/bin/bash</literal>). In script (e.g. <literal>#!/bin/bash</literal>). In Nix, we want an exact
Nix, we want an exact path to that interpreter to be used. This path to that interpreter to be used. This often replaces
often replaces <literal>/bin/sh</literal> with a path in the <literal>/bin/sh</literal> with a path in the Nix store.
Nix store.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2271,10 +2283,10 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
This verifies that no references are left from the install This verifies that no references are left from the install binaries to
binaries to the directory used to build those binaries. This the directory used to build those binaries. This ensures that the
ensures that the binaries do not need things outside the Nix binaries do not need things outside the Nix store. This is currently
store. This is currently supported in Linux only. supported in Linux only.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2284,12 +2296,12 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
This setup hook adds configure flags that tell packages to This setup hook adds configure flags that tell packages to install files
install files into any one of the proper outputs listed in into any one of the proper outputs listed in <literal>outputs</literal>.
<literal>outputs</literal>. This behavior can be turned off by setting This behavior can be turned off by setting
<literal>setOutputFlags</literal> to false in the derivation <literal>setOutputFlags</literal> to false in the derivation
environment. See <xref linkend="chap-multiple-output"/> for environment. See <xref linkend="chap-multiple-output"/> for more
more information. information.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2299,9 +2311,9 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
This setup hook moves any binaries installed in the sbin This setup hook moves any binaries installed in the sbin subdirectory
subdirectory into bin. In addition, a link is provided from into bin. In addition, a link is provided from sbin to bin for
sbin to bin for compatibility. compatibility.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2311,9 +2323,9 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
This setup hook moves any libraries installed in the lib64 This setup hook moves any libraries installed in the lib64 subdirectory
subdirectory into lib. In addition, a link is provided from into lib. In addition, a link is provided from lib64 to lib for
lib64 to lib for compatibility. compatibility.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2323,8 +2335,8 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
This sets <literal>SOURCE_DATE_EPOCH</literal> to the This sets <literal>SOURCE_DATE_EPOCH</literal> to the modification time
modification time of the most recent file. of the most recent file.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2335,19 +2347,19 @@ addEnvHooks "$hostOffset" myBashFunction
<listitem> <listitem>
<para> <para>
The Bintools Wrapper wraps the binary utilities for a bunch of The Bintools Wrapper wraps the binary utilities for a bunch of
miscellaneous purposes. These are GNU Binutils when targetting Linux, and miscellaneous purposes. These are GNU Binutils when targetting Linux,
a mix of cctools and GNU binutils for Darwin. [The "Bintools" name is and a mix of cctools and GNU binutils for Darwin. [The "Bintools" name
supposed to be a compromise between "Binutils" and "cctools" not denoting is supposed to be a compromise between "Binutils" and "cctools" not
any specific implementation.] Specifically, the underlying bintools denoting any specific implementation.] Specifically, the underlying
package, and a C standard library (glibc or Darwin's libSystem, just for bintools package, and a C standard library (glibc or Darwin's libSystem,
the dynamic loader) are all fed in, and dependency finding, hardening just for the dynamic loader) are all fed in, and dependency finding,
(see below), and purity checks for each are handled by the Bintools hardening (see below), and purity checks for each are handled by the
Wrapper. Packages typically depend on CC Wrapper, which in turn (at run Bintools Wrapper. Packages typically depend on CC Wrapper, which in turn
time) depends on the Bintools Wrapper. (at run time) depends on the Bintools Wrapper.
</para> </para>
<para> <para>
The Bintools Wrapper was only just recently split off from CC Wrapper, so The Bintools Wrapper was only just recently split off from CC Wrapper,
the division of labor is still being worked out. For example, it so the division of labor is still being worked out. For example, it
shouldn't care about the C standard library, but just take a shouldn't care about the C standard library, but just take a
derivation with the dynamic loader (which happens to be the glibc on derivation with the dynamic loader (which happens to be the glibc on
linux). Dependency finding however is a task both wrappers will continue linux). Dependency finding however is a task both wrappers will continue
@ -2357,11 +2369,12 @@ addEnvHooks "$hostOffset" myBashFunction
<varname>nativeBuildInputs</varname>) in environment variables. The <varname>nativeBuildInputs</varname>) in environment variables. The
Bintools Wrapper's setup hook causes any <filename>lib</filename> and Bintools Wrapper's setup hook causes any <filename>lib</filename> and
<filename>lib64</filename> subdirectories to be added to <filename>lib64</filename> subdirectories to be added to
<envar>NIX_LDFLAGS</envar>. Since the CC Wrapper and the Bintools Wrapper <envar>NIX_LDFLAGS</envar>. Since the CC Wrapper and the Bintools
use the same strategy, most of the Bintools Wrapper code is sparsely Wrapper use the same strategy, most of the Bintools Wrapper code is
commented and refers to the CC Wrapper. But the CC Wrapper's code, by sparsely commented and refers to the CC Wrapper. But the CC Wrapper's
contrast, has quite lengthy comments. The Bintools Wrapper merely cites code, by contrast, has quite lengthy comments. The Bintools Wrapper
those, rather than repeating them, to avoid falling out of sync. merely cites those, rather than repeating them, to avoid falling out of
sync.
</para> </para>
<para> <para>
A final task of the setup hook is defining a number of standard A final task of the setup hook is defining a number of standard
@ -2370,8 +2383,8 @@ addEnvHooks "$hostOffset" myBashFunction
under the assumption that the Bintools Wrapper's binaries will be on the under the assumption that the Bintools Wrapper's binaries will be on the
path. Firstly, this helps poorly-written packages, e.g. ones that look path. Firstly, this helps poorly-written packages, e.g. ones that look
for just <command>gcc</command> when <envar>CC</envar> isn't defined yet for just <command>gcc</command> when <envar>CC</envar> isn't defined yet
<command>clang</command> is to be used. Secondly, this helps packages not <command>clang</command> is to be used. Secondly, this helps packages
get confused when cross-compiling, in which case multiple Bintools not get confused when cross-compiling, in which case multiple Bintools
Wrappers may simultaneously be in use. Wrappers may simultaneously be in use.
<footnote xml:id="footnote-stdenv-per-platform-wrapper"> <footnote xml:id="footnote-stdenv-per-platform-wrapper">
<para> <para>
@ -2387,16 +2400,16 @@ addEnvHooks "$hostOffset" myBashFunction
Wrappers, properly disambiguating them. Wrappers, properly disambiguating them.
</para> </para>
<para> <para>
A problem with this final task is that the Bintools Wrapper is honest and A problem with this final task is that the Bintools Wrapper is honest
defines <envar>LD</envar> as <command>ld</command>. Most packages, and defines <envar>LD</envar> as <command>ld</command>. Most packages,
however, firstly use the C compiler for linking, secondly use however, firstly use the C compiler for linking, secondly use
<envar>LD</envar> anyways, defining it as the C compiler, and thirdly, <envar>LD</envar> anyways, defining it as the C compiler, and thirdly,
only so define <envar>LD</envar> when it is undefined as a fallback. This only so define <envar>LD</envar> when it is undefined as a fallback.
triple-threat means Bintools Wrapper will break those packages, as LD is This triple-threat means Bintools Wrapper will break those packages, as
already defined as the actual linker which the package won't override yet LD is already defined as the actual linker which the package won't
doesn't want to use. The workaround is to define, just for the override yet doesn't want to use. The workaround is to define, just for
problematic package, <envar>LD</envar> as the C compiler. A good way to the problematic package, <envar>LD</envar> as the C compiler. A good way
do this would be <command>preConfigure = "LD=$CC"</command>. to do this would be <command>preConfigure = "LD=$CC"</command>.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2406,13 +2419,13 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
The CC Wrapper wraps a C toolchain for a bunch of miscellaneous purposes. The CC Wrapper wraps a C toolchain for a bunch of miscellaneous
Specifically, a C compiler (GCC or Clang), wrapped binary tools, and a C purposes. Specifically, a C compiler (GCC or Clang), wrapped binary
standard library (glibc or Darwin's libSystem, just for the dynamic tools, and a C standard library (glibc or Darwin's libSystem, just for
loader) are all fed in, and dependency finding, hardening (see below), the dynamic loader) are all fed in, and dependency finding, hardening
and purity checks for each are handled by the CC Wrapper. Packages (see below), and purity checks for each are handled by the CC Wrapper.
typically depend on the CC Wrapper, which in turn (at run-time) depends Packages typically depend on the CC Wrapper, which in turn (at run-time)
on the Bintools Wrapper. depends on the Bintools Wrapper.
</para> </para>
<para> <para>
Dependency finding is undoubtedly the main task of the CC Wrapper. This Dependency finding is undoubtedly the main task of the CC Wrapper. This
@ -2438,10 +2451,9 @@ addEnvHooks "$hostOffset" myBashFunction
</para> </para>
<para> <para>
Here are some more packages that provide a setup hook. Since the Here are some more packages that provide a setup hook. Since the list of
list of hooks is extensible, this is not an exhaustive list. As the hooks is extensible, this is not an exhaustive list. As the mechanism is
mechanism is only to be used as a last resort, it might cover most only to be used as a last resort, it might cover most uses.
uses.
<variablelist> <variablelist>
<varlistentry> <varlistentry>
<term> <term>
@ -2499,11 +2511,11 @@ addEnvHooks "$hostOffset" myBashFunction
<listitem> <listitem>
<para> <para>
The <varname>autoreconfHook</varname> derivation adds The <varname>autoreconfHook</varname> derivation adds
<varname>autoreconfPhase</varname>, which runs autoreconf, libtoolize and <varname>autoreconfPhase</varname>, which runs autoreconf, libtoolize
automake, essentially preparing the configure script in autotools-based and automake, essentially preparing the configure script in
builds. Most autotools-based packages come with the configure script autotools-based builds. Most autotools-based packages come with the
pre-generated, but this hook is necessary for a few packages and when you configure script pre-generated, but this hook is necessary for a few
need to patch the package's configure scripts. packages and when you need to patch the package's configure scripts.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2547,9 +2559,9 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
Exports <envar>GDK_PIXBUF_MODULE_FILE</envar> environment variable to the Exports <envar>GDK_PIXBUF_MODULE_FILE</envar> environment variable to
builder. Add librsvg package to <varname>buildInputs</varname> to get svg the builder. Add librsvg package to <varname>buildInputs</varname> to
support. get svg support.
</para> </para>
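<para>
  For example, to get svg support (assuming <literal>librsvg</literal> is
  in scope):
<programlisting>
buildInputs = [ librsvg ];
</programlisting>
</para>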
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2595,14 +2607,13 @@ addEnvHooks "$hostOffset" myBashFunction
<para> <para>
This is useful for programs that use <citerefentry> This is useful for programs that use <citerefentry>
<refentrytitle>dlopen</refentrytitle> <refentrytitle>dlopen</refentrytitle>
<manvolnum>3</manvolnum> <manvolnum>3</manvolnum> </citerefentry> to load libraries at runtime.
</citerefentry> to load libraries at runtime.
</para> </para>
<para> <para>
In certain situations you may want to run the main command In certain situations you may want to run the main command
(<command>autoPatchelf</command>) of the setup hook on a file or a set (<command>autoPatchelf</command>) of the setup hook on a file or a set
of directories instead of unconditionally patching all outputs. This of directories instead of unconditionally patching all outputs. This can
can be done by setting the <envar>dontAutoPatchelf</envar> environment be done by setting the <envar>dontAutoPatchelf</envar> environment
variable to a non-empty value. variable to a non-empty value.
</para> </para>
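<para>
  A hedged sketch of that workflow (the directory is illustrative, and the
  hook is assumed to be available as <literal>autoPatchelfHook</literal>):
<programlisting>
nativeBuildInputs = [ autoPatchelfHook ];
dontAutoPatchelf = true;
postFixup = ''
  autoPatchelf $out/opt/example
'';
</programlisting>
</para>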
<para> <para>
@ -2619,22 +2630,22 @@ addEnvHooks "$hostOffset" myBashFunction
<listitem> <listitem>
<para> <para>
This hook will make a build pause instead of stopping when a failure This hook will make a build pause instead of stopping when a failure
happens. It prevents nix from cleaning up the build environment immediately and happens. It prevents nix from cleaning up the build environment
allows the user to attach to a build environment using the immediately and allows the user to attach to a build environment using
<command>cntr</command> command. Upon build error it will print the <command>cntr</command> command. Upon build error it will print
instructions on how to use <command>cntr</command>. Installing instructions on how to use <command>cntr</command>. Installing cntr and
cntr and running the command will provide shell access to the build running the command will provide shell access to the build sandbox of
sandbox of failed build. At <filename>/var/lib/cntr</filename> the failed build. At <filename>/var/lib/cntr</filename> the sandboxed
sandboxed filesystem is mounted. All commands and files of the system are filesystem is mounted. All commands and files of the system are still
still accessible within the shell. To execute commands from the sandbox accessible within the shell. To execute commands from the sandbox use
use the cntr exec subcommand. Note that <command>cntr</command> also the cntr exec subcommand. Note that <command>cntr</command> also needs
needs to be executed on the machine that is doing the build, which might to be executed on the machine that is doing the build, which might not
not be the case when remote builders are enabled. be the case when remote builders are enabled. <command>cntr</command> is
<command>cntr</command> is only supported on Linux-based platforms. To only supported on Linux-based platforms. To use it first add
use it first add <literal>cntr</literal> to your <literal>cntr</literal> to your
<literal>environment.systemPackages</literal> on NixOS or alternatively to <literal>environment.systemPackages</literal> on NixOS or alternatively
the root user on non-NixOS systems. Then in the package that is supposed to the root user on non-NixOS systems. Then in the package that is
to be inspected, add <literal>breakpointHook</literal> to supposed to be inspected, add <literal>breakpointHook</literal> to
<literal>nativeBuildInputs</literal>. <literal>nativeBuildInputs</literal>.
<programlisting> <programlisting>
nativeBuildInputs = [ breakpointHook ]; nativeBuildInputs = [ breakpointHook ];
@ -2650,14 +2661,13 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
A few libraries automatically add their library to
<literal>NIX_LDFLAGS</literal>, making their symbols automatically
available to the linker. This includes libiconv and libintl (gettext).
This is done to provide compatibility between GNU/Linux, where libiconv
and libintl are bundled in, and other systems where that might not be
the case. Sometimes, this behavior is not desired. To disable it, set
<literal>dontAddExtraLibs</literal>.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
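<para>
  A minimal sketch of opting out in a package expression (assuming an
  ordinary <literal>stdenv.mkDerivation</literal> call; the package name
  is illustrative):
</para>
<programlisting>
stdenv.mkDerivation {
  name = "example-1.0";
  # Keep libiconv/libintl from injecting themselves into NIX_LDFLAGS.
  dontAddExtraLibs = true;
}
</programlisting>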
@ -2668,15 +2678,15 @@ addEnvHooks "$hostOffset" myBashFunction
<listitem> <listitem>
<para> <para>
Overrides the default configure phase to run the CMake command. By
default, we use the Make generator of CMake. In addition, dependencies
are added automatically to CMAKE_PREFIX_PATH so that packages are
correctly detected by CMake. Some additional flags are passed in to give
similar behavior to configure-based packages. You can disable this
hook's behavior by setting configurePhase to a custom value, or by
setting dontUseCmakeConfigure. cmakeFlags controls flags passed only to
CMake. By default, parallel building is enabled as CMake supports
parallel building almost everywhere. When Ninja is also in use, CMake
will detect that and use the ninja generator.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
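<para>
  For illustration, a CMake-based package that passes an extra flag while
  keeping the hook's default configure phase might look like this (the
  package name and flag are hypothetical):
</para>
<programlisting>
stdenv.mkDerivation {
  name = "example-1.0";
  nativeBuildInputs = [ cmake ];
  # cmakeFlags are passed only to the cmake invocation.
  cmakeFlags = [ "-DBUILD_TESTING=OFF" ];
}
</programlisting>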
@ -2689,8 +2699,8 @@ addEnvHooks "$hostOffset" myBashFunction
Overrides the build and install phases to run the “xcbuild” command.
This hook is needed when a project only comes with build files for the
Xcode build system. You can disable this behavior by setting buildPhase
and configurePhase to a custom value. xcbuildFlags controls flags passed
only to xcbuild.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2703,8 +2713,8 @@ addEnvHooks "$hostOffset" myBashFunction
Overrides the configure phase to run meson to generate Ninja files. You
can disable this behavior by setting configurePhase to a custom value,
or by setting dontUseMesonConfigure. To run these files, you should
accompany meson with ninja. mesonFlags controls only the flags passed to
meson. By default, parallel building is enabled as Meson supports
parallel building almost everywhere.
</para> </para>
</listitem> </listitem>
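<para>
  A hedged sketch of a Meson-based package, pairing meson with ninja as
  described above (the package name and flag are hypothetical):
</para>
<programlisting>
stdenv.mkDerivation {
  name = "example-1.0";
  nativeBuildInputs = [ meson ninja ];
  # mesonFlags only affects the meson configure step.
  mesonFlags = [ "-Ddocs=false" ];
}
</programlisting>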


@ -59,7 +59,7 @@ let
stringLength sub substring tail;
inherit (trivial) id const concat or and bitAnd bitOr bitXor bitNot
  boolToString mergeAttrs flip mapNullable inNixShell min max
  importJSON warn info showWarnings nixpkgsVersion version mod compare
  splitByAndCompare functionArgs setFunctionArgs isFunction;
inherit (fixedPoints) fix fix' converge extends composeExtensions
  makeExtensible makeExtensibleWithCustomName;
@ -109,7 +109,7 @@ let
mkFixStrictness mkOrder mkBefore mkAfter mkAliasDefinitions
  mkAliasAndWrapDefinitions fixMergeModules mkRemovedOptionModule
  mkRenamedOptionModule mkMergedOptionModule mkChangedOptionModule
  mkAliasOptionModule doRename filterModules;
inherit (options) isOption mkEnableOption mkSinkUndeclaredOptions
  mergeDefaultOption mergeOneOption mergeEqualOption getValues
  getFiles optionAttrSetToDocList optionAttrSetToDocList'


@ -561,6 +561,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "OpenSSL License";
};

osl2 = spdx {
  spdxId = "OSL-2.0";
  fullName = "Open Software License 2.0";
};

osl21 = spdx {
  spdxId = "OSL-2.1";
  fullName = "Open Software License 2.1";


@ -476,8 +476,22 @@ rec {
optionSet to options of type submodule. FIXME: remove
eventually. */
fixupOptionType = loc: opt:
let
options = opt.options or
(throw "Option `${showOption loc'}' has type optionSet but has no option attribute, in ${showFiles opt.declarations}.");
f = tp:
let optionSetIn = type: (tp.name == type) && (tp.functor.wrapped.name == "optionSet");
in
if tp.name == "option set" || tp.name == "submodule" then
throw "The option ${showOption loc} uses submodules without a wrapping type, in ${showFiles opt.declarations}."
else if optionSetIn "attrsOf" then types.attrsOf (types.submodule options)
else if optionSetIn "loaOf" then types.loaOf (types.submodule options)
else if optionSetIn "listOf" then types.listOf (types.submodule options)
else if optionSetIn "nullOr" then types.nullOr (types.submodule options)
else tp;
in
if opt.type.getSubModules or null == null
then opt // { type = f (opt.type or types.unspecified); }
else opt // { type = opt.type.substSubModules opt.options; options = []; };
@ -596,6 +610,9 @@ rec {
forwards any definitions of boot.copyKernels to
boot.loader.grub.copyKernels while printing a warning.
This also copies over the priority from the aliased option to the
non-aliased option.
*/ */
mkRenamedOptionModule = from: to: doRename { mkRenamedOptionModule = from: to: doRename {
inherit from to; inherit from to;
@ -690,16 +707,7 @@ rec {
use = id; use = id;
}; };
/* Like mkAliasOptionModule, but copy over the priority of the option as well. */ doRename = { from, to, visible, warn, use, withPriority ? true }:
mkAliasOptionModuleWithPriority = from: to: doRename {
inherit from to;
visible = true;
warn = false;
use = id;
withPriority = true;
};
doRename = { from, to, visible, warn, use, withPriority ? false }:
{ config, options, ... }: { config, options, ... }:
let let
fromOpt = getAttrFromPath from options; fromOpt = getAttrFromPath from options;


@ -48,6 +48,8 @@ rec {
visible ? null, visible ? null,
# Whether the option can be set only once # Whether the option can be set only once
readOnly ? null, readOnly ? null,
# Deprecated, used by types.optionSet.
options ? null
} @ attrs: } @ attrs:
attrs // { _type = "option"; }; attrs // { _type = "option"; };
@ -141,7 +143,7 @@ rec {
docOption = rec { docOption = rec {
loc = opt.loc; loc = opt.loc;
name = showOption opt.loc; name = showOption opt.loc;
description = opt.description or (lib.warn "Option `${name}' has no description." "This option has no description.");
declarations = filter (x: x != unknownModule) opt.declarations; declarations = filter (x: x != unknownModule) opt.declarations;
internal = opt.internal or false; internal = opt.internal or false;
visible = opt.visible or true; visible = opt.visible or true;


@ -24,6 +24,8 @@ rec {
config = parse.tripleFromSystem final.parsed; config = parse.tripleFromSystem final.parsed;
# Just a guess, based on `system` # Just a guess, based on `system`
platform = platforms.selectBySystem final.system; platform = platforms.selectBySystem final.system;
# Determine whether we are compatible with the provided CPU
isCompatible = platform: parse.isCompatible final.parsed.cpu platform.parsed.cpu;
# Derived meta-data # Derived meta-data
libc = libc =
/**/ if final.isDarwin then "libSystem" /**/ if final.isDarwin then "libSystem"
@ -98,13 +100,14 @@ rec {
wine = (pkgs.winePackagesFor wine-name).minimal; wine = (pkgs.winePackagesFor wine-name).minimal;
in in
if final.parsed.kernel.name == pkgs.stdenv.hostPlatform.parsed.kernel.name &&
   pkgs.stdenv.hostPlatform.isCompatible final
then "${pkgs.runtimeShell} -c"
else if final.isWindows
then "${wine}/bin/${wine-name}"
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux
then "${qemu-user}/bin/qemu-${final.qemuArch}"
else if final.isWasm
then "${pkgs.v8}/bin/d8"
else throw "Don't know how to run ${final.config} executables.";
} // mapAttrs (n: v: v final.parsed) inspect.predicates } // mapAttrs (n: v: v final.parsed) inspect.predicates


@ -21,6 +21,7 @@ rec {
isSparc = { cpu = { family = "sparc"; }; };
isWasm = { cpu = { family = "wasm"; }; };
isAvr = { cpu = { family = "avr"; }; };
isAlpha = { cpu = { family = "alpha"; }; };
is32bit = { cpu = { bits = 32; }; };
is64bit = { cpu = { bits = 64; }; };


@ -112,6 +112,66 @@ rec {
avr = { bits = 8; family = "avr"; };
};
# Determine whether two CPUs are compatible with each other. That is,
# can we run code built for system b on system a? For that to happen,
# the set of all possible programs that system b accepts must be a
# subset of the set of all programs that system a accepts. This
# compatibility relation forms a category where each CPU is an object
# and each arrow from a to b represents compatibility. CPUs with
# multiple modes of endianness are isomorphic, while every CPU is
# endomorphic because any program built for a CPU can run on that CPU.
isCompatible = a: b: with cpuTypes; lib.any lib.id [
# x86
(b == i386 && isCompatible a i486)
(b == i486 && isCompatible a i586)
(b == i586 && isCompatible a i686)
# NOTE: Not true in some cases. Like in WSL mode.
(b == i686 && isCompatible a x86_64)
# ARM
(b == arm && isCompatible a armv5tel)
(b == armv5tel && isCompatible a armv6m)
(b == armv6m && isCompatible a armv6l)
(b == armv6l && isCompatible a armv7a)
(b == armv7a && isCompatible a armv7r)
(b == armv7r && isCompatible a armv7m)
(b == armv7m && isCompatible a armv7l)
(b == armv7l && isCompatible a armv8a)
(b == armv8a && isCompatible a armv8r)
(b == armv8r && isCompatible a armv8m)
# NOTE: not always true! Some arm64 CPUs don't support arm32 mode.
(b == armv8m && isCompatible a aarch64)
(b == aarch64 && a == aarch64_be)
(b == aarch64_be && isCompatible a aarch64)
# PowerPC
(b == powerpc && isCompatible a powerpc64)
(b == powerpcle && isCompatible a powerpc)
(b == powerpc && a == powerpcle)
(b == powerpc64le && isCompatible a powerpc64)
(b == powerpc64 && a == powerpc64le)
# MIPS
(b == mips && isCompatible a mips64)
(b == mips && a == mipsel)
(b == mipsel && isCompatible a mips)
(b == mips64 && a == mips64el)
(b == mips64el && isCompatible a mips64)
# RISCV
(b == riscv32 && isCompatible a riscv64)
# SPARC
(b == sparc && isCompatible a sparc64)
# WASM
(b == wasm32 && isCompatible a wasm64)
# identity
(b == a)
];
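# For instance, given the rules above one would expect (a hypothetical
# `nix repl` check; the attribute path assumes this file is exposed as
# lib.systems.parse):
#
#   isCompatible cpuTypes.x86_64 cpuTypes.i686   => true
#     (an x86_64 CPU accepts i686 programs via the chain i686 -> x86_64)
#   isCompatible cpuTypes.i686 cpuTypes.x86_64   => false
#     (code built for x86_64 cannot run on a plain i686)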
################################################################################ ################################################################################
types.openVendor = mkOptionType { types.openVendor = mkOptionType {


@ -149,7 +149,7 @@ checkConfigOutput "1 2 3 4 5 6 7 8 9 10" config.result ./loaOf-with-long-list.ni
# Check loaOf with many merges of lists.
checkConfigOutput "1 2 3 4 5 6 7 8 9 10" config.result ./loaOf-with-many-list-merges.nix

# Check mkAliasOptionModule.
checkConfigOutput "true" config.enable ./alias-with-priority.nix
checkConfigOutput "true" config.enableAlias ./alias-with-priority.nix
checkConfigOutput "false" config.enable ./alias-with-priority-can-override.nix


@ -1,5 +1,8 @@
# This is a test to show that mkAliasOptionModule sets the priority correctly
# for aliased options.
#
# This test shows that an alias with a high priority is able to override
# a non-aliased option.

{ config, lib, ... }:
@ -32,10 +35,10 @@ with lib;
imports = [
  # Create an alias for the "enable" option.
  (mkAliasOptionModule [ "enableAlias" ] [ "enable" ])

  # Disable the aliased option with a high priority so it
  # should override the next import.
  ( { config, lib, ... }:
    {
      enableAlias = lib.mkForce false;


@ -1,5 +1,8 @@
# This is a test to show that mkAliasOptionModule sets the priority correctly
# for aliased options.
#
# This test shows that an alias with a low priority is able to be overridden
# with a non-aliased option.

{ config, lib, ... }:
@ -32,7 +35,7 @@ with lib;
imports = [
  # Create an alias for the "enable" option.
  (mkAliasOptionModule [ "enableAlias" ] [ "enable" ])

  # Disable the aliased option, but with a default (low) priority so it
  # should be able to be overridden by the next import.


@ -134,7 +134,7 @@ rec {
On each release the first letter is bumped and a new animal is chosen
starting with that new letter.
*/
codeName = "Loris";

/* Returns the current nixpkgs version suffix as string. */
versionSuffix =
@ -259,9 +259,10 @@ rec {
# TODO: figure out a clever way to integrate location information from
# something like __unsafeGetAttrPos.
warn = msg: builtins.trace "warning: ${msg}";
info = msg: builtins.trace "INFO: ${msg}";
showWarnings = warnings: res: lib.fold (w: x: warn w x) res warnings;
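# Usage sketch: `showWarnings [ "foo is deprecated" ] value` emits each
# warning via `warn` and then returns `value` unchanged.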
## Function annotations ## Function annotations


@ -469,8 +469,10 @@ rec {
# Obsolete alternative to configOf. It takes its option
# declarations from the options attribute of containing option
# declaration.
optionSet = mkOptionType {
  name = builtins.trace "types.optionSet is deprecated; use types.submodule instead" "optionSet";
  description = "option set";
};
# Augment the given type with an additional type check function. # Augment the given type with an additional type check function.
addCheck = elemType: check: elemType // { check = x: elemType.check x && check x; }; addCheck = elemType: check: elemType // { check = x: elemType.check x && check x; };


@ -38,6 +38,15 @@
See `./scripts/check-maintainer-github-handles.sh` for an example on how to work with this data. See `./scripts/check-maintainer-github-handles.sh` for an example on how to work with this data.
*/ */
{ {
"0x4A6F" = {
email = "0x4A6F@shackspace.de";
name = "Joachim Ernst";
github = "0x4A6F";
keys = [{
longkeyid = "rsa8192/0x87027528B006D66D";
fingerprint = "F466 A548 AD3F C1F1 8C88 4576 8702 7528 B006 D66D";
}];
};
"1000101" = { "1000101" = {
email = "jan.hrnko@satoshilabs.com"; email = "jan.hrnko@satoshilabs.com";
github = "1000101"; github = "1000101";
@ -752,6 +761,11 @@
github = "calbrecht"; github = "calbrecht";
name = "Christian Albrecht"; name = "Christian Albrecht";
}; };
callahad = {
email = "dan.callahan@gmail.com";
github = "callahad";
name = "Dan Callahan";
};
calvertvl = { calvertvl = {
email = "calvertvl@gmail.com"; email = "calvertvl@gmail.com";
github = "calvertvl"; github = "calvertvl";
@ -812,6 +826,11 @@
github = "cdepillabout"; github = "cdepillabout";
name = "Dennis Gosnell"; name = "Dennis Gosnell";
}; };
ceedubs = {
email = "ceedubs@gmail.com";
github = "ceedubs";
name = "Cody Allen";
};
cfouche = { cfouche = {
email = "chaddai.fouche@gmail.com"; email = "chaddai.fouche@gmail.com";
github = "Chaddai"; github = "Chaddai";
@ -1618,6 +1637,10 @@
email = "fpletz@fnordicwalking.de"; email = "fpletz@fnordicwalking.de";
github = "fpletz"; github = "fpletz";
name = "Franz Pletz"; name = "Franz Pletz";
keys = [{
longkeyid = "rsa4096/0x846FDED7792617B4";
fingerprint = "8A39 615D CE78 AF08 2E23 F303 846F DED7 7926 17B4";
}];
}; };
fps = { fps = {
email = "mista.tapas@gmx.net"; email = "mista.tapas@gmx.net";
@ -2223,10 +2246,6 @@
github = "jmettes"; github = "jmettes";
name = "Jonathan Mettes"; name = "Jonathan Mettes";
}; };
Jo = {
email = "0x4A6F@shackspace.de";
name = "Joachim Ernst";
};
joachifm = { joachifm = {
email = "joachifm@fastmail.fm"; email = "joachifm@fastmail.fm";
github = "joachifm"; github = "joachifm";
@ -2284,6 +2303,11 @@
joko = { joko = {
email = "ioannis.koutras@gmail.com"; email = "ioannis.koutras@gmail.com";
github = "jokogr"; github = "jokogr";
keys = [{
# compare with https://keybase.io/joko
longkeyid = "rsa2048/0x85EAE7D9DF56C5CA";
fingerprint = "B154 A8F9 0610 DB45 0CA8 CF39 85EA E7D9 DF56 C5CA";
}];
name = "Ioannis Koutras"; name = "Ioannis Koutras";
}; };
jonafato = { jonafato = {
@ -2439,6 +2463,11 @@
github = "kisonecat"; github = "kisonecat";
name = "Jim Fowler"; name = "Jim Fowler";
}; };
kjuvi = {
email = "quentin.vaucher@pm.me";
github = "kjuvi";
name = "Quentin Vaucher";
};
kkallio = { kkallio = {
email = "tierpluspluslists@gmail.com"; email = "tierpluspluslists@gmail.com";
name = "Karn Kallio"; name = "Karn Kallio";
@ -2619,6 +2648,11 @@
github = "lihop"; github = "lihop";
name = "Leroy Hopson"; name = "Leroy Hopson";
}; };
lilyball = {
email = "lily@sb.org";
github = "lilyball";
name = "Lily Ballard";
};
limeytexan = { limeytexan = {
email = "limeytexan@gmail.com"; email = "limeytexan@gmail.com";
github = "limeytexan"; github = "limeytexan";
@ -4303,6 +4337,15 @@
github = "sleexyz"; github = "sleexyz";
name = "Sean Lee"; name = "Sean Lee";
}; };
smakarov = {
email = "setser200018@gmail.com";
github = "setser";
name = "Sergey Makarov";
keys = [{
longkeyid = "rsa2048/6AA23A1193B7064B";
fingerprint = "6F8A 18AE 4101 103F 3C54 24B9 6AA2 3A11 93B7 064B";
}];
};
smaret = { smaret = {
email = "sebastien.maret@icloud.com"; email = "sebastien.maret@icloud.com";
github = "smaret"; github = "smaret";
@ -4337,6 +4380,15 @@
github = "solson"; github = "solson";
name = "Scott Olson"; name = "Scott Olson";
}; };
sondr3 = {
email = "nilsen.sondre@gmail.com";
github = "sondr3";
name = "Sondre Nilsen";
keys = [{
longkeyid = "ed25519/0x25676BCBFFAD76B1";
fingerprint = "0EC3 FA89 EFBA B421 F82E 40B0 2567 6BCB FFAD 76B1";
}];
};
sorki = { sorki = {
email = "srk@48.io"; email = "srk@48.io";
github = "sorki"; github = "sorki";
@ -4876,9 +4928,13 @@
name = "Vincent Bernardoff"; name = "Vincent Bernardoff";
}; };
vcunat = { vcunat = {
email = "vcunat@gmail.com";
github = "vcunat";
name = "Vladimír Čunát"; name = "Vladimír Čunát";
email = "v@cunat.cz"; # vcunat@gmail.com predominated in commits before 2019/03
github = "vcunat";
keys = [{
longkeyid = "rsa4096/0xE747DF1F9575A3AA";
fingerprint = "B600 6460 B60A 80E7 8206 2449 E747 DF1F 9575 A3AA";
}];
}; };
vdemeester = { vdemeester = {
email = "vincent@sbr.pm"; email = "vincent@sbr.pm";
@ -5001,6 +5057,11 @@
email = "windenntw@gmail.com"; email = "windenntw@gmail.com";
name = "Antonio Vargas Gonzalez"; name = "Antonio Vargas Gonzalez";
}; };
winpat = {
email = "patrickwinter@posteo.ch";
github = "winpat";
name = "Patrick Winter";
};
wizeman = { wizeman = {
email = "rcorreia@wizy.org"; email = "rcorreia@wizy.org";
github = "wizeman"; github = "wizeman";
@ -5027,7 +5088,7 @@
name = "Kranium Gikos Mendoza"; name = "Kranium Gikos Mendoza";
}; };
worldofpeace = { worldofpeace = {
email = "worldofpeace@users.noreply.github.com"; email = "worldofpeace@protonmail.ch";
github = "worldofpeace"; github = "worldofpeace";
name = "Worldofpeace"; name = "Worldofpeace";
}; };
@ -5240,4 +5301,9 @@
github = "shmish111"; github = "shmish111";
name = "David Smith"; name = "David Smith";
}; };
minijackson = {
email = "minijackson@riseup.net";
github = "minijackson";
name = "Rémi Nicole";
};
} }


@ -1,7 +1,7 @@
# nix name, luarocks name, server, version/additionnal args
ansicolors, ansicolors,
argparse, argparse,
basexx, basexx,
cqueues
dkjson dkjson
fifo fifo
inspect inspect
@ -18,15 +18,15 @@ lua-term,
luabitop, luabitop,
luaevent, luaevent,
luacheck luacheck
luaffi,http://luarocks.org/dev, luaffi,,http://luarocks.org/dev,
luuid, luuid,
penlight, penlight,
say, say,
luv, luv,
luasystem, luasystem,
mediator_lua,http://luarocks.org/manifests/teto mediator_lua,,http://luarocks.org/manifests/teto
mpack,http://luarocks.org/manifests/teto mpack,,http://luarocks.org/manifests/teto
nvim-client,http://luarocks.org/manifests/teto nvim-client,,http://luarocks.org/manifests/teto
busted,http://luarocks.org/manifests/teto busted,,http://luarocks.org/manifests/teto
luassert,http://luarocks.org/manifests/teto luassert,,http://luarocks.org/manifests/teto
coxpcall,https://luarocks.org/manifests/hisham,1.17.0-1 coxpcall,,https://luarocks.org/manifests/hisham,1.17.0-1



@ -61,7 +61,7 @@ nixpkgs$ ${0} ${GENERATED_NIXFILE}
These packages are manually refined in lua-overrides.nix These packages are manually refined in lua-overrides.nix
*/ */
{ self, lua, stdenv, fetchurl, fetchgit, pkgs, ... } @ args: { self, stdenv, fetchurl, fetchgit, pkgs, ... } @ args:
self: super: self: super:
with self; with self;
{ {
@ -74,17 +74,18 @@ FOOTER="
function convert_pkg () { function convert_pkg () {
pkg="$1" nix_pkg_name="$1"
lua_pkg_name="$2"
server="" server=""
if [ ! -z "$2" ]; then if [ ! -z "$3" ]; then
server=" --server=$2" server=" --server=$3"
fi fi
version="${3:-}" version="${3:-}"
echo "looking at $pkg (version $version) from server [$server]" >&2 echo "looking at $lua_pkg_name (version $version) from server [$server]" >&2
cmd="luarocks nix $server $pkg $version" cmd="luarocks nix $server $lua_pkg_name $version"
drv="$($cmd)" drv="$nix_pkg_name = $($cmd)"
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Failed to convert $pkg" >&2 echo "Failed to convert $pkg" >&2
echo "$drv" >&2 echo "$drv" >&2
@ -98,12 +99,17 @@ echo "$HEADER" | tee "$TMP_FILE"
# list of packages with format # list of packages with format
# name,server,version # name,server,version
while IFS=, read -r pkg_name server version while IFS=, read -r nix_pkg_name lua_pkg_name server version
do do
if [ -z "$pkg_name" ]; then if [ "${nix_pkg_name:0:1}" == "#" ]; then
echo "Skipping empty package name" >&2 echo "Skipping comment ${nix_pkg_name}" >&2
continue
fi fi
convert_pkg "$pkg_name" "$server" "$version" if [ -z "$lua_pkg_name" ]; then
echo "Using nix_name as lua_pkg_name" >&2
lua_pkg_name="$nix_pkg_name"
fi
convert_pkg "$nix_pkg_name" "$lua_pkg_name" "$server" "$version"
done < "$CSV_FILE" done < "$CSV_FILE"
# close the set # close the set


@ -4,7 +4,7 @@
version="5.0"
xml:id="ch-running">
<title>Administration</title>
<partintro xml:id="ch-running-intro">
<para>
This chapter describes various aspects of managing a running NixOS system,
such as how to use the <command>systemd</command> service manager.


@ -4,7 +4,7 @@
version="5.0"
xml:id="ch-configuration">
<title>Configuration</title>
<partintro xml:id="ch-configuration-intro">
<para>
This chapter describes how to configure various aspects of a NixOS machine
through the configuration file
@ -23,5 +23,6 @@
<xi:include href="linux-kernel.xml" />
<xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
<xi:include href="profiles.xml" />
<xi:include href="kubernetes.xml" />
<!-- Apache; libvirtd virtualisation -->
</part>


@ -0,0 +1,127 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-kubernetes">
<title>Kubernetes</title>
<para>
The NixOS Kubernetes module is a collective term for a handful of
individual submodules implementing the Kubernetes cluster components.
</para>
<para>
There are generally two ways of enabling Kubernetes on NixOS.
One way is to enable and configure cluster components appropriately by hand:
<programlisting>
services.kubernetes = {
apiserver.enable = true;
controllerManager.enable = true;
scheduler.enable = true;
addonManager.enable = true;
proxy.enable = true;
flannel.enable = true;
};
</programlisting>
Another way is to assign cluster roles ("master" and/or "node") to the host.
This enables apiserver, controllerManager, scheduler, addonManager,
kube-proxy and etcd:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "master" ];
</programlisting>
While this will enable the kubelet and kube-proxy only:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "node" ];
</programlisting>
Assigning both the master and node roles is useful if you want a single
node Kubernetes cluster for dev or testing purposes:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "master" "node" ];
</programlisting>
Note: Assigning either role will also default both
<xref linkend="opt-services.kubernetes.flannel.enable"/> and
<xref linkend="opt-services.kubernetes.easyCerts"/> to true.
This sets up flannel as CNI and activates automatic PKI bootstrapping.
</para>
<para>
As of Kubernetes 1.10.x, it is deprecated to open non-TLS-enabled ports
on Kubernetes components. Thus, from NixOS 19.03 all plain HTTP ports
have been disabled by default. While opening insecure ports is still
possible, it is recommended not to bind these to interfaces other than
loopback. To re-enable the insecure port on the apiserver, see options:
<xref linkend="opt-services.kubernetes.apiserver.insecurePort"/>
and
<xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress"/>
</para>
<note>
<para>
As of NixOS 19.03, it is mandatory to configure:
<xref linkend="opt-services.kubernetes.masterAddress"/>.
The masterAddress must be resolvable and routable by all cluster nodes.
In single node clusters, this can be set to <literal>localhost</literal>.
</para>
</note>
<para>
Role-based access control (RBAC) authorization mode is enabled by default.
This means that anonymous requests to the apiserver secure port will,
as expected, be denied with a permission error. All cluster components
must therefore be configured with x509 certificates for two-way TLS communication.
The x509 certificate subject section determines the roles and permissions
granted by the apiserver to perform clusterwide or namespaced operations.
See also:
<link
xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/rbac/">
Using RBAC Authorization</link>.
</para>
<para>
The NixOS kubernetes module provides an option for automatic certificate
bootstrapping and configuration,
<xref linkend="opt-services.kubernetes.easyCerts"/>.
The PKI bootstrapping process involves setting up a certificate authority
(CA) daemon (cfssl) on the kubernetes master node. cfssl generates a CA-cert
for the cluster, and uses the CA-cert for signing subordinate certs issued to
each of the cluster components. Subsequently, the certmgr daemon monitors
active certificates and renews them when needed. For single node Kubernetes
clusters, setting <xref linkend="opt-services.kubernetes.easyCerts"/> = true
is sufficient and no further action is required. For joining extra node
machines to an existing cluster on the other hand, establishing initial trust
is mandatory.
</para>
<para>
To add new nodes to the cluster:
On any (non-master) cluster node where
<xref linkend="opt-services.kubernetes.easyCerts"/> is enabled, the helper
script <literal>nixos-kubernetes-node-join</literal> is available on PATH.
Given a token on stdin, it will copy the token to the kubernetes
secrets directory and restart the certmgr service. As requested
certificates are issued, the script will restart kubernetes cluster
components as needed for them to pick up new keypairs.
</para>
<note>
<para>
Multi-master (HA) clusters are not supported by the easyCerts module.
</para>
</note>
<para>
In order to interact with an RBAC-enabled cluster as an administrator, one
needs to have cluster-admin privileges. By default, when easyCerts is
enabled, a cluster-admin kubeconfig file is generated and linked into
<literal>/etc/kubernetes/cluster-admin.kubeconfig</literal> as determined by
<xref linkend="opt-services.kubernetes.pki.etcClusterAdminKubeconfig"/>.
<literal>export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig</literal>
will make kubectl use this kubeconfig to access and authenticate the cluster.
The cluster-admin kubeconfig references an auto-generated keypair owned by
root. Thus, only root on the kubernetes master may obtain cluster-admin
rights by means of this file.
</para>
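<para>
  Putting the options above together, a minimal single-node cluster could
  look like the following sketch (based solely on the options discussed
  in this chapter; the master address is illustrative):
</para>
<programlisting>
{
  services.kubernetes = {
    roles = [ "master" "node" ];
    masterAddress = "localhost";
    easyCerts = true;
  };
}
</programlisting>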
</chapter>


@ -36,8 +36,25 @@
</para> </para>
<para> <para>
If you are using WPA2 you can generate the pskRaw key using
<command>wpa_passphrase</command>:
<screen>
$ wpa_passphrase ESSID PSK
network={
ssid="echelon"
#psk="abcdefgh"
psk=dca6d6ed41f4ab5a984c9f55f6f66d4efdc720ebf66959810f4329bb391c5435
}
</screen>
<programlisting>
<xref linkend="opt-networking.wireless.networks"/> = {
echelon = {
pskRaw = "dca6d6ed41f4ab5a984c9f55f6f66d4efdc720ebf66959810f4329bb391c5435";
};
}
</programlisting>
or you can use it to directly generate the
<literal>wpa_supplicant.conf</literal>:
<screen> <screen>
# wpa_passphrase ESSID PSK > /etc/wpa_supplicant.conf</screen> # wpa_passphrase ESSID PSK > /etc/wpa_supplicant.conf</screen>
After you have edited the <literal>wpa_supplicant.conf</literal>, you need to After you have edited the <literal>wpa_supplicant.conf</literal>, you need to


@ -268,7 +268,10 @@ in rec {
--stringparam id.warnings "1" \
--nonet --output $dst/ \
${docbook_xsl_ns}/xml/xsl/docbook/xhtml/chunktoc.xsl \
${manual-combined}/manual-combined.xml \
|& tee xsltproc.out
grep "^ID recommended on" xsltproc.out &>/dev/null && echo "error: some IDs are missing" && false
rm xsltproc.out
mkdir -p $dst/images/callouts
cp ${docbook_xsl_ns}/xml/xsl/docbook/images/callouts/*.svg $dst/images/callouts/
@ -327,6 +330,7 @@ in rec {
# Generate manpages.
mkdir -p $out/share/man
xsltproc --nonet \
    --maxdepth 6000 \
    --param man.output.in.separate.dir 1 \
    --param man.output.base.dir "'$out/share/man/'" \
    --param man.endnotes.are.numbered 0 \


@ -4,7 +4,7 @@
version="5.0"
xml:id="ch-development">
<title>Development</title>
<partintro xml:id="ch-development-intro">
<para>
This chapter describes how you can modify and extend NixOS.
</para>


@ -60,13 +60,6 @@
Make sure a channel is created at http://nixos.org/channels/. </link>
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/settings/branches">
Let a GitHub nixpkgs admin lock the branch on github for you. (so
developers can't force push) </link>
</para>
</listitem>
<listitem> <listitem>
<para> <para>
<link xlink:href="https://github.com/NixOS/nixpkgs/compare/bdf161ed8d21...6b63c4616790"> <link xlink:href="https://github.com/NixOS/nixpkgs/compare/bdf161ed8d21...6b63c4616790">


@ -4,7 +4,7 @@
version="5.0"
xml:id="ch-installation">
<title>Installation</title>
<partintro xml:id="ch-installation-intro">
<para>
This section describes how to obtain, install, and configure NixOS for
first-time use.


@ -377,6 +377,10 @@
option can be set to <literal>true</literal> to automatically add them to
the grub menu.
</para>
<para>
If you need to configure networking for your machine, the configuration
options are described in <xref linkend="sec-networking"/>.
</para>
<para>
Another critical option is <option>fileSystems</option>, specifying the
file systems that need to be mounted by NixOS. However, you typically


@ -38,6 +38,10 @@
<option>dry-activate</option>
</arg>
<arg choice='plain'>
<option>edit</option>
</arg>
<arg choice='plain'>
<option>build-vm</option>
</arg>
@ -188,6 +192,16 @@ $ nix-build /path/to/nixpkgs/nixos -A system
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term>
<option>edit</option>
</term>
<listitem>
<para>
Opens <filename>configuration.nix</filename> in the default editor.
</para>
</listitem>
</varlistentry>
<varlistentry> <varlistentry>
<term> <term>
<option>build-vm</option> <option>build-vm</option>


@ -8,6 +8,7 @@
This section lists the release notes for each stable version of NixOS and
current unstable revision.
</para>
<xi:include href="rl-1909.xml" />
<xi:include href="rl-1903.xml" />
<xi:include href="rl-1809.xml" />
<xi:include href="rl-1803.xml" />


@ -55,6 +55,15 @@
<para>to <literal>false</literal> and enable your preferred display manager.</para>
</note>
</listitem>
<listitem>
<para>
A major refactoring of the Kubernetes module has been completed. The
refactoring primarily focused on decoupling components and enhancing
security. Two-way TLS and RBAC have been enabled by default for all
components, which slightly changes the way the module is configured.
See <xref linkend="sec-kubernetes"/> for details.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
@ -84,6 +93,35 @@
in <literal>nixos/modules/virtualisation/google-compute-config.nix</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>./services/misc/beanstalkd.nix</literal>
</para>
</listitem>
<listitem>
<para>
There is a new <varname>services.cockroachdb</varname> module for running
CockroachDB databases. NixOS now ships with CockroachDB 2.1.x as well, available
on <literal>x86_64-linux</literal> and <literal>aarch64-linux</literal>.
</para>
</listitem>
</itemizedlist>
<itemizedlist>
<listitem>
<para>
<literal>./security/duosec.nix</literal>
</para>
</listitem>
<listitem>
<para>
The <link xlink:href="https://duo.com/docs/duounix">PAM module for Duo
Security</link> has been enabled for use. One can configure it using
the <option>security.duosec</option> options along with the
corresponding PAM option in
<option>security.pam.services.&lt;name?&gt;.duoSecurity.enable</option>.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
@ -144,6 +182,20 @@
</listitem> </listitem>
</itemizedlist> </itemizedlist>
</listitem> </listitem>
<listitem>
<para>
The <varname>buildPythonPackage</varname> function now sets <varname>strictDeps = true</varname>
to help distinguish between native and non-native dependencies in order to
improve cross-compilation compatibility. Note however that this may break
user expressions.
</para>
</listitem>
<listitem>
<para>
The <varname>buildPythonPackage</varname> function now sets <varname>LANG = C.UTF-8</varname>
to enable Unicode support. The <varname>glibcLocales</varname> package is no longer needed as a build input.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
The Syncthing state and configuration data has been moved from The Syncthing state and configuration data has been moved from
@ -404,8 +456,8 @@
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
NixOS module system type <literal>types.optionSet</literal> and
<literal>lib.mkOption</literal> argument <literal>options</literal> are deprecated.
Use <literal>types.submodule</literal> instead.
(<link xlink:href="https://github.com/NixOS/nixpkgs/pull/54637">#54637</link>) (<link xlink:href="https://github.com/NixOS/nixpkgs/pull/54637">#54637</link>)
</para> </para>
@ -427,6 +479,11 @@
been removed. been removed.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<literal>graylog</literal> has been upgraded from version 2.* to 3.*. Some setups making use of extraConfig (especially those exposing Graylog via reverse proxies) need to be updated as upstream removed/replaced some settings. See <link xlink:href="http://docs.graylog.org/en/3.0/pages/upgrade/graylog-3.0.html#simplified-http-interface-configuration">Upgrading Graylog</link> for details.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
@ -520,7 +577,7 @@
but is still possible by setting <literal>zramSwap.swapDevices</literal> explicitly.
</para>
<para>
The ZRAM algorithm can now be changed.
</para> </para>
<para> <para>
Changes to ZRAM algorithm are applied during <literal>nixos-rebuild switch</literal>, Changes to ZRAM algorithm are applied during <literal>nixos-rebuild switch</literal>,
@ -564,6 +621,75 @@
provisioning. provisioning.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The use of insecure ports on Kubernetes has been deprecated. Thus, the
options <varname>services.kubernetes.apiserver.port</varname> and
<varname>services.kubernetes.controllerManager.port</varname>
have been renamed to <varname>.insecurePort</varname>,
and the default of both options has changed to 0 (disabled).
</listitem>
<listitem>
<para>
Note that the default value of
<varname>services.kubernetes.apiserver.bindAddress</varname>
has changed from 127.0.0.1 to 0.0.0.0, allowing the apiserver to be
accessible from outside the master node itself.
If the apiserver insecurePort is enabled,
it is strongly recommended to bind it only on the loopback interface. See
<varname>services.kubernetes.apiserver.insecureBindAddress</varname>.
</para>
</listitem>
<listitem>
<para>
The options <varname>services.kubernetes.apiserver.allowPrivileged</varname>
and <varname>services.kubernetes.kubelet.allowPrivileged</varname> now
default to false, disallowing privileged containers on the cluster.
</para>
</listitem>
<listitem>
<para>
The Kubernetes module no longer adds the kubernetes package to
<varname>environment.systemPackages</varname> implicitly.
</para>
</listitem>
<listitem>
<para>
The <literal>intel</literal> driver has been removed from the default list of
<link linkend="opt-services.xserver.videoDrivers">X.org video drivers</link>.
The <literal>modesetting</literal> driver should take over automatically;
it is better maintained upstream and has fewer problems with advanced X11 features.
This can lead to a change in the output names used by <literal>xrandr</literal>.
Some performance regressions on some GPU models might happen.
Some OpenCL and VA-API applications might also break
(Beignet seems to provide OpenCL support with the
<literal>modesetting</literal> driver, too).
The kernel mode setting API does not support backlight control,
so the <literal>xbacklight</literal> tool will not work;
the backlight level can be controlled directly via <literal>/sys/</literal>
or with <literal>brightnessctl</literal>.
Users who need this functionality more than multi-output XRandR are advised
to add <literal>intel</literal> to <literal>videoDrivers</literal> and report
an issue (or provide additional details in an existing one).
</para>
</listitem>
<listitem>
<para>
Openmpi has been updated to version 4.0.0, which removes some deprecated MPI-1 symbols.
This may break some older applications that still rely on those symbols.
An upgrade guide can be found <link xlink:href="https://www.open-mpi.org/faq/?category=mpi-removed">here</link>.
</para>
<para>
The nginx package now relies on OpenSSL 1.1 and supports TLS 1.3 by default. You can set the protocols used by the nginx service using <xref linkend="opt-services.nginx.sslProtocols"/>.
</para>
</listitem>
<listitem>
<para>
A new subcommand <command>nixos-rebuild edit</command> was added.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
</section> </section>


@ -0,0 +1,68 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.09">
<title>Release 19.09 (“Loris”, 2019/09/??)</title>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.09-highlights">
<title>Highlights</title>
<para>
In addition to numerous new and upgraded packages, this release has the
following highlights:
</para>
<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.09-new-services">
<title>New Services</title>
<para>
The following new services were added since the last release:
</para>
<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.09-notable-changes">
<title>Other Notable Changes</title>
<itemizedlist>
<listitem>
<para>
The <option>documentation</option> module gained an option named
<option>documentation.nixos.includeAllModules</option> which makes the generated
<citerefentry><refentrytitle>configuration.nix</refentrytitle>
<manvolnum>5</manvolnum></citerefentry> manual page include all options from all NixOS modules
included in a given <literal>configuration.nix</literal> configuration file. Currently, it is
set to <literal>false</literal> by default as enabling it frequently prevents evaluation. But
the plan is to eventually have it set to <literal>true</literal> by default. Please set it to
<literal>true</literal> now in your <literal>configuration.nix</literal> and fix all the bugs
it uncovers.
</para>
</listitem>
</itemizedlist>
</section>
</section>


@ -51,7 +51,7 @@ in rec {
# system configuration. # system configuration.
inherit (lib.evalModules { inherit (lib.evalModules {
inherit prefix check; inherit prefix check;
modules = baseModules ++ extraModules ++ [ pkgsModule ] ++ modules;
args = extraArgs; args = extraArgs;
specialArgs = specialArgs =
{ modulesPath = builtins.toString ../modules; } // specialArgs; { modulesPath = builtins.toString ../modules; } // specialArgs;
@ -60,7 +60,7 @@ in rec {
# These are the extra arguments passed to every module. In # These are the extra arguments passed to every module. In
# particular, Nixpkgs is passed through the "pkgs" argument. # particular, Nixpkgs is passed through the "pkgs" argument.
extraArgs = extraArgs_ // { extraArgs = extraArgs_ // {
inherit baseModules extraModules modules;
}; };
inherit (config._module.args) pkgs; inherit (config._module.args) pkgs;


@ -0,0 +1,23 @@
# nix-build '<nixpkgs/nixos>' -A config.system.build.cloudstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/cloudstack/cloudstack-image.nix ]; }"
{ config, lib, pkgs, ... }:
with lib;
{
imports =
[ ../../../modules/virtualisation/cloudstack-config.nix ];
system.build.cloudstackImage = import ../../../lib/make-disk-image.nix {
inherit lib config pkgs;
diskSize = 8192;
format = "qcow2";
configFile = pkgs.writeText "configuration.nix"
''
{
imports = [ <nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix> ];
}
'';
};
}


@ -55,7 +55,9 @@ let
localConf = pkgs.writeText "fc-local.conf" cfg.localConf; localConf = pkgs.writeText "fc-local.conf" cfg.localConf;
# The configuration to be included in /etc/font/ # The configuration to be included in /etc/font/
penultimateConf = pkgs.runCommand "font-penultimate-conf" {
preferLocalBuild = true;
} ''
support_folder=$out/etc/fonts/conf.d support_folder=$out/etc/fonts/conf.d
latest_folder=$out/etc/fonts/${latestVersion}/conf.d latest_folder=$out/etc/fonts/${latestVersion}/conf.d


@ -7,7 +7,7 @@ let cfg = config.fonts.fontconfig.ultimate;
latestVersion = pkgs.fontconfig.configVersion; latestVersion = pkgs.fontconfig.configVersion;
# The configuration to be included in /etc/font/ # The configuration to be included in /etc/font/
confPkg = pkgs.runCommand "font-ultimate-conf" { preferLocalBuild = true; } ''
support_folder=$out/etc/fonts/conf.d support_folder=$out/etc/fonts/conf.d
latest_folder=$out/etc/fonts/${latestVersion}/conf.d latest_folder=$out/etc/fonts/${latestVersion}/conf.d


@ -190,7 +190,7 @@ let cfg = config.fonts.fontconfig;
''; '';
# fontconfig configuration package # fontconfig configuration package
confPkg = pkgs.runCommand "fontconfig-conf" { preferLocalBuild = true; } ''
support_folder=$out/etc/fonts support_folder=$out/etc/fonts
latest_folder=$out/etc/fonts/${latestVersion} latest_folder=$out/etc/fonts/${latestVersion}

View File

@ -4,7 +4,7 @@ with lib;
let let
x11Fonts = pkgs.runCommand "X11-fonts" { preferLocalBuild = true; } ''
mkdir -p "$out/share/X11-fonts" mkdir -p "$out/share/X11-fonts"
find ${toString config.fonts.fonts} \ find ${toString config.fonts.fonts} \
\( -name fonts.dir -o -name '*.ttf' -o -name '*.otf' \) \ \( -name fonts.dir -o -name '*.ttf' -o -name '*.otf' \) \


@ -34,7 +34,7 @@ with lib;
networkmanager-openvpn = super.networkmanager-openvpn.override { withGnome = false; }; networkmanager-openvpn = super.networkmanager-openvpn.override { withGnome = false; };
networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; }; networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; }; networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
pinentry = super.pinentry.override { gtk2 = null; qt = null; };
gobject-introspection = super.gobject-introspection.override { x11Support = false; }; gobject-introspection = super.gobject-introspection.override { x11Support = false; };
})); }));
}; };


@ -61,6 +61,15 @@ in {
}; };
}; };
system.nssHosts = mkOption {
type = types.listOf types.str;
default = [];
example = [ "mdns" ];
description = ''
List of host entries to configure in <filename>/etc/nsswitch.conf</filename>.
'';
};
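# A hedged usage sketch from another module: prepend mdns resolution
# ahead of the default hosts entries (lib.mkBefore orders list values):
#   system.nssHosts = lib.mkBefore [ "mdns" ];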
}; };
config = { config = {
@ -85,7 +94,7 @@ in {
group: ${concatStringsSep " " passwdArray} group: ${concatStringsSep " " passwdArray}
shadow: ${concatStringsSep " " shadowArray} shadow: ${concatStringsSep " " shadowArray}
hosts: ${concatStringsSep " " config.system.nssHosts}
networks: files networks: files
ethers: files ethers: files
@ -94,6 +103,8 @@ in {
rpc: files rpc: files
''; '';
system.nssHosts = hostArray;
# Systemd provides nss-myhostname to ensure that our hostname # Systemd provides nss-myhostname to ensure that our hostname
# always resolves to a valid IP address. It returns all locally # always resolves to a valid IP address. It returns all locally
# configured IP addresses, or ::1 and 127.0.0.2 as # configured IP addresses, or ::1 and 127.0.0.2 as


@ -91,13 +91,13 @@ in
}; };
algorithm = mkOption {
  default = "lzo";
  example = "lz4";
  type = with types; either (enum [ "lzo" "lz4" "zstd" ]) str;
  description = ''
    Compression algorithm. <literal>lzo</literal> has good compression,
    but is slow. <literal>lz4</literal> has bad compression, but is fast.
    <literal>zstd</literal> is both good compression and fast, but requires a newer kernel.
    You can check what other algorithms are supported by your zram device with
    <programlisting>cat /sys/class/block/zram*/comp_algorithm</programlisting>
  '';
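# A hedged usage sketch: opting into zstd on a kernel that supports it.
#   zramSwap.enable = true;
#   zramSwap.algorithm = "zstd";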


@ -0,0 +1,24 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.hardware.acpilight;
in
{
options = {
hardware.acpilight = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
Enable acpilight.
This will allow brightness control via xbacklight from users in the video group.
'';
};
};
};
config = mkIf cfg.enable {
services.udev.packages = with pkgs; [ acpilight ];
};
}
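# A hypothetical usage sketch in configuration.nix: enable the module and
# put a user in the video group so xbacklight can adjust the backlight.
#   hardware.acpilight.enable = true;
#   users.users.alice.extraGroups = [ "video" ];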


@ -0,0 +1,14 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.hardware.ledger;
in {
options.hardware.ledger.enable = mkEnableOption "udev rules for Ledger devices";
config = mkIf cfg.enable {
services.udev.packages = [ pkgs.ledger-udev-rules ];
};
}


@ -172,6 +172,11 @@ in
environment.systemPackages = [ nvidia_x11.bin nvidia_x11.settings ]
  ++ lib.filter (p: p != null) [ nvidia_x11.persistenced ];
systemd.tmpfiles.rules = optional config.virtualisation.docker.enableNvidia
"L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
++ optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
"L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
boot.extraModulePackages = [ nvidia_x11.bin ]; boot.extraModulePackages = [ nvidia_x11.bin ];
# nvidia-uvm is required by CUDA applications. # nvidia-uvm is required by CUDA applications.


@ -29,6 +29,7 @@ runCommand "uvcdynctrl-udev-rules-${version}"
];
dontPatchELF = true;
dontStrip = true;
preferLocalBuild = true;
}
''
mkdir -p "$out/lib/udev"


@ -13,7 +13,7 @@ let
# user, as expected by nixos-rebuild/nixos-install. FIXME: merge
# with make-channel.nix.
channelSources = pkgs.runCommand "nixos-${config.system.nixos.version}"
  { preferLocalBuild = true; }
'' ''
mkdir -p $out mkdir -p $out
cp -prd ${nixpkgs.outPath} $out/nixos cp -prd ${nixpkgs.outPath} $out/nixos

View File

@ -31,6 +31,10 @@ with lib;
# there is no power management backend such as upower). # there is no power management backend such as upower).
powerManagement.enable = true; powerManagement.enable = true;
# Enable sound in graphical iso's.
hardware.pulseaudio.enable = true;
hardware.pulseaudio.systemWide = true; # Needed since we run plasma as root.
environment.systemPackages = [ environment.systemPackages = [
# Include gparted for partitioning disks. # Include gparted for partitioning disks.
pkgs.gparted pkgs.gparted

View File

@ -138,7 +138,18 @@ fi
# Ask the user to set a root password, but only if the passwd command # Ask the user to set a root password, but only if the passwd command
# exists (i.e. when mutable user accounts are enabled). # exists (i.e. when mutable user accounts are enabled).
if [[ -z $noRootPasswd ]] && [ -t 0 ]; then if [[ -z $noRootPasswd ]] && [ -t 0 ]; then
nixos-enter --root "$mountPoint" -c '[[ -e /nix/var/nix/profiles/system/sw/bin/passwd ]] && echo "setting root password..." && /nix/var/nix/profiles/system/sw/bin/passwd' if nixos-enter --root "$mountPoint" -c 'test -e /nix/var/nix/profiles/system/sw/bin/passwd'; then
set +e
nixos-enter --root "$mountPoint" -c 'echo "setting root password..." && /nix/var/nix/profiles/system/sw/bin/passwd'
exit_code=$?
set -e
if [[ $exit_code != 0 ]]; then
echo "Setting a root password failed with the above printed error."
echo "You can set the root password manually by executing \`nixos-enter --root ${mountPoint@Q}\` and then running \`passwd\` in the shell of the new system."
exit $exit_code
fi
fi
fi fi
echo "installation finished!" echo "installation finished!"

View File

@ -29,7 +29,7 @@ while [ "$#" -gt 0 ]; do
--help) --help)
showSyntax showSyntax
;; ;;
switch|boot|test|build|dry-build|dry-run|dry-activate|build-vm|build-vm-with-bootloader) switch|boot|test|build|edit|dry-build|dry-run|dry-activate|build-vm|build-vm-with-bootloader)
if [ "$i" = dry-run ]; then i=dry-build; fi if [ "$i" = dry-run ]; then i=dry-build; fi
action="$i" action="$i"
;; ;;
@ -227,6 +227,13 @@ if [ -z "$_NIXOS_REBUILD_REEXEC" -a -n "$canRun" -a -z "$fast" ]; then
fi fi
fi fi
# Find configuration.nix and open editor instead of building.
if [ "$action" = edit ]; then
NIXOS_CONFIG=${NIXOS_CONFIG:-$(nix-instantiate --find-file nixos-config)}
exec "${EDITOR:-nano}" "$NIXOS_CONFIG"
exit 1
fi
tmpDir=$(mktemp -t -d nixos-rebuild.XXXXXX) tmpDir=$(mktemp -t -d nixos-rebuild.XXXXXX)
SSHOPTS="$NIX_SSHOPTS -o ControlMaster=auto -o ControlPath=$tmpDir/ssh-%n -o ControlPersist=60" SSHOPTS="$NIX_SSHOPTS -o ControlMaster=auto -o ControlPath=$tmpDir/ssh-%n -o ControlPersist=60"
@ -260,6 +267,14 @@ if [ -n "$rollback" -o "$action" = dry-build ]; then
buildNix= buildNix=
fi fi
nixSystem() {
machine="$(uname -m)"
if [[ "$machine" =~ i.86 ]]; then
machine=i686
fi
echo $machine-linux
}
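# Illustration (assumption): on 32-bit x86, `uname -m` prints e.g. "i586" or
# "i686", both matching i.86, so nixSystem echoes "i686-linux"; on x86_64
# hardware it echoes "x86_64-linux".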
prebuiltNix() { prebuiltNix() {
machine="$1" machine="$1"
if [ "$machine" = x86_64 ]; then if [ "$machine" = x86_64 ]; then
@ -279,7 +294,9 @@ if [ -n "$buildNix" ]; then
nixDrv= nixDrv=
if ! nixDrv="$(nix-instantiate '<nixpkgs/nixos>' --add-root $tmpDir/nix.drv --indirect -A config.nix.package.out "${extraBuildFlags[@]}")"; then if ! nixDrv="$(nix-instantiate '<nixpkgs/nixos>' --add-root $tmpDir/nix.drv --indirect -A config.nix.package.out "${extraBuildFlags[@]}")"; then
if ! nixDrv="$(nix-instantiate '<nixpkgs>' --add-root $tmpDir/nix.drv --indirect -A nix "${extraBuildFlags[@]}")"; then if ! nixDrv="$(nix-instantiate '<nixpkgs>' --add-root $tmpDir/nix.drv --indirect -A nix "${extraBuildFlags[@]}")"; then
if ! nixStorePath="$(nix-instantiate --eval '<nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix>' -A $(nixSystem) | sed -e 's/^"//' -e 's/"$//')"; then
nixStorePath="$(prebuiltNix "$(uname -m)")" nixStorePath="$(prebuiltNix "$(uname -m)")"
fi
if ! nix-store -r $nixStorePath --add-root $tmpDir/nix --indirect \ if ! nix-store -r $nixStorePath --add-root $tmpDir/nix --indirect \
--option extra-binary-caches https://cache.nixos.org/; then --option extra-binary-caches https://cache.nixos.org/; then
echo "warning: don't know how to get latest Nix" >&2 echo "warning: don't know how to get latest Nix" >&2

View File

@ -57,7 +57,5 @@ with lib;
# Enable the OpenSSH daemon. # Enable the OpenSSH daemon.
# services.openssh.enable = true; # services.openssh.enable = true;
system.stateVersion = mkDefault "18.03";
''; '';
} }

View File

@ -1,4 +1,4 @@
{ config, lib, pkgs, baseModules, ... }: { config, lib, pkgs, baseModules, extraModules, modules, ... }:
with lib; with lib;
@ -6,6 +6,8 @@ let
cfg = config.documentation; cfg = config.documentation;
manualModules = baseModules ++ optionals cfg.nixos.includeAllModules (extraModules ++ modules);
/* For the purpose of generating docs, evaluate options with each derivation /* For the purpose of generating docs, evaluate options with each derivation
in `pkgs` (recursively) replaced by a fake with path "\${pkgs.attribute.path}". in `pkgs` (recursively) replaced by a fake with path "\${pkgs.attribute.path}".
It isn't perfect, but it seems to cover a vast majority of use cases. It isn't perfect, but it seems to cover a vast majority of use cases.
@ -18,7 +20,7 @@ let
options = options =
let let
scrubbedEval = evalModules { scrubbedEval = evalModules {
modules = [ { nixpkgs.localSystem = config.nixpkgs.localSystem; } ] ++ baseModules; modules = [ { nixpkgs.localSystem = config.nixpkgs.localSystem; } ] ++ manualModules;
args = (config._module.args) // { modules = [ ]; }; args = (config._module.args) // { modules = [ ]; };
specialArgs = { pkgs = scrubDerivations "pkgs" pkgs; }; specialArgs = { pkgs = scrubDerivations "pkgs" pkgs; };
}; };
@ -146,6 +148,17 @@ in
''; '';
}; };
nixos.includeAllModules = mkOption {
type = types.bool;
default = false;
description = ''
Whether the generated NixOS documentation should include documentation for all
the options from all the NixOS modules included in the current
<literal>configuration.nix</literal>. Disabling this makes the manual
generator ignore options defined outside of <literal>baseModules</literal>.
'';
};
}; };
}; };
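A usage sketch enabling the new option from a configuration.nix:

  documentation.nixos.includeAllModules = true; # document options from all imported modules, not just baseModules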

View File

@ -272,7 +272,7 @@
nzbget = 245; nzbget = 245;
mosquitto = 246; mosquitto = 246;
toxvpn = 247; toxvpn = 247;
squeezelite = 248; # squeezelite = 248; # DynamicUser = true
turnserver = 249; turnserver = 249;
smokeping = 250; smokeping = 250;
gocd-agent = 251; gocd-agent = 251;

View File

@ -44,6 +44,7 @@
./hardware/digitalbitbox.nix ./hardware/digitalbitbox.nix
./hardware/sensor/iio.nix ./hardware/sensor/iio.nix
./hardware/ksm.nix ./hardware/ksm.nix
./hardware/ledger.nix
./hardware/mcelog.nix ./hardware/mcelog.nix
./hardware/network/b43.nix ./hardware/network/b43.nix
./hardware/nitrokey.nix ./hardware/nitrokey.nix
@ -181,6 +182,7 @@
./services/audio/mpd.nix ./services/audio/mpd.nix
./services/audio/mopidy.nix ./services/audio/mopidy.nix
./services/audio/slimserver.nix ./services/audio/slimserver.nix
./services/audio/snapserver.nix
./services/audio/squeezelite.nix ./services/audio/squeezelite.nix
./services/audio/ympd.nix ./services/audio/ympd.nix
./services/backup/bacula.nix ./services/backup/bacula.nix
@ -188,6 +190,7 @@
./services/backup/duplicati.nix ./services/backup/duplicati.nix
./services/backup/crashplan.nix ./services/backup/crashplan.nix
./services/backup/crashplan-small-business.nix ./services/backup/crashplan-small-business.nix
./services/backup/duplicity.nix
./services/backup/mysql-backup.nix ./services/backup/mysql-backup.nix
./services/backup/postgresql-backup.nix ./services/backup/postgresql-backup.nix
./services/backup/restic.nix ./services/backup/restic.nix
@ -196,9 +199,17 @@
./services/backup/tarsnap.nix ./services/backup/tarsnap.nix
./services/backup/znapzend.nix ./services/backup/znapzend.nix
./services/cluster/hadoop/default.nix ./services/cluster/hadoop/default.nix
./services/cluster/kubernetes/addons/dns.nix
./services/cluster/kubernetes/addons/dashboard.nix
./services/cluster/kubernetes/addon-manager.nix
./services/cluster/kubernetes/apiserver.nix
./services/cluster/kubernetes/controller-manager.nix
./services/cluster/kubernetes/default.nix ./services/cluster/kubernetes/default.nix
./services/cluster/kubernetes/dns.nix ./services/cluster/kubernetes/flannel.nix
./services/cluster/kubernetes/dashboard.nix ./services/cluster/kubernetes/kubelet.nix
./services/cluster/kubernetes/pki.nix
./services/cluster/kubernetes/proxy.nix
./services/cluster/kubernetes/scheduler.nix
./services/computing/boinc/client.nix ./services/computing/boinc/client.nix
./services/computing/torque/server.nix ./services/computing/torque/server.nix
./services/computing/torque/mom.nix ./services/computing/torque/mom.nix
@ -259,6 +270,7 @@
./services/desktops/gnome3/gnome-online-accounts.nix ./services/desktops/gnome3/gnome-online-accounts.nix
./services/desktops/gnome3/gnome-remote-desktop.nix ./services/desktops/gnome3/gnome-remote-desktop.nix
./services/desktops/gnome3/gnome-online-miners.nix ./services/desktops/gnome3/gnome-online-miners.nix
./services/desktops/gnome3/gnome-settings-daemon.nix
./services/desktops/gnome3/gnome-terminal-server.nix ./services/desktops/gnome3/gnome-terminal-server.nix
./services/desktops/gnome3/gnome-user-share.nix ./services/desktops/gnome3/gnome-user-share.nix
./services/desktops/gnome3/gpaste.nix ./services/desktops/gnome3/gpaste.nix
@ -284,6 +296,7 @@
./services/hardware/acpid.nix ./services/hardware/acpid.nix
./services/hardware/actkbd.nix ./services/hardware/actkbd.nix
./services/hardware/bluetooth.nix ./services/hardware/bluetooth.nix
./services/hardware/bolt.nix
./services/hardware/brltty.nix ./services/hardware/brltty.nix
./services/hardware/freefall.nix ./services/hardware/freefall.nix
./services/hardware/fwupd.nix ./services/hardware/fwupd.nix
@ -327,6 +340,7 @@
./services/logging/syslog-ng.nix ./services/logging/syslog-ng.nix
./services/logging/syslogd.nix ./services/logging/syslogd.nix
./services/mail/clamsmtp.nix ./services/mail/clamsmtp.nix
./services/mail/davmail.nix
./services/mail/dkimproxy-out.nix ./services/mail/dkimproxy-out.nix
./services/mail/dovecot.nix ./services/mail/dovecot.nix
./services/mail/dspam.nix ./services/mail/dspam.nix
@ -352,6 +366,7 @@
./services/misc/apache-kafka.nix ./services/misc/apache-kafka.nix
./services/misc/autofs.nix ./services/misc/autofs.nix
./services/misc/autorandr.nix ./services/misc/autorandr.nix
./services/misc/beanstalkd.nix
./services/misc/bees.nix ./services/misc/bees.nix
./services/misc/bepasty.nix ./services/misc/bepasty.nix
./services/misc/canto-daemon.nix ./services/misc/canto-daemon.nix
@ -413,7 +428,7 @@
./services/misc/parsoid.nix ./services/misc/parsoid.nix
./services/misc/phd.nix ./services/misc/phd.nix
./services/misc/plex.nix ./services/misc/plex.nix
./services/misc/plexpy.nix ./services/misc/tautulli.nix
./services/misc/pykms.nix ./services/misc/pykms.nix
./services/misc/radarr.nix ./services/misc/radarr.nix
./services/misc/redmine.nix ./services/misc/redmine.nix
@ -517,6 +532,7 @@
./services/networking/cntlm.nix ./services/networking/cntlm.nix
./services/networking/connman.nix ./services/networking/connman.nix
./services/networking/consul.nix ./services/networking/consul.nix
./services/networking/coredns.nix
./services/networking/coturn.nix ./services/networking/coturn.nix
./services/networking/dante.nix ./services/networking/dante.nix
./services/networking/ddclient.nix ./services/networking/ddclient.nix

View File

@ -14,5 +14,9 @@
libinput.enable = true; # for touchpad support on many laptops libinput.enable = true; # for touchpad support on many laptops
}; };
# Enable sound in virtualbox appliances.
hardware.pulseaudio.enable = true;
hardware.pulseaudio.systemWide = true; # Needed since we run plasma as root.
environment.systemPackages = [ pkgs.glxinfo pkgs.firefox ]; environment.systemPackages = [ pkgs.glxinfo pkgs.firefox ];
} }

View File

@ -102,7 +102,7 @@ in
# Emacs term mode doesn't support xterm title escape sequence (\e]0;) # Emacs term mode doesn't support xterm title escape sequence (\e]0;)
PS1="\n\[\033[$PROMPT_COLOR\][\u@\h:\w]\\$\[\033[0m\] " PS1="\n\[\033[$PROMPT_COLOR\][\u@\h:\w]\\$\[\033[0m\] "
else else
PS1="\n\[\033[$PROMPT_COLOR\][\[\e]0;\u@\h: \w\a\]\u@\h:\w]\$\[\033[0m\] " PS1="\n\[\033[$PROMPT_COLOR\][\[\e]0;\u@\h: \w\a\]\u@\h:\w]\\$\[\033[0m\] "
fi fi
if test "$TERM" = "xterm"; then if test "$TERM" = "xterm"; then
PS1="\[\033]2;\h:\u:\w\007\]$PS1" PS1="\[\033]2;\h:\u:\w\007\]$PS1"

View File

@ -169,6 +169,59 @@ in
end end
''; '';
programs.fish.interactiveShellInit = ''
# add completions generated by NixOS to $fish_complete_path
begin
# joins with null byte to accommodate all characters in paths, then respectively gets all paths before (exclusive) / after (inclusive) the first one including "generated_completions",
# splits by null byte, and then removes all empty lines produced by using 'string'
set -l prev (string join0 $fish_complete_path | string match --regex "^.*?(?=\x00[^\x00]*generated_completions.*)" | string split0 | string match -er ".")
set -l post (string join0 $fish_complete_path | string match --regex "[^\x00]*generated_completions.*" | string split0 | string match -er ".")
set fish_complete_path $prev "/etc/fish/generated_completions" $post
end
'';
environment.etc."fish/generated_completions".source =
let
patchedGenerator = pkgs.stdenv.mkDerivation {
name = "fish_patched-completion-generator";
srcs = [
"${pkgs.fish}/share/fish/tools/create_manpage_completions.py"
"${pkgs.fish}/share/fish/tools/deroff.py"
];
unpackCmd = "cp $curSrc $(basename $curSrc)";
sourceRoot = ".";
patches = [ ./fish_completion-generator.patch ]; # to prevent collisions of identical completion files
dontBuild = true;
installPhase = ''
mkdir -p $out
cp * $out/
'';
preferLocalBuild = true;
allowSubstitutes = false;
};
generateCompletions = package: pkgs.runCommand
"${package.name}_fish-completions"
(
{
inherit package;
preferLocalBuild = true;
allowSubstitutes = false;
}
// optionalAttrs (package ? meta.priority) { meta.priority = package.meta.priority; }
)
''
mkdir -p $out
if [ -d $package/share/man ]; then
find $package/share/man -type f | xargs ${pkgs.python3.interpreter} ${patchedGenerator}/create_manpage_completions.py --directory $out >/dev/null
fi
'';
in
pkgs.buildEnv {
name = "system_fish-completions";
ignoreCollisions = true;
paths = map generateCompletions config.environment.systemPackages;
};
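# Illustration (assumption): for a package such as pkgs.git this yields a
# "git-<version>_fish-completions" derivation whose output contains one
# generated completion file per man page found under its share/man.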
# include programs that bring their own completions # include programs that bring their own completions
environment.pathsToLink = [] environment.pathsToLink = []
++ optional cfg.vendor.config.enable "/share/fish/vendor_conf.d" ++ optional cfg.vendor.config.enable "/share/fish/vendor_conf.d"

View File

@ -0,0 +1,11 @@
--- a/create_manpage_completions.py
+++ b/create_manpage_completions.py
@@ -776,8 +776,6 @@ def parse_manpage_at_path(manpage_path, output_directory):
built_command_output.insert(0, "# " + CMDNAME)
- # Output the magic word Autogenerated so we can tell if we can overwrite this
- built_command_output.insert(1, "# Autogenerated from man page " + manpage_path)
# built_command_output.insert(2, "# using " + parser.__class__.__name__) # XXX MISATTRIBUTES THE CULPABLE PARSER! Was really using Type2 but reporting TypeDeroffManParser
for line in built_command_output:

View File

@ -85,11 +85,13 @@ in
# SSH agent protocol doesn't support changing TTYs, so bind the agent # SSH agent protocol doesn't support changing TTYs, so bind the agent
# to every new TTY. # to every new TTY.
${pkgs.gnupg}/bin/gpg-connect-agent --quiet updatestartuptty /bye > /dev/null ${pkgs.gnupg}/bin/gpg-connect-agent --quiet updatestartuptty /bye > /dev/null
'');
environment.extraInit = mkIf cfg.agent.enableSSHSupport ''
if [ -z "$SSH_AUTH_SOCK" ]; then if [ -z "$SSH_AUTH_SOCK" ]; then
export SSH_AUTH_SOCK=$(${pkgs.gnupg}/bin/gpgconf --list-dirs agent-ssh-socket) export SSH_AUTH_SOCK=$(${pkgs.gnupg}/bin/gpgconf --list-dirs agent-ssh-socket)
fi fi
''); '';
assertions = [ assertions = [
{ assertion = cfg.agent.enableSSHSupport -> !config.programs.ssh.startAgent; { assertion = cfg.agent.enableSSHSupport -> !config.programs.ssh.startAgent;

View File

@ -25,7 +25,7 @@ let
''; '';
lessKey = pkgs.runCommand "lesskey" lessKey = pkgs.runCommand "lesskey"
{ src = pkgs.writeText "lessconfig" configText; } { src = pkgs.writeText "lessconfig" configText; preferLocalBuild = true; }
"${pkgs.less}/bin/lesskey -o $out $src"; "${pkgs.less}/bin/lesskey -o $out $src";
in in

View File

@ -40,9 +40,19 @@ with lib;
(mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ]) (mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ])
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ]) (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"]) (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "port" ] ["services" "kubernetes" "apiserver" "insecurePort"])
(mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "") (mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
(mkRenamedOptionModule [ "services" "kubernetes" "addons" "dashboard" "enableRBAC" ] [ "services" "kubernetes" "addons" "dashboard" "rbac" "enable" ]) (mkRenamedOptionModule [ "services" "kubernetes" "addons" "dashboard" "enableRBAC" ] [ "services" "kubernetes" "addons" "dashboard" "rbac" "enable" ])
(mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "address" ] ["services" "kubernetes" "controllerManager" "bindAddress"])
(mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "port" ] ["services" "kubernetes" "controllerManager" "insecurePort"])
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "servers" ] [ "services" "kubernetes" "apiserver" "etcd" "servers" ])
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "keyFile" ] [ "services" "kubernetes" "apiserver" "etcd" "keyFile" ])
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "certFile" ] [ "services" "kubernetes" "apiserver" "etcd" "certFile" ])
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "") (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
(mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
(mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
(mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ]) (mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
(mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ]) (mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ])
(mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "defaultListenAddress" ]) (mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "defaultListenAddress" ])
@ -176,6 +186,9 @@ with lib;
# parsoid # parsoid
(mkRemovedOptionModule [ "services" "parsoid" "interwikis" ] [ "services" "parsoid" "wikis" ]) (mkRemovedOptionModule [ "services" "parsoid" "interwikis" ] [ "services" "parsoid" "wikis" ])
# plexpy / tautulli
(mkRenamedOptionModule [ "services" "plexpy" ] [ "services" "tautulli" ])
# piwik was renamed to matomo # piwik was renamed to matomo
(mkRenamedOptionModule [ "services" "piwik" "enable" ] [ "services" "matomo" "enable" ]) (mkRenamedOptionModule [ "services" "piwik" "enable" ] [ "services" "matomo" "enable" ])
(mkRenamedOptionModule [ "services" "piwik" "webServerUser" ] [ "services" "matomo" "webServerUser" ]) (mkRenamedOptionModule [ "services" "piwik" "webServerUser" ] [ "services" "matomo" "webServerUser" ])

View File

@ -14,6 +14,7 @@ let
{ files = { files =
cfg.certificateFiles ++ cfg.certificateFiles ++
[ (builtins.toFile "extra.crt" (concatStringsSep "\n" cfg.certificates)) ]; [ (builtins.toFile "extra.crt" (concatStringsSep "\n" cfg.certificates)) ];
preferLocalBuild = true;
} }
'' ''
cat $files > $out cat $files > $out

View File

@ -7,7 +7,7 @@ let
boolToStr = b: if b then "yes" else "no"; boolToStr = b: if b then "yes" else "no";
configFile = '' configFilePam = ''
[duo] [duo]
ikey=${cfg.ikey} ikey=${cfg.ikey}
skey=${cfg.skey} skey=${cfg.skey}
@ -16,21 +16,24 @@ let
failmode=${cfg.failmode} failmode=${cfg.failmode}
pushinfo=${boolToStr cfg.pushinfo} pushinfo=${boolToStr cfg.pushinfo}
autopush=${boolToStr cfg.autopush} autopush=${boolToStr cfg.autopush}
motd=${boolToStr cfg.motd}
prompts=${toString cfg.prompts} prompts=${toString cfg.prompts}
accept_env_factor=${boolToStr cfg.acceptEnvFactor}
fallback_local_ip=${boolToStr cfg.fallbackLocalIP} fallback_local_ip=${boolToStr cfg.fallbackLocalIP}
''; '';
configFileLogin = configFilePam + ''
motd=${boolToStr cfg.motd}
accept_env_factor=${boolToStr cfg.acceptEnvFactor}
'';
loginCfgFile = optional cfg.ssh.enable loginCfgFile = optional cfg.ssh.enable
{ source = pkgs.writeText "login_duo.conf" configFile; { source = pkgs.writeText "login_duo.conf" configFileLogin;
mode = "0600"; mode = "0600";
user = "sshd"; user = "sshd";
target = "duo/login_duo.conf"; target = "duo/login_duo.conf";
}; };
pamCfgFile = optional cfg.pam.enable pamCfgFile = optional cfg.pam.enable
{ source = pkgs.writeText "pam_duo.conf" configFile; { source = pkgs.writeText "pam_duo.conf" configFilePam;
mode = "0600"; mode = "0600";
user = "sshd"; user = "sshd";
target = "duo/pam_duo.conf"; target = "duo/pam_duo.conf";
@ -180,12 +183,6 @@ in
}; };
config = mkIf (cfg.ssh.enable || cfg.pam.enable) { config = mkIf (cfg.ssh.enable || cfg.pam.enable) {
assertions =
[ { assertion = !cfg.pam.enable;
message = "PAM support is currently not implemented.";
}
];
environment.systemPackages = [ pkgs.duo-unix ]; environment.systemPackages = [ pkgs.duo-unix ];
security.wrappers.login_duo.source = "${pkgs.duo-unix.out}/bin/login_duo"; security.wrappers.login_duo.source = "${pkgs.duo-unix.out}/bin/login_duo";

View File

@ -131,6 +131,18 @@ let
''; '';
}; };
duoSecurity = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
If set, use the Duo Security pam module
<literal>pam_duo</literal> for authentication. Requires
configuration of <option>security.duosec</option> options.
'';
};
};
startSession = mkOption { startSession = mkOption {
default = false; default = false;
type = types.bool; type = types.bool;
@ -340,7 +352,8 @@ let
|| cfg.pamMount || cfg.pamMount
|| cfg.enableKwallet || cfg.enableKwallet
|| cfg.enableGnomeKeyring || cfg.enableGnomeKeyring
|| cfg.googleAuthenticator.enable)) '' || cfg.googleAuthenticator.enable
|| cfg.duoSecurity.enable)) ''
auth required pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth auth required pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth
${optionalString config.security.pam.enableEcryptfs ${optionalString config.security.pam.enableEcryptfs
"auth optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"} "auth optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
@ -350,9 +363,11 @@ let
("auth optional ${pkgs.plasma5.kwallet-pam}/lib/security/pam_kwallet5.so" + ("auth optional ${pkgs.plasma5.kwallet-pam}/lib/security/pam_kwallet5.so" +
" kwalletd=${pkgs.libsForQt5.kwallet.bin}/bin/kwalletd5")} " kwalletd=${pkgs.libsForQt5.kwallet.bin}/bin/kwalletd5")}
${optionalString cfg.enableGnomeKeyring ${optionalString cfg.enableGnomeKeyring
("auth optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so")} "auth optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so"}
${optionalString cfg.googleAuthenticator.enable ${optionalString cfg.googleAuthenticator.enable
"auth required ${pkgs.googleAuthenticator}/lib/security/pam_google_authenticator.so no_increment_hotp"} "auth required ${pkgs.googleAuthenticator}/lib/security/pam_google_authenticator.so no_increment_hotp"}
${optionalString cfg.duoSecurity.enable
"auth required ${pkgs.duo-unix}/lib/security/pam_duo.so"}
'') + '' '') + ''
${optionalString cfg.unixAuth ${optionalString cfg.unixAuth
"auth sufficient pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth try_first_pass"} "auth sufficient pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth try_first_pass"}

View File

@ -215,7 +215,10 @@ in
environment.etc = singleton environment.etc = singleton
{ source = { source =
pkgs.runCommand "sudoers" pkgs.runCommand "sudoers"
{ src = pkgs.writeText "sudoers-in" cfg.configFile; } {
src = pkgs.writeText "sudoers-in" cfg.configFile;
preferLocalBuild = true;
}
# Make sure that the sudoers file is syntactically valid. # Make sure that the sudoers file is syntactically valid.
# (currently disabled - NIXOS-66) # (currently disabled - NIXOS-66)
"${pkgs.buildPackages.sudo}/sbin/visudo -f $src -c && cp $src $out"; "${pkgs.buildPackages.sudo}/sbin/visudo -f $src -c && cp $src $out";

View File

@ -0,0 +1,217 @@
{ config, lib, pkgs, ... }:
with lib;
let
package = "snapcast";
name = "snapserver";
cfg = config.services.snapserver;
# Using types.nullOr to inherit upstream defaults.
sampleFormat = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Default sample format.
'';
example = "48000:16:2";
};
codec = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Default audio compression method.
'';
example = "flac";
};
streamToOption = name: opt:
let
os = val:
optionalString (val != null) "${val}";
os' = prefix: val:
optionalString (val != null) (prefix + "${val}");
flatten = key: value:
"&${key}=${value}";
in
"-s ${opt.type}://" + os opt.location + "?" + os' "name=" name
+ concatStrings (mapAttrsToList flatten opt.query);
optionalNull = val: ret:
optional (val != null) ret;
optionString = concatStringsSep " " (mapAttrsToList streamToOption cfg.streams
++ ["-p ${toString cfg.port}"]
++ ["--controlPort ${toString cfg.controlPort}"]
++ optionalNull cfg.sampleFormat "--sampleFormat ${cfg.sampleFormat}"
++ optionalNull cfg.codec "-c ${cfg.codec}"
++ optionalNull cfg.streamBuffer "--streamBuffer ${cfg.streamBuffer}"
++ optionalNull cfg.buffer "-b ${cfg.buffer}"
++ optional cfg.sendToMuted "--sendToMuted");
in {
###### interface
options = {
services.snapserver = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable snapserver.
'';
};
port = mkOption {
type = types.port;
default = 1704;
description = ''
The port that snapclients can connect to.
'';
};
controlPort = mkOption {
type = types.port;
default = 1705;
description = ''
The port for control connections (JSON-RPC).
'';
};
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
Whether to automatically open the specified ports in the firewall.
'';
};
inherit sampleFormat;
inherit codec;
streams = mkOption {
type = with types; attrsOf (submodule {
options = {
location = mkOption {
type = types.path;
description = ''
The location of the pipe.
'';
};
type = mkOption {
type = types.enum [ "pipe" "file" "process" "spotify" "airplay" ];
default = "pipe";
description = ''
The type of input stream.
'';
};
query = mkOption {
type = attrsOf str;
default = {};
description = ''
Key-value pairs that convey additional parameters about a stream.
'';
example = literalExample ''
# for type == "pipe":
{
mode = "listen";
};
# for type == "process":
{
params = "--param1 --param2";
logStderr = "true";
};
'';
};
inherit sampleFormat;
inherit codec;
};
});
default = { default = {}; };
description = ''
The definitions of the input sources.
'';
example = literalExample ''
{
mpd = {
type = "pipe";
location = "/run/snapserver/mpd";
sampleFormat = "48000:16:2";
codec = "pcm";
};
};
'';
};
streamBuffer = mkOption {
type = with types; nullOr int;
default = null;
description = ''
Stream read (input) buffer in ms.
'';
example = 20;
};
buffer = mkOption {
type = with types; nullOr int;
default = null;
description = ''
Network buffer in ms.
'';
example = 1000;
};
sendToMuted = mkOption {
type = types.bool;
default = false;
description = ''
Send audio to muted clients.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.snapserver = {
after = [ "network.target" ];
description = "Snapserver";
wantedBy = [ "multi-user.target" ];
before = [ "mpd.service" "mopidy.service" ];
serviceConfig = {
DynamicUser = true;
ExecStart = "${pkgs.snapcast}/bin/snapserver --daemon ${optionString}";
Type = "forking";
LimitRTPRIO = 50;
LimitRTTIME = "infinity";
NoNewPrivileges = true;
PIDFile = "/run/${name}/pid";
ProtectKernelTunables = true;
ProtectControlGroups = true;
ProtectKernelModules = true;
RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
RestrictNamespaces = true;
RuntimeDirectory = name;
StateDirectory = name;
};
};
networking.firewall.allowedTCPPorts = optionals cfg.openFirewall [ cfg.port cfg.controlPort ];
};
meta = {
maintainers = with maintainers; [ tobim ];
};
}
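For the mpd stream documented in the example above (empty query, default ports), streamToOption and optionString render roughly the following invocation, derived by reading the code; note that the per-stream sampleFormat/codec attributes are not part of the rendered URL as the code stands:

  snapserver --daemon -s pipe:///run/snapserver/mpd?name=mpd -p 1704 --controlPort 1705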

View File

@ -3,8 +3,7 @@
with lib; with lib;
let let
dataDir = "/var/lib/squeezelite";
uid = config.ids.uids.squeezelite;
cfg = config.services.squeezelite; cfg = config.services.squeezelite;
in { in {
@ -17,14 +16,6 @@ in {
enable = mkEnableOption "Squeezelite, a software Squeezebox emulator"; enable = mkEnableOption "Squeezelite, a software Squeezebox emulator";
dataDir = mkOption {
default = "/var/lib/squeezelite";
type = types.str;
description = ''
The directory where Squeezelite stores its name file.
'';
};
extraArguments = mkOption { extraArguments = mkOption {
default = ""; default = "";
type = types.str; type = types.str;
@ -46,22 +37,14 @@ in {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "network.target" "sound.target" ]; after = [ "network.target" "sound.target" ];
description = "Software Squeezebox emulator"; description = "Software Squeezebox emulator";
preStart = "mkdir -p ${cfg.dataDir} && chown -R squeezelite ${cfg.dataDir}";
serviceConfig = { serviceConfig = {
ExecStart = "${pkgs.squeezelite}/bin/squeezelite -N ${cfg.dataDir}/player-name ${cfg.extraArguments}"; DynamicUser = true;
User = "squeezelite"; ExecStart = "${pkgs.squeezelite}/bin/squeezelite -N ${dataDir}/player-name ${cfg.extraArguments}";
PermissionsStartOnly = true; StateDirectory = builtins.baseNameOf dataDir;
SupplementaryGroups = "audio";
}; };
}; };
users.users.squeezelite= {
inherit uid;
group = "nogroup";
extraGroups = [ "audio" ];
description = "Squeezelite user";
home = "${cfg.dataDir}";
};
}; };
} }

View File

@ -0,0 +1,141 @@
{ config, lib, pkgs, ...}:
with lib;
let
cfg = config.services.duplicity;
stateDirectory = "/var/lib/duplicity";
localTarget = if hasPrefix "file://" cfg.targetUrl
then removePrefix "file://" cfg.targetUrl else null;
in {
options.services.duplicity = {
enable = mkEnableOption "backups with duplicity";
root = mkOption {
type = types.path;
default = "/";
description = ''
Root directory to back up.
'';
};
include = mkOption {
type = types.listOf types.str;
default = [];
example = [ "/home" ];
description = ''
List of paths to include into the backups. See the FILE SELECTION
section in <citerefentry><refentrytitle>duplicity</refentrytitle>
<manvolnum>1</manvolnum></citerefentry> for details on the syntax.
'';
};
exclude = mkOption {
type = types.listOf types.str;
default = [];
description = ''
List of paths to exclude from backups. See the FILE SELECTION section in
<citerefentry><refentrytitle>duplicity</refentrytitle>
<manvolnum>1</manvolnum></citerefentry> for details on the syntax.
'';
};
targetUrl = mkOption {
type = types.str;
example = "s3://host:port/prefix";
description = ''
Target URL to back up to. See the URL FORMAT section in
<citerefentry><refentrytitle>duplicity</refentrytitle>
<manvolnum>1</manvolnum></citerefentry> for supported URLs.
'';
};
secretFile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Path of a file containing secrets (gpg passphrase, access key...) in
the format of EnvironmentFile as described by
<citerefentry><refentrytitle>systemd.exec</refentrytitle>
<manvolnum>5</manvolnum></citerefentry>. For example:
<programlisting>
PASSPHRASE=<replaceable>...</replaceable>
AWS_ACCESS_KEY_ID=<replaceable>...</replaceable>
AWS_SECRET_ACCESS_KEY=<replaceable>...</replaceable>
</programlisting>
'';
};
frequency = mkOption {
type = types.nullOr types.str;
default = "daily";
description = ''
Run duplicity with the given frequency (see
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>7</manvolnum></citerefentry> for the format).
If null, do not run automatically.
'';
};
extraFlags = mkOption {
type = types.listOf types.str;
default = [];
example = [ "--full-if-older-than" "1M" ];
description = ''
Extra command-line flags passed to duplicity. See
<citerefentry><refentrytitle>duplicity</refentrytitle>
<manvolnum>1</manvolnum></citerefentry>.
'';
};
};
config = mkIf cfg.enable {
systemd = {
services.duplicity = {
description = "backup files with duplicity";
environment.HOME = stateDirectory;
serviceConfig = {
ExecStart = ''
${pkgs.duplicity}/bin/duplicity ${escapeShellArgs (
[
cfg.root
cfg.targetUrl
"--archive-dir" stateDirectory
]
++ concatMap (p: [ "--include" p ]) cfg.include
++ concatMap (p: [ "--exclude" p ]) cfg.exclude
++ cfg.extraFlags)}
'';
PrivateTmp = true;
ProtectSystem = "strict";
ProtectHome = "read-only";
StateDirectory = baseNameOf stateDirectory;
} // optionalAttrs (localTarget != null) {
ReadWritePaths = localTarget;
} // optionalAttrs (cfg.secretFile != null) {
EnvironmentFile = cfg.secretFile;
};
} // optionalAttrs (cfg.frequency != null) {
startAt = cfg.frequency;
};
tmpfiles.rules = optional (localTarget != null) "d ${localTarget} 0700 root root -";
};
assertions = singleton {
# Duplicity will fail if the last file selection option is an include. It
# is not always possible to detect but this simple case can be caught.
assertion = cfg.include != [] -> cfg.exclude != [] || cfg.extraFlags != [];
message = ''
Duplicity will fail if you only specify included paths ("Because the
default is to include all files, the expression is redundant. Exiting
because this probably isn't what you meant.")
'';
};
};
}
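A minimal usage sketch (paths hypothetical; a local file:// target is created automatically by the tmpfiles rule above):

  services.duplicity = {
    enable = true;
    root = "/home";
    targetUrl = "file:///var/backup/home";
    secretFile = "/etc/nixos/duplicity.env"; # provides PASSPHRASE=... as an EnvironmentFile
    extraFlags = [ "--full-if-older-than" "1M" ];
  };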

View File

@ -0,0 +1,167 @@
{ config, lib, pkgs, ... }:
with lib;
let
top = config.services.kubernetes;
cfg = top.addonManager;
isRBACEnabled = elem "RBAC" top.apiserver.authorizationMode;
addons = pkgs.runCommand "kubernetes-addons" { } ''
mkdir -p $out
# since we are mounting the addons to the addon manager, they need to be copied
${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon:
pkgs.writeTextDir "${name}.json" (builtins.toJSON addon)
) (cfg.addons))}
'';
in
{
###### interface
options.services.kubernetes.addonManager = with lib.types; {
bootstrapAddons = mkOption {
description = ''
Bootstrap addons are like regular addons, but they are applied with cluster-admin rights.
They are applied at addon-manager startup only.
'';
default = { };
type = attrsOf attrs;
example = literalExample ''
{
"my-service" = {
"apiVersion" = "v1";
"kind" = "Service";
"metadata" = {
"name" = "my-service";
"namespace" = "default";
};
"spec" = { ... };
};
}
'';
};
addons = mkOption {
description = "Kubernetes addons (any kind of Kubernetes resource can be an addon).";
default = { };
type = attrsOf (either attrs (listOf attrs));
example = literalExample ''
{
"my-service" = {
"apiVersion" = "v1";
"kind" = "Service";
"metadata" = {
"name" = "my-service";
"namespace" = "default";
};
"spec" = { ... };
};
}
// import <nixpkgs/nixos/modules/services/cluster/kubernetes/dashboard.nix> { cfg = config.services.kubernetes; };
'';
};
enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
};
###### implementation
config = mkIf cfg.enable {
environment.etc."kubernetes/addons".source = "${addons}/";
systemd.services.kube-addon-manager = {
description = "Kubernetes addon manager";
wantedBy = [ "kubernetes.target" ];
after = [ "kube-apiserver.service" ];
environment.ADDON_PATH = "/etc/kubernetes/addons/";
path = [ pkgs.gawk ];
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = "${top.package}/bin/kube-addons";
WorkingDirectory = top.dataDir;
User = "kubernetes";
Group = "kubernetes";
Restart = "on-failure";
RestartSec = 10;
};
};
services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
(let
name = "system:kube-addon-manager";
namespace = "kube-system";
in
{
kube-addon-manager-r = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "Role";
metadata = {
inherit name namespace;
};
rules = [{
apiGroups = ["*"];
resources = ["*"];
verbs = ["*"];
}];
};
kube-addon-manager-rb = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "RoleBinding";
metadata = {
inherit name namespace;
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "Role";
inherit name;
};
subjects = [{
apiGroup = "rbac.authorization.k8s.io";
kind = "User";
inherit name;
}];
};
kube-addon-manager-cluster-lister-cr = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "ClusterRole";
metadata = {
name = "${name}:cluster-lister";
};
rules = [{
apiGroups = ["*"];
resources = ["*"];
verbs = ["list"];
}];
};
kube-addon-manager-cluster-lister-crb = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "ClusterRoleBinding";
metadata = {
name = "${name}:cluster-lister";
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "ClusterRole";
name = "${name}:cluster-lister";
};
subjects = [{
kind = "User";
inherit name;
}];
};
});
services.kubernetes.pki.certs = {
addonManager = top.lib.mkCert {
name = "kube-addon-manager";
CN = "system:kube-addon-manager";
action = "systemctl restart kube-addon-manager.service";
};
};
};
}

View File

@ -38,6 +38,18 @@ in {
type = types.int; type = types.int;
}; };
reconcileMode = mkOption {
description = ''
Controls the addon manager reconciliation mode for the DNS addon.
Setting reconcile mode to EnsureExists makes it possible to tailor DNS behavior by editing the coredns ConfigMap.
See: <link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/addon-manager/README.md"/>.
'';
default = "Reconcile";
type = types.enum [ "Reconcile" "EnsureExists" ];
};
coredns = mkOption { coredns = mkOption {
description = "Docker image to seed for the CoreDNS container."; description = "Docker image to seed for the CoreDNS container.";
type = types.attrs; type = types.attrs;
@ -54,21 +66,7 @@ in {
services.kubernetes.kubelet.seedDockerImages = services.kubernetes.kubelet.seedDockerImages =
singleton (pkgs.dockerTools.pullImage cfg.coredns); singleton (pkgs.dockerTools.pullImage cfg.coredns);
services.kubernetes.addonManager.addons = { services.kubernetes.addonManager.bootstrapAddons = {
coredns-sa = {
apiVersion = "v1";
kind = "ServiceAccount";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
};
name = "coredns";
namespace = "kube-system";
};
};
coredns-cr = { coredns-cr = {
apiVersion = "rbac.authorization.k8s.io/v1beta1"; apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "ClusterRole"; kind = "ClusterRole";
@ -123,13 +121,29 @@ in {
} }
]; ];
}; };
};
services.kubernetes.addonManager.addons = {
coredns-sa = {
apiVersion = "v1";
kind = "ServiceAccount";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
};
name = "coredns";
namespace = "kube-system";
};
};
coredns-cm = { coredns-cm = {
apiVersion = "v1"; apiVersion = "v1";
kind = "ConfigMap"; kind = "ConfigMap";
metadata = { metadata = {
labels = { labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile"; "addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
"k8s-app" = "kube-dns"; "k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true"; "kubernetes.io/cluster-service" = "true";
}; };
@ -160,7 +174,7 @@ in {
kind = "Deployment"; kind = "Deployment";
metadata = { metadata = {
labels = { labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile"; "addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
"k8s-app" = "kube-dns"; "k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true"; "kubernetes.io/cluster-service" = "true";
"kubernetes.io/name" = "CoreDNS"; "kubernetes.io/name" = "CoreDNS";

View File

@ -0,0 +1,428 @@
{ config, lib, pkgs, ... }:
with lib;
let
top = config.services.kubernetes;
cfg = top.apiserver;
isRBACEnabled = elem "RBAC" cfg.authorizationMode;
apiserverServiceIP = (concatStringsSep "." (
take 3 (splitString "." cfg.serviceClusterIpRange
)) + ".1");
in
{
###### interface
options.services.kubernetes.apiserver = with lib.types; {
advertiseAddress = mkOption {
description = ''
Kubernetes apiserver IP address on which to advertise the apiserver
to members of the cluster. This address must be reachable by the rest
of the cluster.
'';
default = null;
type = nullOr str;
};
allowPrivileged = mkOption {
description = "Whether to allow privileged containers on Kubernetes.";
default = false;
type = bool;
};
authorizationMode = mkOption {
description = ''
Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
'';
default = ["RBAC" "Node"]; # Enabling RBAC by default, although kubernetes default is AllowAllow
type = listOf (enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
};
authorizationPolicy = mkOption {
description = ''
Kubernetes apiserver authorization policy file. See
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
'';
default = [];
type = listOf attrs;
};
basicAuthFile = mkOption {
description = ''
Kubernetes apiserver basic authentication file. See
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
'';
default = null;
type = nullOr path;
};
bindAddress = mkOption {
description = ''
The IP address on which to listen for the --secure-port port.
The associated interface(s) must be reachable by the rest
of the cluster, and by CLI/web clients.
'';
default = "0.0.0.0";
type = str;
};
clientCaFile = mkOption {
description = "Kubernetes apiserver CA file for client auth.";
default = top.caFile;
type = nullOr path;
};
disableAdmissionPlugins = mkOption {
description = ''
Kubernetes admission control plugins to disable. See
<link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
'';
default = [];
type = listOf str;
};
enable = mkEnableOption "Kubernetes apiserver";
enableAdmissionPlugins = mkOption {
description = ''
Kubernetes admission control plugins to enable. See
<link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
'';
default = [
"NamespaceLifecycle" "LimitRanger" "ServiceAccount"
"ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"
"NodeRestriction"
];
example = [
"NamespaceLifecycle" "NamespaceExists" "LimitRanger"
"SecurityContextDeny" "ServiceAccount" "ResourceQuota"
"PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
];
type = listOf str;
};
etcd = {
servers = mkOption {
description = "List of etcd servers.";
default = ["http://127.0.0.1:2379"];
type = types.listOf types.str;
};
keyFile = mkOption {
description = "Etcd key file.";
default = null;
type = types.nullOr types.path;
};
certFile = mkOption {
description = "Etcd cert file.";
default = null;
type = types.nullOr types.path;
};
caFile = mkOption {
description = "Etcd ca file.";
default = top.caFile;
type = types.nullOr types.path;
};
};
extraOpts = mkOption {
description = "Kubernetes apiserver extra command line options.";
default = "";
type = str;
};
extraSANs = mkOption {
description = "Extra x509 Subject Alternative Names to be added to the kubernetes apiserver tls cert.";
default = [];
type = listOf str;
};
featureGates = mkOption {
description = "List set of feature gates";
default = top.featureGates;
type = listOf str;
};
insecureBindAddress = mkOption {
description = "The IP address on which to serve the --insecure-port.";
default = "127.0.0.1";
type = str;
};
insecurePort = mkOption {
description = "Kubernetes apiserver insecure listening port. (0 = disabled)";
default = 0;
type = int;
};
kubeletClientCaFile = mkOption {
description = "Path to a cert file for connecting to kubelet.";
default = top.caFile;
type = nullOr path;
};
kubeletClientCertFile = mkOption {
description = "Client certificate to use for connections to kubelet.";
default = null;
type = nullOr path;
};
kubeletClientKeyFile = mkOption {
description = "Key to use for connections to kubelet.";
default = null;
type = nullOr path;
};
kubeletHttps = mkOption {
description = "Whether to use https for connections to kubelet.";
default = true;
type = bool;
};
runtimeConfig = mkOption {
description = ''
Api runtime configuration. See
<link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
'';
default = "authentication.k8s.io/v1beta1=true";
example = "api/all=false,api/v1=true";
type = str;
};
storageBackend = mkOption {
description = ''
Kubernetes apiserver storage backend.
'';
default = "etcd3";
type = enum ["etcd2" "etcd3"];
};
securePort = mkOption {
description = "Kubernetes apiserver secure port.";
default = 6443;
type = int;
};
serviceAccountKeyFile = mkOption {
description = ''
Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
used to verify ServiceAccount tokens. By default tls private key file
is used.
'';
default = null;
type = nullOr path;
};
serviceClusterIpRange = mkOption {
description = ''
A CIDR notation IP range from which to assign service cluster IPs.
This must not overlap with any IP ranges assigned to nodes for pods.
'';
default = "10.0.0.0/24";
type = str;
};
tlsCertFile = mkOption {
description = "Kubernetes apiserver certificate file.";
default = null;
type = nullOr path;
};
tlsKeyFile = mkOption {
description = "Kubernetes apiserver private key file.";
default = null;
type = nullOr path;
};
tokenAuthFile = mkOption {
description = ''
Kubernetes apiserver token authentication file. See
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
'';
default = null;
type = nullOr path;
};
verbosity = mkOption {
description = ''
Optional glog verbosity level for logging statements. See
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
'';
default = null;
type = nullOr int;
};
webhookConfig = mkOption {
description = ''
Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
'';
default = null;
type = nullOr path;
};
};
###### implementation
config = mkMerge [
(mkIf cfg.enable {
systemd.services.kube-apiserver = {
description = "Kubernetes APIServer Service";
wantedBy = [ "kubernetes.target" ];
after = [ "network.target" ];
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = ''${top.package}/bin/kube-apiserver \
--allow-privileged=${boolToString cfg.allowPrivileged} \
--authorization-mode=${concatStringsSep "," cfg.authorizationMode} \
${optionalString (elem "ABAC" cfg.authorizationMode)
"--authorization-policy-file=${
pkgs.writeText "kube-auth-policy.jsonl"
(concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)
}"
} \
${optionalString (elem "Webhook" cfg.authorizationMode)
"--authorization-webhook-config-file=${cfg.webhookConfig}"
} \
--bind-address=${cfg.bindAddress} \
${optionalString (cfg.advertiseAddress != null)
"--advertise-address=${cfg.advertiseAddress}"} \
${optionalString (cfg.clientCaFile != null)
"--client-ca-file=${cfg.clientCaFile}"} \
--disable-admission-plugins=${concatStringsSep "," cfg.disableAdmissionPlugins} \
--enable-admission-plugins=${concatStringsSep "," cfg.enableAdmissionPlugins} \
--etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
${optionalString (cfg.etcd.caFile != null)
"--etcd-cafile=${cfg.etcd.caFile}"} \
${optionalString (cfg.etcd.certFile != null)
"--etcd-certfile=${cfg.etcd.certFile}"} \
${optionalString (cfg.etcd.keyFile != null)
"--etcd-keyfile=${cfg.etcd.keyFile}"} \
${optionalString (cfg.featureGates != [])
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
${optionalString (cfg.basicAuthFile != null)
"--basic-auth-file=${cfg.basicAuthFile}"} \
--kubelet-https=${boolToString cfg.kubeletHttps} \
${optionalString (cfg.kubeletClientCaFile != null)
"--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"} \
${optionalString (cfg.kubeletClientCertFile != null)
"--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
${optionalString (cfg.kubeletClientKeyFile != null)
"--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
--insecure-bind-address=${cfg.insecureBindAddress} \
--insecure-port=${toString cfg.insecurePort} \
${optionalString (cfg.runtimeConfig != "")
"--runtime-config=${cfg.runtimeConfig}"} \
--secure-port=${toString cfg.securePort} \
${optionalString (cfg.serviceAccountKeyFile!=null)
"--service-account-key-file=${cfg.serviceAccountKeyFile}"} \
--service-cluster-ip-range=${cfg.serviceClusterIpRange} \
--storage-backend=${cfg.storageBackend} \
${optionalString (cfg.tlsCertFile != null)
"--tls-cert-file=${cfg.tlsCertFile}"} \
${optionalString (cfg.tlsKeyFile != null)
"--tls-private-key-file=${cfg.tlsKeyFile}"} \
${optionalString (cfg.tokenAuthFile != null)
"--token-auth-file=${cfg.tokenAuthFile}"} \
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
${cfg.extraOpts}
'';
WorkingDirectory = top.dataDir;
User = "kubernetes";
Group = "kubernetes";
AmbientCapabilities = "cap_net_bind_service";
Restart = "on-failure";
RestartSec = 5;
};
};
services.etcd = {
clientCertAuth = mkDefault true;
peerClientCertAuth = mkDefault true;
listenClientUrls = mkDefault ["https://0.0.0.0:2379"];
listenPeerUrls = mkDefault ["https://0.0.0.0:2380"];
advertiseClientUrls = mkDefault ["https://${top.masterAddress}:2379"];
initialCluster = mkDefault ["${top.masterAddress}=https://${top.masterAddress}:2380"];
name = mkDefault top.masterAddress;
initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
};
services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
apiserver-kubelet-api-admin-crb = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "ClusterRoleBinding";
metadata = {
name = "system:kube-apiserver:kubelet-api-admin";
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "ClusterRole";
name = "system:kubelet-api-admin";
};
subjects = [{
kind = "User";
name = "system:kube-apiserver";
}];
};
};
services.kubernetes.pki.certs = with top.lib; {
apiServer = mkCert {
name = "kube-apiserver";
CN = "kubernetes";
hosts = [
"kubernetes.default.svc"
"kubernetes.default.svc.${top.addons.dns.clusterDomain}"
cfg.advertiseAddress
top.masterAddress
apiserverServiceIP
"127.0.0.1"
] ++ cfg.extraSANs;
action = "systemctl restart kube-apiserver.service";
};
apiserverKubeletClient = mkCert {
name = "kube-apiserver-kubelet-client";
CN = "system:kube-apiserver";
action = "systemctl restart kube-apiserver.service";
};
apiserverEtcdClient = mkCert {
name = "kube-apiserver-etcd-client";
CN = "etcd-client";
action = "systemctl restart kube-apiserver.service";
};
clusterAdmin = mkCert {
name = "cluster-admin";
CN = "cluster-admin";
fields = {
O = "system:masters";
};
privateKeyOwner = "root";
};
etcd = mkCert {
name = "etcd";
CN = top.masterAddress;
hosts = [
"etcd.local"
"etcd.${top.addons.dns.clusterDomain}"
top.masterAddress
cfg.advertiseAddress
];
privateKeyOwner = "etcd";
action = "systemctl restart etcd.service";
};
};
})
];
}

View File

@ -0,0 +1,162 @@
{ config, lib, pkgs, ... }:
with lib;
let
top = config.services.kubernetes;
cfg = top.controllerManager;
in
{
###### interface
options.services.kubernetes.controllerManager = with lib.types; {
allocateNodeCIDRs = mkOption {
description = "Whether to automatically allocate CIDR ranges for cluster nodes.";
default = true;
type = bool;
};
bindAddress = mkOption {
description = "Kubernetes controller manager listening address.";
default = "127.0.0.1";
type = str;
};
clusterCidr = mkOption {
description = "Kubernetes CIDR Range for Pods in cluster.";
default = top.clusterCidr;
type = str;
};
enable = mkEnableOption "Kubernetes controller manager.";
extraOpts = mkOption {
description = "Kubernetes controller manager extra command line options.";
default = "";
type = str;
};
featureGates = mkOption {
description = "List set of feature gates";
default = top.featureGates;
type = listOf str;
};
insecurePort = mkOption {
description = "Kubernetes controller manager insecure listening port.";
default = 0;
type = int;
};
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";
leaderElect = mkOption {
description = "Whether to start leader election before executing main loop.";
type = bool;
default = true;
};
rootCaFile = mkOption {
description = ''
Kubernetes controller manager certificate authority file included in
service account's token secret.
'';
default = top.caFile;
type = nullOr path;
};
securePort = mkOption {
description = "Kubernetes controller manager secure listening port.";
default = 10252;
type = int;
};
serviceAccountKeyFile = mkOption {
description = ''
Kubernetes controller manager PEM-encoded private RSA key file used to
sign service account tokens
'';
default = null;
type = nullOr path;
};
tlsCertFile = mkOption {
description = "Kubernetes controller-manager certificate file.";
default = null;
type = nullOr path;
};
tlsKeyFile = mkOption {
description = "Kubernetes controller-manager private key file.";
default = null;
type = nullOr path;
};
verbosity = mkOption {
description = ''
Optional glog verbosity level for logging statements. See
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
'';
default = null;
type = nullOr int;
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.kube-controller-manager = {
description = "Kubernetes Controller Manager Service";
wantedBy = [ "kubernetes.target" ];
after = [ "kube-apiserver.service" ];
serviceConfig = {
RestartSec = "30s";
Restart = "on-failure";
Slice = "kubernetes.slice";
ExecStart = ''${top.package}/bin/kube-controller-manager \
--allocate-node-cidrs=${boolToString cfg.allocateNodeCIDRs} \
--bind-address=${cfg.bindAddress} \
${optionalString (cfg.clusterCidr!=null)
"--cluster-cidr=${cfg.clusterCidr}"} \
${optionalString (cfg.featureGates != [])
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
--kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
--leader-elect=${boolToString cfg.leaderElect} \
${optionalString (cfg.rootCaFile!=null)
"--root-ca-file=${cfg.rootCaFile}"} \
--port=${toString cfg.insecurePort} \
--secure-port=${toString cfg.securePort} \
${optionalString (cfg.serviceAccountKeyFile!=null)
"--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
${optionalString (cfg.tlsCertFile!=null)
"--tls-cert-file=${cfg.tlsCertFile}"} \
${optionalString (cfg.tlsKeyFile!=null)
"--tls-private-key-file=${cfg.tlsKeyFile}"} \
${optionalString (elem "RBAC" top.apiserver.authorizationMode)
"--use-service-account-credentials"} \
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
${cfg.extraOpts}
'';
WorkingDirectory = top.dataDir;
User = "kubernetes";
Group = "kubernetes";
};
path = top.path;
};
services.kubernetes.pki.certs = with top.lib; {
controllerManager = mkCert {
name = "kube-controller-manager";
CN = "kube-controller-manager";
action = "systemctl restart kube-controller-manager.service";
};
controllerManagerClient = mkCert {
name = "kube-controller-manager-client";
CN = "system:kube-controller-manager";
action = "systemctl restart kube-controller-manager.service";
};
};
services.kubernetes.controllerManager.kubeconfig.server = mkDefault top.apiserverAddress;
};
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,134 @@
{ config, lib, pkgs, ... }:
with lib;
let
top = config.services.kubernetes;
cfg = top.flannel;
# We want flannel to use Kubernetes itself as the configuration backend, not etcd directly.
storageBackend = "kubernetes";
# needed for flannel to pass options to docker
mkDockerOpts = pkgs.runCommand "mk-docker-opts" {
buildInputs = [ pkgs.makeWrapper ];
} ''
mkdir -p $out
cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh
# bashInteractive needed for `compgen`
makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
'';
in
{
###### interface
options.services.kubernetes.flannel = {
enable = mkEnableOption "enable flannel networking";
};
###### implementation
config = mkIf cfg.enable {
services.flannel = {
enable = mkDefault true;
network = mkDefault top.clusterCidr;
inherit storageBackend;
nodeName = config.services.kubernetes.kubelet.hostname;
};
services.kubernetes.kubelet = {
networkPlugin = mkDefault "cni";
cni.config = mkDefault [{
name = "mynet";
type = "flannel";
delegate = {
isDefaultGateway = true;
bridge = "docker0";
};
}];
};
systemd.services."mk-docker-opts" = {
description = "Pre-Docker Actions";
path = with pkgs; [ gawk gnugrep ];
script = ''
${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
systemctl restart docker
'';
serviceConfig.Type = "oneshot";
};
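# Trigger mk-docker-opts (and thereby a docker restart) whenever flannel
# rewrites /run/flannel/subnet.env with a new subnet lease: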
systemd.paths."flannel-subnet-env" = {
wantedBy = [ "flannel.service" ];
pathConfig = {
PathModified = "/run/flannel/subnet.env";
Unit = "mk-docker-opts.service";
};
};
systemd.services.docker = {
environment.DOCKER_OPTS = "-b none";
serviceConfig.EnvironmentFile = "-/run/flannel/docker";
};
# read environment variables generated by mk-docker-opts
virtualisation.docker.extraOptions = "$DOCKER_OPTS";
networking = {
firewall.allowedUDPPorts = [
8285 # flannel udp
8472 # flannel vxlan
];
dhcpcd.denyInterfaces = [ "docker*" "flannel*" ];
};
services.kubernetes.pki.certs = {
flannelClient = top.lib.mkCert {
name = "flannel-client";
CN = "flannel-client";
action = "systemctl restart flannel.service";
};
};
# Give flannel some Kubernetes RBAC permissions if applicable.
services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
flannel-cr = {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "ClusterRole";
metadata = { name = "flannel"; };
rules = [{
apiGroups = [ "" ];
resources = [ "pods" ];
verbs = [ "get" ];
}
{
apiGroups = [ "" ];
resources = [ "nodes" ];
verbs = [ "list" "watch" ];
}
{
apiGroups = [ "" ];
resources = [ "nodes/status" ];
verbs = [ "patch" ];
}];
};
flannel-crb = {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "ClusterRoleBinding";
metadata = { name = "flannel"; };
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "ClusterRole";
name = "flannel";
};
subjects = [{
kind = "User";
name = "flannel-client";
}];
};
};
};
}
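A minimal sketch of how this module might be enabled; clusterCidr and the apiserver options it reads are assumed to be configured elsewhere:

# Hypothetical usage sketch.
services.kubernetes.flannel.enable = true;
# The module then defaults services.flannel.network to the cluster CIDR
# and switches the kubelet to the "cni" network plugin (see above).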

View File

@@ -0,0 +1,358 @@
{ config, lib, pkgs, ... }:
with lib;
let
top = config.services.kubernetes;
cfg = top.kubelet;
cniConfig =
if cfg.cni.config != [] && !(isNull cfg.cni.configDir) then
throw "Verbatim CNI-config and CNI configDir cannot both be set."
else if !(isNull cfg.cni.configDir) then
cfg.cni.configDir
else
(pkgs.buildEnv {
name = "kubernetes-cni-config";
paths = imap (i: entry:
pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
) cfg.cni.config;
});
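# A local "pause" image built from the kubernetes source tree; it is seeded
# into docker (via seedDockerImages below) so the kubelet can start pods
# without pulling the infra container from a registry.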
infraContainer = pkgs.dockerTools.buildImage {
name = "pause";
tag = "latest";
contents = top.package.pause;
config.Cmd = "/bin/pause";
};
kubeconfig = top.lib.mkKubeConfig "kubelet" cfg.kubeconfig;
manifests = pkgs.buildEnv {
name = "kubernetes-manifests";
paths = mapAttrsToList (name: manifest:
pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
) cfg.manifests;
};
manifestPath = "kubernetes/manifests";
taintOptions = with lib.types; { name, ... }: {
options = {
key = mkOption {
description = "Key of taint.";
default = name;
type = str;
};
value = mkOption {
description = "Value of taint.";
type = str;
};
effect = mkOption {
description = "Effect of taint.";
example = "NoSchedule";
type = enum ["NoSchedule" "PreferNoSchedule" "NoExecute"];
};
};
};
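# Renders cfg.taints as comma-separated "key=value:effect" triples, e.g.
#   { unschedulable = { value = "true"; effect = "NoSchedule"; }; }
# becomes "unschedulable=true:NoSchedule".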
taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (mapAttrsToList (n: v: v) cfg.taints);
in
{
###### interface
options.services.kubernetes.kubelet = with lib.types; {
address = mkOption {
description = "Kubernetes kubelet info server listening address.";
default = "0.0.0.0";
type = str;
};
allowPrivileged = mkOption {
description = "Whether to allow Kubernetes containers to request privileged mode.";
default = false;
type = bool;
};
clusterDns = mkOption {
description = "Use alternative DNS.";
default = "10.1.0.1";
type = str;
};
clusterDomain = mkOption {
description = "Use alternative domain.";
default = config.services.kubernetes.addons.dns.clusterDomain;
type = str;
};
clientCaFile = mkOption {
description = "Kubernetes apiserver CA file for client authentication.";
default = top.caFile;
type = nullOr path;
};
cni = {
packages = mkOption {
description = "List of network plugin packages to install.";
type = listOf package;
default = [];
};
config = mkOption {
description = "Kubernetes CNI configuration.";
type = listOf attrs;
default = [];
example = literalExample ''
[{
"cniVersion": "0.2.0",
"name": "mynet",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "10.22.0.0/16",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
} {
"cniVersion": "0.2.0",
"type": "loopback"
}]
'';
};
configDir = mkOption {
description = "Path to Kubernetes CNI configuration directory.";
type = nullOr path;
default = null;
};
};
enable = mkEnableOption "Kubernetes kubelet.";
extraOpts = mkOption {
description = "Kubernetes kubelet extra command line options.";
default = "";
type = str;
};
featureGates = mkOption {
description = "List set of feature gates";
default = top.featureGates;
type = listOf str;
};
healthz = {
bind = mkOption {
description = "Kubernetes kubelet healthz listening address.";
default = "127.0.0.1";
type = str;
};
port = mkOption {
description = "Kubernetes kubelet healthz port.";
default = 10248;
type = int;
};
};
hostname = mkOption {
description = "Kubernetes kubelet hostname override.";
default = config.networking.hostName;
type = str;
};
kubeconfig = top.lib.mkKubeConfigOptions "Kubelet";
manifests = mkOption {
description = "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry)";
type = attrsOf attrs;
default = {};
};
networkPlugin = mkOption {
description = "Network plugin to use by Kubernetes.";
type = nullOr (enum ["cni" "kubenet"]);
default = "kubenet";
};
nodeIp = mkOption {
description = "IP address of the node. If set, kubelet will use this IP address for the node.";
default = null;
type = nullOr str;
};
registerNode = mkOption {
description = "Whether to auto register kubelet with API server.";
default = true;
type = bool;
};
port = mkOption {
description = "Kubernetes kubelet info server listening port.";
default = 10250;
type = int;
};
seedDockerImages = mkOption {
description = "List of docker images to preload on system";
default = [];
type = listOf package;
};
taints = mkOption {
description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).";
default = {};
type = attrsOf (submodule [ taintOptions ]);
};
tlsCertFile = mkOption {
description = "File containing x509 Certificate for HTTPS.";
default = null;
type = nullOr path;
};
tlsKeyFile = mkOption {
description = "File containing x509 private key matching tlsCertFile.";
default = null;
type = nullOr path;
};
unschedulable = mkOption {
description = "Whether to set node taint to unschedulable=true as it is the case of node that has only master role.";
default = false;
type = bool;
};
verbosity = mkOption {
description = ''
Optional glog verbosity level for logging statements. See
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
'';
default = null;
type = nullOr int;
};
};
###### implementation
config = mkMerge [
(mkIf cfg.enable {
services.kubernetes.kubelet.seedDockerImages = [infraContainer];
systemd.services.kubelet = {
description = "Kubernetes Kubelet Service";
wantedBy = [ "kubernetes.target" ];
after = [ "network.target" "docker.service" "kube-apiserver.service" ];
path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
preStart = ''
${concatMapStrings (img: ''
echo "Seeding docker image: ${img}"
docker load <${img}
'') cfg.seedDockerImages}
rm /opt/cni/bin/* || true
${concatMapStrings (package: ''
echo "Linking cni package: ${package}"
ln -fs ${package}/bin/* /opt/cni/bin
'') cfg.cni.packages}
'';
serviceConfig = {
Slice = "kubernetes.slice";
CPUAccounting = true;
MemoryAccounting = true;
Restart = "on-failure";
RestartSec = "1000ms";
ExecStart = ''${top.package}/bin/kubelet \
--address=${cfg.address} \
--allow-privileged=${boolToString cfg.allowPrivileged} \
--authentication-token-webhook \
--authentication-token-webhook-cache-ttl="10s" \
--authorization-mode=Webhook \
${optionalString (cfg.clientCaFile != null)
"--client-ca-file=${cfg.clientCaFile}"} \
${optionalString (cfg.clusterDns != "")
"--cluster-dns=${cfg.clusterDns}"} \
${optionalString (cfg.clusterDomain != "")
"--cluster-domain=${cfg.clusterDomain}"} \
--cni-conf-dir=${cniConfig} \
${optionalString (cfg.featureGates != [])
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
--hairpin-mode=hairpin-veth \
--healthz-bind-address=${cfg.healthz.bind} \
--healthz-port=${toString cfg.healthz.port} \
--hostname-override=${cfg.hostname} \
--kubeconfig=${kubeconfig} \
${optionalString (cfg.networkPlugin != null)
"--network-plugin=${cfg.networkPlugin}"} \
${optionalString (cfg.nodeIp != null)
"--node-ip=${cfg.nodeIp}"} \
--pod-infra-container-image=pause \
${optionalString (cfg.manifests != {})
"--pod-manifest-path=/etc/${manifestPath}"} \
--port=${toString cfg.port} \
--register-node=${boolToString cfg.registerNode} \
${optionalString (taints != "")
"--register-with-taints=${taints}"} \
--root-dir=${top.dataDir} \
${optionalString (cfg.tlsCertFile != null)
"--tls-cert-file=${cfg.tlsCertFile}"} \
${optionalString (cfg.tlsKeyFile != null)
"--tls-private-key-file=${cfg.tlsKeyFile}"} \
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
${cfg.extraOpts}
'';
WorkingDirectory = top.dataDir;
};
};
# Always include CNI plugins
services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];
boot.kernelModules = ["br_netfilter"];
services.kubernetes.kubelet.hostname = with config.networking;
mkDefault (hostName + optionalString (!isNull domain) ".${domain}");
services.kubernetes.pki.certs = with top.lib; {
kubelet = mkCert {
name = "kubelet";
CN = top.kubelet.hostname;
action = "systemctl restart kubelet.service";
};
kubeletClient = mkCert {
name = "kubelet-client";
CN = "system:node:${top.kubelet.hostname}";
fields = {
O = "system:nodes";
};
action = "systemctl restart kubelet.service";
};
};
services.kubernetes.kubelet.kubeconfig.server = mkDefault top.apiserverAddress;
})
(mkIf (cfg.enable && cfg.manifests != {}) {
environment.etc = mapAttrs' (name: manifest:
nameValuePair "${manifestPath}/${name}.json" {
text = builtins.toJSON manifest;
mode = "0755";
}
) cfg.manifests;
})
(mkIf (cfg.unschedulable && cfg.enable) {
services.kubernetes.kubelet.taints.unschedulable = {
value = "true";
effect = "NoSchedule";
};
})
];
}
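A minimal sketch of enabling the kubelet with a custom taint; the attribute name supplies the taint key by default:

# Hypothetical usage sketch; taint values are placeholders.
services.kubernetes.kubelet = {
  enable = true;
  taints.dedicated = {
    value = "infra";
    effect = "NoSchedule";   # rendered as --register-with-taints=dedicated=infra:NoSchedule
  };
};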

View File

@@ -0,0 +1,388 @@
{ config, lib, pkgs, ... }:
with lib;
let
top = config.services.kubernetes;
cfg = top.pki;
csrCA = pkgs.writeText "kube-pki-cacert-csr.json" (builtins.toJSON {
key = {
algo = "rsa";
size = 2048;
};
names = singleton cfg.caSpec;
});
csrCfssl = pkgs.writeText "kube-pki-cfssl-csr.json" (builtins.toJSON {
key = {
algo = "rsa";
size = 2048;
};
CN = top.masterAddress;
});
cfsslAPITokenBaseName = "apitoken.secret";
cfsslAPITokenPath = "${config.services.cfssl.dataDir}/${cfsslAPITokenBaseName}";
certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
cfsslAPITokenLength = 32;
clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
top.lib.mkKubeConfig "cluster-admin" {
server = top.apiserverAddress;
certFile = cert;
keyFile = key;
};
remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
in
{
###### interface
options.services.kubernetes.pki = with lib.types; {
enable = mkEnableOption "Whether to enable easyCert issuer service.";
certs = mkOption {
description = "List of certificate specs to feed to cert generator.";
default = {};
type = attrs;
};
genCfsslCACert = mkOption {
description = ''
Whether to automatically generate cfssl CA certificate and key,
if they don't exist.
'';
default = true;
type = bool;
};
genCfsslAPICerts = mkOption {
description = ''
Whether to automatically generate cfssl API webserver TLS cert and key,
if they don't exist.
'';
default = true;
type = bool;
};
genCfsslAPIToken = mkOption {
description = ''
Whether to automatically generate the cfssl API-token secret,
if it doesn't exist.
'';
default = true;
type = bool;
};
pkiTrustOnBootstrap = mkOption {
description = "Whether to always trust remote cfssl server upon initial PKI bootstrap.";
default = true;
type = bool;
};
caCertPathPrefix = mkOption {
description = ''
Path prefix for the CA certificate to be used for cfssl signing.
Suffixes ".pem" and "-key.pem" will be automatically appended for
the public and private keys respectively.
'';
default = "${config.services.cfssl.dataDir}/ca";
type = str;
};
caSpec = mkOption {
description = "Certificate specification for the auto-generated CAcert.";
default = {
CN = "kubernetes-cluster-ca";
O = "NixOS";
OU = "services.kubernetes.pki.caSpec";
L = "auto-generated";
};
type = attrs;
};
etcClusterAdminKubeconfig = mkOption {
description = ''
Symlink a kubeconfig with cluster-admin privileges to environment path
(/etc/&lt;path&gt;).
'';
default = null;
type = nullOr str;
};
};
###### implementation
config = mkIf cfg.enable
(let
cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
cfsslCert = "${cfsslCertPathPrefix}.pem";
cfsslKey = "${cfsslCertPathPrefix}-key.pem";
in
{
services.cfssl = mkIf (top.apiserver.enable) {
enable = true;
address = "0.0.0.0";
tlsCert = cfsslCert;
tlsKey = cfsslKey;
configFile = toString (pkgs.writeText "cfssl-config.json" (builtins.toJSON {
signing = {
profiles = {
default = {
usages = ["digital signature"];
auth_key = "default";
expiry = "720h";
};
};
};
auth_keys = {
default = {
type = "standard";
key = "file:${cfsslAPITokenPath}";
};
};
}));
};
systemd.services.cfssl.preStart = with pkgs; with config.services.cfssl; mkIf (top.apiserver.enable)
(concatStringsSep "\n" [
"set -e"
(optionalString cfg.genCfsslCACert ''
if [ ! -f "${cfg.caCertPathPrefix}.pem" ]; then
${cfssl}/bin/cfssl genkey -initca ${csrCA} | \
${cfssl}/bin/cfssljson -bare ${cfg.caCertPathPrefix}
fi
'')
(optionalString cfg.genCfsslAPICerts ''
if [ ! -f "${dataDir}/cfssl.pem" ]; then
${cfssl}/bin/cfssl gencert -ca "${cfg.caCertPathPrefix}.pem" -ca-key "${cfg.caCertPathPrefix}-key.pem" ${csrCfssl} | \
${cfssl}/bin/cfssljson -bare ${cfsslCertPathPrefix}
fi
'')
(optionalString cfg.genCfsslAPIToken ''
if [ ! -f "${cfsslAPITokenPath}" ]; then
head -c ${toString (cfsslAPITokenLength / 2)} /dev/urandom | od -An -t x | tr -d ' ' >"${cfsslAPITokenPath}"
fi
chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
'')]);
systemd.services.kube-certmgr-bootstrap = {
description = "Kubernetes certmgr bootstrapper";
wantedBy = [ "certmgr.service" ];
after = [ "cfssl.target" ];
script = concatStringsSep "\n" [''
set -e
# If there's a cfssl (cert issuer) running locally, then don't rely on user to
# manually paste it in place. Just symlink.
# Otherwise, create the target file, ready for users to insert the token.
if [ -f "${cfsslAPITokenPath}" ]; then
ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}"
else
touch "${certmgrAPITokenPath}" && chmod 600 "${certmgrAPITokenPath}"
fi
''
(optionalString (cfg.pkiTrustOnBootstrap) ''
if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
fi
'')
];
serviceConfig = {
RestartSec = "10s";
Restart = "on-failure";
};
};
services.certmgr = {
enable = true;
package = pkgs.certmgr-selfsigned;
svcManager = "command";
specs =
let
mkSpec = _: cert: {
inherit (cert) action;
authority = {
inherit remote;
file.path = cert.caCert;
root_ca = cert.caCert;
profile = "default";
auth_key_file = certmgrAPITokenPath;
};
certificate = {
path = cert.cert;
};
private_key = cert.privateKeyOptions;
request = {
inherit (cert) CN hosts;
key = {
algo = "rsa";
size = 2048;
};
names = [ cert.fields ];
};
};
in
mapAttrs mkSpec cfg.certs;
};
#TODO: Get rid of kube-addon-manager in the future for the following reasons
# - it is basically just a shell script wrapped around kubectl
# - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
# - it is designed to be used with k8s system components only
# - it would be better with a more Nix-oriented way of managing addons
systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
environment.KUBECONFIG = with cfg.certs.addonManager;
top.lib.mkKubeConfig "addon-manager" {
server = top.apiserverAddress;
certFile = cert;
keyFile = key;
};
}
(optionalAttrs (top.addonManager.bootstrapAddons != {}) {
serviceConfig.PermissionsStartOnly = true;
preStart = with pkgs;
let
files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
top.addonManager.bootstrapAddons;
in
''
export KUBECONFIG=${clusterAdminKubeconfig}
${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
'';
})]);
environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
clusterAdminKubeconfig;
environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
(pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
set -e
exec 1>&2
if [ $# -gt 0 ]; then
echo "Usage: $(basename $0)"
echo ""
echo "No args. Apitoken must be provided on stdin."
echo "To get the apitoken, execute: 'sudo cat ${certmgrAPITokenPath}' on the master node."
exit 1
fi
if [ $(id -u) != 0 ]; then
echo "Run as root please."
exit 1
fi
read -r token
if [ ''${#token} != ${toString cfsslAPITokenLength} ]; then
echo "Token must be of length ${toString cfsslAPITokenLength}."
exit 1
fi
echo $token > ${certmgrAPITokenPath}
chmod 600 ${certmgrAPITokenPath}
echo "Restarting certmgr..." >&1
systemctl restart certmgr
echo "Waiting for certs to appear..." >&1
${optionalString top.kubelet.enable ''
while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
echo "Restarting kubelet..." >&1
systemctl restart kubelet
''}
${optionalString top.proxy.enable ''
while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
echo "Restarting kube-proxy..." >&1
systemctl restart kube-proxy
''}
${optionalString top.flannel.enable ''
while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
echo "Restarting flannel..." >&1
systemctl restart flannel
''}
echo "Node joined succesfully"
'')];
# Isolate etcd on loopback at the master node;
# easyCerts doesn't support multi-master clusters at the moment anyway.
services.etcd = with cfg.certs.etcd; {
listenClientUrls = ["https://127.0.0.1:2379"];
listenPeerUrls = ["https://127.0.0.1:2380"];
advertiseClientUrls = ["https://etcd.local:2379"];
initialCluster = ["${top.masterAddress}=https://etcd.local:2380"];
initialAdvertisePeerUrls = ["https://etcd.local:2380"];
certFile = mkDefault cert;
keyFile = mkDefault key;
trustedCaFile = mkDefault caCert;
};
networking.extraHosts = mkIf (config.services.etcd.enable) ''
127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
'';
services.flannel = with cfg.certs.flannelClient; {
kubeconfig = top.lib.mkKubeConfig "flannel" {
server = top.apiserverAddress;
certFile = cert;
keyFile = key;
};
};
services.kubernetes = {
apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
etcd = with cfg.certs.apiserverEtcdClient; {
servers = ["https://etcd.local:2379"];
certFile = mkDefault cert;
keyFile = mkDefault key;
caFile = mkDefault caCert;
};
clientCaFile = mkDefault caCert;
tlsCertFile = mkDefault cert;
tlsKeyFile = mkDefault key;
serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.cert;
kubeletClientCaFile = mkDefault caCert;
kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
});
controllerManager = mkIf top.controllerManager.enable {
serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
rootCaFile = cfg.certs.controllerManagerClient.caCert;
kubeconfig = with cfg.certs.controllerManagerClient; {
certFile = mkDefault cert;
keyFile = mkDefault key;
};
};
scheduler = mkIf top.scheduler.enable {
kubeconfig = with cfg.certs.schedulerClient; {
certFile = mkDefault cert;
keyFile = mkDefault key;
};
};
kubelet = mkIf top.kubelet.enable {
clientCaFile = mkDefault cfg.certs.kubelet.caCert;
tlsCertFile = mkDefault cfg.certs.kubelet.cert;
tlsKeyFile = mkDefault cfg.certs.kubelet.key;
kubeconfig = with cfg.certs.kubeletClient; {
certFile = mkDefault cert;
keyFile = mkDefault key;
};
};
proxy = mkIf top.proxy.enable {
kubeconfig = with cfg.certs.kubeProxyClient; {
certFile = mkDefault cert;
keyFile = mkDefault key;
};
};
};
});
}
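A minimal sketch of enabling the easyCerts PKI on the master, plus the join flow that the nixos-kubernetes-node-join script above implements:

# Hypothetical usage sketch.
services.kubernetes.pki = {
  enable = true;
  # Symlink a cluster-admin kubeconfig under /etc:
  etcClusterAdminKubeconfig = "kubernetes/cluster-admin.kubeconfig";
};
# On a worker node, feed the master's API token to the helper script,
# which reads it on stdin (see the script above):
#   nixos-kubernetes-node-join < token-file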

View File

@@ -0,0 +1,82 @@
{ config, lib, pkgs, ... }:
with lib;
let
top = config.services.kubernetes;
cfg = top.proxy;
in
{
###### interface
options.services.kubernetes.proxy = with lib.types; {
bindAddress = mkOption {
description = "Kubernetes proxy listening address.";
default = "0.0.0.0";
type = str;
};
enable = mkEnableOption "Whether to enable Kubernetes proxy.";
extraOpts = mkOption {
description = "Kubernetes proxy extra command line options.";
default = "";
type = str;
};
featureGates = mkOption {
description = "List set of feature gates";
default = top.featureGates;
type = listOf str;
};
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes proxy";
verbosity = mkOption {
description = ''
Optional glog verbosity level for logging statements. See
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
'';
default = null;
type = nullOr int;
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.kube-proxy = {
description = "Kubernetes Proxy Service";
wantedBy = [ "kubernetes.target" ];
after = [ "kube-apiserver.service" ];
path = with pkgs; [ iptables conntrack_tools ];
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = ''${top.package}/bin/kube-proxy \
--bind-address=${cfg.bindAddress} \
${optionalString (top.clusterCidr!=null)
"--cluster-cidr=${top.clusterCidr}"} \
${optionalString (cfg.featureGates != [])
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
--kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
${cfg.extraOpts}
'';
WorkingDirectory = top.dataDir;
Restart = "on-failure";
RestartSec = 5;
};
};
services.kubernetes.pki.certs = {
kubeProxyClient = top.lib.mkCert {
name = "kube-proxy-client";
CN = "system:kube-proxy";
action = "systemctl restart kube-proxy.service";
};
};
services.kubernetes.proxy.kubeconfig.server = mkDefault top.apiserverAddress;
};
}
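A minimal sketch; kubeconfig.server already defaults to the apiserver address via the mkDefault above, so no further wiring is needed on a standard setup:

# Hypothetical usage sketch.
services.kubernetes.proxy.enable = true;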

View File

@@ -0,0 +1,94 @@
{ config, lib, pkgs, ... }:
with lib;
let
top = config.services.kubernetes;
cfg = top.scheduler;
in
{
###### interface
options.services.kubernetes.scheduler = with lib.types; {
address = mkOption {
description = "Kubernetes scheduler listening address.";
default = "127.0.0.1";
type = str;
};
enable = mkEnableOption "Whether to enable Kubernetes scheduler.";
extraOpts = mkOption {
description = "Kubernetes scheduler extra command line options.";
default = "";
type = str;
};
featureGates = mkOption {
description = "List set of feature gates";
default = top.featureGates;
type = listOf str;
};
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes scheduler";
leaderElect = mkOption {
description = "Whether to start leader election before executing main loop.";
type = bool;
default = true;
};
port = mkOption {
description = "Kubernetes scheduler listening port.";
default = 10251;
type = int;
};
verbosity = mkOption {
description = ''
Optional glog verbosity level for logging statements. See
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
'';
default = null;
type = nullOr int;
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.kube-scheduler = {
description = "Kubernetes Scheduler Service";
wantedBy = [ "kubernetes.target" ];
after = [ "kube-apiserver.service" ];
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = ''${top.package}/bin/kube-scheduler \
--address=${cfg.address} \
${optionalString (cfg.featureGates != [])
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
--kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
--leader-elect=${boolToString cfg.leaderElect} \
--port=${toString cfg.port} \
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
${cfg.extraOpts}
'';
WorkingDirectory = top.dataDir;
User = "kubernetes";
Group = "kubernetes";
Restart = "on-failure";
RestartSec = 5;
};
};
services.kubernetes.pki.certs = {
schedulerClient = top.lib.mkCert {
name = "kube-scheduler-client";
CN = "system:kube-scheduler";
action = "systemctl restart kube-scheduler.service";
};
};
services.kubernetes.scheduler.kubeconfig.server = mkDefault top.apiserverAddress;
};
}
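And a matching sketch for the scheduler; both values shown are the defaults declared above:

# Hypothetical usage sketch; both values are the declared defaults.
services.kubernetes.scheduler = {
  enable = true;
  leaderElect = true;   # rendered as --leader-elect=true
  port = 10251;
};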

View File

@@ -24,7 +24,7 @@ let
       EOF
       chmod 755 $out/${name}
     '';
-  in pkgs.runCommand "buildkite-agent-hooks" {} ''
+  in pkgs.runCommand "buildkite-agent-hooks" { preferLocalBuild = true; } ''
     mkdir $out
     ${concatStringsSep "\n" (mapAttrsToList mkHookEntry (filterAttrs (n: v: v != null) cfg.hooks))}
   '';

View File

@@ -8,6 +8,7 @@ let
     if (cfg.configFile == null) then
       (pkgs.runCommand "config.toml" {
         buildInputs = [ pkgs.remarshal ];
+        preferLocalBuild = true;
       } ''
         remarshal -if json -of toml \
           < ${pkgs.writeText "config.json" (builtins.toJSON cfg.configOptions)} \

View File

@@ -18,7 +18,7 @@ let
     </configuration>
   '';
-  configDir = pkgs.runCommand "hbase-config-dir" {} ''
+  configDir = pkgs.runCommand "hbase-config-dir" { preferLocalBuild = true; } ''
     mkdir -p $out
     cp ${cfg.package}/conf/* $out/
     rm $out/hbase-site.xml

View File

@@ -98,6 +98,7 @@ let
   configFile = pkgs.runCommand "config.toml" {
     buildInputs = [ pkgs.remarshal ];
+    preferLocalBuild = true;
   } ''
     remarshal -if json -of toml \
       < ${pkgs.writeText "config.json" (builtins.toJSON configOptions)} \

View File

@@ -146,7 +146,7 @@ in
         chown -R "${cfg.user}:${cfg.group}" "${cfg.dataDir}"
       '';
     serviceConfig.ExecStart =
-      "${openldap.out}/libexec/slapd -d ${cfg.logLevel} " +
+      "${openldap.out}/libexec/slapd -d '${cfg.logLevel}' " +
       "-u '${cfg.user}' -g '${cfg.group}' " +
       "-h '${concatStringsSep " " cfg.urlList}' " +
       "${configOpts}";

Some files were not shown because too many files have changed in this diff