Merge remote-tracking branch 'upstream/master' into override-unstable-nix

commit 4dfe9f9eb8
@@ -58,11 +58,11 @@
 /doc/languages-frameworks/python.section.md @FRidh

 # Haskell
-/pkgs/development/compilers/ghc @peti @ryantm @basvandijk
-/pkgs/development/haskell-modules @peti @ryantm @basvandijk
-/pkgs/development/haskell-modules/default.nix @peti @ryantm @basvandijk
-/pkgs/development/haskell-modules/generic-builder.nix @peti @ryantm @basvandijk
-/pkgs/development/haskell-modules/hoogle.nix @peti @ryantm @basvandijk
+/pkgs/development/compilers/ghc @peti @basvandijk
+/pkgs/development/haskell-modules @peti @basvandijk
+/pkgs/development/haskell-modules/default.nix @peti @basvandijk
+/pkgs/development/haskell-modules/generic-builder.nix @peti @basvandijk
+/pkgs/development/haskell-modules/hoogle.nix @peti @basvandijk

 # Perl
 /pkgs/development/interpreters/perl @volth
@@ -1,7 +1,8 @@
 *.chapter.xml
 *.section.xml
 .version
-out
-manual-full.xml
-highlightjs
+functions/library/generated
 functions/library/locations.xml
+highlightjs
+manual-full.xml
+out
@@ -197,20 +197,14 @@ args.stdenv.mkDerivation (args // {
 <title>Package naming</title>

 <para>
-The key words
-<emphasis>must</emphasis>,
-<emphasis>must not</emphasis>,
-<emphasis>required</emphasis>,
-<emphasis>shall</emphasis>,
-<emphasis>shall not</emphasis>,
-<emphasis>should</emphasis>,
-<emphasis>should not</emphasis>,
-<emphasis>recommended</emphasis>,
-<emphasis>may</emphasis>,
-and <emphasis>optional</emphasis> in this section
-are to be interpreted as described in
-<link xlink:href="https://tools.ietf.org/html/rfc2119">RFC 2119</link>.
-Only <emphasis>emphasized</emphasis> words are to be interpreted in this way.
+The key words <emphasis>must</emphasis>, <emphasis>must not</emphasis>,
+<emphasis>required</emphasis>, <emphasis>shall</emphasis>, <emphasis>shall
+not</emphasis>, <emphasis>should</emphasis>, <emphasis>should
+not</emphasis>, <emphasis>recommended</emphasis>, <emphasis>may</emphasis>,
+and <emphasis>optional</emphasis> in this section are to be interpreted as
+described in <link xlink:href="https://tools.ietf.org/html/rfc2119">RFC
+2119</link>. Only <emphasis>emphasized</emphasis> words are to be
+interpreted in this way.
 </para>

 <para>
@@ -253,15 +247,15 @@ args.stdenv.mkDerivation (args // {
 <itemizedlist>
  <listitem>
   <para>
-   The <literal>name</literal> attribute <emphasis>should</emphasis>
-   be identical to the upstream package name.
+   The <literal>name</literal> attribute <emphasis>should</emphasis> be
+   identical to the upstream package name.
   </para>
  </listitem>
  <listitem>
   <para>
-   The <literal>name</literal> attribute <emphasis>must not</emphasis>
-   contain uppercase letters — e.g., <literal>"mplayer-1.0rc2"</literal>
-   instead of <literal>"MPlayer-1.0rc2"</literal>.
+   The <literal>name</literal> attribute <emphasis>must not</emphasis>
+   contain uppercase letters — e.g., <literal>"mplayer-1.0rc2"</literal>
+   instead of <literal>"MPlayer-1.0rc2"</literal>.
   </para>
  </listitem>
  <listitem>
@@ -275,28 +269,29 @@ args.stdenv.mkDerivation (args // {
   <para>
    If a package is not a release but a commit from a repository, then the
    version part of the name <emphasis>must</emphasis> be the date of that
-   (fetched) commit. The date <emphasis>must</emphasis> be in <literal>"YYYY-MM-DD"</literal>
-   format. Also append <literal>"unstable"</literal> to the name - e.g.,
+   (fetched) commit. The date <emphasis>must</emphasis> be in
+   <literal>"YYYY-MM-DD"</literal> format. Also append
+   <literal>"unstable"</literal> to the name - e.g.,
    <literal>"pkgname-unstable-2014-09-23"</literal>.
   </para>
  </listitem>
 <listitem>
  <para>
-   Dashes in the package name <emphasis>should</emphasis> be preserved in new variable names,
-   rather than converted to underscores or camel cased — e.g.,
-   <varname>http-parser</varname> instead of <varname>http_parser</varname>
-   or <varname>httpParser</varname>. The hyphenated style is preferred in
-   all three package names.
+   Dashes in the package name <emphasis>should</emphasis> be preserved in
+   new variable names, rather than converted to underscores or camel cased
+   — e.g., <varname>http-parser</varname> instead of
+   <varname>http_parser</varname> or <varname>httpParser</varname>. The
+   hyphenated style is preferred in all three package names.
  </para>
 </listitem>
 <listitem>
  <para>
-   If there are multiple versions of a package, this <emphasis>should</emphasis> be reflected in
-   the variable names in <filename>all-packages.nix</filename>, e.g.
-   <varname>json-c-0-9</varname> and <varname>json-c-0-11</varname>. If
-   there is an obvious “default” version, make an attribute like
-   <literal>json-c = json-c-0-9;</literal>. See also
-   <xref linkend="sec-versioning" />
+   If there are multiple versions of a package, this
+   <emphasis>should</emphasis> be reflected in the variable names in
+   <filename>all-packages.nix</filename>, e.g. <varname>json-c-0-9</varname>
+   and <varname>json-c-0-11</varname>. If there is an obvious “default”
+   version, make an attribute like <literal>json-c = json-c-0-9;</literal>.
+   See also <xref linkend="sec-versioning" />
  </para>
 </listitem>
 </itemizedlist>
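To make the naming rules above concrete, here is a minimal sketch; the owner, repository, and hashes are hypothetical placeholders, not a real package:

    stdenv.mkDerivation rec {
      # lowercase upstream name + "unstable" + date of the fetched commit
      name = "pkgname-unstable-2014-09-23";
      src = fetchFromGitHub {
        owner = "example-owner";                                          # hypothetical
        repo = "pkgname";                                                 # hypothetical
        rev = "0000000000000000000000000000000000000000";                 # placeholder commit id
        sha256 = "0000000000000000000000000000000000000000000000000000";  # placeholder hash
      };
    }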
@@ -814,8 +809,8 @@ args.stdenv.mkDerivation (args // {

 <para>
  There are multiple ways to fetch a package source in nixpkgs. The general
- guideline is that you should package reproducible sources with a high degree of
- availability. Right now there is only one fetcher which has mirroring
+ guideline is that you should package reproducible sources with a high degree
+ of availability. Right now there is only one fetcher which has mirroring
  support and that is <literal>fetchurl</literal>. Note that you should also
  prefer protocols which have a corresponding proxy environment variable.
 </para>
@@ -869,8 +864,10 @@ src = fetchFromGitHub {
 }
 </programlisting>
 Find the value to put as <literal>sha256</literal> by running
-<literal>nix run -f '<nixpkgs>' nix-prefetch-github -c nix-prefetch-github --rev 1f795f9f44607cc5bec70d1300150bfefcef2aae NixOS nix</literal>
-or <literal>nix-prefetch-url --unpack https://github.com/NixOS/nix/archive/1f795f9f44607cc5bec70d1300150bfefcef2aae.tar.gz</literal>.
+<literal>nix run -f '<nixpkgs>' nix-prefetch-github -c
+nix-prefetch-github --rev 1f795f9f44607cc5bec70d1300150bfefcef2aae NixOS
+nix</literal> or <literal>nix-prefetch-url --unpack
+https://github.com/NixOS/nix/archive/1f795f9f44607cc5bec70d1300150bfefcef2aae.tar.gz</literal>.
 </para>
 </listitem>
 </itemizedlist>
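Once the hash has been obtained with one of the commands above, the source attribute from the example reads as follows; the all-zero sha256 is a stand-in for whatever value the prefetch command prints:

    src = fetchFromGitHub {
      owner = "NixOS";
      repo = "nix";
      rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
      # paste the value printed by nix-prefetch-github or nix-prefetch-url here:
      sha256 = "0000000000000000000000000000000000000000000000000000";
    };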
@@ -953,17 +950,23 @@ $ nix-hash --type sha256 --to-base32 <replaceable>HASH</replaceable>
  would be to replace the hash with a fake one and rebuild. The Nix build will
  fail and the error message will contain the desired hash.
 </para>
-<warning><para>This method has security problems. Check below for details.</para></warning>
+<warning>
+ <para>
+  This method has security problems. Check below for details.
+ </para>
+</warning>
 </listitem>
 </orderedlist>

 <section xml:id="sec-source-hashes-security">
 <title>Obtaining hashes securely</title>

 <para>
- Let's say Man-in-the-Middle (MITM) sits close to your network. Then instead of fetching
- source you can fetch malware, and instead of source hash you get hash of malware. Here are
- security considerations for this scenario:
+ Let's say a Man-in-the-Middle (MITM) sits close to your network. Then instead
+ of fetching the source you can fetch malware, and instead of the source hash
+ you get the hash of the malware. Here are security considerations for this
+ scenario:
 </para>

 <itemizedlist>
  <listitem>
   <para>
@@ -972,7 +975,8 @@ $ nix-hash --type sha256 --to-base32 <replaceable>HASH</replaceable>
 </listitem>
 <listitem>
  <para>
-  hashes from upstream (in method 3) should be obtained via secure protocol;
+  hashes from upstream (in method 3) should be obtained via a secure
+  protocol;
  </para>
 </listitem>
 <listitem>
@@ -982,12 +986,12 @@ $ nix-hash --type sha256 --to-base32 <replaceable>HASH</replaceable>
 </listitem>
 <listitem>
  <para>
-  <literal>https://</literal> URLs are not secure in method 5. When obtaining hashes
-  with fake hash method, TLS checks are disabled. So
-  refetch source hash from several different networks to exclude MITM scenario.
-  Alternatively, use fake hash method to make Nix error, but instead of extracting
-  hash from error, extract <literal>https://</literal> URL and prefetch it
-  with method 1.
+  <literal>https://</literal> URLs are not secure in method 5. When
+  obtaining hashes with the fake hash method, TLS checks are disabled. So
+  refetch the source hash from several different networks to exclude a MITM
+  scenario. Alternatively, use the fake hash method to make Nix error out,
+  but instead of extracting the hash from the error, extract the
+  <literal>https://</literal> URL and prefetch it with method 1.
  </para>
 </listitem>
 </itemizedlist>
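A sketch of the fake-hash method discussed in these items; the URL is hypothetical, and the all-zero sha256 is deliberately wrong so that the build fails and the error message reveals the real hash:

    src = fetchurl {
      url = "https://example.org/releases/hello-1.0.tar.gz";              # hypothetical
      sha256 = "0000000000000000000000000000000000000000000000000000";    # fake on purpose
    };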
@@ -132,13 +132,13 @@
 </itemizedlist>

 <para>
- The difference between a package being unsupported on some system and
- being broken is admittedly a bit fuzzy. If a program
- <emphasis>ought</emphasis> to work on a certain platform, but doesn't, the
- platform should be included in <literal>meta.platforms</literal>, but marked
- as broken with e.g. <literal>meta.broken =
- !hostPlatform.isWindows</literal>. Of course, this begs the question of what
- "ought" means exactly. That is left to the package maintainer.
+ The difference between a package being unsupported on some system and being
+ broken is admittedly a bit fuzzy. If a program <emphasis>ought</emphasis> to
+ work on a certain platform, but doesn't, the platform should be included in
+ <literal>meta.platforms</literal>, but marked as broken with e.g.
+ <literal>meta.broken = !hostPlatform.isWindows</literal>. Of course, this
+ begs the question of what "ought" means exactly. That is left to the package
+ maintainer.
 </para>
 </section>
 <section xml:id="sec-allow-unfree">
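A sketch of the combination described above, inside a package's meta attribute; the broken condition is only a hypothetical example:

    meta = with stdenv.lib; {
      platforms = platforms.unix;   # platforms where the program ought to work
      broken = stdenv.isDarwin;     # hypothetical: known not to build there yet
    };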
@@ -175,9 +175,8 @@
 </programlisting>
 </para>
 <para>
- For a more useful example, try the following. This configuration
- only allows unfree packages named flash player and visual studio
- code:
+ For a more useful example, try the following. This configuration only
+ allows unfree packages named flash player and visual studio code:
 <programlisting>
 {
   allowUnfreePredicate = (pkg: builtins.elem
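The hunk cuts the example off mid-expression; independently of the exact upstream wording, a predicate of that shape might look like this (package names are illustrative):

    {
      allowUnfreePredicate = (pkg: builtins.elem
        (builtins.parseDrvName pkg.name).name [ "flashplayer" "vscode" ]);
    }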
@@ -6,17 +6,17 @@
 <title>Introduction</title>

 <para>
- "Cross-compilation" means compiling a program on one machine for another type
- of machine. For example, a typical use of cross-compilation is to compile
- programs for embedded devices. These devices often don't have the computing
- power and memory to compile their own programs. One might think that
- cross-compilation is a fairly niche concern. However, there are significant
- advantages to rigorously distinguishing between build-time and run-time
- environments! This applies even when one is developing and deploying on the
- same machine. Nixpkgs is increasingly adopting the opinion that packages
- should be written with cross-compilation in mind, and nixpkgs should evaluate
- in a similar way (by minimizing cross-compilation-specific special cases)
- whether or not one is cross-compiling.
+ "Cross-compilation" means compiling a program on one machine for another
+ type of machine. For example, a typical use of cross-compilation is to
+ compile programs for embedded devices. These devices often don't have the
+ computing power and memory to compile their own programs. One might think
+ that cross-compilation is a fairly niche concern. However, there are
+ significant advantages to rigorously distinguishing between build-time and
+ run-time environments! This applies even when one is developing and
+ deploying on the same machine. Nixpkgs is increasingly adopting the opinion
+ that packages should be written with cross-compilation in mind, and nixpkgs
+ should evaluate in a similar way (by minimizing cross-compilation-specific
+ special cases) whether or not one is cross-compiling.
 </para>

 <para>
@@ -34,15 +34,16 @@
 <title>Platform parameters</title>

 <para>
- Nixpkgs follows the <link
+ Nixpkgs follows the
+ <link
  xlink:href="https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html">conventions
- of GNU autoconf</link>. We distinguish between 3 types of platforms when
- building a derivation: <wordasword>build</wordasword>,
- <wordasword>host</wordasword>, and <wordasword>target</wordasword>. In
- summary, <wordasword>build</wordasword> is the platform on which a package
- is being built, <wordasword>host</wordasword> is the platform on which it
- will run. The third attribute, <wordasword>target</wordasword>, is relevant
- only for certain specific compilers and build tools.
+ of GNU autoconf</link>. We distinguish between 3 types of platforms when
+ building a derivation: <wordasword>build</wordasword>,
+ <wordasword>host</wordasword>, and <wordasword>target</wordasword>. In
+ summary, <wordasword>build</wordasword> is the platform on which a package
+ is being built, <wordasword>host</wordasword> is the platform on which it
+ will run. The third attribute, <wordasword>target</wordasword>, is relevant
+ only for certain specific compilers and build tools.
 </para>

 <para>
@@ -95,10 +96,10 @@
  The build process of certain compilers is written in such a way that the
  compiler resulting from a single build can itself only produce binaries
  for a single platform. The task of specifying this single "target
- platform" is thus pushed to build time of the compiler. The root cause of
- this that the compiler (which will be run on the host) and the standard
- library/runtime (which will be run on the target) are built by a single
- build process.
+ platform" is thus pushed to build time of the compiler. The root cause
+ of this is that the compiler (which will be run on the host) and the
+ standard library/runtime (which will be run on the target) are built by
+ a single build process.
 </para>
 <para>
  There is no fundamental need to think about a single target ahead of
@@ -136,9 +137,9 @@
  This is a two-component shorthand for the platform. Examples of this
  would be "x86_64-darwin" and "i686-linux"; see
  <literal>lib.systems.doubles</literal> for more. The first component
- corresponds to the CPU architecture of the platform and the second to the
- operating system of the platform (<literal>[cpu]-[os]</literal>). This
- format has built-in support in Nix, such as the
+ corresponds to the CPU architecture of the platform and the second to
+ the operating system of the platform (<literal>[cpu]-[os]</literal>).
+ This format has built-in support in Nix, such as the
  <varname>builtins.currentSystem</varname> impure string.
 </para>
 </listitem>
@@ -149,14 +150,14 @@
 </term>
 <listitem>
  <para>
-  This is a 3- or 4- component shorthand for the platform. Examples of this
-  would be <literal>x86_64-unknown-linux-gnu</literal> and
+  This is a 3- or 4-component shorthand for the platform. Examples of
+  this would be <literal>x86_64-unknown-linux-gnu</literal> and
   <literal>aarch64-apple-darwin14</literal>. This is a standard format
   called the "LLVM target triple", as they are pioneered by LLVM. In the
   4-part form, this corresponds to
   <literal>[cpu]-[vendor]-[os]-[abi]</literal>. This format is strictly
-  more informative than the "Nix host double", as the previous format could
-  analogously be termed. This needs a better name than
+  more informative than the "Nix host double", as the previous format
+  could analogously be termed. This needs a better name than
   <varname>config</varname>!
  </para>
 </listitem>
@@ -167,11 +168,10 @@
 </term>
 <listitem>
  <para>
-  This is a Nix representation of a parsed LLVM target triple
-  with white-listed components. This can be specified directly,
-  or actually parsed from the <varname>config</varname>. See
-  <literal>lib.systems.parse</literal> for the exact
-  representation.
+  This is a Nix representation of a parsed LLVM target triple with
+  white-listed components. This can be specified directly, or actually
+  parsed from the <varname>config</varname>. See
+  <literal>lib.systems.parse</literal> for the exact representation.
  </para>
 </listitem>
 </varlistentry>
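To see these representations for a concrete platform (assuming a nixpkgs checkout that provides the lib.systems functions discussed here), the elaborated form can be inspected interactively; the parsed attribute holds the white-listed triple components:

    $ nix repl '<nixpkgs>'
    nix-repl> (lib.systems.elaborate "x86_64-linux").parsed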
@@ -253,15 +253,15 @@
 <para>
  Some examples will make this clearer. If a package is being built with a
  <literal>(build, host, target)</literal> platform triple of <literal>(foo,
- bar, bar)</literal>, then its build-time dependencies would have a triple of
- <literal>(foo, foo, bar)</literal>, and <emphasis>those packages'</emphasis>
- build-time dependencies would have a triple of <literal>(foo, foo,
- foo)</literal>. In other words, it should take two "rounds" of following
- build-time dependency edges before one reaches a fixed point where, by the
- sliding window principle, the platform triple no longer changes. Indeed,
- this happens with cross-compilation, where only rounds of native
- dependencies starting with the second necessarily coincide with native
- packages.
+ bar, bar)</literal>, then its build-time dependencies would have a triple
+ of <literal>(foo, foo, bar)</literal>, and <emphasis>those
+ packages'</emphasis> build-time dependencies would have a triple of
+ <literal>(foo, foo, foo)</literal>. In other words, it should take two
+ "rounds" of following build-time dependency edges before one reaches a
+ fixed point where, by the sliding window principle, the platform triple no
+ longer changes. Indeed, this happens with cross-compilation, where only
+ rounds of native dependencies starting with the second necessarily coincide
+ with native packages.
 </para>

 <note>
@@ -273,23 +273,24 @@
 </note>

 <para>
- How does this work in practice? Nixpkgs is now structured so that build-time
- dependencies are taken from <varname>buildPackages</varname>, whereas
- run-time dependencies are taken from the top level attribute set. For
- example, <varname>buildPackages.gcc</varname> should be used at build-time,
- while <varname>gcc</varname> should be used at run-time. Now, for most of
- Nixpkgs's history, there was no <varname>buildPackages</varname>, and most
- packages have not been refactored to use it explicitly. Instead, one can use
- the six (<emphasis>gasp</emphasis>) attributes used for specifying
- dependencies as documented in <xref linkend="ssec-stdenv-dependencies"/>. We
- "splice" together the run-time and build-time package sets with
- <varname>callPackage</varname>, and then <varname>mkDerivation</varname> for
- each of four attributes pulls the right derivation out. This splicing can be
- skipped when not cross-compiling as the package sets are the same, but is a
- bit slow for cross-compiling. Because of this, a best-of-both-worlds
- solution is in the works with no splicing or explicit access of
- <varname>buildPackages</varname> needed. For now, feel free to use either
- method.
+ How does this work in practice? Nixpkgs is now structured so that
+ build-time dependencies are taken from <varname>buildPackages</varname>,
+ whereas run-time dependencies are taken from the top level attribute set.
+ For example, <varname>buildPackages.gcc</varname> should be used at
+ build-time, while <varname>gcc</varname> should be used at run-time. Now,
+ for most of Nixpkgs's history, there was no
+ <varname>buildPackages</varname>, and most packages have not been
+ refactored to use it explicitly. Instead, one can use the six
+ (<emphasis>gasp</emphasis>) attributes used for specifying dependencies as
+ documented in <xref linkend="ssec-stdenv-dependencies"/>. We "splice"
+ together the run-time and build-time package sets with
+ <varname>callPackage</varname>, and then <varname>mkDerivation</varname>
+ for each of four attributes pulls the right derivation out. This splicing
+ can be skipped when not cross-compiling as the package sets are the same,
+ but is a bit slow for cross-compiling. Because of this, a
+ best-of-both-worlds solution is in the works with no splicing or explicit
+ access of <varname>buildPackages</varname> needed. For now, feel free to
+ use either method.
 </para>

 <note>
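A minimal sketch of the convention just described; the use of GNU Hello for both roles is purely illustrative:

    { stdenv, buildPackages, hello }:

    stdenv.mkDerivation {
      name = "cross-aware-example";
      buildCommand = ''
        # runs during the build, on the build platform: taken from buildPackages
        ${buildPackages.hello}/bin/hello > $out
        # recorded as a run-time (host platform) dependency: taken from the top level
        echo ${hello} >> $out
      '';
    }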
@@ -311,8 +312,8 @@
  should be answered here. Ideally, the information above is exhaustive, so
  this section cannot provide any new information, but it is ludicrous and
  cruel to expect everyone to spend effort working through the interaction of
- many features just to figure out the same answer to the same common problem.
- Feel free to add to this list!
+ many features just to figure out the same answer to the same common
+ problem. Feel free to add to this list!
 </para>

 <qandaset>
@@ -434,14 +435,15 @@ nix-build <nixpkgs> --arg crossSystem '{ config = "<arch>-<os>
  build plan or package set. A simple "build vs deploy" dichotomy is adequate:
  the sliding window principle described in the previous section shows how to
  interpolate between these two "end points" to get the 3 platform triple
- for each bootstrapping stage. That means for any package a given package set,
- even those not bound on the top level but only reachable via dependencies or
- <varname>buildPackages</varname>, the three platforms will be defined as one
- of <varname>localSystem</varname> or <varname>crossSystem</varname>, with the
- former replacing the latter as one traverses build-time dependencies. A last
- simple difference is that <varname>crossSystem</varname> should be null when
- one doesn't want to cross-compile, while the <varname>*Platform</varname>s
- are always non-null. <varname>localSystem</varname> is always non-null.
+ for each bootstrapping stage. That means for any package in a given package
+ set, even those not bound on the top level but only reachable via
+ dependencies or <varname>buildPackages</varname>, the three platforms will
+ be defined as one of <varname>localSystem</varname> or
+ <varname>crossSystem</varname>, with the former replacing the latter as one
+ traverses build-time dependencies. A last simple difference is that
+ <varname>crossSystem</varname> should be null when one doesn't want to
+ cross-compile, while the <varname>*Platform</varname>s are always non-null.
+ <varname>localSystem</varname> is always non-null.
 </para>
 </section>
 <!--============================================================-->
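For example, building GNU Hello from a nixpkgs checkout for 64-bit ARM Linux (any config triple accepted by lib.systems would do) looks like:

    $ nix-build '<nixpkgs>' -A hello --arg crossSystem '{ config = "aarch64-unknown-linux-gnu"; }'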
@@ -455,13 +457,13 @@ nix-build <nixpkgs> --arg crossSystem '{ config = "<arch>-<os>
 <note>
  <para>
   If one explores Nixpkgs, they will see derivations with names like
-  <literal>gccCross</literal>. Such <literal>*Cross</literal> derivations is a
-  holdover from before we properly distinguished between the host and target
-  platforms—the derivation with "Cross" in the name covered the <literal>build
-  = host != target</literal> case, while the other covered the <literal>host =
-  target</literal>, with build platform the same or not based on whether one
-  was using its <literal>.nativeDrv</literal> or <literal>.crossDrv</literal>.
-  This ugliness will disappear soon.
+  <literal>gccCross</literal>. Such <literal>*Cross</literal> derivations are
+  a holdover from before we properly distinguished between the host and
+  target platforms—the derivation with "Cross" in the name covered the
+  <literal>build = host != target</literal> case, while the other covered the
+  <literal>host = target</literal>, with build platform the same or not based
+  on whether one was using its <literal>.nativeDrv</literal> or
+  <literal>.crossDrv</literal>. This ugliness will disappear soon.
  </para>
 </note>
 </section>
@@ -16,6 +16,7 @@
 <xi:include href="functions/fhs-environments.xml" />
 <xi:include href="functions/shell.xml" />
 <xi:include href="functions/dockertools.xml" />
+<xi:include href="functions/appimagetools.xml" />
 <xi:include href="functions/prefer-remote-fetch.xml" />
 <xi:include href="functions/nix-gitignore.xml" />
 </chapter>
@@ -0,0 +1,118 @@
+<section xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         xml:id="sec-pkgs-appimageTools">
+ <title>pkgs.appimageTools</title>
+
+ <para>
+  <varname>pkgs.appimageTools</varname> is a set of functions for extracting
+  and wrapping <link xlink:href="https://appimage.org/">AppImage</link> files.
+  They are meant to be used if traditional packaging from source is infeasible,
+  or it would take too long. To quickly run an AppImage file,
+  <literal>pkgs.appimage-run</literal> can be used as well.
+ </para>
+
+ <warning>
+  <para>
+   The <varname>appimageTools</varname> API is unstable and may be subject to
+   backwards-incompatible changes in the future.
+  </para>
+ </warning>
+
+ <section xml:id="ssec-pkgs-appimageTools-formats">
+  <title>AppImage formats</title>
+
+  <para>
+   There are different formats for AppImages, see
+   <link xlink:href="https://github.com/AppImage/AppImageSpec/blob/74ad9ca2f94bf864a4a0dac1f369dd4f00bd1c28/draft.md#image-format">the
+   specification</link> for details.
+  </para>
+
+  <itemizedlist>
+   <listitem>
+    <para>
+     Type 1 images are ISO 9660 files that are also ELF executables.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+     Type 2 images are ELF executables with an appended filesystem.
+    </para>
+   </listitem>
+  </itemizedlist>
+
+  <para>
+   They can be told apart with <command>file -k</command>:
+  </para>
+
+  <screen>
+<prompt>$ </prompt>file -k type1.AppImage
+type1.AppImage: ELF 64-bit LSB executable, x86-64, version 1 (SYSV) ISO 9660 CD-ROM filesystem data 'AppImage' (Lepton 3.x), scale 0-0,
+spot sensor temperature 0.000000, unit celsius, color scheme 0, calibration: offset 0.000000, slope 0.000000, dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 2.6.18, BuildID[sha1]=d629f6099d2344ad82818172add1d38c5e11bc6d, stripped\012- data
+
+<prompt>$ </prompt>file -k type2.AppImage
+type2.AppImage: ELF 64-bit LSB executable, x86-64, version 1 (SYSV) (Lepton 3.x), scale 232-60668, spot sensor temperature -4.187500, color scheme 15, show scale bar, calibration: offset -0.000000, slope 0.000000 (Lepton 2.x), scale 4111-45000, spot sensor temperature 412442.250000, color scheme 3, minimum point enabled, calibration: offset -75402534979642766821519867692934234112.000000, slope 5815371847733706829839455140374904832.000000, dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 2.6.18, BuildID[sha1]=79dcc4e55a61c293c5e19edbd8d65b202842579f, stripped\012- data
+  </screen>
+
+  <para>
+   Note how the type 1 AppImage is described as an <literal>ISO 9660 CD-ROM
+   filesystem</literal>, and the type 2 AppImage is not.
+  </para>
+ </section>
+
+ <section xml:id="ssec-pkgs-appimageTools-wrapping">
+  <title>Wrapping</title>
+
+  <para>
+   Depending on the type of AppImage you're wrapping, you'll have to use
+   <varname>wrapType1</varname> or <varname>wrapType2</varname>.
+  </para>
+
+  <programlisting>
+appimageTools.wrapType2 { # or wrapType1
+  name = "patchwork"; <co xml:id='ex-appimageTools-wrapping-1' />
+  src = fetchurl { <co xml:id='ex-appimageTools-wrapping-2' />
+    url = https://github.com/ssbc/patchwork/releases/download/v3.11.4/Patchwork-3.11.4-linux-x86_64.AppImage;
+    sha256 = "1blsprpkvm0ws9b96gb36f0rbf8f5jgmw4x6dsb1kswr4ysf591s";
+  };
+  extraPkgs = pkgs: with pkgs; [ ]; <co xml:id='ex-appimageTools-wrapping-3' />
+}</programlisting>
+
+  <calloutlist>
+   <callout arearefs='ex-appimageTools-wrapping-1'>
+    <para>
+     <varname>name</varname> specifies the name of the resulting image.
+    </para>
+   </callout>
+   <callout arearefs='ex-appimageTools-wrapping-2'>
+    <para>
+     <varname>src</varname> specifies the AppImage file to extract.
+    </para>
+   </callout>
+   <callout arearefs='ex-appimageTools-wrapping-3'>
+    <para>
+     <varname>extraPkgs</varname> allows you to pass a function to include
+     additional packages inside the FHS environment your AppImage is going to
+     run in. There are a few ways to learn which dependencies an application
+     needs:
+     <itemizedlist>
+      <listitem>
+       <para>
+        Looking through the extracted AppImage files, reading its scripts and
+        running <command>patchelf</command> and <command>ldd</command> on its
+        executables. This can also be done in <command>appimage-run</command>,
+        by setting <command>APPIMAGE_DEBUG_EXEC=bash</command>.
+       </para>
+      </listitem>
+      <listitem>
+       <para>
+        Running <command>strace -vfefile</command> on the wrapped executable,
+        looking for libraries that can't be found.
+       </para>
+      </listitem>
+     </itemizedlist>
+    </para>
+   </callout>
+  </calloutlist>
+ </section>
+</section>
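If the wrapped application later turns out to need an extra run-time library, it can be injected through extraPkgs; libsecret below is only a hypothetical example of such a dependency:

    appimageTools.wrapType2 {
      name = "patchwork";
      src = fetchurl {
        url = https://github.com/ssbc/patchwork/releases/download/v3.11.4/Patchwork-3.11.4-linux-x86_64.AppImage;
        sha256 = "1blsprpkvm0ws9b96gb36f0rbf8f5jgmw4x6dsb1kswr4ysf591s";
      };
      # hypothetical extra dependency, discovered e.g. via strace or ldd:
      extraPkgs = pkgs: with pkgs; [ libsecret ];
    }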
@@ -24,9 +24,9 @@

 <para>
  This function is analogous to the <command>docker build</command> command,
- in that it can be used to build a Docker-compatible repository tarball containing
- a single image with one or multiple layers. As such, the result is suitable
- for being loaded in Docker with <command>docker load</command>.
+ in that it can be used to build a Docker-compatible repository tarball
+ containing a single image with one or multiple layers. As such, the result
+ is suitable for being loaded in Docker with <command>docker load</command>.
 </para>

 <para>
@@ -47,7 +47,7 @@ buildImage {

   contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
   runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
-    #!${stdenv.shell}
+    #!${pkgs.runtimeShell}
     mkdir -p /data
   '';

@@ -190,8 +190,8 @@ buildImage {
  By default <function>buildImage</function> will use a static date of one
  second past the UNIX Epoch. This allows <function>buildImage</function> to
  produce binary reproducible images. When listing images with
- <command>docker images</command>, the newly created images will be
- listed like this:
+ <command>docker images</command>, the newly created images will be listed
+ like this:
 </para>
 <screen><![CDATA[
 $ docker images
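If a plausible timestamp matters more than reproducibility, newer versions of nixpkgs also let you pass a created argument; check that your nixpkgs has it before relying on it:

    buildImage {
      name = "redis";
      created = "now";  # assumption: the created argument exists in your nixpkgs
      contents = pkgs.redis;
    }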
@@ -402,9 +402,9 @@ pkgs.dockerTools.buildLayeredImage {

 <para>
  This function is analogous to the <command>docker pull</command> command, in
- that it can be used to pull a Docker image from a Docker registry. By default
- <link xlink:href="https://hub.docker.com/">Docker Hub</link> is used to pull
- images.
+ that it can be used to pull a Docker image from a Docker registry. By
+ default <link xlink:href="https://hub.docker.com/">Docker Hub</link> is used
+ to pull images.
 </para>

 <para>
@@ -484,10 +484,10 @@ sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b

 <para>
  This function is analogous to the <command>docker export</command> command,
- in that it can be used to flatten a Docker image that contains multiple layers. It
- is in fact the result of the merge of all the layers of the image. As such,
- the result is suitable for being imported in Docker with <command>docker
- import</command>.
+ in that it can be used to flatten a Docker image that contains multiple
+ layers. It is in fact the result of the merge of all the layers of the
+ image. As such, the result is suitable for being imported in Docker with
+ <command>docker import</command>.
 </para>

 <note>
@@ -544,7 +544,7 @@ buildImage {
   name = "shadow-basic";

   runAsRoot = ''
-    #!${stdenv.shell}
+    #!${pkgs.runtimeShell}
     ${shadowSetup}
     groupadd -r redis
     useradd -r -g redis redis
@@ -5,24 +5,21 @@
 <title>Fetcher functions</title>

 <para>
- When using Nix, you will frequently need to download source code
- and other files from the internet. Nixpkgs comes with a few helper
- functions that allow you to fetch fixed-output derivations in a
- structured way.
+ When using Nix, you will frequently need to download source code and other
+ files from the internet. Nixpkgs comes with a few helper functions that allow
+ you to fetch fixed-output derivations in a structured way.
 </para>

 <para>
- The two fetcher primitives are <function>fetchurl</function> and
- <function>fetchzip</function>. Both of these have two required
- arguments, a URL and a hash. The hash is typically
- <literal>sha256</literal>, although many more hash algorithms are
- supported. Nixpkgs contributors are currently recommended to use
- <literal>sha256</literal>. This hash will be used by Nix to
- identify your source. A typical usage of fetchurl is provided
- below.
+ The two fetcher primitives are <function>fetchurl</function> and
+ <function>fetchzip</function>. Both of these have two required arguments, a
+ URL and a hash. The hash is typically <literal>sha256</literal>, although
+ many more hash algorithms are supported. Nixpkgs contributors are currently
+ recommended to use <literal>sha256</literal>. This hash will be used by Nix
+ to identify your source. A typical usage of fetchurl is provided below.
 </para>

-<programlisting><![CDATA[
+<programlisting><![CDATA[
 { stdenv, fetchurl }:

 stdenv.mkDerivation {
@@ -35,172 +32,163 @@ stdenv.mkDerivation {
 ]]></programlisting>

 <para>
- The main difference between <function>fetchurl</function> and
- <function>fetchzip</function> is in how they store the contents.
- <function>fetchurl</function> will store the unaltered contents of
- the URL within the Nix store. <function>fetchzip</function> on the
- other hand will decompress the archive for you, making files and
- directories directly accessible in the future.
- <function>fetchzip</function> can only be used with archives.
- Despite the name, <function>fetchzip</function> is not limited to
- .zip files and can also be used with any tarball.
+ The main difference between <function>fetchurl</function> and
+ <function>fetchzip</function> is in how they store the contents.
+ <function>fetchurl</function> will store the unaltered contents of the URL
+ within the Nix store. <function>fetchzip</function> on the other hand will
+ decompress the archive for you, making files and directories directly
+ accessible in the future. <function>fetchzip</function> can only be used with
+ archives. Despite the name, <function>fetchzip</function> is not limited to
+ .zip files and can also be used with any tarball.
 </para>

 <para>
- <function>fetchpatch</function> works very similarly to
- <function>fetchurl</function> with the same arguments expected. It
- expects patch files as a source and and performs normalization on
- them before computing the checksum. For example it will remove
- comments or other unstable parts that are sometimes added by
- version control systems and can change over time.
+ <function>fetchpatch</function> works very similarly to
+ <function>fetchurl</function> with the same arguments expected. It expects
+ patch files as a source and performs normalization on them before
+ computing the checksum. For example it will remove comments or other unstable
+ parts that are sometimes added by version control systems and can change over
+ time.
 </para>

 <para>
- Other fetcher functions allow you to add source code directly from
- a VCS such as subversion or git. These are mostly straightforward
- names based on the name of the command used with the VCS system.
- Because they give you a working repository, they act most like
- <function>fetchzip</function>.
+ Other fetcher functions allow you to add source code directly from a VCS such
+ as subversion or git. These are mostly straightforward names based on the
+ name of the command used with the VCS system. Because they give you a working
+ repository, they act most like <function>fetchzip</function>.
 </para>

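For comparison with the fetchurl example above, the same kind of source can be fetched unpacked with fetchzip; note that the hash covers the unpacked tree, so it differs from a fetchurl hash of the same tarball (URL and hash here are placeholders):

    { stdenv, fetchzip }:

    stdenv.mkDerivation {
      name = "example-unpacked";  # hypothetical package
      src = fetchzip {
        url = "https://example.org/releases/example-1.0.tar.gz";  # hypothetical
        sha256 = "0000000000000000000000000000000000000000000000000000";
      };
    }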
 <variablelist>
-  <varlistentry>
-   <term>
-    <literal>fetchsvn</literal>
-   </term>
-   <listitem>
-    <para>
-     Used with Subversion. Expects <literal>url</literal> to a
-     Subversion directory, <literal>rev</literal>, and
-     <literal>sha256</literal>.
-    </para>
-   </listitem>
-  </varlistentry>
-  <varlistentry>
-   <term>
-    <literal>fetchgit</literal>
-   </term>
-   <listitem>
-    <para>
-     Used with Git. Expects <literal>url</literal> to a Git repo,
-     <literal>rev</literal>, and <literal>sha256</literal>.
-     <literal>rev</literal> in this case can be full the git commit
-     id (SHA1 hash) or a tag name like
-     <literal>refs/tags/v1.0</literal>.
-    </para>
-   </listitem>
-  </varlistentry>
-  <varlistentry>
-   <term>
-    <literal>fetchfossil</literal>
-   </term>
-   <listitem>
-    <para>
-     Used with Fossil. Expects <literal>url</literal> to a Fossil
-     archive, <literal>rev</literal>, and <literal>sha256</literal>.
-    </para>
-   </listitem>
-  </varlistentry>
-  <varlistentry>
-   <term>
-    <literal>fetchcvs</literal>
-   </term>
-   <listitem>
-    <para>
-     Used with CVS. Expects <literal>cvsRoot</literal>,
-     <literal>tag</literal>, and <literal>sha256</literal>.
-    </para>
-   </listitem>
-  </varlistentry>
-  <varlistentry>
-   <term>
-    <literal>fetchhg</literal>
-   </term>
-   <listitem>
-    <para>
-     Used with Mercurial. Expects <literal>url</literal>,
-     <literal>rev</literal>, and <literal>sha256</literal>.
-    </para>
-   </listitem>
-  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchsvn</literal>
+   </term>
+   <listitem>
+    <para>
+     Used with Subversion. Expects <literal>url</literal> to a Subversion
+     directory, <literal>rev</literal>, and <literal>sha256</literal>.
+    </para>
+   </listitem>
+  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchgit</literal>
+   </term>
+   <listitem>
+    <para>
+     Used with Git. Expects <literal>url</literal> to a Git repo,
+     <literal>rev</literal>, and <literal>sha256</literal>.
+     <literal>rev</literal> in this case can be the full git commit id (SHA1
+     hash) or a tag name like <literal>refs/tags/v1.0</literal>.
+    </para>
+   </listitem>
+  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchfossil</literal>
+   </term>
+   <listitem>
+    <para>
+     Used with Fossil. Expects <literal>url</literal> to a Fossil archive,
+     <literal>rev</literal>, and <literal>sha256</literal>.
+    </para>
+   </listitem>
+  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchcvs</literal>
+   </term>
+   <listitem>
+    <para>
+     Used with CVS. Expects <literal>cvsRoot</literal>, <literal>tag</literal>,
+     and <literal>sha256</literal>.
+    </para>
+   </listitem>
+  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchhg</literal>
+   </term>
+   <listitem>
+    <para>
+     Used with Mercurial. Expects <literal>url</literal>,
+     <literal>rev</literal>, and <literal>sha256</literal>.
+    </para>
+   </listitem>
+  </varlistentry>
 </variablelist>

 <para>
- A number of fetcher functions wrap part of
- <function>fetchurl</function> and <function>fetchzip</function>.
- They are mainly convenience functions intended for commonly used
- destinations of source code in Nixpkgs. These wrapper fetchers are
- listed below.
+ A number of fetcher functions wrap part of <function>fetchurl</function> and
+ <function>fetchzip</function>. They are mainly convenience functions intended
+ for commonly used destinations of source code in Nixpkgs. These wrapper
+ fetchers are listed below.
 </para>

 <variablelist>
-  <varlistentry>
-   <term>
-    <literal>fetchFromGitHub</literal>
-   </term>
-   <listitem>
-    <para>
-     <function>fetchFromGitHub</function> expects four arguments.
-     <literal>owner</literal> is a string corresponding to the
-     GitHub user or organization that controls this repository.
-     <literal>repo</literal> corresponds to the name of the
-     software repository. These are located at the top of every
-     GitHub HTML page as
-     <literal>owner</literal>/<literal>repo</literal>.
-     <literal>rev</literal> corresponds to the Git commit hash or
-     tag (e.g <literal>v1.0</literal>) that will be downloaded from
-     Git. Finally, <literal>sha256</literal> corresponds to the
-     hash of the extracted directory. Again, other hash algorithms
-     are also available but <literal>sha256</literal> is currently
-     preferred.
-    </para>
-   </listitem>
-  </varlistentry>
-  <varlistentry>
-   <term>
-    <literal>fetchFromGitLab</literal>
-   </term>
-   <listitem>
-    <para>
-     This is used with GitLab repositories. The arguments expected
-     are very similar to fetchFromGitHub above.
-    </para>
-   </listitem>
-  </varlistentry>
-  <varlistentry>
-   <term>
-    <literal>fetchFromBitbucket</literal>
-   </term>
-   <listitem>
-    <para>
-     This is used with BitBucket repositories. The arguments expected
-     are very similar to fetchFromGitHub above.
-    </para>
-   </listitem>
-  </varlistentry>
-  <varlistentry>
-   <term>
-    <literal>fetchFromSavannah</literal>
-   </term>
-   <listitem>
-    <para>
-     This is used with Savannah repositories. The arguments expected
-     are very similar to fetchFromGitHub above.
-    </para>
-   </listitem>
-  </varlistentry>
-  <varlistentry>
-   <term>
-    <literal>fetchFromRepoOrCz</literal>
-   </term>
-   <listitem>
-    <para>
-     This is used with repo.or.cz repositories. The arguments
-     expected are very similar to fetchFromGitHub above.
-    </para>
-   </listitem>
-  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchFromGitHub</literal>
+   </term>
+   <listitem>
+    <para>
+     <function>fetchFromGitHub</function> expects four arguments.
+     <literal>owner</literal> is a string corresponding to the GitHub user or
+     organization that controls this repository. <literal>repo</literal>
+     corresponds to the name of the software repository. These are located at
+     the top of every GitHub HTML page as
+     <literal>owner</literal>/<literal>repo</literal>. <literal>rev</literal>
+     corresponds to the Git commit hash or tag (e.g. <literal>v1.0</literal>)
+     that will be downloaded from Git. Finally, <literal>sha256</literal>
+     corresponds to the hash of the extracted directory. Again, other hash
+     algorithms are also available but <literal>sha256</literal> is currently
+     preferred.
+    </para>
+   </listitem>
+  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchFromGitLab</literal>
+   </term>
+   <listitem>
+    <para>
+     This is used with GitLab repositories. The arguments expected are very
+     similar to fetchFromGitHub above.
+    </para>
+   </listitem>
+  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchFromBitbucket</literal>
+   </term>
+   <listitem>
+    <para>
+     This is used with BitBucket repositories. The arguments expected are very
+     similar to fetchFromGitHub above.
+    </para>
+   </listitem>
+  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchFromSavannah</literal>
+   </term>
+   <listitem>
+    <para>
+     This is used with Savannah repositories. The arguments expected are very
+     similar to fetchFromGitHub above.
+    </para>
+   </listitem>
+  </varlistentry>
+  <varlistentry>
+   <term>
+    <literal>fetchFromRepoOrCz</literal>
+   </term>
+   <listitem>
+    <para>
+     This is used with repo.or.cz repositories. The arguments expected are very
+     similar to fetchFromGitHub above.
+    </para>
+   </listitem>
+  </varlistentry>
 </variablelist>


 </section>
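Two sketches tying the argument lists above together, with URLs, revisions, and hashes as placeholders: a plain fetchgit call, and the equivalent fetchFromGitHub wrapper for a repository hosted on GitHub:

    src = fetchgit {
      url = "https://example.org/project.git";  # hypothetical repository
      rev = "refs/tags/v1.0";
      sha256 = "0000000000000000000000000000000000000000000000000000";
    };

    src = fetchFromGitHub {
      owner = "example-owner";  # hypothetical
      repo = "project";         # hypothetical
      rev = "v1.0";
      sha256 = "0000000000000000000000000000000000000000000000000000";
    };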
@@ -13,12 +13,17 @@

 <xi:include href="./library/attrsets.xml" />

-<!-- These docs are generated via nixdoc. To add another generated
+<!-- These docs are generated via nixdoc. To add another generated
  library function file to this list, the file
  `lib-function-docs.nix` must also be updated. -->

 <xi:include href="./library/generated/strings.xml" />
+
+<xi:include href="./library/generated/trivial.xml" />
+
+<xi:include href="./library/generated/lists.xml" />
+
+<xi:include href="./library/generated/debug.xml" />
+
+<xi:include href="./library/generated/options.xml" />
 </section>
@@ -14,15 +14,15 @@
 <title>Usage</title>

 <para>
- <literal>pkgs.nix-gitignore</literal> exports a number of functions, but
- you'll most likely need either <literal>gitignoreSource</literal> or
- <literal>gitignoreSourcePure</literal>. As their first argument, they both
- accept either 1. a file with gitignore lines or 2. a string
- with gitignore lines, or 3. a list of either of the two. They will be
- concatenated into a single big string.
+ <literal>pkgs.nix-gitignore</literal> exports a number of functions, but
+ you'll most likely need either <literal>gitignoreSource</literal> or
+ <literal>gitignoreSourcePure</literal>. As their first argument, they both
+ accept either 1. a file with gitignore lines or 2. a string with gitignore
+ lines, or 3. a list of either of the two. They will be concatenated into a
+ single big string.
 </para>

-<programlisting><![CDATA[
+<programlisting><![CDATA[
 { pkgs ? import <nixpkgs> {} }:

 nix-gitignore.gitignoreSource [] ./source
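Beyond the empty list shown above, extra gitignore lines can be supplied as the first argument; the patterns here are illustrative:

    nix-gitignore.gitignoreSource [ "*.swp" "result" ] ./source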
@@ -40,24 +40,29 @@
 ]]></programlisting>

 <para>
- These functions are derived from the <literal>Filter</literal> functions
- by setting the first filter argument to <literal>(_: _: true)</literal>:
+ These functions are derived from the <literal>Filter</literal> functions by
+ setting the first filter argument to <literal>(_: _: true)</literal>:
 </para>

-<programlisting><![CDATA[
+<programlisting><![CDATA[
 gitignoreSourcePure = gitignoreFilterSourcePure (_: _: true);
 gitignoreSource = gitignoreFilterSource (_: _: true);
 ]]></programlisting>

 <para>
- Those filter functions accept the same arguments the <literal>builtins.filterSource</literal> function would pass to its filters, thus <literal>fn: gitignoreFilterSourcePure fn ""</literal> should be extensionally equivalent to <literal>filterSource</literal>. The file is blacklisted iff it's blacklisted by either your filter or the gitignoreFilter.
+ Those filter functions accept the same arguments the
+ <literal>builtins.filterSource</literal> function would pass to its filters,
+ thus <literal>fn: gitignoreFilterSourcePure fn ""</literal> should be
+ extensionally equivalent to <literal>filterSource</literal>. The file is
+ blacklisted iff it's blacklisted by either your filter or the
+ gitignoreFilter.
 </para>

 <para>
- If you want to make your own filter from scratch, you may use
- </para>
+ If you want to make your own filter from scratch, you may use
+ </para>

-<programlisting><![CDATA[
+<programlisting><![CDATA[
 gitignoreFilter = ign: root: filterPattern (gitignoreToPatterns ign) root;
 ]]></programlisting>
 </section>
@@ -66,10 +71,11 @@ gitignoreFilter = ign: root: filterPattern (gitignoreToPatterns ign) root;
 <title>gitignore files in subdirectories</title>

 <para>
- If you wish to use a filter that would search for .gitignore files in subdirectories, just like git does by default, use this function:
- </para>
+ If you wish to use a filter that would search for .gitignore files in
+ subdirectories, just like git does by default, use this function:
+ </para>

-<programlisting><![CDATA[
+<programlisting><![CDATA[
 gitignoreFilterRecursiveSource = filter: patterns: root:
 # OR
 gitignoreRecursiveSource = gitignoreFilterSourcePure (_: _: true);
@@ -7,17 +7,15 @@
 <para>
  <function>prefer-remote-fetch</function> is an overlay that downloads sources
  on a remote builder. This is useful when the evaluating machine has a slow
- upload while the builder can fetch faster directly from the source.
- To use it, put the following snippet as a new overlay:
- <programlisting>
+ upload while the builder can fetch faster directly from the source. To use
+ it, put the following snippet as a new overlay:
+ <programlisting>
 self: super:
 (super.prefer-remote-fetch self super)
 </programlisting>

- A full configuration example for that sets the overlay up for your own account,
- could look like this
-
- <programlisting>
+ A full configuration example that sets the overlay up for your own
+ account could look like this:
+ <programlisting>
 $ mkdir ~/.config/nixpkgs/overlays/
 $ cat > ~/.config/nixpkgs/overlays/prefer-remote-fetch.nix <<EOF
 self: super: super.prefer-remote-fetch self super
@@ -5,12 +5,11 @@
 <title>Trivial builders</title>

 <para>
- Nixpkgs provides a couple of functions that help with building
- derivations. The most important one,
- <function>stdenv.mkDerivation</function>, has already been
- documented above. The following functions wrap
- <function>stdenv.mkDerivation</function>, making it easier to use
- in certain cases.
+ Nixpkgs provides a couple of functions that help with building derivations.
+ The most important one, <function>stdenv.mkDerivation</function>, has already
+ been documented above. The following functions wrap
+ <function>stdenv.mkDerivation</function>, making it easier to use in certain
+ cases.
 </para>

 <variablelist>
@@ -19,26 +18,23 @@
 <literal>runCommand</literal>
 </term>
 <listitem>
- <para>
-  This takes three arguments, <literal>name</literal>,
-  <literal>env</literal>, and <literal>buildCommand</literal>.
-  <literal>name</literal> is just the name that Nix will append
-  to the store path in the same way that
-  <literal>stdenv.mkDerivation</literal> uses its
-  <literal>name</literal> attribute. <literal>env</literal> is an
-  attribute set specifying environment variables that will be set
-  for this derivation. These attributes are then passed to the
-  wrapped <literal>stdenv.mkDerivation</literal>.
-  <literal>buildCommand</literal> specifies the commands that
-  will be run to create this derivation. Note that you will need
-  to create <literal>$out</literal> for Nix to register the
-  command as successful.
- </para>
- <para>
-  An example of using <literal>runCommand</literal> is provided
-  below.
- </para>
- <programlisting>
+ <para>
+  This takes three arguments, <literal>name</literal>,
+  <literal>env</literal>, and <literal>buildCommand</literal>.
+  <literal>name</literal> is just the name that Nix will append to the store
+  path in the same way that <literal>stdenv.mkDerivation</literal> uses its
+  <literal>name</literal> attribute. <literal>env</literal> is an attribute
+  set specifying environment variables that will be set for this derivation.
+  These attributes are then passed to the wrapped
+  <literal>stdenv.mkDerivation</literal>. <literal>buildCommand</literal>
+  specifies the commands that will be run to create this derivation. Note
+  that you will need to create <literal>$out</literal> for Nix to register
+  the command as successful.
+ </para>
+ <para>
+  An example of using <literal>runCommand</literal> is provided below.
+ </para>
+ <programlisting>
 (import <nixpkgs> {}).runCommand "my-example" {} ''
   echo My example command is running

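A slightly fuller sketch showing the env argument in use; all names here are illustrative:

    (import <nixpkgs> {}).runCommand "my-example" { greeting = "hello"; } ''
      mkdir -p $out
      # $greeting is set from the env attribute set above
      echo "$greeting" > $out/greeting.txt
    ''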
@ -65,41 +61,35 @@
|
|||
<literal>runCommandCC</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This works just like <literal>runCommand</literal>. The only
|
||||
difference is that it also provides a C compiler in
|
||||
<literal>buildCommand</literal>’s environment. To minimize your
|
||||
dependencies, you should only use this if you are sure you will
|
||||
need a C compiler as part of running your command.
|
||||
<para>
|
||||
This works just like <literal>runCommand</literal>. The only difference is
|
||||
that it also provides a C compiler in <literal>buildCommand</literal>’s
|
||||
environment. To minimize your dependencies, you should only use this if
|
||||
you are sure you will need a C compiler as part of running your command.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
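For contrast, here is a minimal, hypothetical sketch of a build command that actually needs the C compiler which <literal>runCommandCC</literal> puts in scope; plain <literal>runCommand</literal> would fail at the <command>cc</command> call:
<programlisting>
let
  pkgs = import <nixpkgs> {};
in
pkgs.runCommandCC "cc-example" {} ''
  # cc is on PATH only because runCommandCC provides stdenv's C compiler
  echo 'int main(void) { return 0; }' > test.c
  cc test.c -o $out
''
</programlisting>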
<varlistentry>
<term>
<literal>writeTextFile</literal>, <literal>writeText</literal>,
<literal>writeTextDir</literal>, <literal>writeScript</literal>,
<literal>writeScriptBin</literal>
<literal>writeTextFile</literal>, <literal>writeText</literal>, <literal>writeTextDir</literal>, <literal>writeScript</literal>, <literal>writeScriptBin</literal>
</term>
<listitem>
<para>
These functions write <literal>text</literal> to the Nix store.
This is useful for creating scripts from Nix expressions.
<literal>writeTextFile</literal> takes an attribute set and
expects two arguments, <literal>name</literal> and
<literal>text</literal>. <literal>name</literal> corresponds to
the name used in the Nix store path. <literal>text</literal>
will be the contents of the file. You can also set
<literal>executable</literal> to true to make this file have
the executable bit set.
</para>
<para>
Many more commands wrap <literal>writeTextFile</literal>
including <literal>writeText</literal>,
<literal>writeTextDir</literal>,
<literal>writeScript</literal>, and
<literal>writeScriptBin</literal>. These are convenience
functions over <literal>writeTextFile</literal>.
</para>
<para>
These functions write <literal>text</literal> to the Nix store. This is
useful for creating scripts from Nix expressions.
<literal>writeTextFile</literal> takes an attribute set and expects two
arguments, <literal>name</literal> and <literal>text</literal>.
<literal>name</literal> corresponds to the name used in the Nix store
path. <literal>text</literal> will be the contents of the file. You can
also set <literal>executable</literal> to true to make this file have the
executable bit set.
</para>
<para>
Many more commands wrap <literal>writeTextFile</literal> including
<literal>writeText</literal>, <literal>writeTextDir</literal>,
<literal>writeScript</literal>, and <literal>writeScriptBin</literal>.
These are convenience functions over <literal>writeTextFile</literal>.
</para>
</listitem>
</varlistentry>
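As an illustrative sketch (all names hypothetical), the base function can produce an executable script in the store; <literal>writeScriptBin</literal> would express roughly the same thing more compactly and place the file under <filename>bin/</filename>:
<programlisting>
let
  pkgs = import <nixpkgs> {};
in
pkgs.writeTextFile {
  name = "my-script";
  executable = true;          # sets the executable bit on the stored file
  text = ''
    #!${pkgs.stdenv.shell}
    echo "hello from the Nix store"
  '';
}
</programlisting>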
<varlistentry>

@@ -109,16 +99,15 @@
<listitem>
<para>
This can be used to put many derivations into the same directory
structure. It works by creating a new derivation and adding
symlinks to each of the paths listed. It expects two arguments,
structure. It works by creating a new derivation and adding symlinks to
each of the paths listed. It expects two arguments,
<literal>name</literal>, and <literal>paths</literal>.
<literal>name</literal> is the name used in the Nix store path
for the created derivation. <literal>paths</literal> is a list of
paths that will be symlinked. These paths can be to Nix store
derivations or any other subdirectory contained within.
<literal>name</literal> is the name used in the Nix store path for the
created derivation. <literal>paths</literal> is a list of paths that will
be symlinked. These paths can be to Nix store derivations or any other
subdirectory contained within.
</para>
</listitem>
</varlistentry>
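A sketch of how this might be used (the entry's term is elided by the hunk above, but the description matches <literal>symlinkJoin</literal>; the chosen packages are arbitrary):
<programlisting>
let
  pkgs = import <nixpkgs> {};
in
pkgs.symlinkJoin {
  name = "my-tools";
  # bin/, share/, etc. from both packages end up symlinked into one tree
  paths = [ pkgs.hello pkgs.gnused ];
}
</programlisting>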
</variablelist>
</section>

@@ -4,39 +4,38 @@
<title>OCaml</title>

<para>
OCaml libraries should be installed in
<literal>$(out)/lib/ocaml/${ocaml.version}/site-lib/</literal>. Such
directories are automatically added to the <literal>$OCAMLPATH</literal>
environment variable when building another package that depends on them
or when opening a <literal>nix-shell</literal>.
OCaml libraries should be installed in
<literal>$(out)/lib/ocaml/${ocaml.version}/site-lib/</literal>. Such
directories are automatically added to the <literal>$OCAMLPATH</literal>
environment variable when building another package that depends on them or
when opening a <literal>nix-shell</literal>.
</para>

<para>
Given that most of the OCaml ecosystem is now built with dune,
nixpkgs includes a convenience build support function called
<literal>buildDunePackage</literal> that will build an OCaml package
using dune, OCaml and findlib and any additional dependencies provided
as <literal>buildInputs</literal> or <literal>propagatedBuildInputs</literal>.
Given that most of the OCaml ecosystem is now built with dune, nixpkgs
includes a convenience build support function called
<literal>buildDunePackage</literal> that will build an OCaml package using
dune, OCaml and findlib and any additional dependencies provided as
<literal>buildInputs</literal> or <literal>propagatedBuildInputs</literal>.
</para>

<para>
Here is a simple package example. It defines an (optional) attribute
<literal>minimumOCamlVersion</literal> that will be used to throw a
descriptive evaluation error if building with an older OCaml is attempted.
It uses the <literal>fetchFromGitHub</literal> fetcher to get its source.
It sets the <literal>doCheck</literal> (optional) attribute to
<literal>true</literal> which means that tests will be run with
<literal>dune runtest -p angstrom</literal> after the build
(<literal>dune build -p angstrom</literal>) is complete.
It uses <literal>alcotest</literal> as a build input (because it is needed
to run the tests) and <literal>bigstringaf</literal> and
<literal>result</literal> as propagated build inputs (thus they will also
be available to libraries depending on this library).
The library will be installed using the <literal>angstrom.install</literal>
file that dune generates.
Here is a simple package example. It defines an (optional) attribute
<literal>minimumOCamlVersion</literal> that will be used to throw a
descriptive evaluation error if building with an older OCaml is attempted. It
uses the <literal>fetchFromGitHub</literal> fetcher to get its source. It
sets the <literal>doCheck</literal> (optional) attribute to
<literal>true</literal> which means that tests will be run with <literal>dune
runtest -p angstrom</literal> after the build (<literal>dune build -p
angstrom</literal>) is complete. It uses <literal>alcotest</literal> as a
build input (because it is needed to run the tests) and
<literal>bigstringaf</literal> and <literal>result</literal> as propagated
build inputs (thus they will also be available to libraries depending on this
library). The library will be installed using the
<literal>angstrom.install</literal> file that dune generates.
</para>

<programlisting>
<programlisting>
{ stdenv, fetchFromGitHub, buildDunePackage, alcotest, result, bigstringaf }:

buildDunePackage rec {

@@ -66,14 +65,14 @@ buildDunePackage rec {
</programlisting>

<para>
Here is a second example, this time using a source archive generated with
<literal>dune-release</literal>. It is a good idea to use this archive when
it is available as it will usually contain substituted variables such as a
<literal>%%VERSION%%</literal> field. This library does not depend
on any other OCaml library and no tests are run after building it.
Here is a second example, this time using a source archive generated with
<literal>dune-release</literal>. It is a good idea to use this archive when
it is available as it will usually contain substituted variables such as a
<literal>%%VERSION%%</literal> field. This library does not depend on any
other OCaml library and no tests are run after building it.
</para>

<programlisting>
<programlisting>
{ stdenv, fetchurl, buildDunePackage }:

buildDunePackage rec {

@@ -95,5 +94,4 @@ buildDunePackage rec {
};
}
</programlisting>
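Since both listings above are truncated by the surrounding diff hunks, here is a self-contained, hypothetical sketch of the overall shape such an expression takes (the package name, URL and hash are placeholders, not a real package):
<programlisting>
{ stdenv, fetchurl, buildDunePackage }:

buildDunePackage rec {
  pname = "example";   # hypothetical package
  version = "1.0.0";

  minimumOCamlVersion = "4.03";

  src = fetchurl {
    url = "https://example.org/releases/example-${version}.tbz";
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };

  doCheck = true;      # runs `dune runtest -p example` after the build
}
</programlisting>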
</section>

@@ -602,11 +602,10 @@ as the interpreter unless overridden otherwise.
All parameters from `stdenv.mkDerivation` function are still supported. The following are specific to `buildPythonPackage`:

* `catchConflicts ? true`: If `true`, abort package build if a package name appears more than once in dependency tree. Default is `true`.
* `checkInputs ? []`: Dependencies needed for running the `checkPhase`. These are added to `buildInputs` when `doCheck = true`.
* `disabled ? false`: If `true`, the package is not built for the particular Python interpreter version.
* `dontWrapPythonPrograms ? false`: Skip wrapping of python programs.
* `installFlags ? []`: A list of strings. Arguments to be passed to `pip install`. To pass options to `python setup.py install`, use `--install-option`. E.g., `installFlags=["--install-option='--cpp_implementation'"].
* `format ? "setuptools"`: Format of the source. Valid options are `"setuptools"`, `"flit"`, `"wheel"`, and `"other"`. `"setuptools"` is for when the source has a `setup.py` and `setuptools` is used to build a wheel, `flit`, in case `flit` should be used to build a wheel, and `wheel` in case a wheel is provided. Use `other` when a custom `buildPhase` and/or `installPhase` is needed.
* `installFlags ? []`: A list of strings. Arguments to be passed to `pip install`. To pass options to `python setup.py install`, use `--install-option`. E.g., `installFlags=["--install-option='--cpp_implementation'"]`.
* `format ? "setuptools"`: Format of the source. Valid options are `"setuptools"`, `"pyproject"`, `"flit"`, `"wheel"`, and `"other"`. `"setuptools"` is for when the source has a `setup.py` and `setuptools` is used to build a wheel, `flit`, in case `flit` should be used to build a wheel, and `wheel` in case a wheel is provided. Use `other` when a custom `buildPhase` and/or `installPhase` is needed.
* `makeWrapperArgs ? []`: A list of strings. Arguments to be passed to `makeWrapper`, which wraps generated binaries. By default, the arguments to `makeWrapper` set `PATH` and `PYTHONPATH` environment variables before calling the binary. Additional arguments here can allow a developer to set environment variables which will be available when the binary is run. For example, `makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]`.
* `namePrefix`: Prepends text to `${name}` parameter. In case of libraries, this defaults to `"python3.5-"` for Python 3.5, etc., and in case of applications to `""`.
* `pythonPath ? []`: List of packages to be added into `$PYTHONPATH`. Packages in `pythonPath` are not propagated (contrary to `propagatedBuildInputs`).

@@ -615,6 +614,14 @@ All parameters from `stdenv.mkDerivation` function are still supported. The foll
* `removeBinByteCode ? true`: Remove bytecode from `/bin`. Bytecode is only created when the filenames end with `.py`.
* `setupPyBuildFlags ? []`: List of flags passed to `setup.py build_ext` command.

The `stdenv.mkDerivation` function accepts various parameters for describing build inputs (see "Specifying dependencies"). The following are of special
interest for Python packages, either because these are primarily used, or because their behaviour is different (a sketch follows the list):

* `nativeBuildInputs ? []`: Build-time only dependencies. Typically executables as well as the items listed in `setup_requires`.
* `buildInputs ? []`: Build and/or run-time dependencies that need to be compiled for the host machine. Typically non-Python libraries which are being linked.
* `checkInputs ? []`: Dependencies needed for running the `checkPhase`. These are added to `nativeBuildInputs` when `doCheck = true`. Items listed in `tests_require` go here.
* `propagatedBuildInputs ? []`: Aside from propagating dependencies, `buildPythonPackage` also injects code into and wraps executables with the paths included in this list. Items listed in `install_requires` go here.
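
A sketch illustrating several of these parameters on a hypothetical package (the name, hash and dependencies are placeholders):

```
{ buildPythonPackage, fetchPypi, pythonOlder, pytest, requests }:

buildPythonPackage rec {
  pname = "example";            # hypothetical package
  version = "1.2.3";

  disabled = pythonOlder "3.5"; # not built for interpreters older than 3.5

  src = fetchPypi {
    inherit pname version;
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };

  propagatedBuildInputs = [ requests ];  # run-time Python dependencies

  checkInputs = [ pytest ];     # only used because doCheck = true
  doCheck = true;
}
```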

##### Overriding Python packages

The `buildPythonPackage` function has an `overridePythonAttrs` method that

@@ -874,7 +881,6 @@ example of such a situation is when `py.test` is used.
'';
}
```
- Unicode issues can typically be fixed by including `glibcLocales` in `buildInputs` and exporting `LC_ALL=en_US.utf-8`.
- Tests that attempt to access `$HOME` can be fixed by using the following work-around before running tests (e.g. `preCheck`): `export HOME=$(mktemp -d)`

## FAQ

@@ -1123,6 +1129,14 @@ LLVM implementation. To use that one instead, Intel recommends users set it with
Note that `mkl` is only available on `x86_64-{linux,darwin}` platforms;
moreover, Hydra is not building and distributing pre-compiled binaries using it.

### What inputs do `setup_requires`, `install_requires` and `tests_require` map to?

In a `setup.py` or `setup.cfg` it is common to declare dependencies (see the sketch after this list):

* `setup_requires` corresponds to `nativeBuildInputs`
* `install_requires` corresponds to `propagatedBuildInputs`
* `tests_require` corresponds to `checkInputs`
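
A hedged sketch of that mapping for a hypothetical `setup.py` that declares all three kinds of dependency (the package and its dependencies are only examples):

```
{ buildPythonPackage, fetchPypi, setuptools_scm, attrs, pytest }:

buildPythonPackage rec {
  pname = "example";  # hypothetical package
  version = "0.1.0";

  src = fetchPypi {
    inherit pname version;
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };

  nativeBuildInputs = [ setuptools_scm ];  # setup_requires
  propagatedBuildInputs = [ attrs ];       # install_requires
  checkInputs = [ pytest ];                # tests_require
}
```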

## Contributing

### Contributing guidelines

@@ -307,19 +307,20 @@ packageOverrides = pkgs: {
</screen>
</para>
</section>

<section xml:id="sec-elm">
<title>Elm</title>

<para>
To update Elm compiler, see <filename>nixpkgs/pkgs/development/compilers/elm/README.md</filename>.
To update Elm compiler, see
<filename>nixpkgs/pkgs/development/compilers/elm/README.md</filename>.
</para>

<para>
To package Elm applications, <link xlink:href="https://github.com/hercules-ci/elm2nix#elm2nix">read about elm2nix</link>.
To package Elm applications,
<link xlink:href="https://github.com/hercules-ci/elm2nix#elm2nix">read about
elm2nix</link>.
</para>
</section>

<section xml:id="sec-shell-helpers">
<title>Interactive shell helpers</title>

@@ -96,8 +96,8 @@
</programlisting>
<para>
The package <literal>xcbuild</literal> can be used to build projects that
really depend on Xcode. However, this replacement is not 100%
compatible with Xcode and can occasionally cause issues.
really depend on Xcode. However, this replacement is not 100% compatible
with Xcode and can occasionally cause issues.
</para>
</listitem>
</itemizedlist>

@@ -148,8 +148,8 @@ $ git add pkgs/development/libraries/libfoo/default.nix</screen>
<listitem>
<para>
You can use <command>nix-prefetch-url</command>
<replaceable>url</replaceable> to get the
SHA-256 hash of source distributions. There are similar commands as
<replaceable>url</replaceable> to get the SHA-256 hash of source
distributions. There are similar commands, such as
<command>nix-prefetch-git</command> and
<command>nix-prefetch-hg</command>, available in the
<literal>nix-prefetch-scripts</literal> package.

@@ -24,11 +24,13 @@
<para>
The high change rate of Nixpkgs makes any pull request that remains open for
too long subject to conflicts that will require extra work from the submitter
or the merger. Reviewing pull requests in a timely manner and being responsive
to the comments is the key to avoid this issue. GitHub provides sort filters
that can be used to see the <link
or the merger. Reviewing pull requests in a timely manner and being
responsive to the comments is the key to avoid this issue. GitHub provides
sort filters that can be used to see the
<link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most
recently</link> and the <link
recently</link> and the
<link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least
recently</link> updated pull requests. We highly encourage looking at
<link xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+review%3Anone+status%3Asuccess+-label%3A%222.status%3A+work-in-progress%22+no%3Aproject+no%3Aassignee+no%3Amilestone">

@@ -609,8 +611,8 @@ policy.
create an issue or post on
<link
xlink:href="https://discourse.nixos.org">Discourse</link> with
references of packages and modules they maintain so the maintainership can be
taken over by other contributors.
references of packages and modules they maintain so the maintainership can
be taken over by other contributors.
</para>
</section>
</chapter>

566 doc/stdenv.xml

@@ -228,18 +228,17 @@ genericBuild
</para>

<para>
The extension of <envar>PATH</envar> with dependencies, alluded to
above, proceeds according to the relative platforms alone. The
process is carried out only for dependencies whose host platform
matches the new derivation's build platform i.e. dependencies which
run on the platform where the new derivation will be built.
The extension of <envar>PATH</envar> with dependencies, alluded to above,
proceeds according to the relative platforms alone. The process is carried
out only for dependencies whose host platform matches the new derivation's
build platform i.e. dependencies which run on the platform where the new
derivation will be built.
<footnote xml:id="footnote-stdenv-native-dependencies-in-path">
<para>
Currently, this means for native builds all dependencies are put
on the <envar>PATH</envar>. But in the future that may not be the
case for sake of matching cross: the platforms would be assumed
to be unique for native and cross builds alike, so only the
<varname>depsBuild*</varname> and
Currently, this means for native builds all dependencies are put on the
<envar>PATH</envar>. But in the future that may not be the case for sake
of matching cross: the platforms would be assumed to be unique for native
and cross builds alike, so only the <varname>depsBuild*</varname> and
<varname>nativeBuildInputs</varname> would be added to the
<envar>PATH</envar>.
</para>

@@ -252,9 +251,10 @@ genericBuild
<para>
The dependency is propagated when it forces some of its other-transitive
(non-immediate) downstream dependencies to also take it on as an immediate
dependency. Nix itself already takes a package's transitive dependencies into
account, but this propagation ensures nixpkgs-specific infrastructure like
setup hooks (mentioned above) also are run as if the propagated dependency.
dependency. Nix itself already takes a package's transitive dependencies
into account, but this propagation ensures nixpkgs-specific infrastructure
like setup hooks (mentioned above) also are run as if the propagated
dependency.
</para>

<para>

@@ -270,9 +270,9 @@ genericBuild
described by the current dependency's platform offsets. This results in a
sort of transitive closure of the dependency relation, with the offsets being
approximately summed when two dependency links are combined. We also prune
transitive dependencies whose combined offsets go out-of-bounds, which can be
viewed as a filter over that transitive closure removing dependencies that
are blatantly absurd.
transitive dependencies whose combined offsets go out-of-bounds, which can
be viewed as a filter over that transitive closure removing dependencies
that are blatantly absurd.
</para>

<para>

@@ -287,8 +287,8 @@ genericBuild
propagation logic.
</para>
</footnote>
They're confusing in very different ways so... hopefully if something doesn't
make sense in one presentation, it will in the other!
They're confusing in very different ways so... hopefully if something
doesn't make sense in one presentation, it will in the other!
<programlisting>
let mapOffset(h, t, i) = i + (if i <= 0 then h else t - 1)

@@ -324,31 +324,31 @@ let f(h, h + 1, i) = i + (if i <= 0 then h else (h + 1) - 1)
let f(h, h + 1, i) = i + (if i <= 0 then h else h)
let f(h, h + 1, i) = i + h
</programlisting>
This is where "sum-like" comes in from above: We can just sum all of the host
offsets to get the host offset of the transitive dependency. The target
offset of the transitive dependency is simply the host offset + 1, just as it
was with the dependencies composed to make this transitive one; it can be
This is where "sum-like" comes in from above: We can just sum all of the
host offsets to get the host offset of the transitive dependency. The target
offset of the transitive dependency is simply the host offset + 1, just as
it was with the dependencies composed to make this transitive one; it can be
ignored as it doesn't add any new information.
</para>
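For concreteness, the composition rule above can be transliterated into Nix directly; this is purely an illustrative sketch, not code that exists in Nixpkgs:
<programlisting>
let
  mapOffset = h: t: i: i + (if i <= 0 then h else t - 1);
in
  # a build-time dep (h = -1, t = 0) seen through another build-time dep
  # lands at offset -1 + -1 = -2, which the bounds check then prunes:
  mapOffset (-1) 0 (-1)
</programlisting>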

<para>
Because of the bounds checks, the uncommon cases are <literal>h = t</literal>
and <literal>h + 2 = t</literal>. In the former case, the motivation for
<function>mapOffset</function> is that since its host and target platforms
are the same, no transitive dependency of it should be able to "discover" an
offset greater than its reduced target offsets.
Because of the bounds checks, the uncommon cases are <literal>h =
t</literal> and <literal>h + 2 = t</literal>. In the former case, the
motivation for <function>mapOffset</function> is that since its host and
target platforms are the same, no transitive dependency of it should be able
to "discover" an offset greater than its reduced target offsets.
<function>mapOffset</function> effectively "squashes" all its transitive
dependencies' offsets so that none will ever be greater than the target
offset of the original <literal>h = t</literal> package. In the other case,
<literal>h + 1</literal> is skipped over between the host and target offsets.
Instead of squashing the offsets, we need to "rip" them apart so no
<literal>h + 1</literal> is skipped over between the host and target
offsets. Instead of squashing the offsets, we need to "rip" them apart so no
transitive dependencies' offset is that one.
</para>

<para>
Overall, the unifying theme here is that propagation shouldn't be introducing
transitive dependencies involving platforms the depending package is unaware
of. The offset bounds checking and definition of
Overall, the unifying theme here is that propagation shouldn't be
introducing transitive dependencies involving platforms the depending
package is unaware of. The offset bounds checking and definition of
<function>mapOffset</function> together ensure that this is the case.
Discovering a new offset is discovering a new platform, and since those
platforms weren't in the derivation "spec" of the needing package, they

@@ -381,8 +381,8 @@ let f(h, h + 1, i) = i + h
Since these packages are able to be run at build-time, they are always
added to the <envar>PATH</envar>, as described above. But since these
packages are only guaranteed to be able to run then, they shouldn't
persist as run-time dependencies. This isn't currently enforced, but could
be in the future.
persist as run-time dependencies. This isn't currently enforced, but
could be in the future.
</para>
</listitem>
</varlistentry>

@@ -396,10 +396,10 @@ let f(h, h + 1, i) = i + h
platform, and target platform is the new derivation's host platform. This
means a <literal>-1</literal> host offset and <literal>0</literal> target
offset from the new derivation's platforms. These are programs and
libraries used at build-time that, if they are a compiler or similar tool,
produce code to run at run-time—i.e. tools used to build the new
derivation. If the dependency doesn't care about the target platform (i.e.
isn't a compiler or similar tool), put it here, rather than in
libraries used at build-time that, if they are a compiler or similar
tool, produce code to run at run-time—i.e. tools used to build the new
derivation. If the dependency doesn't care about the target platform
(i.e. isn't a compiler or similar tool), put it here, rather than in
<varname>depsBuildBuild</varname> or <varname>depsBuildTarget</varname>.
This could be called <varname>depsBuildHost</varname> but
<varname>nativeBuildInputs</varname> is used for historical continuity.

@@ -407,8 +407,9 @@ let f(h, h + 1, i) = i + h
<para>
Since these packages are able to be run at build-time, they are added to
the <envar>PATH</envar>, as described above. But since these packages are
only guaranteed to be able to run then, they shouldn't persist as run-time
dependencies. This isn't currently enforced, but could be in the future.
only guaranteed to be able to run then, they shouldn't persist as
run-time dependencies. This isn't currently enforced, but could be in the
future.
</para>
</listitem>
</varlistentry>

@@ -421,33 +422,36 @@ let f(h, h + 1, i) = i + h
A list of dependencies whose host platform is the new derivation's build
platform, and target platform is the new derivation's target platform.
This means a <literal>-1</literal> host offset and <literal>1</literal>
target offset from the new derivation's platforms. These are programs used
at build time that produce code to run with code produced by the depending
package. Most commonly, these are tools used to build the runtime or
standard library that the currently-being-built compiler will inject into
any code it compiles. In many cases, the currently-being-built-compiler is
itself employed for that task, but when that compiler won't run (i.e. its
build and host platform differ) this is not possible. Other times, the
compiler relies on some other tool, like binutils, that is always built
separately so that the dependency is unconditional.
target offset from the new derivation's platforms. These are programs
used at build time that produce code to run with code produced by the
depending package. Most commonly, these are tools used to build the
runtime or standard library that the currently-being-built compiler will
inject into any code it compiles. In many cases, the
currently-being-built-compiler is itself employed for that task, but when
that compiler won't run (i.e. its build and host platform differ) this is
not possible. Other times, the compiler relies on some other tool, like
binutils, that is always built separately so that the dependency is
unconditional.
</para>
<para>
This is a somewhat confusing concept to wrap one’s head around, and for
good reason. As the only dependency type where the platform offsets are
not adjacent integers, it requires thinking of a bootstrapping stage
<emphasis>two</emphasis> away from the current one. It and its use-case go
hand in hand and are both considered poor form: try to not need this sort
of dependency, and try to avoid building standard libraries and runtimes
in the same derivation as the compiler produces code using them. Instead
strive to build those like a normal library, using the newly-built
compiler just as a normal library would. In short, do not use this
attribute unless you are packaging a compiler and are sure it is needed.
<emphasis>two</emphasis> away from the current one. It and its use-case
go hand in hand and are both considered poor form: try to not need this
sort of dependency, and try to avoid building standard libraries and
runtimes in the same derivation as the compiler produces code using them.
Instead strive to build those like a normal library, using the
newly-built compiler just as a normal library would. In short, do not use
this attribute unless you are packaging a compiler and are sure it is
needed.
</para>
<para>
Since these packages are able to run at build time, they are added to the
<envar>PATH</envar>, as described above. But since these packages are only
guaranteed to be able to run then, they shouldn't persist as run-time
dependencies. This isn't currently enforced, but could be in the future.
<envar>PATH</envar>, as described above. But since these packages are
only guaranteed to be able to run then, they shouldn't persist as
run-time dependencies. This isn't currently enforced, but could be in the
future.
</para>
</listitem>
</varlistentry>
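A sketch contrasting the most common of these dependency attributes on a hypothetical package (the chosen inputs are only illustrative):
<programlisting>
{ stdenv, cmake, zlib, buildPackages }:

stdenv.mkDerivation {
  name = "example-1.0";   # hypothetical package
  src = ./.;

  # tools that run on the build platform and target the build platform
  depsBuildBuild = [ buildPackages.stdenv.cc ];
  # tools executed during this build (build platform, targeting the host)
  nativeBuildInputs = [ cmake ];
  # libraries compiled for, and linked into, the host-platform output
  buildInputs = [ zlib ];
}
</programlisting>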

@@ -462,11 +466,11 @@ let f(h, h + 1, i) = i + h
and <literal>0</literal> target offset from the new derivation's host
platform. These are packages used at run-time to generate code also used
at run-time. In practice, this would usually be tools used by compilers
for macros or a metaprogramming system, or libraries used by the macros or
metaprogramming code itself. It's always preferable to use a
<varname>depsBuildBuild</varname> dependency in the derivation being built
over a <varname>depsHostHost</varname> on the tool doing the building for
this purpose.
for macros or a metaprogramming system, or libraries used by the macros
or metaprogramming code itself. It's always preferable to use a
<varname>depsBuildBuild</varname> dependency in the derivation being
built over a <varname>depsHostHost</varname> on the tool doing the
building for this purpose.
</para>
</listitem>
</varlistentry>

@@ -481,8 +485,8 @@ let f(h, h + 1, i) = i + h
<literal>1</literal> target offset from the new derivation's host
platform. This would be called <varname>depsHostTarget</varname> but for
historical continuity. If the dependency doesn't care about the target
platform (i.e. isn't a compiler or similar tool), put it here, rather than
in <varname>depsBuildBuild</varname>.
platform (i.e. isn't a compiler or similar tool), put it here, rather
than in <varname>depsBuildBuild</varname>.
</para>
<para>
These are often programs and libraries used by the new derivation at

@@ -664,10 +668,11 @@ passthru = {
<literal>hello.baz.value1</literal>. We don't specify any usage or schema
of <literal>passthru</literal> - it is meant for values that would be
useful outside the derivation in other parts of a Nix expression (e.g. in
other derivations). An example would be to convey some specific dependency
of your derivation which contains a program with plugins support. Later,
others who make derivations with plugins can use passed-through dependency
to ensure that their plugin would be binary-compatible with built program.
other derivations). An example would be to convey some specific
dependency of your derivation which contains a program with plugins
support. Later, others who make derivations with plugins can use
passed-through dependency to ensure that their plugin would be
binary-compatible with built program.
</para>
</listitem>
</varlistentry>
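An illustrative sketch of that plugin scenario (all names hypothetical): the program passes its plugin API dependency through, so plugin packages can reuse exactly the same version:
<programlisting>
{ stdenv, somePluginApi }:

stdenv.mkDerivation {
  name = "example-with-plugins-1.0";   # hypothetical program
  src = ./.;

  passthru = {
    # plugin derivations can refer to exampleWithPlugins.pluginApi
    # to stay binary-compatible with the built program
    pluginApi = somePluginApi;
  };
}
</programlisting>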

@@ -677,9 +682,9 @@ passthru = {
</term>
<listitem>
<para>
A script to be run by <filename>maintainers/scripts/update.nix</filename> when
the package is matched. It needs to be an executable file, either on the file
system:
A script to be run by <filename>maintainers/scripts/update.nix</filename>
when the package is matched. It needs to be an executable file, either on
the file system:
<programlisting>
passthru.updateScript = ./update.sh;
</programlisting>

@@ -695,16 +700,24 @@ passthru.updateScript = writeScript "update-zoom-us" ''
update-source-version zoom-us "$version"
'';
</programlisting>
The attribute can also contain a list, a script followed by arguments to be passed to it:
The attribute can also contain a list, a script followed by arguments to
be passed to it:
<programlisting>
passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ];
</programlisting>
Note that the update scripts will be run in parallel by default; you should avoid running <command>git commit</command> or any other commands that cannot handle that.
Note that the update scripts will be run in parallel by default; you
should avoid running <command>git commit</command> or any other commands
that cannot handle that.
</para>

<para>
For information about how to run the updates, execute
<cmdsynopsis><command>nix-shell</command> <arg>maintainers/scripts/update.nix</arg></cmdsynopsis>.
<cmdsynopsis>
<command>nix-shell</command>
<arg>
maintainers/scripts/update.nix
</arg>
</cmdsynopsis>
.
</para>
</listitem>
</varlistentry>

@@ -1178,8 +1191,8 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
By default, when cross compiling, the configure script has
<option>--build=...</option> and <option>--host=...</option> passed.
Packages can instead pass <literal>[ "build" "host" "target" ]</literal>
or a subset to control exactly which platform flags are passed. Compilers
and other tools can use this to also pass the target platform.
or a subset to control exactly which platform flags are passed.
Compilers and other tools can use this to also pass the target platform.
<footnote xml:id="footnote-stdenv-build-time-guessing-impurity">
<para>
Eventually these will be passed building natively as well, to improve

@@ -1694,10 +1707,11 @@ installTargets = "install-bin install-doc";</programlisting>
</term>
<listitem>
<para>
A package can export a <link linkend="ssec-setup-hooks">setup hook</link>
by setting this variable. The setup hook, if defined, is copied to
<filename>$out/nix-support/setup-hook</filename>. Environment variables
are then substituted in it using <function
A package can export a <link linkend="ssec-setup-hooks">setup
hook</link> by setting this variable. The setup hook, if defined, is
copied to <filename>$out/nix-support/setup-hook</filename>. Environment
variables are then substituted in it using
<function
linkend="fun-substituteAll">substituteAll</function>.
</para>
</listitem>
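A minimal sketch of exporting such a hook from a hypothetical package (the hook file is assumed to exist in the package's source directory):
<programlisting>
{ stdenv }:

stdenv.mkDerivation {
  name = "example-with-hook-1.0";   # hypothetical package
  src = ./.;

  # copied to $out/nix-support/setup-hook and sourced by dependent builds
  setupHook = ./setup-hook.sh;
}
</programlisting>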

@@ -1812,8 +1826,8 @@ set debug-file-directory ~/.nix-profile/lib/debug
<listitem>
<para>
A list of dependencies used by the phase. This gets included in
<varname>nativeBuildInputs</varname> when <varname>doInstallCheck</varname> is
set.
<varname>nativeBuildInputs</varname> when
<varname>doInstallCheck</varname> is set.
</para>
</listitem>
</varlistentry>
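The entry's term is elided by the hunk, but the description matches <varname>installCheckInputs</varname>; a sketch of how such a phase-specific input list is typically used (package and input are hypothetical):
<programlisting>
{ stdenv, curl }:

stdenv.mkDerivation {
  name = "example-1.0";   # hypothetical package
  src = ./.;

  doInstallCheck = true;
  # only merged into nativeBuildInputs because doInstallCheck is set
  installCheckInputs = [ curl ];
}
</programlisting>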

@@ -2160,10 +2174,11 @@ someVar=$(stripHash $name)
dependency derivation is already built just the same—depending is just
needing something to exist, and needing is idempotent. However, a dependency
specified twice will have its setup hook run twice, and that could easily
change the build environment (though a well-written setup hook will therefore
strive to be idempotent so this is in fact not observable). More broadly,
setup hooks are anti-modular in that multiple dependencies, whether the same
or different, should not interfere and yet their setup hooks may well do so.
change the build environment (though a well-written setup hook will
therefore strive to be idempotent so this is in fact not observable). More
broadly, setup hooks are anti-modular in that multiple dependencies, whether
the same or different, should not interfere and yet their setup hooks may
well do so.
</para>

<para>

@@ -2185,11 +2200,12 @@ someVar=$(stripHash $name)
Returning to the C compiler wrapper example, if the wrapper itself is an
<literal>n</literal> dependency, then it only wants to accumulate flags from
<literal>n + 1</literal> dependencies, as only those ones match the
compiler's target platform. The <envar>hostOffset</envar> variable is defined
with the current dependency's host offset <envar>targetOffset</envar> with
its target offset, before its setup hook is sourced. Additionally, since most
environment hooks don't care about the target platform, that means the setup
hook can append to the right bash array by doing something like
compiler's target platform. The <envar>hostOffset</envar> variable is
defined with the current dependency's host offset, and
<envar>targetOffset</envar> with its target offset, before its setup hook is
sourced. Additionally, since most environment hooks don't care about the
target platform, that means the setup hook can append to the right bash
array by doing something like
<programlisting language="bash">
addEnvHooks "$hostOffset" myBashFunction
</programlisting>

@@ -2204,24 +2220,22 @@ addEnvHooks "$hostOffset" myBashFunction
</para>

<para>
First, let’s cover some setup hooks that are part of Nixpkgs
default stdenv. This means that they are run for every package
built using <function>stdenv.mkDerivation</function>. Some of
these are platform specific, so they may run on Linux but not
Darwin or vice-versa.
<variablelist>
First, let’s cover some setup hooks that are part of Nixpkgs default
stdenv. This means that they are run for every package built using
<function>stdenv.mkDerivation</function>. Some of these are platform
specific, so they may run on Linux but not Darwin or vice-versa.
<variablelist>
<varlistentry>
<term>
<literal>move-docs.sh</literal>
</term>
<listitem>
<para>
<para>
This setup hook moves any installed documentation to the
<literal>/share</literal> subdirectory directory. This includes
the man, doc and info directories. This is needed for legacy
programs that do not know how to use the
<literal>share</literal> subdirectory.
</para>
<literal>/share</literal> subdirectory. This includes the man,
doc and info directories. This is needed for legacy programs that do not
know how to use the <literal>share</literal> subdirectory.
</para>
</listitem>
</varlistentry>
<varlistentry>

@@ -2229,11 +2243,11 @@ addEnvHooks "$hostOffset" myBashFunction
<literal>compress-man-pages.sh</literal>
</term>
<listitem>
<para>
This setup hook compresses any man pages that have been
installed. The compression is done using the gzip program. This
helps to reduce the installed size of packages.
</para>
<para>
This setup hook compresses any man pages that have been installed. The
compression is done using the gzip program. This helps to reduce the
installed size of packages.
</para>
</listitem>
</varlistentry>
<varlistentry>

@@ -2241,12 +2255,11 @@ addEnvHooks "$hostOffset" myBashFunction
<literal>strip.sh</literal>
</term>
<listitem>
<para>
This runs the strip command on installed binaries and
libraries. This removes unnecessary information like debug
symbols when they are not needed. This also helps to reduce the
installed size of packages.
</para>
<para>
This runs the strip command on installed binaries and libraries. This
removes unnecessary information like debug symbols when they are not
needed. This also helps to reduce the installed size of packages.
</para>
</listitem>
</varlistentry>
<varlistentry>

@@ -2254,15 +2267,14 @@ addEnvHooks "$hostOffset" myBashFunction
<literal>patch-shebangs.sh</literal>
</term>
<listitem>
<para>
This setup hook patches installed scripts to use the full path
to the shebang interpreter. A shebang interpreter is the first
commented line of a script telling the operating system which
program will run the script (e.g <literal>#!/bin/bash</literal>). In
Nix, we want an exact path to that interpreter to be used. This
often replaces <literal>/bin/sh</literal> with a path in the
Nix store.
</para>
<para>
This setup hook patches installed scripts to use the full path to the
shebang interpreter. A shebang interpreter is the first commented line
of a script telling the operating system which program will run the
script (e.g. <literal>#!/bin/bash</literal>). In Nix, we want an exact
path to that interpreter to be used. This often replaces
<literal>/bin/sh</literal> with a path in the Nix store.
</para>
</listitem>
</varlistentry>
<varlistentry>

@@ -2270,12 +2282,12 @@ addEnvHooks "$hostOffset" myBashFunction
<literal>audit-tmpdir.sh</literal>
</term>
<listitem>
<para>
This verifies that no references are left from the install
binaries to the directory used to build those binaries. This
ensures that the binaries do not need things outside the Nix
store. This is currently supported in Linux only.
</para>
<para>
This verifies that no references are left from the install binaries to
the directory used to build those binaries. This ensures that the
binaries do not need things outside the Nix store. This is currently
supported in Linux only.
</para>
</listitem>
</varlistentry>
<varlistentry>

@@ -2283,14 +2295,14 @@ addEnvHooks "$hostOffset" myBashFunction
<literal>multiple-outputs.sh</literal>
</term>
<listitem>
<para>
This setup hook adds configure flags that tell packages to
install files into any one of the proper outputs listed in
<literal>outputs</literal>. This behavior can be turned off by setting
<para>
This setup hook adds configure flags that tell packages to install files
into any one of the proper outputs listed in <literal>outputs</literal>.
This behavior can be turned off by setting
<literal>setOutputFlags</literal> to false in the derivation
environment. See <xref linkend="chap-multiple-output"/> for
more information.
</para>
environment. See <xref linkend="chap-multiple-output"/> for more
information.
</para>
</listitem>
</varlistentry>
<varlistentry>

@@ -2298,11 +2310,11 @@ addEnvHooks "$hostOffset" myBashFunction
<literal>move-sbin.sh</literal>
</term>
<listitem>
<para>
This setup hook moves any binaries installed in the sbin
subdirectory into bin. In addition, a link is provided from
sbin to bin for compatibility.
</para>
<para>
This setup hook moves any binaries installed in the sbin subdirectory
into bin. In addition, a link is provided from sbin to bin for
compatibility.
</para>
</listitem>
</varlistentry>
<varlistentry>

@@ -2310,11 +2322,11 @@ addEnvHooks "$hostOffset" myBashFunction
<literal>move-lib64.sh</literal>
</term>
<listitem>
<para>
This setup hook moves any libraries installed in the lib64
subdirectory into lib. In addition, a link is provided from
lib64 to lib for compatibility.
</para>
<para>
This setup hook moves any libraries installed in the lib64 subdirectory
into lib. In addition, a link is provided from lib64 to lib for
compatibility.
</para>
</listitem>
</varlistentry>
<varlistentry>

@@ -2322,10 +2334,10 @@ addEnvHooks "$hostOffset" myBashFunction
<literal>set-source-date-epoch-to-latest.sh</literal>
</term>
<listitem>
<para>
This sets <literal>SOURCE_DATE_EPOCH</literal> to the
modification time of the most recent file.
</para>
<para>
This sets <literal>SOURCE_DATE_EPOCH</literal> to the modification time
of the most recent file.
</para>
</listitem>
</varlistentry>
<varlistentry>
@@ -2335,19 +2347,19 @@ addEnvHooks "$hostOffset" myBashFunction
<listitem>
<para>
The Bintools Wrapper wraps the binary utilities for a bunch of
miscellaneous purposes. These are GNU Binutils when targetting Linux, and
a mix of cctools and GNU binutils for Darwin. [The "Bintools" name is
supposed to be a compromise between "Binutils" and "cctools" not denoting
any specific implementation.] Specifically, the underlying bintools
package, and a C standard library (glibc or Darwin's libSystem, just for
the dynamic loader) are all fed in, and dependency finding, hardening
(see below), and purity checks for each are handled by the Bintools
Wrapper. Packages typically depend on CC Wrapper, which in turn (at run
time) depends on the Bintools Wrapper.
miscellaneous purposes. These are GNU Binutils when targeting Linux,
and a mix of cctools and GNU binutils for Darwin. [The "Bintools" name
is supposed to be a compromise between "Binutils" and "cctools" not
denoting any specific implementation.] Specifically, the underlying
bintools package, and a C standard library (glibc or Darwin's libSystem,
just for the dynamic loader) are all fed in, and dependency finding,
hardening (see below), and purity checks for each are handled by the
Bintools Wrapper. Packages typically depend on CC Wrapper, which in turn
(at run time) depends on the Bintools Wrapper.
</para>
<para>
The Bintools Wrapper was only just recently split off from CC Wrapper, so
the division of labor is still being worked out. For example, it
The Bintools Wrapper was only just recently split off from CC Wrapper,
so the division of labor is still being worked out. For example, it
shouldn't care about the C standard library, but just take a
derivation with the dynamic loader (which happens to be the glibc on
linux). Dependency finding however is a task both wrappers will continue

@@ -2357,11 +2369,12 @@ addEnvHooks "$hostOffset" myBashFunction
<varname>nativeBuildInputs</varname>) in environment variables. The
Bintools Wrapper's setup hook causes any <filename>lib</filename> and
<filename>lib64</filename> subdirectories to be added to
<envar>NIX_LDFLAGS</envar>. Since the CC Wrapper and the Bintools Wrapper
use the same strategy, most of the Bintools Wrapper code is sparsely
commented and refers to the CC Wrapper. But the CC Wrapper's code, by
contrast, has quite lengthy comments. The Bintools Wrapper merely cites
those, rather than repeating them, to avoid falling out of sync.
<envar>NIX_LDFLAGS</envar>. Since the CC Wrapper and the Bintools
Wrapper use the same strategy, most of the Bintools Wrapper code is
sparsely commented and refers to the CC Wrapper. But the CC Wrapper's
code, by contrast, has quite lengthy comments. The Bintools Wrapper
merely cites those, rather than repeating them, to avoid falling out of
sync.
</para>
<para>
A final task of the setup hook is defining a number of standard

@@ -2370,8 +2383,8 @@ addEnvHooks "$hostOffset" myBashFunction
under the assumption that the Bintools Wrapper's binaries will be on the
path. Firstly, this helps poorly-written packages, e.g. ones that look
for just <command>gcc</command> when <envar>CC</envar> isn't defined yet
<command>clang</command> is to be used. Secondly, this helps packages not
get confused when cross-compiling, in which case multiple Bintools
<command>clang</command> is to be used. Secondly, this helps packages
not get confused when cross-compiling, in which case multiple Bintools
Wrappers may simultaneously be in use.
<footnote xml:id="footnote-stdenv-per-platform-wrapper">
<para>

@@ -2387,16 +2400,16 @@ addEnvHooks "$hostOffset" myBashFunction
Wrappers, properly disambiguating them.
</para>
<para>
A problem with this final task is that the Bintools Wrapper is honest and
defines <envar>LD</envar> as <command>ld</command>. Most packages,
A problem with this final task is that the Bintools Wrapper is honest
and defines <envar>LD</envar> as <command>ld</command>. Most packages,
however, firstly use the C compiler for linking, secondly use
<envar>LD</envar> anyways, defining it as the C compiler, and thirdly,
only so define <envar>LD</envar> when it is undefined as a fallback. This
triple-threat means Bintools Wrapper will break those packages, as LD is
already defined as the actual linker which the package won't override yet
doesn't want to use. The workaround is to define, just for the
problematic package, <envar>LD</envar> as the C compiler. A good way to
do this would be <command>preConfigure = "LD=$CC"</command>.
only so define <envar>LD</envar> when it is undefined as a fallback.
This triple-threat means Bintools Wrapper will break those packages, as
LD is already defined as the actual linker which the package won't
override yet doesn't want to use. The workaround is to define, just for
the problematic package, <envar>LD</envar> as the C compiler. A good way
to do this would be <command>preConfigure = "LD=$CC"</command>.
</para>
</listitem>
</varlistentry>
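A sketch of applying that workaround to a hypothetical problematic package via an override:
<programlisting>
somePackage.overrideAttrs (oldAttrs: {
  # force LD to the C compiler, just for this package
  preConfigure = "LD=$CC";
})
</programlisting>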
|
||||
|
@ -2406,13 +2419,13 @@ addEnvHooks "$hostOffset" myBashFunction
|
|||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
The CC Wrapper wraps a C toolchain for a bunch of miscellaneous purposes.
|
||||
Specifically, a C compiler (GCC or Clang), wrapped binary tools, and a C
|
||||
standard library (glibc or Darwin's libSystem, just for the dynamic
|
||||
loader) are all fed in, and dependency finding, hardening (see below),
|
||||
and purity checks for each are handled by the CC Wrapper. Packages
|
||||
typically depend on the CC Wrapper, which in turn (at run-time) depends
|
||||
on the Bintools Wrapper.
|
||||
The CC Wrapper wraps a C toolchain for a bunch of miscellaneous
|
||||
purposes. Specifically, a C compiler (GCC or Clang), wrapped binary
|
||||
tools, and a C standard library (glibc or Darwin's libSystem, just for
|
||||
the dynamic loader) are all fed in, and dependency finding, hardening
|
||||
(see below), and purity checks for each are handled by the CC Wrapper.
|
||||
Packages typically depend on the CC Wrapper, which in turn (at run-time)
|
||||
depends on the Bintools Wrapper.
|
||||
</para>
|
||||
<para>
|
||||
Dependency finding is undoubtedly the main task of the CC Wrapper. This
|
||||
|
@ -2434,14 +2447,13 @@ addEnvHooks "$hostOffset" myBashFunction
|
|||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
</variablelist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here are some more packages that provide a setup hook. Since the
|
||||
list of hooks is extensible, this is not an exhaustive list the
|
||||
mechanism is only to be used as a last resort, it might cover most
|
||||
uses.
|
||||
Here are some more packages that provide a setup hook. Since the list of
|
||||
hooks is extensible, this is not an exhaustive list the mechanism is only to
|
||||
be used as a last resort, it might cover most uses.
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>
|
||||
|
@ -2499,11 +2511,11 @@ addEnvHooks "$hostOffset" myBashFunction
|
|||
<listitem>
|
||||
<para>
|
||||
The <varname>autoreconfHook</varname> derivation adds
|
||||
<varname>autoreconfPhase</varname>, which runs autoreconf, libtoolize and
|
||||
automake, essentially preparing the configure script in autotools-based
|
||||
builds. Most autotools-based packages come with the configure script
|
||||
pre-generated, but this hook is necessary for a few packages and when you
|
||||
need to patch the package’s configure scripts.
|
||||
<varname>autoreconfPhase</varname>, which runs autoreconf, libtoolize
|
||||
and automake, essentially preparing the configure script in
|
||||
autotools-based builds. Most autotools-based packages come with the
|
||||
configure script pre-generated, but this hook is necessary for a few
|
||||
packages and when you need to patch the package’s configure scripts.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
|
@ -2547,9 +2559,9 @@ addEnvHooks "$hostOffset" myBashFunction
|
|||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Exports <envar>GDK_PIXBUF_MODULE_FILE</envar> environment variable to the
|
||||
builder. Add librsvg package to <varname>buildInputs</varname> to get svg
|
||||
support.
|
||||
Exports <envar>GDK_PIXBUF_MODULE_FILE</envar> environment variable to
|
||||
the builder. Add librsvg package to <varname>buildInputs</varname> to
|
||||
get svg support.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
@ -2594,21 +2606,20 @@ addEnvHooks "$hostOffset" myBashFunction
</para>
<para>
This is useful for programs that use <citerefentry>
<refentrytitle>dlopen</refentrytitle>
<manvolnum>3</manvolnum> </citerefentry> to load libraries at runtime.
</para>
<para>
In certain situations you may want to run the main command
(<command>autoPatchelf</command>) of the setup hook on a file or a set
of directories instead of unconditionally patching all outputs. This can
be done by setting the <envar>dontAutoPatchelf</envar> environment
variable to a non-empty value.
</para>
<para>
The <command>autoPatchelf</command> command also recognizes a
<parameter class="command">--no-recurse</parameter> command line flag,
which prevents it from recursing into subdirectories.
</para>
</listitem>
</varlistentry>
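<para>
A sketch of that pattern, assuming a hypothetical package whose
<filename>bin</filename> output is the only thing that needs patching:
<programlisting>
stdenv.mkDerivation {
  name = "example-prebuilt"; # hypothetical package
  src = ./.;
  nativeBuildInputs = [ autoPatchelfHook ];
  # skip the automatic pass over all outputs ...
  dontAutoPatchelf = true;
  # ... and patch only the binaries, without recursing further
  postFixup = ''
    autoPatchelf --no-recurse $out/bin
  '';
}
</programlisting>
</para>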
@ -2619,22 +2630,22 @@ addEnvHooks "$hostOffset" myBashFunction
<listitem>
<para>
This hook will make a build pause instead of stopping when a failure
happens. It prevents nix from cleaning up the build environment
immediately and allows the user to attach to a build environment using
the <command>cntr</command> command. Upon build error it will print
instructions on how to use <command>cntr</command>. Installing cntr and
running the command will provide shell access to the build sandbox of
the failed build. At <filename>/var/lib/cntr</filename> the sandboxed
filesystem is mounted. All commands and files of the system are still
accessible within the shell. To execute commands from the sandbox use
the cntr exec subcommand. Note that <command>cntr</command> also needs
to be executed on the machine that is doing the build, which might not
be the case when remote builders are enabled. <command>cntr</command> is
only supported on Linux-based platforms. To use it first add
<literal>cntr</literal> to your
<literal>environment.systemPackages</literal> on NixOS or alternatively
to the root user on non-NixOS systems. Then in the package that is
supposed to be inspected, add <literal>breakpointHook</literal> to
<literal>nativeBuildInputs</literal>.
<programlisting>
nativeBuildInputs = [ breakpointHook ];
@ -2649,16 +2660,15 @@ addEnvHooks "$hostOffset" myBashFunction
libiconv, libintl
</term>
<listitem>
<para>
A few libraries automatically add to <literal>NIX_LDFLAGS</literal>
their library, making their symbols automatically available to the
linker. This includes libiconv and libintl (gettext). This is done to
provide compatibility between GNU Linux, where libiconv and libintl are
bundled in, and other systems where that might not be the case.
Sometimes, this behavior is not desired. To disable this behavior, set
<literal>dontAddExtraLibs</literal>.
</para>
</listitem>
</varlistentry>
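<para>
A sketch of opting out (the package is hypothetical):
<programlisting>
stdenv.mkDerivation {
  name = "example"; # hypothetical package
  src = ./.;
  # keep libiconv/libintl out of NIX_LDFLAGS for this build
  dontAddExtraLibs = true;
}
</programlisting>
</para>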
<varlistentry>
@ -2666,17 +2676,17 @@ addEnvHooks "$hostOffset" myBashFunction
cmake
</term>
<listitem>
<para>
Overrides the default configure phase to run the CMake command. By
default, we use the Make generator of CMake. In addition, dependencies
are added automatically to CMAKE_PREFIX_PATH so that packages are
correctly detected by CMake. Some additional flags are passed in to give
similar behavior to configure-based packages. You can disable this
hook’s behavior by setting configurePhase to a custom value, or by
setting dontUseCmakeConfigure. cmakeFlags controls flags passed only to
CMake. By default, parallel building is enabled as CMake supports
parallel building almost everywhere. When Ninja is also in use, CMake
will detect that and use the ninja generator.
</para>
</listitem>
</varlistentry>
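<para>
A sketch tying the attributes above together (the package and the
particular flag are hypothetical):
<programlisting>
stdenv.mkDerivation {
  name = "example-cmake-app"; # hypothetical package
  src = ./.;
  nativeBuildInputs = [ cmake ];
  # extra flags passed only to cmake; this flag is illustrative
  cmakeFlags = [ "-DBUILD_TESTING=OFF" ];
}
</programlisting>
</para>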
@ -2685,12 +2695,12 @@ addEnvHooks "$hostOffset" myBashFunction
xcbuildHook
</term>
<listitem>
<para>
Overrides the build and install phases to run the “xcbuild” command.
This hook is needed when a project only comes with build files for the
Xcode build system. You can disable this behavior by setting buildPhase
and configurePhase to a custom value. xcbuildFlags controls flags passed
only to xcbuild.
</para>
</listitem>
</varlistentry>
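<para>
A sketch (the package and the target name are hypothetical; xcbuildFlags
is the attribute named above):
<programlisting>
stdenv.mkDerivation {
  name = "example-mac-tool"; # hypothetical package
  src = ./.;
  nativeBuildInputs = [ xcbuildHook ];
  # flags passed only to xcbuild; the target is illustrative
  xcbuildFlags = [ "-target" "example" ];
}
</programlisting>
</para>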
@ -2699,13 +2709,13 @@ addEnvHooks "$hostOffset" myBashFunction
meson
</term>
<listitem>
<para>
Overrides the configure phase to run meson to generate Ninja files. You
can disable this behavior by setting configurePhase to a custom value,
or by setting dontUseMesonConfigure. To run these files, you should
accompany meson with ninja. mesonFlags controls only the flags passed to
meson. By default, parallel building is enabled as Meson supports
parallel building almost everywhere.
</para>
</listitem>
</varlistentry>
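<para>
A sketch pairing meson with ninja, as recommended above (the package and
the flag are hypothetical):
<programlisting>
stdenv.mkDerivation {
  name = "example-meson-app"; # hypothetical package
  src = ./.;
  # meson configures; ninja runs the generated build files
  nativeBuildInputs = [ meson ninja ];
  # flags passed only to meson; illustrative
  mesonFlags = [ "-Ddocs=false" ];
}
</programlisting>
</para>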
@ -2714,11 +2724,11 @@ addEnvHooks "$hostOffset" myBashFunction
ninja
</term>
<listitem>
<para>
Overrides the build, install, and check phases to run ninja instead of
make. You can disable this behavior with the dontUseNinjaBuild,
dontUseNinjaInstall, and dontUseNinjaCheck variables, respectively.
Parallel building is enabled by default in Ninja.
</para>
</listitem>
</varlistentry>
@ -2727,9 +2737,9 @@ addEnvHooks "$hostOffset" myBashFunction
unzip
</term>
<listitem>
<para>
This setup hook will allow you to unzip .zip files specified in $src.
There are many similar packages like unrar, undmg, etc.
</para>
</listitem>
</varlistentry>
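<para>
A sketch (the URL and hash are hypothetical placeholders):
<programlisting>
stdenv.mkDerivation {
  name = "example-zip-pkg"; # hypothetical package
  src = fetchurl {
    url = "https://example.org/example.zip"; # hypothetical
    sha256 = "0000000000000000000000000000000000000000000000000000"; # placeholder
  };
  # lets the generic unpackPhase handle the .zip in $src
  nativeBuildInputs = [ unzip ];
}
</programlisting>
</para>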
@ -2738,11 +2748,11 @@ addEnvHooks "$hostOffset" myBashFunction
wafHook
</term>
<listitem>
<para>
Overrides the configure, build, and install phases. This will run the
"waf" script used by many projects. If waf doesn’t exist, it will copy
the version of waf available in Nixpkgs. wafFlags can be used to pass
flags to the waf script.
</para>
</listitem>
</varlistentry>
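<para>
A sketch (the package and the flag are hypothetical; wafFlags is the
attribute named above):
<programlisting>
stdenv.mkDerivation {
  name = "example-waf-app"; # hypothetical package
  src = ./.;
  nativeBuildInputs = [ wafHook ];
  # flags handed to the waf script; this flag is illustrative
  wafFlags = [ "--no-tests" ];
}
</programlisting>
</para>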
@ -2751,14 +2761,14 @@ addEnvHooks "$hostOffset" myBashFunction
scons
</term>
<listitem>
<para>
Overrides the build, install, and check phases. This uses the scons
build system as a replacement for make. scons does not provide a
configure phase, so everything is managed at build and install time.
</para>
</listitem>
</varlistentry>
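<para>
A sketch (the package is hypothetical):
<programlisting>
stdenv.mkDerivation {
  name = "example-scons-app"; # hypothetical package
  src = ./.;
  # build, install and check are driven by scons instead of make
  nativeBuildInputs = [ scons ];
}
</programlisting>
</para>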
</variablelist>
</para>
</section>
<section xml:id="sec-purity-in-nixpkgs">
@ -59,7 +59,7 @@ let
stringLength sub substring tail;
inherit (trivial) id const concat or and bitAnd bitOr bitXor bitNot
boolToString mergeAttrs flip mapNullable inNixShell min max
importJSON warn info nixpkgsVersion version mod compare
importJSON warn info showWarnings nixpkgsVersion version mod compare
splitByAndCompare functionArgs setFunctionArgs isFunction;
inherit (fixedPoints) fix fix' converge extends composeExtensions
makeExtensible makeExtensibleWithCustomName;
@ -109,7 +109,7 @@ let
mkFixStrictness mkOrder mkBefore mkAfter mkAliasDefinitions
mkAliasAndWrapDefinitions fixMergeModules mkRemovedOptionModule
mkRenamedOptionModule mkMergedOptionModule mkChangedOptionModule
mkAliasOptionModule mkAliasOptionModuleWithPriority doRename filterModules;
mkAliasOptionModule doRename filterModules;
inherit (options) isOption mkEnableOption mkSinkUndeclaredOptions
mergeDefaultOption mergeOneOption mergeEqualOption getValues
getFiles optionAttrSetToDocList optionAttrSetToDocList'
@ -561,6 +561,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "OpenSSL License";
};

osl2 = spdx {
spdxId = "OSL-2.0";
fullName = "Open Software License 2.0";
};

osl21 = spdx {
spdxId = "OSL-2.1";
fullName = "Open Software License 2.1";
@ -476,8 +476,22 @@ rec {
optionSet to options of type submodule. FIXME: remove
eventually. */
fixupOptionType = loc: opt:
if opt.type.getSubModules or null == null
then opt // { type = opt.type or types.unspecified; }
let
options = opt.options or
(throw "Option `${showOption loc}' has type optionSet but has no option attribute, in ${showFiles opt.declarations}.");
f = tp:
let optionSetIn = type: (tp.name == type) && (tp.functor.wrapped.name == "optionSet");
in
if tp.name == "option set" || tp.name == "submodule" then
throw "The option ${showOption loc} uses submodules without a wrapping type, in ${showFiles opt.declarations}."
else if optionSetIn "attrsOf" then types.attrsOf (types.submodule options)
else if optionSetIn "loaOf" then types.loaOf (types.submodule options)
else if optionSetIn "listOf" then types.listOf (types.submodule options)
else if optionSetIn "nullOr" then types.nullOr (types.submodule options)
else tp;
in
if opt.type.getSubModules or null == null
then opt // { type = f (opt.type or types.unspecified); }
else opt // { type = opt.type.substSubModules opt.options; options = []; };
@ -596,6 +610,9 @@ rec {

forwards any definitions of boot.copyKernels to
boot.loader.grub.copyKernels while printing a warning.

This also copies over the priority from the aliased option to the
non-aliased option.
*/
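# A sketch of typical usage, built from the option paths in the comment
# above (hypothetical module context):
#
#   imports = [
#     (mkRenamedOptionModule [ "boot" "copyKernels" ] [ "boot" "loader" "grub" "copyKernels" ])
#   ];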
mkRenamedOptionModule = from: to: doRename {
inherit from to;
@ -690,16 +707,7 @@ rec {
use = id;
};

/* Like ‘mkAliasOptionModule’, but copy over the priority of the option as well. */
mkAliasOptionModuleWithPriority = from: to: doRename {
inherit from to;
visible = true;
warn = false;
use = id;
withPriority = true;
};

doRename = { from, to, visible, warn, use, withPriority ? false }:
doRename = { from, to, visible, warn, use, withPriority ? true }:
{ config, options, ... }:
let
fromOpt = getAttrFromPath from options;
@ -48,6 +48,8 @@ rec {
visible ? null,
# Whether the option can be set only once
readOnly ? null,
# Deprecated, used by types.optionSet.
options ? null
} @ attrs:
attrs // { _type = "option"; };

@ -141,7 +143,7 @@ rec {
docOption = rec {
loc = opt.loc;
name = showOption opt.loc;
description = opt.description or (throw "Option `${name}' has no description.");
description = opt.description or (lib.warn "Option `${name}' has no description." "This option has no description.");
declarations = filter (x: x != unknownModule) opt.declarations;
internal = opt.internal or false;
visible = opt.visible or true;
@ -24,6 +24,8 @@ rec {
config = parse.tripleFromSystem final.parsed;
# Just a guess, based on `system`
platform = platforms.selectBySystem final.system;
# Determine whether we are compatible with the provided CPU
isCompatible = platform: parse.isCompatible final.parsed.cpu platform.parsed.cpu;
# Derived meta-data
libc =
/**/ if final.isDarwin then "libSystem"
@ -98,13 +100,14 @@ rec {
wine = (pkgs.winePackagesFor wine-name).minimal;
in
if final.parsed.kernel.name == pkgs.stdenv.hostPlatform.parsed.kernel.name &&
(final.parsed.cpu.name == pkgs.stdenv.hostPlatform.parsed.cpu.name ||
(final.isi686 && pkgs.stdenv.hostPlatform.isx86_64))
then pkgs.runtimeShell
pkgs.stdenv.hostPlatform.isCompatible final
then "${pkgs.runtimeShell} -c"
else if final.isWindows
then "${wine}/bin/${wine-name}"
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux
then "${qemu-user}/bin/qemu-${final.qemuArch}"
else if final.isWasm
then "${pkgs.v8}/bin/d8"
else throw "Don't know how to run ${final.config} executables.";

} // mapAttrs (n: v: v final.parsed) inspect.predicates
@ -21,6 +21,7 @@ rec {
isSparc = { cpu = { family = "sparc"; }; };
isWasm = { cpu = { family = "wasm"; }; };
isAvr = { cpu = { family = "avr"; }; };
isAlpha = { cpu = { family = "alpha"; }; };

is32bit = { cpu = { bits = 32; }; };
is64bit = { cpu = { bits = 64; }; };
@ -112,6 +112,66 @@ rec {
avr = { bits = 8; family = "avr"; };
};

# Determine whether two CPUs are compatible with each other. That is,
# can we run code built for system b on system a? For that to
# happen, the set of all possible programs that system
# b accepts must be a subset of the set of all programs that system
# a accepts. This compatibility relation forms a category where each
# CPU is an object and each arrow from a to b represents
# compatibility. CPUs with multiple modes of endianness are
# isomorphic while all CPUs are endomorphic because any program
# built for a CPU can run on that CPU.
isCompatible = a: b: with cpuTypes; lib.any lib.id [
# x86
(b == i386 && isCompatible a i486)
(b == i486 && isCompatible a i586)
(b == i586 && isCompatible a i686)
# NOTE: Not true in some cases. Like in WSL mode.
(b == i686 && isCompatible a x86_64)

# ARM
(b == arm && isCompatible a armv5tel)
(b == armv5tel && isCompatible a armv6m)
(b == armv6m && isCompatible a armv6l)
(b == armv6l && isCompatible a armv7a)
(b == armv7a && isCompatible a armv7r)
(b == armv7r && isCompatible a armv7m)
(b == armv7m && isCompatible a armv7l)
(b == armv7l && isCompatible a armv8a)
(b == armv8a && isCompatible a armv8r)
(b == armv8r && isCompatible a armv8m)
# NOTE: not always true! Some arm64 cpus don’t support arm32 mode.
(b == armv8m && isCompatible a aarch64)
(b == aarch64 && a == aarch64_be)
(b == aarch64_be && isCompatible a aarch64)

# PowerPC
(b == powerpc && isCompatible a powerpc64)
(b == powerpcle && isCompatible a powerpc)
(b == powerpc && a == powerpcle)
(b == powerpc64le && isCompatible a powerpc64)
(b == powerpc64 && a == powerpc64le)

# MIPS
(b == mips && isCompatible a mips64)
(b == mips && a == mipsel)
(b == mipsel && isCompatible a mips)
(b == mips64 && a == mips64el)
(b == mips64el && isCompatible a mips64)

# RISCV
(b == riscv32 && isCompatible a riscv64)

# SPARC
(b == sparc && isCompatible a sparc64)

# WASM
(b == wasm32 && isCompatible a wasm64)

# identity
(b == a)
];
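# A sketch of what the relation yields (a = host CPU, b = the code's CPU;
# hypothetical REPL session):
#   isCompatible cpuTypes.x86_64 cpuTypes.i686   =>  true   (x86_64 hosts run i686 code)
#   isCompatible cpuTypes.i686   cpuTypes.x86_64 =>  false  (no rule makes this reachable)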

################################################################################

types.openVendor = mkOptionType {
@ -149,7 +149,7 @@ checkConfigOutput "1 2 3 4 5 6 7 8 9 10" config.result ./loaOf-with-long-list.ni
# Check loaOf with many merges of lists.
checkConfigOutput "1 2 3 4 5 6 7 8 9 10" config.result ./loaOf-with-many-list-merges.nix

# Check mkAliasOptionModuleWithPriority.
# Check mkAliasOptionModule.
checkConfigOutput "true" config.enable ./alias-with-priority.nix
checkConfigOutput "true" config.enableAlias ./alias-with-priority.nix
checkConfigOutput "false" config.enable ./alias-with-priority-can-override.nix
@ -1,5 +1,8 @@
# This is a test to show that mkAliasOptionModule sets the priority correctly
# for aliased options.
#
# This test shows that an alias with a high priority is able to override
# a non-aliased option.

{ config, lib, ... }:
@ -32,10 +35,10 @@ with lib;

imports = [
# Create an alias for the "enable" option.
(mkAliasOptionModuleWithPriority [ "enableAlias" ] [ "enable" ])
(mkAliasOptionModule [ "enableAlias" ] [ "enable" ])

# Disable the aliased option, but with a default (low) priority so it
# should be able to be overridden by the next import.
# Disable the aliased option with a high priority so it
# should override the next import.
( { config, lib, ... }:
{
enableAlias = lib.mkForce false;
@ -1,5 +1,8 @@
# This is a test to show that mkAliasOptionModule sets the priority correctly
# for aliased options.
#
# This test shows that an alias with a low priority is able to be overridden
# with a non-aliased option.

{ config, lib, ... }:
@ -32,7 +35,7 @@ with lib;

imports = [
# Create an alias for the "enable" option.
(mkAliasOptionModuleWithPriority [ "enableAlias" ] [ "enable" ])
(mkAliasOptionModule [ "enableAlias" ] [ "enable" ])

# Disable the aliased option, but with a default (low) priority so it
# should be able to be overridden by the next import.
@ -134,7 +134,7 @@ rec {
On each release the first letter is bumped and a new animal is chosen
starting with that new letter.
*/
codeName = "Koi";
codeName = "Loris";

/* Returns the current nixpkgs version suffix as string. */
versionSuffix =
@ -259,9 +259,10 @@ rec {
# TODO: figure out a clever way to integrate location information from
# something like __unsafeGetAttrPos.

warn = msg: builtins.trace "WARNING: ${msg}";
warn = msg: builtins.trace "\e[1;31mwarning: ${msg}\e[0m";
info = msg: builtins.trace "INFO: ${msg}";

showWarnings = warnings: res: lib.fold (w: x: warn w x) res warnings;

## Function annotations
@ -469,8 +469,10 @@ rec {
# Obsolete alternative to configOf. It takes its option
# declarations from the ‘options’ attribute of containing option
# declaration.
optionSet = builtins.throw "types.optionSet is deprecated; use types.submodule instead" "optionSet";

optionSet = mkOptionType {
name = builtins.trace "types.optionSet is deprecated; use types.submodule instead" "optionSet";
description = "option set";
};
# Augment the given type with an additional type check function.
addCheck = elemType: check: elemType // { check = x: elemType.check x && check x; };
@ -38,6 +38,15 @@
See `./scripts/check-maintainer-github-handles.sh` for an example on how to work with this data.
*/
{
"0x4A6F" = {
email = "0x4A6F@shackspace.de";
name = "Joachim Ernst";
github = "0x4A6F";
keys = [{
longkeyid = "rsa8192/0x87027528B006D66D";
fingerprint = "F466 A548 AD3F C1F1 8C88 4576 8702 7528 B006 D66D";
}];
};
"1000101" = {
email = "jan.hrnko@satoshilabs.com";
github = "1000101";
@ -752,6 +761,11 @@
github = "calbrecht";
name = "Christian Albrecht";
};
callahad = {
email = "dan.callahan@gmail.com";
github = "callahad";
name = "Dan Callahan";
};
calvertvl = {
email = "calvertvl@gmail.com";
github = "calvertvl";
@ -812,6 +826,11 @@
github = "cdepillabout";
name = "Dennis Gosnell";
};
ceedubs = {
email = "ceedubs@gmail.com";
github = "ceedubs";
name = "Cody Allen";
};
cfouche = {
email = "chaddai.fouche@gmail.com";
github = "Chaddai";
@ -1618,6 +1637,10 @@
email = "fpletz@fnordicwalking.de";
github = "fpletz";
name = "Franz Pletz";
keys = [{
longkeyid = "rsa4096/0x846FDED7792617B4";
fingerprint = "8A39 615D CE78 AF08 2E23 F303 846F DED7 7926 17B4";
}];
};
fps = {
email = "mista.tapas@gmx.net";
@ -2223,10 +2246,6 @@
github = "jmettes";
name = "Jonathan Mettes";
};
Jo = {
email = "0x4A6F@shackspace.de";
name = "Joachim Ernst";
};
joachifm = {
email = "joachifm@fastmail.fm";
github = "joachifm";
@ -2284,6 +2303,11 @@
joko = {
email = "ioannis.koutras@gmail.com";
github = "jokogr";
keys = [{
# compare with https://keybase.io/joko
longkeyid = "rsa2048/0x85EAE7D9DF56C5CA";
fingerprint = "B154 A8F9 0610 DB45 0CA8 CF39 85EA E7D9 DF56 C5CA";
}];
name = "Ioannis Koutras";
};
jonafato = {
@ -2439,6 +2463,11 @@
github = "kisonecat";
name = "Jim Fowler";
};
kjuvi = {
email = "quentin.vaucher@pm.me";
github = "kjuvi";
name = "Quentin Vaucher";
};
kkallio = {
email = "tierpluspluslists@gmail.com";
name = "Karn Kallio";
@ -2619,6 +2648,11 @@
github = "lihop";
name = "Leroy Hopson";
};
lilyball = {
email = "lily@sb.org";
github = "lilyball";
name = "Lily Ballard";
};
limeytexan = {
email = "limeytexan@gmail.com";
github = "limeytexan";
@ -4303,6 +4337,15 @@
github = "sleexyz";
name = "Sean Lee";
};
smakarov = {
email = "setser200018@gmail.com";
github = "setser";
name = "Sergey Makarov";
keys = [{
longkeyid = "rsa2048/6AA23A1193B7064B";
fingerprint = "6F8A 18AE 4101 103F 3C54 24B9 6AA2 3A11 93B7 064B";
}];
};
smaret = {
email = "sebastien.maret@icloud.com";
github = "smaret";
@ -4337,6 +4380,15 @@
github = "solson";
name = "Scott Olson";
};
sondr3 = {
email = "nilsen.sondre@gmail.com";
github = "sondr3";
name = "Sondre Nilsen";
keys = [{
longkeyid = "ed25519/0x25676BCBFFAD76B1";
fingerprint = "0EC3 FA89 EFBA B421 F82E 40B0 2567 6BCB FFAD 76B1";
}];
};
sorki = {
email = "srk@48.io";
github = "sorki";
@ -4876,9 +4928,13 @@
name = "Vincent Bernardoff";
};
vcunat = {
email = "vcunat@gmail.com";
github = "vcunat";
name = "Vladimír Čunát";
email = "v@cunat.cz"; # vcunat@gmail.com predominated in commits before 2019/03
github = "vcunat";
keys = [{
longkeyid = "rsa4096/0xE747DF1F9575A3AA";
fingerprint = "B600 6460 B60A 80E7 8206 2449 E747 DF1F 9575 A3AA";
}];
};
vdemeester = {
email = "vincent@sbr.pm";
@ -5001,6 +5057,11 @@
email = "windenntw@gmail.com";
name = "Antonio Vargas Gonzalez";
};
winpat = {
email = "patrickwinter@posteo.ch";
github = "winpat";
name = "Patrick Winter";
};
wizeman = {
email = "rcorreia@wizy.org";
github = "wizeman";
@ -5027,7 +5088,7 @@
name = "Kranium Gikos Mendoza";
};
worldofpeace = {
email = "worldofpeace@users.noreply.github.com";
email = "worldofpeace@protonmail.ch";
github = "worldofpeace";
name = "Worldofpeace";
};
@ -5240,4 +5301,9 @@
github = "shmish111";
name = "David Smith";
};
minijackson = {
email = "minijackson@riseup.net";
github = "minijackson";
name = "Rémi Nicole";
};
}
@ -1,7 +1,7 @@
# nix name, luarocks name, server, version/additional args
ansicolors,
argparse,
basexx,
cqueues
dkjson
fifo
inspect
@ -18,15 +18,15 @@ lua-term,
luabitop,
luaevent,
luacheck
luaffi,http://luarocks.org/dev,
luaffi,,http://luarocks.org/dev,
luuid,
penlight,
say,
luv,
luasystem,
mediator_lua,http://luarocks.org/manifests/teto
mpack,http://luarocks.org/manifests/teto
nvim-client,http://luarocks.org/manifests/teto
busted,http://luarocks.org/manifests/teto
luassert,http://luarocks.org/manifests/teto
coxpcall,https://luarocks.org/manifests/hisham,1.17.0-1
mediator_lua,,http://luarocks.org/manifests/teto
mpack,,http://luarocks.org/manifests/teto
nvim-client,,http://luarocks.org/manifests/teto
busted,,http://luarocks.org/manifests/teto
luassert,,http://luarocks.org/manifests/teto
coxpcall,,https://luarocks.org/manifests/hisham,1.17.0-1
@ -61,7 +61,7 @@ nixpkgs$ ${0} ${GENERATED_NIXFILE}

These packages are manually refined in lua-overrides.nix
*/
{ self, lua, stdenv, fetchurl, fetchgit, pkgs, ... } @ args:
{ self, stdenv, fetchurl, fetchgit, pkgs, ... } @ args:
self: super:
with self;
{
@ -74,17 +74,18 @@ FOOTER="

function convert_pkg () {
pkg="$1"
nix_pkg_name="$1"
lua_pkg_name="$2"
server=""
if [ ! -z "$2" ]; then
server=" --server=$2"
if [ ! -z "$3" ]; then
server=" --server=$3"
fi

version="${3:-}"

echo "looking at $pkg (version $version) from server [$server]" >&2
cmd="luarocks nix $server $pkg $version"
drv="$($cmd)"
echo "looking at $lua_pkg_name (version $version) from server [$server]" >&2
cmd="luarocks nix $server $lua_pkg_name $version"
drv="$nix_pkg_name = $($cmd)"
if [ $? -ne 0 ]; then
echo "Failed to convert $pkg" >&2
echo "$drv" >&2
@ -98,12 +99,17 @@ echo "$HEADER" | tee "$TMP_FILE"

# list of packages with format
# name,server,version
while IFS=, read -r pkg_name server version
while IFS=, read -r nix_pkg_name lua_pkg_name server version
do
if [ -z "$pkg_name" ]; then
echo "Skipping empty package name" >&2
if [ "${nix_pkg_name:0:1}" == "#" ]; then
echo "Skipping comment ${nix_pkg_name}" >&2
continue
fi
convert_pkg "$pkg_name" "$server" "$version"
if [ -z "$lua_pkg_name" ]; then
echo "Using nix_name as lua_pkg_name" >&2
lua_pkg_name="$nix_pkg_name"
fi
convert_pkg "$nix_pkg_name" "$lua_pkg_name" "$server" "$version"
done < "$CSV_FILE"

# close the set
@ -4,7 +4,7 @@
version="5.0"
xml:id="ch-running">
<title>Administration</title>
<partintro>
<partintro xml:id="ch-running-intro">
<para>
This chapter describes various aspects of managing a running NixOS system,
such as how to use the <command>systemd</command> service manager.
@ -4,7 +4,7 @@
version="5.0"
xml:id="ch-configuration">
<title>Configuration</title>
<partintro>
<partintro xml:id="ch-configuration-intro">
<para>
This chapter describes how to configure various aspects of a NixOS machine
through the configuration file
@ -23,5 +23,6 @@
<xi:include href="linux-kernel.xml" />
<xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
<xi:include href="profiles.xml" />
<xi:include href="kubernetes.xml" />
<!-- Apache; libvirtd virtualisation -->
</part>
@ -0,0 +1,127 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-kubernetes">
<title>Kubernetes</title>

<para>
The NixOS Kubernetes module is a collective term for a handful of
individual submodules implementing the Kubernetes cluster components.
</para>

<para>
There are generally two ways of enabling Kubernetes on NixOS.
One way is to enable and configure cluster components appropriately by hand:
<programlisting>
services.kubernetes = {
  apiserver.enable = true;
  controllerManager.enable = true;
  scheduler.enable = true;
  addonManager.enable = true;
  proxy.enable = true;
  flannel.enable = true;
};
</programlisting>
Another way is to assign cluster roles ("master" and/or "node") to the host.
This enables apiserver, controllerManager, scheduler, addonManager,
kube-proxy and etcd:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "master" ];
</programlisting>
While this will enable the kubelet and kube-proxy only:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "node" ];
</programlisting>
Assigning both the master and node roles is useful if you want a single
node Kubernetes cluster for dev or testing purposes:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "master" "node" ];
</programlisting>
Note: Assigning either role will also default both
<xref linkend="opt-services.kubernetes.flannel.enable"/> and
<xref linkend="opt-services.kubernetes.easyCerts"/> to true.
This sets up flannel as CNI and activates automatic PKI bootstrapping.
</para>

<para>
As of Kubernetes 1.10.x, opening non-TLS-enabled ports on Kubernetes
components has been deprecated. Thus, from NixOS 19.03 all
plain HTTP ports have been disabled by default.
While opening insecure ports is still possible, it is recommended not to
bind these to other interfaces than loopback.

To re-enable the insecure port on the apiserver, see options:
<xref linkend="opt-services.kubernetes.apiserver.insecurePort"/>
and
<xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress"/>
</para>

<note>
<para>
As of NixOS 19.03, it is mandatory to configure:
<xref linkend="opt-services.kubernetes.masterAddress"/>.
The masterAddress must be resolvable and routable by all cluster nodes.
In single node clusters, this can be set to <literal>localhost</literal>.
</para>
</note>

<para>
Role-based access control (RBAC) authorization mode is enabled by default.
This means that anonymous requests to the apiserver secure port will
expectedly cause a permission denied error. All cluster components must
therefore be configured with x509 certificates for two-way TLS communication.
The x509 certificate subject section determines the roles and permissions
granted by the apiserver to perform clusterwide or namespaced operations.
See also:
<link
xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/rbac/">
Using RBAC Authorization</link>.
</para>

<para>
The NixOS kubernetes module provides an option for automatic certificate
bootstrapping and configuration,
<xref linkend="opt-services.kubernetes.easyCerts"/>.
The PKI bootstrapping process involves setting up a certificate authority
(CA) daemon (cfssl) on the kubernetes master node. cfssl generates a CA-cert
for the cluster, and uses the CA-cert for signing subordinate certs issued to
each of the cluster components. Subsequently, the certmgr daemon monitors
active certificates and renews them when needed. For single node Kubernetes
clusters, setting <xref linkend="opt-services.kubernetes.easyCerts"/> = true
is sufficient and no further action is required. For joining extra node
machines to an existing cluster on the other hand, establishing initial trust
is mandatory.
</para>

<para>
To add new nodes to the cluster:
On any (non-master) cluster node where
<xref linkend="opt-services.kubernetes.easyCerts"/> is enabled, the helper
script <literal>nixos-kubernetes-node-join</literal> is available on PATH.
Given a token on stdin, it will copy the token to the kubernetes
secrets directory and restart the certmgr service. As requested
certificates are issued, the script will restart kubernetes cluster
components as needed for them to pick up new keypairs.
</para>

<note>
<para>
Multi-master (HA) clusters are not supported by the easyCerts module.
</para>
</note>

<para>
In order to interact with an RBAC-enabled cluster as an administrator, one
needs to have cluster-admin privileges. By default, when easyCerts is
enabled, a cluster-admin kubeconfig file is generated and linked into
<literal>/etc/kubernetes/cluster-admin.kubeconfig</literal> as determined by
<xref linkend="opt-services.kubernetes.pki.etcClusterAdminKubeconfig"/>.
<literal>export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig</literal>
will make kubectl use this kubeconfig to access and authenticate the cluster.
The cluster-admin kubeconfig references an auto-generated keypair owned by
root. Thus, only root on the kubernetes master may obtain cluster-admin
rights by means of this file.
</para>

</chapter>
@ -36,8 +36,25 @@
</para>

<para>
If you are using WPA2 the <command>wpa_passphrase</command> tool might be
useful to generate the <literal>wpa_supplicant.conf</literal>.
If you are using WPA2 you can generate the pskRaw key using
<command>wpa_passphrase</command>:
<screen>
$ wpa_passphrase ESSID PSK
network={
  ssid="echelon"
  #psk="abcdefgh"
  psk=dca6d6ed41f4ab5a984c9f55f6f66d4efdc720ebf66959810f4329bb391c5435
}
</screen>
<programlisting>
<xref linkend="opt-networking.wireless.networks"/> = {
  echelon = {
    pskRaw = "dca6d6ed41f4ab5a984c9f55f6f66d4efdc720ebf66959810f4329bb391c5435";
  };
}
</programlisting>
or you can use it to directly generate the
<literal>wpa_supplicant.conf</literal>:
<screen>
# wpa_passphrase ESSID PSK > /etc/wpa_supplicant.conf</screen>
After you have edited the <literal>wpa_supplicant.conf</literal>, you need to
@ -268,7 +268,10 @@ in rec {
--stringparam id.warnings "1" \
--nonet --output $dst/ \
${docbook_xsl_ns}/xml/xsl/docbook/xhtml/chunktoc.xsl \
${manual-combined}/manual-combined.xml
${manual-combined}/manual-combined.xml \
|& tee xsltproc.out
grep "^ID recommended on" xsltproc.out &>/dev/null && echo "error: some IDs are missing" && false
rm xsltproc.out

mkdir -p $dst/images/callouts
cp ${docbook_xsl_ns}/xml/xsl/docbook/images/callouts/*.svg $dst/images/callouts/
@ -327,6 +330,7 @@ in rec {
# Generate manpages.
mkdir -p $out/share/man
xsltproc --nonet \
--maxdepth 6000 \
--param man.output.in.separate.dir 1 \
--param man.output.base.dir "'$out/share/man/'" \
--param man.endnotes.are.numbered 0 \
@ -4,7 +4,7 @@
version="5.0"
xml:id="ch-development">
<title>Development</title>
<partintro>
<partintro xml:id="ch-development-intro">
<para>
This chapter describes how you can modify and extend NixOS.
</para>
@ -60,13 +60,6 @@
Make sure a channel is created at http://nixos.org/channels/. </link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/settings/branches">
Let a GitHub nixpkgs admin lock the branch on github for you. (so
developers can’t force push) </link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/compare/bdf161ed8d21...6b63c4616790">
@ -4,7 +4,7 @@
version="5.0"
xml:id="ch-installation">
<title>Installation</title>
<partintro>
<partintro xml:id="ch-installation-intro">
<para>
This section describes how to obtain, install, and configure NixOS for
first-time use.
@ -377,6 +377,10 @@
option can be set to <literal>true</literal> to automatically add them to
the grub menu.
</para>
<para>
If you need to configure networking for your machine, the configuration
options are described in <xref linkend="sec-networking"/>.
</para>
<para>
Another critical option is <option>fileSystems</option>, specifying the
file systems that need to be mounted by NixOS. However, you typically
@ -38,6 +38,10 @@
<option>dry-activate</option>
</arg>

<arg choice='plain'>
<option>edit</option>
</arg>

<arg choice='plain'>
<option>build-vm</option>
</arg>
@ -188,6 +192,16 @@ $ nix-build /path/to/nixpkgs/nixos -A system
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>edit</option>
</term>
<listitem>
<para>
Opens <filename>configuration.nix</filename> in the default editor.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>build-vm</option>
@ -8,6 +8,7 @@
This section lists the release notes for each stable version of NixOS and
current unstable revision.
</para>
<xi:include href="rl-1909.xml" />
<xi:include href="rl-1903.xml" />
<xi:include href="rl-1809.xml" />
<xi:include href="rl-1803.xml" />
@ -55,6 +55,15 @@
<para>to <literal>false</literal> and enable your preferred display manager.</para>
</note>
</listitem>
<listitem>
<para>
A major refactoring of the Kubernetes module has been completed.
Refactorings primarily focus on decoupling components and enhancing
security. Two-way TLS and RBAC have been enabled by default for all
components, which slightly changes the way the module is configured.
See: <xref linkend="sec-kubernetes"/> for details.
</para>
</listitem>
</itemizedlist>
</section>
@ -84,6 +93,35 @@
in <literal>nixos/modules/virtualisation/google-compute-config.nix</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>./services/misc/beanstalkd.nix</literal>
</para>
</listitem>
<listitem>
<para>
There is a new <varname>services.cockroachdb</varname> module for running
CockroachDB databases. NixOS now ships with CockroachDB 2.1.x as well, available
on <literal>x86_64-linux</literal> and <literal>aarch64-linux</literal>.
</para>
</listitem>
</itemizedlist>

<itemizedlist>
<listitem>
<para>
<literal>./security/duosec.nix</literal>
</para>
</listitem>
<listitem>
<para>
The <link xlink:href="https://duo.com/docs/duounix">PAM module for Duo
Security</link> has been enabled for use. One can configure it using
the <option>security.duosec</option> options along with the
corresponding PAM option in
<option>security.pam.services.<name?>.duoSecurity.enable</option>.
</para>
</listitem>
</itemizedlist>
</section>
@ -144,6 +182,20 @@
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
The <varname>buildPythonPackage</varname> function now sets <varname>strictDeps = true</varname>
to help distinguish between native and non-native dependencies in order to
improve cross-compilation compatibility. Note however that this may break
user expressions.
</para>
</listitem>
<listitem>
<para>
The <varname>buildPythonPackage</varname> function now sets <varname>LANG = C.UTF-8</varname>
to enable Unicode support. The <varname>glibcLocales</varname> package is no longer needed as a build input.
</para>
</listitem>
<listitem>
<para>
The Syncthing state and configuration data has been moved from
@ -404,8 +456,8 @@
</listitem>
<listitem>
<para>
Support for NixOS module system type <literal>types.optionSet</literal> and
<literal>lib.mkOption</literal> argument <literal>options</literal> is removed.
NixOS module system type <literal>types.optionSet</literal> and
<literal>lib.mkOption</literal> argument <literal>options</literal> are deprecated.
Use <literal>types.submodule</literal> instead.
(<link xlink:href="https://github.com/NixOS/nixpkgs/pull/54637">#54637</link>)
</para>
@ -427,6 +479,11 @@
been removed.
</para>
</listitem>
<listitem>
<para>
<literal>graylog</literal> has been upgraded from version 2.* to 3.*. Some setups making use of extraConfig (especially those exposing Graylog via reverse proxies) need to be updated as upstream removed/replaced some settings. See <link xlink:href="http://docs.graylog.org/en/3.0/pages/upgrade/graylog-3.0.html#simplified-http-interface-configuration">Upgrading Graylog</link> for details.
</para>
</listitem>
</itemizedlist>
</section>
@ -520,7 +577,7 @@
but is still possible by setting <literal>zramSwap.swapDevices</literal> explicitly.
</para>
<para>
The default algorithm for ZRAM swap was changed to <literal>zstd</literal>.
The ZRAM algorithm can be changed now.
</para>
<para>
Changes to ZRAM algorithm are applied during <literal>nixos-rebuild switch</literal>,
@ -564,6 +621,75 @@
provisioning.
</para>
</listitem>
<listitem>
<para>
The use of insecure ports on kubernetes has been deprecated.
Thus the options
<varname>services.kubernetes.apiserver.port</varname> and
<varname>services.kubernetes.controllerManager.port</varname>
have been renamed to <varname>.insecurePort</varname>,
and the default of both options has changed to 0 (disabled).
</para>
</listitem>
<listitem>
<para>
Note that the default value of
<varname>services.kubernetes.apiserver.bindAddress</varname>
has changed from 127.0.0.1 to 0.0.0.0, allowing the apiserver to be
accessible from outside the master node itself.
If the apiserver insecurePort is enabled,
it is strongly recommended to only bind on the loopback interface. See:
<varname>services.kubernetes.apiserver.insecureBindAddress</varname>.
</para>
</listitem>
<listitem>
<para>
The options <varname>services.kubernetes.apiserver.allowPrivileged</varname>
and <varname>services.kubernetes.kubelet.allowPrivileged</varname> now
default to false, disallowing privileged containers on the cluster.
</para>
</listitem>
<listitem>
<para>
The kubernetes module no longer adds the kubernetes package to
<varname>environment.systemPackages</varname> implicitly.
</para>
</listitem>
<listitem>
<para>
The <literal>intel</literal> driver has been removed from the default list of
<link linkend="opt-services.xserver.videoDrivers">X.org video drivers</link>.
The <literal>modesetting</literal> driver should take over automatically;
it is better maintained upstream and has fewer problems with advanced X11 features.
This can lead to a change in the output names used by <literal>xrandr</literal>.
Some performance regressions on some GPU models might happen.
Some OpenCL and VA-API applications might also break
(Beignet seems to provide OpenCL support with the
<literal>modesetting</literal> driver, too).
The kernel mode setting API does not support backlight control,
so the <literal>xbacklight</literal> tool will not work;
the backlight level can be controlled directly via <literal>/sys/</literal>
or with <literal>brightnessctl</literal>.
Users who need this functionality more than multi-output XRandR are advised
to add `intel` to `videoDrivers` and report an issue (or provide additional
details in an existing one).
</para>
</listitem>
<listitem>
<para>
Openmpi has been updated to version 4.0.0, which removes some deprecated MPI-1 symbols.
This may break some older applications that still rely on those symbols.
An upgrade guide can be found <link xlink:href="https://www.open-mpi.org/faq/?category=mpi-removed">here</link>.
</para>
<para>
The nginx package now relies on OpenSSL 1.1 and supports TLS 1.3 by default. You can set the protocols used by the nginx service using <xref linkend="opt-services.nginx.sslProtocols"/>.
</para>
</listitem>
<listitem>
<para>
A new subcommand <command>nixos-rebuild edit</command> was added.
</para>
</listitem>
</itemizedlist>
</section>
</section>
@ -0,0 +1,68 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.09">
<title>Release 19.09 (“Loris”, 2019/09/??)</title>

<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.09-highlights">
<title>Highlights</title>

<para>
In addition to numerous new and upgraded packages, this release has the
following highlights:
</para>

<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>

<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.09-new-services">
<title>New Services</title>

<para>
The following new services were added since the last release:
</para>

<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>

<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.09-notable-changes">
<title>Other Notable Changes</title>

<itemizedlist>
<listitem>
<para>
The <option>documentation</option> module gained an option named
<option>documentation.nixos.includeAllModules</option> which makes the generated
<citerefentry><refentrytitle>configuration.nix</refentrytitle>
<manvolnum>5</manvolnum></citerefentry> manual page include all options from all NixOS modules
included in a given <literal>configuration.nix</literal> configuration file. Currently, it is
set to <literal>false</literal> by default as enabling it frequently prevents evaluation. But
the plan is to eventually have it set to <literal>true</literal> by default. Please set it to
<literal>true</literal> now in your <literal>configuration.nix</literal> and fix all the bugs
it uncovers.
</para>
</listitem>
</itemizedlist>
</section>
</section>
@ -51,7 +51,7 @@ in rec {
# system configuration.
inherit (lib.evalModules {
inherit prefix check;
modules = modules ++ extraModules ++ baseModules ++ [ pkgsModule ];
modules = baseModules ++ extraModules ++ [ pkgsModule ] ++ modules;
args = extraArgs;
specialArgs =
{ modulesPath = builtins.toString ../modules; } // specialArgs;
@ -60,7 +60,7 @@ in rec {
# These are the extra arguments passed to every module. In
# particular, Nixpkgs is passed through the "pkgs" argument.
extraArgs = extraArgs_ // {
inherit modules baseModules;
inherit baseModules extraModules modules;
};

inherit (config._module.args) pkgs;
@ -0,0 +1,23 @@
# nix-build '<nixpkgs/nixos>' -A config.system.build.cloudstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/cloudstack/cloudstack-image.nix ]; }"

{ config, lib, pkgs, ... }:

with lib;

{
imports =
[ ../../../modules/virtualisation/cloudstack-config.nix ];

system.build.cloudstackImage = import ../../../lib/make-disk-image.nix {
inherit lib config pkgs;
diskSize = 8192;
format = "qcow2";
configFile = pkgs.writeText "configuration.nix"
''
{
imports = [ <nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix> ];
}
'';
};

}
@ -55,7 +55,9 @@ let
localConf = pkgs.writeText "fc-local.conf" cfg.localConf;

# The configuration to be included in /etc/font/
penultimateConf = pkgs.runCommand "font-penultimate-conf" {} ''
penultimateConf = pkgs.runCommand "font-penultimate-conf" {
preferLocalBuild = true;
} ''
support_folder=$out/etc/fonts/conf.d
latest_folder=$out/etc/fonts/${latestVersion}/conf.d

@ -7,7 +7,7 @@ let cfg = config.fonts.fontconfig.ultimate;
latestVersion = pkgs.fontconfig.configVersion;

# The configuration to be included in /etc/font/
confPkg = pkgs.runCommand "font-ultimate-conf" {} ''
confPkg = pkgs.runCommand "font-ultimate-conf" { preferLocalBuild = true; } ''
support_folder=$out/etc/fonts/conf.d
latest_folder=$out/etc/fonts/${latestVersion}/conf.d

@ -190,7 +190,7 @@ let cfg = config.fonts.fontconfig;
'';

# fontconfig configuration package
confPkg = pkgs.runCommand "fontconfig-conf" {} ''
confPkg = pkgs.runCommand "fontconfig-conf" { preferLocalBuild = true; } ''
support_folder=$out/etc/fonts
latest_folder=$out/etc/fonts/${latestVersion}

@ -4,7 +4,7 @@ with lib;

let

x11Fonts = pkgs.runCommand "X11-fonts" { } ''
x11Fonts = pkgs.runCommand "X11-fonts" { preferLocalBuild = true; } ''
mkdir -p "$out/share/X11-fonts"
find ${toString config.fonts.fonts} \
\( -name fonts.dir -o -name '*.ttf' -o -name '*.otf' \) \
@ -34,7 +34,7 @@ with lib;
networkmanager-openvpn = super.networkmanager-openvpn.override { withGnome = false; };
networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
pinentry = super.pinentry_ncurses;
pinentry = super.pinentry.override { gtk2 = null; qt = null; };
gobject-introspection = super.gobject-introspection.override { x11Support = false; };
}));
};
@ -61,6 +61,15 @@ in {
};
};

system.nssHosts = mkOption {
type = types.listOf types.str;
default = [];
example = [ "mdns" ];
description = ''
List of host entries to configure in <filename>/etc/nsswitch.conf</filename>.
'';
};

};

config = {
@ -85,7 +94,7 @@ in {
group: ${concatStringsSep " " passwdArray}
shadow: ${concatStringsSep " " shadowArray}

hosts: ${concatStringsSep " " hostArray}
hosts: ${concatStringsSep " " config.system.nssHosts}
networks: files

ethers: files
@ -94,6 +103,8 @@ in {
rpc: files
'';

system.nssHosts = hostArray;

# Systemd provides nss-myhostname to ensure that our hostname
# always resolves to a valid IP address. It returns all locally
# configured IP addresses, or ::1 and 127.0.0.2 as
@ -91,13 +91,13 @@ in
};

algorithm = mkOption {
default = "zstd";
example = "lzo";
default = "lzo";
example = "lz4";
type = with types; either (enum [ "lzo" "lz4" "zstd" ]) str;
description = ''
Compression algorithm. <literal>lzo</literal> has good compression,
but is slow. <literal>lz4</literal> has bad compression, but is fast.
<literal>zstd</literal> is both good compression and fast.
<literal>zstd</literal> is both good compression and fast, but requires a newer kernel.
You can check what other algorithms are supported by your zram device with
<programlisting>cat /sys/class/block/zram*/comp_algorithm</programlisting>
'';
|
||||
|
|
|
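Since zstd stays in the enum, systems on a new enough kernel can opt back in with a one-line override; a sketch assuming the option path zramSwap.algorithm from the zram module:

  zramSwap = {
    enable = true;        # assumed companion option of the zram module
    algorithm = "zstd";   # needs a kernel whose zram driver supports zstd
  };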
@@ -0,0 +1,24 @@
{ config, lib, pkgs, ... }:

with lib;
let
  cfg = config.hardware.acpilight;
in
{
  options = {
    hardware.acpilight = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = ''
          Enable acpilight.
          This will allow brightness control via xbacklight for users in the video group.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    services.udev.packages = with pkgs; [ acpilight ];
  };
}
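Consumers would enable the new module with a single flag; a hedged sketch (the user name is illustrative):

  hardware.acpilight.enable = true;
  users.users.alice.extraGroups = [ "video" ]; # xbacklight requires the video group, per the description above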
@@ -0,0 +1,14 @@
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.hardware.ledger;

in {
  options.hardware.ledger.enable = mkEnableOption "udev rules for Ledger devices";

  config = mkIf cfg.enable {
    services.udev.packages = [ pkgs.ledger-udev-rules ];
  };
}
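Usage mirrors the acpilight module above; a one-line sketch:

  hardware.ledger.enable = true; # pulls pkgs.ledger-udev-rules into services.udev.packages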
@@ -172,6 +172,11 @@ in
    environment.systemPackages = [ nvidia_x11.bin nvidia_x11.settings ]
      ++ lib.filter (p: p != null) [ nvidia_x11.persistenced ];

    systemd.tmpfiles.rules = optional config.virtualisation.docker.enableNvidia
        "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
      ++ optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
        "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";

    boot.extraModulePackages = [ nvidia_x11.bin ];

    # nvidia-uvm is required by CUDA applications.
@@ -29,6 +29,7 @@ runCommand "uvcdynctrl-udev-rules-${version}"
  ];
  dontPatchELF = true;
  dontStrip = true;
  preferLocalBuild = true;
}
''
  mkdir -p "$out/lib/udev"
@@ -13,7 +13,7 @@ let
  # user, as expected by nixos-rebuild/nixos-install. FIXME: merge
  # with make-channel.nix.
  channelSources = pkgs.runCommand "nixos-${config.system.nixos.version}"
    { }
    { preferLocalBuild = true; }
    ''
      mkdir -p $out
      cp -prd ${nixpkgs.outPath} $out/nixos
@@ -31,6 +31,10 @@ with lib;
  # there is no power management backend such as upower).
  powerManagement.enable = true;

  # Enable sound in graphical ISOs.
  hardware.pulseaudio.enable = true;
  hardware.pulseaudio.systemWide = true; # Needed since we run plasma as root.

  environment.systemPackages = [
    # Include gparted for partitioning disks.
    pkgs.gparted
@@ -138,7 +138,18 @@ fi
# Ask the user to set a root password, but only if the passwd command
# exists (i.e. when mutable user accounts are enabled).
if [[ -z $noRootPasswd ]] && [ -t 0 ]; then
    nixos-enter --root "$mountPoint" -c '[[ -e /nix/var/nix/profiles/system/sw/bin/passwd ]] && echo "setting root password..." && /nix/var/nix/profiles/system/sw/bin/passwd'
    if nixos-enter --root "$mountPoint" -c 'test -e /nix/var/nix/profiles/system/sw/bin/passwd'; then
        set +e
        nixos-enter --root "$mountPoint" -c 'echo "setting root password..." && /nix/var/nix/profiles/system/sw/bin/passwd'
        exit_code=$?
        set -e

        if [[ $exit_code != 0 ]]; then
            echo "Setting a root password failed with the above printed error."
            echo "You can set the root password manually by executing \`nixos-enter --root ${mountPoint@Q}\` and then running \`passwd\` in the shell of the new system."
            exit $exit_code
        fi
    fi
fi

echo "installation finished!"
@@ -29,7 +29,7 @@ while [ "$#" -gt 0 ]; do
      --help)
        showSyntax
        ;;
      switch|boot|test|build|dry-build|dry-run|dry-activate|build-vm|build-vm-with-bootloader)
      switch|boot|test|build|edit|dry-build|dry-run|dry-activate|build-vm|build-vm-with-bootloader)
        if [ "$i" = dry-run ]; then i=dry-build; fi
        action="$i"
        ;;
@@ -227,6 +227,13 @@ if [ -z "$_NIXOS_REBUILD_REEXEC" -a -n "$canRun" -a -z "$fast" ]; then
    fi
fi

# Find configuration.nix and open editor instead of building.
if [ "$action" = edit ]; then
    NIXOS_CONFIG=${NIXOS_CONFIG:-$(nix-instantiate --find-file nixos-config)}
    exec "${EDITOR:-nano}" "$NIXOS_CONFIG"
    exit 1
fi


tmpDir=$(mktemp -t -d nixos-rebuild.XXXXXX)
SSHOPTS="$NIX_SSHOPTS -o ControlMaster=auto -o ControlPath=$tmpDir/ssh-%n -o ControlPersist=60"
@@ -260,6 +267,14 @@ if [ -n "$rollback" -o "$action" = dry-build ]; then
    buildNix=
fi

nixSystem() {
    machine="$(uname -m)"
    if [[ "$machine" =~ i.86 ]]; then
        machine=i686
    fi
    echo $machine-linux
}

prebuiltNix() {
    machine="$1"
    if [ "$machine" = x86_64 ]; then
@@ -279,7 +294,9 @@ if [ -n "$buildNix" ]; then
    nixDrv=
    if ! nixDrv="$(nix-instantiate '<nixpkgs/nixos>' --add-root $tmpDir/nix.drv --indirect -A config.nix.package.out "${extraBuildFlags[@]}")"; then
    if ! nixDrv="$(nix-instantiate '<nixpkgs>' --add-root $tmpDir/nix.drv --indirect -A nix "${extraBuildFlags[@]}")"; then
        nixStorePath="$(prebuiltNix "$(uname -m)")"
        if ! nixStorePath="$(nix-instantiate --eval '<nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix>' -A $(nixSystem) | sed -e 's/^"//' -e 's/"$//')"; then
            nixStorePath="$(prebuiltNix "$(uname -m)")"
        fi
        if ! nix-store -r $nixStorePath --add-root $tmpDir/nix --indirect \
            --option extra-binary-caches https://cache.nixos.org/; then
            echo "warning: don't know how to get latest Nix" >&2
@@ -57,7 +57,5 @@ with lib;

      # Enable the OpenSSH daemon.
      # services.openssh.enable = true;

      system.stateVersion = mkDefault "18.03";
    '';
}
@@ -1,4 +1,4 @@
{ config, lib, pkgs, baseModules, ... }:
{ config, lib, pkgs, baseModules, extraModules, modules, ... }:

with lib;
@@ -6,6 +6,8 @@ let

  cfg = config.documentation;

  manualModules = baseModules ++ optionals cfg.nixos.includeAllModules (extraModules ++ modules);

  /* For the purpose of generating docs, evaluate options with each derivation
    in `pkgs` (recursively) replaced by a fake with path "\${pkgs.attribute.path}".
    It isn't perfect, but it seems to cover a vast majority of use cases.
@@ -18,7 +20,7 @@ let
  options =
    let
      scrubbedEval = evalModules {
        modules = [ { nixpkgs.localSystem = config.nixpkgs.localSystem; } ] ++ baseModules;
        modules = [ { nixpkgs.localSystem = config.nixpkgs.localSystem; } ] ++ manualModules;
        args = (config._module.args) // { modules = [ ]; };
        specialArgs = { pkgs = scrubDerivations "pkgs" pkgs; };
      };
@@ -146,6 +148,17 @@ in
        '';
      };

      nixos.includeAllModules = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether the generated NixOS documentation should include documentation
          for all the options from all the NixOS modules included in the current
          <literal>configuration.nix</literal>. Disabling this will make the manual
          generator ignore options defined outside of <literal>baseModules</literal>.
        '';
      };

    };

  };
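For a configuration that imports third-party modules, flipping the new option makes their options appear in the locally built manual; a sketch:

  documentation.nixos.includeAllModules = true;

With the default of false the manual keeps documenting only baseModules, preserving the previous behaviour.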
@@ -272,7 +272,7 @@
      nzbget = 245;
      mosquitto = 246;
      toxvpn = 247;
      squeezelite = 248;
      # squeezelite = 248; # DynamicUser = true
      turnserver = 249;
      smokeping = 250;
      gocd-agent = 251;
@@ -44,6 +44,7 @@
  ./hardware/digitalbitbox.nix
  ./hardware/sensor/iio.nix
  ./hardware/ksm.nix
  ./hardware/ledger.nix
  ./hardware/mcelog.nix
  ./hardware/network/b43.nix
  ./hardware/nitrokey.nix
@@ -181,6 +182,7 @@
  ./services/audio/mpd.nix
  ./services/audio/mopidy.nix
  ./services/audio/slimserver.nix
  ./services/audio/snapserver.nix
  ./services/audio/squeezelite.nix
  ./services/audio/ympd.nix
  ./services/backup/bacula.nix
@@ -188,6 +190,7 @@
  ./services/backup/duplicati.nix
  ./services/backup/crashplan.nix
  ./services/backup/crashplan-small-business.nix
  ./services/backup/duplicity.nix
  ./services/backup/mysql-backup.nix
  ./services/backup/postgresql-backup.nix
  ./services/backup/restic.nix
@@ -196,9 +199,17 @@
  ./services/backup/tarsnap.nix
  ./services/backup/znapzend.nix
  ./services/cluster/hadoop/default.nix
  ./services/cluster/kubernetes/addons/dns.nix
  ./services/cluster/kubernetes/addons/dashboard.nix
  ./services/cluster/kubernetes/addon-manager.nix
  ./services/cluster/kubernetes/apiserver.nix
  ./services/cluster/kubernetes/controller-manager.nix
  ./services/cluster/kubernetes/default.nix
  ./services/cluster/kubernetes/dns.nix
  ./services/cluster/kubernetes/dashboard.nix
  ./services/cluster/kubernetes/flannel.nix
  ./services/cluster/kubernetes/kubelet.nix
  ./services/cluster/kubernetes/pki.nix
  ./services/cluster/kubernetes/proxy.nix
  ./services/cluster/kubernetes/scheduler.nix
  ./services/computing/boinc/client.nix
  ./services/computing/torque/server.nix
  ./services/computing/torque/mom.nix
@@ -259,6 +270,7 @@
  ./services/desktops/gnome3/gnome-online-accounts.nix
  ./services/desktops/gnome3/gnome-remote-desktop.nix
  ./services/desktops/gnome3/gnome-online-miners.nix
  ./services/desktops/gnome3/gnome-settings-daemon.nix
  ./services/desktops/gnome3/gnome-terminal-server.nix
  ./services/desktops/gnome3/gnome-user-share.nix
  ./services/desktops/gnome3/gpaste.nix
@@ -284,6 +296,7 @@
  ./services/hardware/acpid.nix
  ./services/hardware/actkbd.nix
  ./services/hardware/bluetooth.nix
  ./services/hardware/bolt.nix
  ./services/hardware/brltty.nix
  ./services/hardware/freefall.nix
  ./services/hardware/fwupd.nix
@@ -327,6 +340,7 @@
  ./services/logging/syslog-ng.nix
  ./services/logging/syslogd.nix
  ./services/mail/clamsmtp.nix
  ./services/mail/davmail.nix
  ./services/mail/dkimproxy-out.nix
  ./services/mail/dovecot.nix
  ./services/mail/dspam.nix
@@ -352,6 +366,7 @@
  ./services/misc/apache-kafka.nix
  ./services/misc/autofs.nix
  ./services/misc/autorandr.nix
  ./services/misc/beanstalkd.nix
  ./services/misc/bees.nix
  ./services/misc/bepasty.nix
  ./services/misc/canto-daemon.nix
@@ -413,7 +428,7 @@
  ./services/misc/parsoid.nix
  ./services/misc/phd.nix
  ./services/misc/plex.nix
  ./services/misc/plexpy.nix
  ./services/misc/tautulli.nix
  ./services/misc/pykms.nix
  ./services/misc/radarr.nix
  ./services/misc/redmine.nix
@@ -517,6 +532,7 @@
  ./services/networking/cntlm.nix
  ./services/networking/connman.nix
  ./services/networking/consul.nix
  ./services/networking/coredns.nix
  ./services/networking/coturn.nix
  ./services/networking/dante.nix
  ./services/networking/ddclient.nix
@@ -14,5 +14,9 @@
    libinput.enable = true; # for touchpad support on many laptops
  };

  # Enable sound in virtualbox appliances.
  hardware.pulseaudio.enable = true;
  hardware.pulseaudio.systemWide = true; # Needed since we run plasma as root.

  environment.systemPackages = [ pkgs.glxinfo pkgs.firefox ];
}
@@ -102,7 +102,7 @@ in
          # Emacs term mode doesn't support xterm title escape sequence (\e]0;)
          PS1="\n\[\033[$PROMPT_COLOR\][\u@\h:\w]\\$\[\033[0m\] "
        else
          PS1="\n\[\033[$PROMPT_COLOR\][\[\e]0;\u@\h: \w\a\]\u@\h:\w]\$\[\033[0m\] "
          PS1="\n\[\033[$PROMPT_COLOR\][\[\e]0;\u@\h: \w\a\]\u@\h:\w]\\$\[\033[0m\] "
        fi
        if test "$TERM" = "xterm"; then
          PS1="\[\033]2;\h:\u:\w\007\]$PS1"
@@ -169,6 +169,59 @@ in
      end
    '';

    programs.fish.interactiveShellInit = ''
      # add completions generated by NixOS to $fish_complete_path
      begin
        # joins with null byte to accommodate all characters in paths, then respectively gets all paths before (exclusive) / after (inclusive) the first one including "generated_completions",
        # splits by null byte, and then removes all empty lines produced by using 'string'
        set -l prev (string join0 $fish_complete_path | string match --regex "^.*?(?=\x00[^\x00]*generated_completions.*)" | string split0 | string match -er ".")
        set -l post (string join0 $fish_complete_path | string match --regex "[^\x00]*generated_completions.*" | string split0 | string match -er ".")
        set fish_complete_path $prev "/etc/fish/generated_completions" $post
      end
    '';

    environment.etc."fish/generated_completions".source =
      let
        patchedGenerator = pkgs.stdenv.mkDerivation {
          name = "fish_patched-completion-generator";
          srcs = [
            "${pkgs.fish}/share/fish/tools/create_manpage_completions.py"
            "${pkgs.fish}/share/fish/tools/deroff.py"
          ];
          unpackCmd = "cp $curSrc $(basename $curSrc)";
          sourceRoot = ".";
          patches = [ ./fish_completion-generator.patch ]; # to prevent collisions of identical completion files
          dontBuild = true;
          installPhase = ''
            mkdir -p $out
            cp * $out/
          '';
          preferLocalBuild = true;
          allowSubstitutes = false;
        };
        generateCompletions = package: pkgs.runCommand
          "${package.name}_fish-completions"
          (
            {
              inherit package;
              preferLocalBuild = true;
              allowSubstitutes = false;
            }
            // optionalAttrs (package ? meta.priority) { meta.priority = package.meta.priority; }
          )
          ''
            mkdir -p $out
            if [ -d $package/share/man ]; then
              find $package/share/man -type f | xargs ${pkgs.python3.interpreter} ${patchedGenerator}/create_manpage_completions.py --directory $out >/dev/null
            fi
          '';
      in
        pkgs.buildEnv {
          name = "system_fish-completions";
          ignoreCollisions = true;
          paths = map generateCompletions config.environment.systemPackages;
        };

    # include programs that bring their own completions
    environment.pathsToLink = []
      ++ optional cfg.vendor.config.enable "/share/fish/vendor_conf.d"
@@ -0,0 +1,11 @@
--- a/create_manpage_completions.py
+++ b/create_manpage_completions.py
@@ -776,8 +776,6 @@ def parse_manpage_at_path(manpage_path, output_directory):

     built_command_output.insert(0, "# " + CMDNAME)

-    # Output the magic word Autogenerated so we can tell if we can overwrite this
-    built_command_output.insert(1, "# Autogenerated from man page " + manpage_path)
     # built_command_output.insert(2, "# using " + parser.__class__.__name__) # XXX MISATTRIBUTES THE CULPABILE PARSER! Was really using Type2 but reporting TypeDeroffManParser

     for line in built_command_output:
@@ -85,11 +85,13 @@ in
        # SSH agent protocol doesn't support changing TTYs, so bind the agent
        # to every new TTY.
        ${pkgs.gnupg}/bin/gpg-connect-agent --quiet updatestartuptty /bye > /dev/null
      '');

    environment.extraInit = mkIf cfg.agent.enableSSHSupport ''
      if [ -z "$SSH_AUTH_SOCK" ]; then
        export SSH_AUTH_SOCK=$(${pkgs.gnupg}/bin/gpgconf --list-dirs agent-ssh-socket)
      fi
    '');
    '';

    assertions = [
      { assertion = cfg.agent.enableSSHSupport -> !config.programs.ssh.startAgent;
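The guard added here only has an effect when the agent's SSH support is enabled; a typical enabling configuration (a sketch, not part of this hunk) would be:

  programs.gnupg.agent = {
    enable = true;
    enableSSHSupport = true; # makes the SSH_AUTH_SOCK fallback above kick in
  };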
@@ -25,7 +25,7 @@ let
  '';

  lessKey = pkgs.runCommand "lesskey"
    { src = pkgs.writeText "lessconfig" configText; }
    { src = pkgs.writeText "lessconfig" configText; preferLocalBuild = true; }
    "${pkgs.less}/bin/lesskey -o $out $src";

in
@@ -40,9 +40,19 @@ with lib;
  (mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ])
  (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
  (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
  (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "port" ] ["services" "kubernetes" "apiserver" "insecurePort"])
  (mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
  (mkRenamedOptionModule [ "services" "kubernetes" "addons" "dashboard" "enableRBAC" ] [ "services" "kubernetes" "addons" "dashboard" "rbac" "enable" ])
  (mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "address" ] ["services" "kubernetes" "controllerManager" "bindAddress"])
  (mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "port" ] ["services" "kubernetes" "controllerManager" "insecurePort"])
  (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "servers" ] [ "services" "kubernetes" "apiserver" "etcd" "servers" ])
  (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "keyFile" ] [ "services" "kubernetes" "apiserver" "etcd" "keyFile" ])
  (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "certFile" ] [ "services" "kubernetes" "apiserver" "etcd" "certFile" ])
  (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
  (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
  (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
  (mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
  (mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
  (mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
  (mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ])
  (mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "defaultListenAddress" ])
@@ -176,6 +186,9 @@ with lib;
  # parsoid
  (mkRemovedOptionModule [ "services" "parsoid" "interwikis" ] [ "services" "parsoid" "wikis" ])

  # plexpy / tautulli
  (mkRenamedOptionModule [ "services" "plexpy" ] [ "services" "tautulli" ])

  # piwik was renamed to matomo
  (mkRenamedOptionModule [ "services" "piwik" "enable" ] [ "services" "matomo" "enable" ])
  (mkRenamedOptionModule [ "services" "piwik" "webServerUser" ] [ "services" "matomo" "webServerUser" ])
@@ -14,6 +14,7 @@ let
    { files =
        cfg.certificateFiles ++
        [ (builtins.toFile "extra.crt" (concatStringsSep "\n" cfg.certificates)) ];
      preferLocalBuild = true;
    }
    ''
      cat $files > $out
@@ -7,7 +7,7 @@ let

  boolToStr = b: if b then "yes" else "no";

  configFile = ''
  configFilePam = ''
    [duo]
    ikey=${cfg.ikey}
    skey=${cfg.skey}
@@ -16,21 +16,24 @@ let
    failmode=${cfg.failmode}
    pushinfo=${boolToStr cfg.pushinfo}
    autopush=${boolToStr cfg.autopush}
    motd=${boolToStr cfg.motd}
    prompts=${toString cfg.prompts}
    accept_env_factor=${boolToStr cfg.acceptEnvFactor}
    fallback_local_ip=${boolToStr cfg.fallbackLocalIP}
  '';

  configFileLogin = configFilePam + ''
    motd=${boolToStr cfg.motd}
    accept_env_factor=${boolToStr cfg.acceptEnvFactor}
  '';

  loginCfgFile = optional cfg.ssh.enable
    { source = pkgs.writeText "login_duo.conf" configFile;
    { source = pkgs.writeText "login_duo.conf" configFileLogin;
      mode = "0600";
      user = "sshd";
      target = "duo/login_duo.conf";
    };

  pamCfgFile = optional cfg.pam.enable
    { source = pkgs.writeText "pam_duo.conf" configFile;
    { source = pkgs.writeText "pam_duo.conf" configFilePam;
      mode = "0600";
      user = "sshd";
      target = "duo/pam_duo.conf";
@@ -180,12 +183,6 @@ in
  };

  config = mkIf (cfg.ssh.enable || cfg.pam.enable) {
    assertions =
      [ { assertion = !cfg.pam.enable;
          message = "PAM support is currently not implemented.";
        }
      ];

    environment.systemPackages = [ pkgs.duo-unix ];

    security.wrappers.login_duo.source = "${pkgs.duo-unix.out}/bin/login_duo";
@@ -131,6 +131,18 @@ let
          '';
        };

        duoSecurity = {
          enable = mkOption {
            default = false;
            type = types.bool;
            description = ''
              If set, use the Duo Security pam module
              <literal>pam_duo</literal> for authentication. Requires
              configuration of <option>security.duosec</option> options.
            '';
          };
        };

        startSession = mkOption {
          default = false;
          type = types.bool;
@@ -340,7 +352,8 @@ let
          || cfg.pamMount
          || cfg.enableKwallet
          || cfg.enableGnomeKeyring
          || cfg.googleAuthenticator.enable)) ''
          || cfg.googleAuthenticator.enable
          || cfg.duoSecurity.enable)) ''
        auth required pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth
        ${optionalString config.security.pam.enableEcryptfs
          "auth optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
@@ -350,9 +363,11 @@ let
          ("auth optional ${pkgs.plasma5.kwallet-pam}/lib/security/pam_kwallet5.so" +
           " kwalletd=${pkgs.libsForQt5.kwallet.bin}/bin/kwalletd5")}
        ${optionalString cfg.enableGnomeKeyring
          ("auth optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so")}
          "auth optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so"}
        ${optionalString cfg.googleAuthenticator.enable
          "auth required ${pkgs.googleAuthenticator}/lib/security/pam_google_authenticator.so no_increment_hotp"}
          "auth required ${pkgs.googleAuthenticator}/lib/security/pam_google_authenticator.so no_increment_hotp"}
        ${optionalString cfg.duoSecurity.enable
          "auth required ${pkgs.duo-unix}/lib/security/pam_duo.so"}
      '') + ''
        ${optionalString cfg.unixAuth
          "auth sufficient pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth try_first_pass"}
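With this wiring in place, turning Duo on for one PAM service becomes an option flip next to the existing security.duosec keys; a hedged sketch with placeholder values (ikey and skey are the fields referenced in the config file above, and sshd is just an example service):

  security.duosec = {
    ikey = "DIXXXXXXXXXXXXXXXXXX"; # placeholder integration key
    skey = "...";                  # placeholder; keep real secrets out of the store
  };
  security.pam.services.sshd.duoSecurity.enable = true;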
@@ -215,7 +215,10 @@ in
    environment.etc = singleton
      { source =
          pkgs.runCommand "sudoers"
          { src = pkgs.writeText "sudoers-in" cfg.configFile; }
          {
            src = pkgs.writeText "sudoers-in" cfg.configFile;
            preferLocalBuild = true;
          }
          # Make sure that the sudoers file is syntactically valid.
          # (currently disabled - NIXOS-66)
          "${pkgs.buildPackages.sudo}/sbin/visudo -f $src -c && cp $src $out";
@@ -0,0 +1,217 @@
{ config, lib, pkgs, ... }:

with lib;

let

  package = "snapcast";
  name = "snapserver";

  cfg = config.services.snapserver;

  # Using types.nullOr to inherit upstream defaults.
  sampleFormat = mkOption {
    type = with types; nullOr str;
    default = null;
    description = ''
      Default sample format.
    '';
    example = "48000:16:2";
  };

  codec = mkOption {
    type = with types; nullOr str;
    default = null;
    description = ''
      Default audio compression method.
    '';
    example = "flac";
  };

  streamToOption = name: opt:
    let
      os = val:
        optionalString (val != null) "${val}";
      os' = prefixx: val:
        optionalString (val != null) (prefixx + "${val}");
      flatten = key: value:
        "&${key}=${value}";
    in
      "-s ${opt.type}://" + os opt.location + "?" + os' "name=" name
        + concatStrings (mapAttrsToList flatten opt.query);

  optionalNull = val: ret:
    optional (val != null) ret;

  optionString = concatStringsSep " " (mapAttrsToList streamToOption cfg.streams
    ++ ["-p ${toString cfg.port}"]
    ++ ["--controlPort ${toString cfg.controlPort}"]
    ++ optionalNull cfg.sampleFormat "--sampleFormat ${cfg.sampleFormat}"
    ++ optionalNull cfg.codec "-c ${cfg.codec}"
    ++ optionalNull cfg.streamBuffer "--streamBuffer ${cfg.streamBuffer}"
    ++ optionalNull cfg.buffer "-b ${cfg.buffer}"
    ++ optional cfg.sendToMuted "--sendToMuted");

in {

  ###### interface

  options = {

    services.snapserver = {

      enable = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to enable snapserver.
        '';
      };

      port = mkOption {
        type = types.port;
        default = 1704;
        description = ''
          The port that snapclients can connect to.
        '';
      };

      controlPort = mkOption {
        type = types.port;
        default = 1705;
        description = ''
          The port for control connections (JSON-RPC).
        '';
      };

      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Whether to automatically open the specified ports in the firewall.
        '';
      };

      inherit sampleFormat;
      inherit codec;

      streams = mkOption {
        type = with types; attrsOf (submodule {
          options = {
            location = mkOption {
              type = types.path;
              description = ''
                The location of the pipe.
              '';
            };
            type = mkOption {
              type = types.enum [ "pipe" "file" "process" "spotify" "airplay" ];
              default = "pipe";
              description = ''
                The type of input stream.
              '';
            };
            query = mkOption {
              type = attrsOf str;
              default = {};
              description = ''
                Key-value pairs that convey additional parameters about a stream.
              '';
              example = literalExample ''
                # for type == "pipe":
                {
                  mode = "listen";
                };
                # for type == "process":
                {
                  params = "--param1 --param2";
                  logStderr = "true";
                };
              '';
            };
            inherit sampleFormat;
            inherit codec;
          };
        });
        default = { default = {}; };
        description = ''
          The definition for an input source.
        '';
        example = literalExample ''
          {
            mpd = {
              type = "pipe";
              location = "/run/snapserver/mpd";
              sampleFormat = "48000:16:2";
              codec = "pcm";
            };
          };
        '';
      };

      streamBuffer = mkOption {
        type = with types; nullOr int;
        default = null;
        description = ''
          Stream read (input) buffer in ms.
        '';
        example = 20;
      };

      buffer = mkOption {
        type = with types; nullOr int;
        default = null;
        description = ''
          Network buffer in ms.
        '';
        example = 1000;
      };

      sendToMuted = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Send audio to muted clients.
        '';
      };
    };

  };


  ###### implementation

  config = mkIf cfg.enable {

    systemd.services.snapserver = {
      after = [ "network.target" ];
      description = "Snapserver";
      wantedBy = [ "multi-user.target" ];
      before = [ "mpd.service" "mopidy.service" ];

      serviceConfig = {
        DynamicUser = true;
        ExecStart = "${pkgs.snapcast}/bin/snapserver --daemon ${optionString}";
        Type = "forking";
        LimitRTPRIO = 50;
        LimitRTTIME = "infinity";
        NoNewPrivileges = true;
        PIDFile = "/run/${name}/pid";
        ProtectKernelTunables = true;
        ProtectControlGroups = true;
        ProtectKernelModules = true;
        RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
        RestrictNamespaces = true;
        RuntimeDirectory = name;
        StateDirectory = name;
      };
    };

    networking.firewall.allowedTCPPorts = optionals cfg.openFirewall [ cfg.port cfg.controlPort ];
  };

  meta = {
    maintainers = with maintainers; [ tobim ];
  };

}
@@ -3,8 +3,7 @@
with lib;

let
  uid = config.ids.uids.squeezelite;
  dataDir = "/var/lib/squeezelite";
  cfg = config.services.squeezelite;

in {
@@ -17,14 +16,6 @@ in {

    enable = mkEnableOption "Squeezelite, a software Squeezebox emulator";

    dataDir = mkOption {
      default = "/var/lib/squeezelite";
      type = types.str;
      description = ''
        The directory where Squeezelite stores its name file.
      '';
    };

    extraArguments = mkOption {
      default = "";
      type = types.str;
@@ -46,22 +37,14 @@ in {
      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" "sound.target" ];
      description = "Software Squeezebox emulator";
      preStart = "mkdir -p ${cfg.dataDir} && chown -R squeezelite ${cfg.dataDir}";
      serviceConfig = {
        ExecStart = "${pkgs.squeezelite}/bin/squeezelite -N ${cfg.dataDir}/player-name ${cfg.extraArguments}";
        User = "squeezelite";
        PermissionsStartOnly = true;
        DynamicUser = true;
        ExecStart = "${pkgs.squeezelite}/bin/squeezelite -N ${dataDir}/player-name ${cfg.extraArguments}";
        StateDirectory = builtins.baseNameOf dataDir;
        SupplementaryGroups = "audio";
      };
    };

    users.users.squeezelite = {
      inherit uid;
      group = "nogroup";
      extraGroups = [ "audio" ];
      description = "Squeezelite user";
      home = "${cfg.dataDir}";
    };

  };

}
@@ -0,0 +1,141 @@
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.duplicity;

  stateDirectory = "/var/lib/duplicity";

  localTarget = if hasPrefix "file://" cfg.targetUrl
    then removePrefix "file://" cfg.targetUrl else null;

in {
  options.services.duplicity = {
    enable = mkEnableOption "backups with duplicity";

    root = mkOption {
      type = types.path;
      default = "/";
      description = ''
        Root directory to backup.
      '';
    };

    include = mkOption {
      type = types.listOf types.str;
      default = [];
      example = [ "/home" ];
      description = ''
        List of paths to include into the backups. See the FILE SELECTION
        section in <citerefentry><refentrytitle>duplicity</refentrytitle>
        <manvolnum>1</manvolnum></citerefentry> for details on the syntax.
      '';
    };

    exclude = mkOption {
      type = types.listOf types.str;
      default = [];
      description = ''
        List of paths to exclude from backups. See the FILE SELECTION section in
        <citerefentry><refentrytitle>duplicity</refentrytitle>
        <manvolnum>1</manvolnum></citerefentry> for details on the syntax.
      '';
    };

    targetUrl = mkOption {
      type = types.str;
      example = "s3://host:port/prefix";
      description = ''
        Target url to backup to. See the URL FORMAT section in
        <citerefentry><refentrytitle>duplicity</refentrytitle>
        <manvolnum>1</manvolnum></citerefentry> for supported urls.
      '';
    };

    secretFile = mkOption {
      type = types.nullOr types.path;
      default = null;
      description = ''
        Path of a file containing secrets (gpg passphrase, access key...) in
        the format of EnvironmentFile as described by
        <citerefentry><refentrytitle>systemd.exec</refentrytitle>
        <manvolnum>5</manvolnum></citerefentry>. For example:
        <programlisting>
        PASSPHRASE=<replaceable>...</replaceable>
        AWS_ACCESS_KEY_ID=<replaceable>...</replaceable>
        AWS_SECRET_ACCESS_KEY=<replaceable>...</replaceable>
        </programlisting>
      '';
    };

    frequency = mkOption {
      type = types.nullOr types.str;
      default = "daily";
      description = ''
        Run duplicity with the given frequency (see
        <citerefentry><refentrytitle>systemd.time</refentrytitle>
        <manvolnum>7</manvolnum></citerefentry> for the format).
        If null, do not run automatically.
      '';
    };

    extraFlags = mkOption {
      type = types.listOf types.str;
      default = [];
      example = [ "--full-if-older-than" "1M" ];
      description = ''
        Extra command-line flags passed to duplicity. See
        <citerefentry><refentrytitle>duplicity</refentrytitle>
        <manvolnum>1</manvolnum></citerefentry>.
      '';
    };
  };

  config = mkIf cfg.enable {
    systemd = {
      services.duplicity = {
        description = "backup files with duplicity";

        environment.HOME = stateDirectory;

        serviceConfig = {
          ExecStart = ''
            ${pkgs.duplicity}/bin/duplicity ${escapeShellArgs (
              [
                cfg.root
                cfg.targetUrl
                "--archive-dir" stateDirectory
              ]
              ++ concatMap (p: [ "--include" p ]) cfg.include
              ++ concatMap (p: [ "--exclude" p ]) cfg.exclude
              ++ cfg.extraFlags)}
          '';
          PrivateTmp = true;
          ProtectSystem = "strict";
          ProtectHome = "read-only";
          StateDirectory = baseNameOf stateDirectory;
        } // optionalAttrs (localTarget != null) {
          ReadWritePaths = localTarget;
        } // optionalAttrs (cfg.secretFile != null) {
          EnvironmentFile = cfg.secretFile;
        };
      } // optionalAttrs (cfg.frequency != null) {
        startAt = cfg.frequency;
      };

      tmpfiles.rules = optional (localTarget != null) "d ${localTarget} 0700 root root -";
    };

    assertions = singleton {
      # Duplicity will fail if the last file selection option is an include. It
      # is not always possible to detect but this simple case can be caught.
      assertion = cfg.include != [] -> cfg.exclude != [] || cfg.extraFlags != [];
      message = ''
        Duplicity will fail if you only specify included paths ("Because the
        default is to include all files, the expression is redundant. Exiting
        because this probably isn't what you meant.")
      '';
    };
  };
}
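A minimal consumer of the new module, using only options defined above (URLs and paths are placeholders):

  services.duplicity = {
    enable = true;
    root = "/home";
    targetUrl = "file:///var/backup/duplicity"; # local target, pre-created by the tmpfiles rule
    secretFile = "/run/keys/duplicity-env";     # PASSPHRASE=... in EnvironmentFile format
    extraFlags = [ "--full-if-older-than" "1M" ];
  };

Note the assertion at the end of the module: an include-only file selection is rejected, so pair include with exclude or extraFlags.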
@@ -0,0 +1,167 @@
{ config, lib, pkgs, ... }:

with lib;

let
  top = config.services.kubernetes;
  cfg = top.addonManager;

  isRBACEnabled = elem "RBAC" top.apiserver.authorizationMode;

  addons = pkgs.runCommand "kubernetes-addons" { } ''
    mkdir -p $out
    # since we are mounting the addons to the addon manager, they need to be copied
    ${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon:
      pkgs.writeTextDir "${name}.json" (builtins.toJSON addon)
    ) (cfg.addons))}
  '';
in
{
  ###### interface
  options.services.kubernetes.addonManager = with lib.types; {

    bootstrapAddons = mkOption {
      description = ''
        Bootstrap addons are like regular addons, but they are applied with cluster-admin rights.
        They are applied at addon-manager startup only.
      '';
      default = { };
      type = attrsOf attrs;
      example = literalExample ''
        {
          "my-service" = {
            "apiVersion" = "v1";
            "kind" = "Service";
            "metadata" = {
              "name" = "my-service";
              "namespace" = "default";
            };
            "spec" = { ... };
          };
        }
      '';
    };

    addons = mkOption {
      description = "Kubernetes addons (any kind of Kubernetes resource can be an addon).";
      default = { };
      type = attrsOf (either attrs (listOf attrs));
      example = literalExample ''
        {
          "my-service" = {
            "apiVersion" = "v1";
            "kind" = "Service";
            "metadata" = {
              "name" = "my-service";
              "namespace" = "default";
            };
            "spec" = { ... };
          };
        }
        // import <nixpkgs/nixos/modules/services/cluster/kubernetes/dashboard.nix> { cfg = config.services.kubernetes; };
      '';
    };

    enable = mkEnableOption "Kubernetes addon manager";
  };

  ###### implementation
  config = mkIf cfg.enable {
    environment.etc."kubernetes/addons".source = "${addons}/";

    systemd.services.kube-addon-manager = {
      description = "Kubernetes addon manager";
      wantedBy = [ "kubernetes.target" ];
      after = [ "kube-apiserver.service" ];
      environment.ADDON_PATH = "/etc/kubernetes/addons/";
      path = [ pkgs.gawk ];
      serviceConfig = {
        Slice = "kubernetes.slice";
        ExecStart = "${top.package}/bin/kube-addons";
        WorkingDirectory = top.dataDir;
        User = "kubernetes";
        Group = "kubernetes";
        Restart = "on-failure";
        RestartSec = 10;
      };
    };

    services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
      (let
        name = "system:kube-addon-manager";
        namespace = "kube-system";
      in
      {

        kube-addon-manager-r = {
          apiVersion = "rbac.authorization.k8s.io/v1";
          kind = "Role";
          metadata = {
            inherit name namespace;
          };
          rules = [{
            apiGroups = ["*"];
            resources = ["*"];
            verbs = ["*"];
          }];
        };

        kube-addon-manager-rb = {
          apiVersion = "rbac.authorization.k8s.io/v1";
          kind = "RoleBinding";
          metadata = {
            inherit name namespace;
          };
          roleRef = {
            apiGroup = "rbac.authorization.k8s.io";
            kind = "Role";
            inherit name;
          };
          subjects = [{
            apiGroup = "rbac.authorization.k8s.io";
            kind = "User";
            inherit name;
          }];
        };

        kube-addon-manager-cluster-lister-cr = {
          apiVersion = "rbac.authorization.k8s.io/v1";
          kind = "ClusterRole";
          metadata = {
            name = "${name}:cluster-lister";
          };
          rules = [{
            apiGroups = ["*"];
            resources = ["*"];
            verbs = ["list"];
          }];
        };

        kube-addon-manager-cluster-lister-crb = {
          apiVersion = "rbac.authorization.k8s.io/v1";
          kind = "ClusterRoleBinding";
          metadata = {
            name = "${name}:cluster-lister";
          };
          roleRef = {
            apiGroup = "rbac.authorization.k8s.io";
            kind = "ClusterRole";
            name = "${name}:cluster-lister";
          };
          subjects = [{
            kind = "User";
            inherit name;
          }];
        };
      });

    services.kubernetes.pki.certs = {
      addonManager = top.lib.mkCert {
        name = "kube-addon-manager";
        CN = "system:kube-addon-manager";
        action = "systemctl restart kube-addon-manager.service";
      };
    };
  };

}
@@ -38,6 +38,18 @@ in {
      type = types.int;
    };

    reconcileMode = mkOption {
      description = ''
        Controls the addon manager reconciliation mode for the DNS addon.

        Setting reconcile mode to EnsureExists makes it possible to tailor DNS behavior by editing the coredns ConfigMap.

        See: <link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/addon-manager/README.md"/>.
      '';
      default = "Reconcile";
      type = types.enum [ "Reconcile" "EnsureExists" ];
    };

    coredns = mkOption {
      description = "Docker image to seed for the CoreDNS container.";
      type = types.attrs;
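An operator who wants to hand-edit the CoreDNS ConfigMap without the addon manager reverting it would pick the looser mode; a sketch assuming the option path of the surrounding DNS addon module:

  services.kubernetes.addons.dns.reconcileMode = "EnsureExists";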
@@ -54,21 +66,7 @@ in {
    services.kubernetes.kubelet.seedDockerImages =
      singleton (pkgs.dockerTools.pullImage cfg.coredns);

    services.kubernetes.addonManager.addons = {
      coredns-sa = {
        apiVersion = "v1";
        kind = "ServiceAccount";
        metadata = {
          labels = {
            "addonmanager.kubernetes.io/mode" = "Reconcile";
            "k8s-app" = "kube-dns";
            "kubernetes.io/cluster-service" = "true";
          };
          name = "coredns";
          namespace = "kube-system";
        };
      };

    services.kubernetes.addonManager.bootstrapAddons = {
      coredns-cr = {
        apiVersion = "rbac.authorization.k8s.io/v1beta1";
        kind = "ClusterRole";
@@ -123,13 +121,29 @@ in {
        }
      ];
    };
  };

  services.kubernetes.addonManager.addons = {
    coredns-sa = {
      apiVersion = "v1";
      kind = "ServiceAccount";
      metadata = {
        labels = {
          "addonmanager.kubernetes.io/mode" = "Reconcile";
          "k8s-app" = "kube-dns";
          "kubernetes.io/cluster-service" = "true";
        };
        name = "coredns";
        namespace = "kube-system";
      };
    };

    coredns-cm = {
      apiVersion = "v1";
      kind = "ConfigMap";
      metadata = {
        labels = {
          "addonmanager.kubernetes.io/mode" = "Reconcile";
          "addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
          "k8s-app" = "kube-dns";
          "kubernetes.io/cluster-service" = "true";
        };
@@ -160,7 +174,7 @@ in {
      kind = "Deployment";
      metadata = {
        labels = {
          "addonmanager.kubernetes.io/mode" = "Reconcile";
          "addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
          "k8s-app" = "kube-dns";
          "kubernetes.io/cluster-service" = "true";
          "kubernetes.io/name" = "CoreDNS";
@ -0,0 +1,428 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
top = config.services.kubernetes;
|
||||
cfg = top.apiserver;
|
||||
|
||||
isRBACEnabled = elem "RBAC" cfg.authorizationMode;
|
||||
|
||||
apiserverServiceIP = (concatStringsSep "." (
|
||||
take 3 (splitString "." cfg.serviceClusterIpRange
|
||||
)) + ".1");
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
options.services.kubernetes.apiserver = with lib.types; {
|
||||
|
||||
advertiseAddress = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver IP address on which to advertise the apiserver
|
||||
to members of the cluster. This address must be reachable by the rest
|
||||
of the cluster.
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr str;
|
||||
};
|
||||
|
||||
allowPrivileged = mkOption {
|
||||
description = "Whether to allow privileged containers on Kubernetes.";
|
||||
default = false;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
authorizationMode = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
|
||||
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
|
||||
'';
|
||||
default = ["RBAC" "Node"]; # Enabling RBAC by default, although kubernetes default is AllowAllow
|
||||
type = listOf (enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
|
||||
};
|
||||
|
||||
authorizationPolicy = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver authorization policy file. See
|
||||
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
|
||||
'';
|
||||
default = [];
|
||||
type = listOf attrs;
|
||||
};
|
||||
|
||||
basicAuthFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver basic authentication file. See
|
||||
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
bindAddress = mkOption {
|
||||
description = ''
|
||||
The IP address on which to listen for the --secure-port port.
|
||||
The associated interface(s) must be reachable by the rest
|
||||
of the cluster, and by CLI/web clients.
|
||||
'';
|
||||
default = "0.0.0.0";
|
||||
type = str;
|
||||
};
|
||||
|
||||
clientCaFile = mkOption {
|
||||
description = "Kubernetes apiserver CA file for client auth.";
|
||||
default = top.caFile;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
disableAdmissionPlugins = mkOption {
|
||||
description = ''
|
||||
Kubernetes admission control plugins to disable. See
|
||||
<link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
|
||||
'';
|
||||
default = [];
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
enable = mkEnableOption "Kubernetes apiserver";
|
||||
|
||||
enableAdmissionPlugins = mkOption {
|
||||
description = ''
|
||||
Kubernetes admission control plugins to enable. See
|
||||
<link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
|
||||
'';
|
||||
default = [
|
||||
"NamespaceLifecycle" "LimitRanger" "ServiceAccount"
|
||||
"ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"
|
||||
"NodeRestriction"
|
||||
];
|
||||
example = [
|
||||
"NamespaceLifecycle" "NamespaceExists" "LimitRanger"
|
||||
"SecurityContextDeny" "ServiceAccount" "ResourceQuota"
|
||||
"PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
|
||||
];
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
etcd = {
|
||||
servers = mkOption {
|
||||
description = "List of etcd servers.";
|
||||
default = ["http://127.0.0.1:2379"];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
keyFile = mkOption {
|
||||
description = "Etcd key file.";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
certFile = mkOption {
|
||||
description = "Etcd cert file.";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
caFile = mkOption {
|
||||
description = "Etcd ca file.";
|
||||
default = top.caFile;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes apiserver extra command line options.";
|
||||
default = "";
|
||||
type = str;
|
||||
};
|
||||
|
||||
extraSANs = mkOption {
|
||||
description = "Extra x509 Subject Alternative Names to be added to the kubernetes apiserver tls cert.";
|
||||
default = [];
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
featureGates = mkOption {
|
||||
description = "List set of feature gates";
|
||||
default = top.featureGates;
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
insecureBindAddress = mkOption {
|
||||
description = "The IP address on which to serve the --insecure-port.";
|
||||
default = "127.0.0.1";
|
||||
type = str;
|
||||
};
|
||||
|
||||
insecurePort = mkOption {
|
||||
description = "Kubernetes apiserver insecure listening port. (0 = disabled)";
|
||||
default = 0;
|
||||
type = int;
|
||||
};
|
||||
|
||||
kubeletClientCaFile = mkOption {
|
||||
description = "Path to a cert file for connecting to kubelet.";
|
||||
default = top.caFile;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
kubeletClientCertFile = mkOption {
|
||||
description = "Client certificate to use for connections to kubelet.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
kubeletClientKeyFile = mkOption {
|
||||
description = "Key to use for connections to kubelet.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
kubeletHttps = mkOption {
|
||||
description = "Whether to use https for connections to kubelet.";
|
||||
default = true;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
runtimeConfig = mkOption {
|
||||
description = ''
|
||||
Api runtime configuration. See
|
||||
<link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
|
||||
'';
|
||||
default = "authentication.k8s.io/v1beta1=true";
|
||||
example = "api/all=false,api/v1=true";
|
||||
type = str;
|
||||
};
|
||||
|
||||
storageBackend = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver storage backend.
|
||||
'';
|
||||
default = "etcd3";
|
||||
type = enum ["etcd2" "etcd3"];
|
||||
};
|
||||
|
||||
securePort = mkOption {
|
||||
description = "Kubernetes apiserver secure port.";
|
||||
default = 6443;
|
||||
type = int;
|
||||
};
|
||||
|
||||
serviceAccountKeyFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
|
||||
used to verify ServiceAccount tokens. By default tls private key file
|
||||
is used.
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
serviceClusterIpRange = mkOption {
|
||||
description = ''
|
||||
A CIDR notation IP range from which to assign service cluster IPs.
|
||||
This must not overlap with any IP ranges assigned to nodes for pods.
|
||||
'';
|
||||
default = "10.0.0.0/24";
|
||||
type = str;
|
||||
};
|
||||
|
||||
tlsCertFile = mkOption {
|
||||
description = "Kubernetes apiserver certificate file.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
tlsKeyFile = mkOption {
|
||||
description = "Kubernetes apiserver private key file.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
tokenAuthFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver token authentication file. See
|
||||
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
verbosity = mkOption {
|
||||
description = ''
|
||||
Optional glog verbosity level for logging statements. See
|
||||
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr int;
|
||||
};
|
||||
|
||||
webhookConfig = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
|
||||
See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
config = mkMerge [
|
||||
|
||||
(mkIf cfg.enable {
|
||||
systemd.services.kube-apiserver = {
|
||||
description = "Kubernetes APIServer Service";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
ExecStart = ''${top.package}/bin/kube-apiserver \
|
||||
--allow-privileged=${boolToString cfg.allowPrivileged} \
|
||||
--authorization-mode=${concatStringsSep "," cfg.authorizationMode} \
|
||||
${optionalString (elem "ABAC" cfg.authorizationMode)
|
||||
"--authorization-policy-file=${
|
||||
pkgs.writeText "kube-auth-policy.jsonl"
|
||||
(concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)
|
||||
}"
|
||||
} \
|
||||
${optionalString (elem "Webhook" cfg.authorizationMode)
|
||||
"--authorization-webhook-config-file=${cfg.webhookConfig}"
|
||||
} \
|
||||
--bind-address=${cfg.bindAddress} \
|
||||
${optionalString (cfg.advertiseAddress != null)
|
||||
"--advertise-address=${cfg.advertiseAddress}"} \
|
||||
${optionalString (cfg.clientCaFile != null)
|
||||
"--client-ca-file=${cfg.clientCaFile}"} \
|
||||
--disable-admission-plugins=${concatStringsSep "," cfg.disableAdmissionPlugins} \
|
||||
--enable-admission-plugins=${concatStringsSep "," cfg.enableAdmissionPlugins} \
|
||||
--etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
|
||||
${optionalString (cfg.etcd.caFile != null)
|
||||
"--etcd-cafile=${cfg.etcd.caFile}"} \
|
||||
${optionalString (cfg.etcd.certFile != null)
|
||||
"--etcd-certfile=${cfg.etcd.certFile}"} \
|
||||
${optionalString (cfg.etcd.keyFile != null)
|
||||
"--etcd-keyfile=${cfg.etcd.keyFile}"} \
|
||||
${optionalString (cfg.featureGates != [])
|
||||
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
|
||||
${optionalString (cfg.basicAuthFile != null)
|
||||
"--basic-auth-file=${cfg.basicAuthFile}"} \
|
||||
          --kubelet-https=${boolToString cfg.kubeletHttps} \
          ${optionalString (cfg.kubeletClientCaFile != null)
            "--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"} \
          ${optionalString (cfg.kubeletClientCertFile != null)
            "--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
          ${optionalString (cfg.kubeletClientKeyFile != null)
            "--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
          --insecure-bind-address=${cfg.insecureBindAddress} \
          --insecure-port=${toString cfg.insecurePort} \
          ${optionalString (cfg.runtimeConfig != "")
            "--runtime-config=${cfg.runtimeConfig}"} \
          --secure-port=${toString cfg.securePort} \
          ${optionalString (cfg.serviceAccountKeyFile != null)
            "--service-account-key-file=${cfg.serviceAccountKeyFile}"} \
          --service-cluster-ip-range=${cfg.serviceClusterIpRange} \
          --storage-backend=${cfg.storageBackend} \
          ${optionalString (cfg.tlsCertFile != null)
            "--tls-cert-file=${cfg.tlsCertFile}"} \
          ${optionalString (cfg.tlsKeyFile != null)
            "--tls-private-key-file=${cfg.tlsKeyFile}"} \
          ${optionalString (cfg.tokenAuthFile != null)
            "--token-auth-file=${cfg.tokenAuthFile}"} \
          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
          ${cfg.extraOpts}
        '';
        WorkingDirectory = top.dataDir;
        User = "kubernetes";
        Group = "kubernetes";
        AmbientCapabilities = "cap_net_bind_service";
        Restart = "on-failure";
        RestartSec = 5;
      };
    };

    services.etcd = {
      clientCertAuth = mkDefault true;
      peerClientCertAuth = mkDefault true;
      listenClientUrls = mkDefault ["https://0.0.0.0:2379"];
      listenPeerUrls = mkDefault ["https://0.0.0.0:2380"];
      advertiseClientUrls = mkDefault ["https://${top.masterAddress}:2379"];
      initialCluster = mkDefault ["${top.masterAddress}=https://${top.masterAddress}:2380"];
      name = mkDefault top.masterAddress;
      initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
    };

    services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {

      apiserver-kubelet-api-admin-crb = {
        apiVersion = "rbac.authorization.k8s.io/v1";
        kind = "ClusterRoleBinding";
        metadata = {
          name = "system:kube-apiserver:kubelet-api-admin";
        };
        roleRef = {
          apiGroup = "rbac.authorization.k8s.io";
          kind = "ClusterRole";
          name = "system:kubelet-api-admin";
        };
        subjects = [{
          kind = "User";
          name = "system:kube-apiserver";
        }];
      };

    };

    services.kubernetes.pki.certs = with top.lib; {
      apiServer = mkCert {
        name = "kube-apiserver";
        CN = "kubernetes";
        hosts = [
          "kubernetes.default.svc"
          "kubernetes.default.svc.${top.addons.dns.clusterDomain}"
          cfg.advertiseAddress
          top.masterAddress
          apiserverServiceIP
          "127.0.0.1"
        ] ++ cfg.extraSANs;
        action = "systemctl restart kube-apiserver.service";
      };
      apiserverKubeletClient = mkCert {
        name = "kube-apiserver-kubelet-client";
        CN = "system:kube-apiserver";
        action = "systemctl restart kube-apiserver.service";
      };
      apiserverEtcdClient = mkCert {
        name = "kube-apiserver-etcd-client";
        CN = "etcd-client";
        action = "systemctl restart kube-apiserver.service";
      };
      clusterAdmin = mkCert {
        name = "cluster-admin";
        CN = "cluster-admin";
        fields = {
          O = "system:masters";
        };
        privateKeyOwner = "root";
      };
      etcd = mkCert {
        name = "etcd";
        CN = top.masterAddress;
        hosts = [
          "etcd.local"
          "etcd.${top.addons.dns.clusterDomain}"
          top.masterAddress
          cfg.advertiseAddress
        ];
        privateKeyOwner = "etcd";
        action = "systemctl restart etcd.service";
      };
    };

  })

  ];

}
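A note on the flag-rendering pattern above: optionalString emits its string only when the guard holds, so nullable options that stay unset simply disappear from the assembled command line. A minimal standalone sketch of the same pattern (the file path is a hypothetical value, not a default from this module), evaluable in nix repl:

  with (import <nixpkgs> {}).lib;
  let tlsCertFile = "/var/lib/kubernetes/apiserver.pem"; in  # assumed path
  concatStringsSep " " [
    "--secure-port=6443"
    (optionalString (tlsCertFile != null) "--tls-cert-file=${tlsCertFile}")
  ]
  # evaluates to "--secure-port=6443 --tls-cert-file=/var/lib/kubernetes/apiserver.pem"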
@ -0,0 +1,162 @@
{ config, lib, pkgs, ... }:

with lib;

let
  top = config.services.kubernetes;
  cfg = top.controllerManager;
in
{
  ###### interface
  options.services.kubernetes.controllerManager = with lib.types; {

    allocateNodeCIDRs = mkOption {
      description = "Whether to automatically allocate CIDR ranges for cluster nodes.";
      default = true;
      type = bool;
    };

    bindAddress = mkOption {
      description = "Kubernetes controller manager listening address.";
      default = "127.0.0.1";
      type = str;
    };

    clusterCidr = mkOption {
      description = "Kubernetes CIDR Range for Pods in cluster.";
      default = top.clusterCidr;
      type = str;
    };

    enable = mkEnableOption "Kubernetes controller manager";

    extraOpts = mkOption {
      description = "Kubernetes controller manager extra command line options.";
      default = "";
      type = str;
    };

    featureGates = mkOption {
      description = "List of feature gates.";
      default = top.featureGates;
      type = listOf str;
    };

    insecurePort = mkOption {
      description = "Kubernetes controller manager insecure listening port.";
      default = 0;
      type = int;
    };

    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";

    leaderElect = mkOption {
      description = "Whether to start leader election before executing main loop.";
      type = bool;
      default = true;
    };

    rootCaFile = mkOption {
      description = ''
        Kubernetes controller manager certificate authority file included in
        service account's token secret.
      '';
      default = top.caFile;
      type = nullOr path;
    };

    securePort = mkOption {
      description = "Kubernetes controller manager secure listening port.";
      default = 10252;
      type = int;
    };

    serviceAccountKeyFile = mkOption {
      description = ''
        Kubernetes controller manager PEM-encoded private RSA key file used to
        sign service account tokens.
      '';
      default = null;
      type = nullOr path;
    };

    tlsCertFile = mkOption {
      description = "Kubernetes controller-manager certificate file.";
      default = null;
      type = nullOr path;
    };

    tlsKeyFile = mkOption {
      description = "Kubernetes controller-manager private key file.";
      default = null;
      type = nullOr path;
    };

    verbosity = mkOption {
      description = ''
        Optional glog verbosity level for logging statements. See
        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
      '';
      default = null;
      type = nullOr int;
    };

  };

  ###### implementation
  config = mkIf cfg.enable {
    systemd.services.kube-controller-manager = {
      description = "Kubernetes Controller Manager Service";
      wantedBy = [ "kubernetes.target" ];
      after = [ "kube-apiserver.service" ];
      serviceConfig = {
        RestartSec = "30s";
        Restart = "on-failure";
        Slice = "kubernetes.slice";
        ExecStart = ''${top.package}/bin/kube-controller-manager \
          --allocate-node-cidrs=${boolToString cfg.allocateNodeCIDRs} \
          --bind-address=${cfg.bindAddress} \
          ${optionalString (cfg.clusterCidr != null)
            "--cluster-cidr=${cfg.clusterCidr}"} \
          ${optionalString (cfg.featureGates != [])
            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
          --leader-elect=${boolToString cfg.leaderElect} \
          ${optionalString (cfg.rootCaFile != null)
            "--root-ca-file=${cfg.rootCaFile}"} \
          --port=${toString cfg.insecurePort} \
          --secure-port=${toString cfg.securePort} \
          ${optionalString (cfg.serviceAccountKeyFile != null)
            "--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
          ${optionalString (cfg.tlsCertFile != null)
            "--tls-cert-file=${cfg.tlsCertFile}"} \
          ${optionalString (cfg.tlsKeyFile != null)
            "--tls-private-key-file=${cfg.tlsKeyFile}"} \
          ${optionalString (elem "RBAC" top.apiserver.authorizationMode)
            "--use-service-account-credentials"} \
          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
          ${cfg.extraOpts}
        '';
        WorkingDirectory = top.dataDir;
        User = "kubernetes";
        Group = "kubernetes";
      };
      path = top.path;
    };

    services.kubernetes.pki.certs = with top.lib; {
      controllerManager = mkCert {
        name = "kube-controller-manager";
        CN = "kube-controller-manager";
        action = "systemctl restart kube-controller-manager.service";
      };
      controllerManagerClient = mkCert {
        name = "kube-controller-manager-client";
        CN = "system:kube-controller-manager";
        action = "systemctl restart kube-controller-manager.service";
      };
    };

    services.kubernetes.controllerManager.kubeconfig.server = mkDefault top.apiserverAddress;
  };
}
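For orientation, a minimal sketch of enabling the module above from a host configuration; the option names come from this file, the key path is a hypothetical value:

  {
    services.kubernetes.controllerManager = {
      enable = true;
      serviceAccountKeyFile = "/var/lib/secrets/service-account-key.pem";  # assumed path
    };
  }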
File diff suppressed because it is too large
@ -0,0 +1,134 @@
{ config, lib, pkgs, ... }:

with lib;

let
  top = config.services.kubernetes;
  cfg = top.flannel;

  # we want flannel to use kubernetes itself as configuration backend, not direct etcd
  storageBackend = "kubernetes";

  # needed for flannel to pass options to docker
  mkDockerOpts = pkgs.runCommand "mk-docker-opts" {
    buildInputs = [ pkgs.makeWrapper ];
  } ''
    mkdir -p $out
    cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh

    # bashInteractive needed for `compgen`
    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
  '';
in
{
  ###### interface
  options.services.kubernetes.flannel = {
    enable = mkEnableOption "flannel networking";
  };

  ###### implementation
  config = mkIf cfg.enable {
    services.flannel = {

      enable = mkDefault true;
      network = mkDefault top.clusterCidr;
      inherit storageBackend;
      nodeName = config.services.kubernetes.kubelet.hostname;
    };

    services.kubernetes.kubelet = {
      networkPlugin = mkDefault "cni";
      cni.config = mkDefault [{
        name = "mynet";
        type = "flannel";
        delegate = {
          isDefaultGateway = true;
          bridge = "docker0";
        };
      }];
    };

    systemd.services."mk-docker-opts" = {
      description = "Pre-Docker Actions";
      path = with pkgs; [ gawk gnugrep ];
      script = ''
        ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
        systemctl restart docker
      '';
      serviceConfig.Type = "oneshot";
    };

    systemd.paths."flannel-subnet-env" = {
      wantedBy = [ "flannel.service" ];
      pathConfig = {
        PathModified = "/run/flannel/subnet.env";
        Unit = "mk-docker-opts.service";
      };
    };

    systemd.services.docker = {
      environment.DOCKER_OPTS = "-b none";
      serviceConfig.EnvironmentFile = "-/run/flannel/docker";
    };

    # read environment variables generated by mk-docker-opts
    virtualisation.docker.extraOptions = "$DOCKER_OPTS";

    networking = {
      firewall.allowedUDPPorts = [
        8285  # flannel udp
        8472  # flannel vxlan
      ];
      dhcpcd.denyInterfaces = [ "docker*" "flannel*" ];
    };

    services.kubernetes.pki.certs = {
      flannelClient = top.lib.mkCert {
        name = "flannel-client";
        CN = "flannel-client";
        action = "systemctl restart flannel.service";
      };
    };

    # give flannel some kubernetes rbac permissions if applicable
    services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {

      flannel-cr = {
        apiVersion = "rbac.authorization.k8s.io/v1beta1";
        kind = "ClusterRole";
        metadata = { name = "flannel"; };
        rules = [{
          apiGroups = [ "" ];
          resources = [ "pods" ];
          verbs = [ "get" ];
        }
        {
          apiGroups = [ "" ];
          resources = [ "nodes" ];
          verbs = [ "list" "watch" ];
        }
        {
          apiGroups = [ "" ];
          resources = [ "nodes/status" ];
          verbs = [ "patch" ];
        }];
      };

      flannel-crb = {
        apiVersion = "rbac.authorization.k8s.io/v1beta1";
        kind = "ClusterRoleBinding";
        metadata = { name = "flannel"; };
        roleRef = {
          apiGroup = "rbac.authorization.k8s.io";
          kind = "ClusterRole";
          name = "flannel";
        };
        subjects = [{
          kind = "User";
          name = "flannel-client";
        }];
      };

    };
  };
}
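A sketch of switching the flannel module on; it assumes services.kubernetes.clusterCidr is set elsewhere in the cluster configuration, as the module expects:

  {
    services.kubernetes.flannel.enable = true;
    # services.flannel.network then defaults to services.kubernetes.clusterCidr,
    # and the kubelet is switched to the cni network plugin by the mkDefaults above.
  }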
@ -0,0 +1,358 @@
{ config, lib, pkgs, ... }:

with lib;

let
  top = config.services.kubernetes;
  cfg = top.kubelet;

  cniConfig =
    if cfg.cni.config != [] && !(isNull cfg.cni.configDir) then
      throw "Verbatim CNI-config and CNI configDir cannot both be set."
    else if !(isNull cfg.cni.configDir) then
      cfg.cni.configDir
    else
      (pkgs.buildEnv {
        name = "kubernetes-cni-config";
        paths = imap (i: entry:
          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
        ) cfg.cni.config;
      });

  infraContainer = pkgs.dockerTools.buildImage {
    name = "pause";
    tag = "latest";
    contents = top.package.pause;
    config.Cmd = "/bin/pause";
  };

  kubeconfig = top.lib.mkKubeConfig "kubelet" cfg.kubeconfig;

  manifests = pkgs.buildEnv {
    name = "kubernetes-manifests";
    paths = mapAttrsToList (name: manifest:
      pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
    ) cfg.manifests;
  };

  manifestPath = "kubernetes/manifests";

  taintOptions = with lib.types; { name, ... }: {
    options = {
      key = mkOption {
        description = "Key of taint.";
        default = name;
        type = str;
      };
      value = mkOption {
        description = "Value of taint.";
        type = str;
      };
      effect = mkOption {
        description = "Effect of taint.";
        example = "NoSchedule";
        type = enum ["NoSchedule" "PreferNoSchedule" "NoExecute"];
      };
    };
  };

  taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (mapAttrsToList (n: v: v) cfg.taints);
in
{
  ###### interface
  options.services.kubernetes.kubelet = with lib.types; {

    address = mkOption {
      description = "Kubernetes kubelet info server listening address.";
      default = "0.0.0.0";
      type = str;
    };

    allowPrivileged = mkOption {
      description = "Whether to allow Kubernetes containers to request privileged mode.";
      default = false;
      type = bool;
    };

    clusterDns = mkOption {
      description = "Use alternative DNS.";
      default = "10.1.0.1";
      type = str;
    };

    clusterDomain = mkOption {
      description = "Use alternative domain.";
      default = config.services.kubernetes.addons.dns.clusterDomain;
      type = str;
    };

    clientCaFile = mkOption {
      description = "Kubernetes apiserver CA file for client authentication.";
      default = top.caFile;
      type = nullOr path;
    };

    cni = {
      packages = mkOption {
        description = "List of network plugin packages to install.";
        type = listOf package;
        default = [];
      };

      config = mkOption {
        description = "Kubernetes CNI configuration.";
        type = listOf attrs;
        default = [];
        example = literalExample ''
          [{
            "cniVersion": "0.2.0",
            "name": "mynet",
            "type": "bridge",
            "bridge": "cni0",
            "isGateway": true,
            "ipMasq": true,
            "ipam": {
              "type": "host-local",
              "subnet": "10.22.0.0/16",
              "routes": [
                { "dst": "0.0.0.0/0" }
              ]
            }
          } {
            "cniVersion": "0.2.0",
            "type": "loopback"
          }]
        '';
      };

      configDir = mkOption {
        description = "Path to Kubernetes CNI configuration directory.";
        type = nullOr path;
        default = null;
      };
    };

    enable = mkEnableOption "Kubernetes kubelet";

    extraOpts = mkOption {
      description = "Kubernetes kubelet extra command line options.";
      default = "";
      type = str;
    };

    featureGates = mkOption {
      description = "List of feature gates.";
      default = top.featureGates;
      type = listOf str;
    };

    healthz = {
      bind = mkOption {
        description = "Kubernetes kubelet healthz listening address.";
        default = "127.0.0.1";
        type = str;
      };

      port = mkOption {
        description = "Kubernetes kubelet healthz port.";
        default = 10248;
        type = int;
      };
    };

    hostname = mkOption {
      description = "Kubernetes kubelet hostname override.";
      default = config.networking.hostName;
      type = str;
    };

    kubeconfig = top.lib.mkKubeConfigOptions "Kubelet";

    manifests = mkOption {
      description = "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry).";
      type = attrsOf attrs;
      default = {};
    };

    networkPlugin = mkOption {
      description = "Network plugin to use by Kubernetes.";
      type = nullOr (enum ["cni" "kubenet"]);
      default = "kubenet";
    };

    nodeIp = mkOption {
      description = "IP address of the node. If set, kubelet will use this IP address for the node.";
      default = null;
      type = nullOr str;
    };

    registerNode = mkOption {
      description = "Whether to auto register kubelet with API server.";
      default = true;
      type = bool;
    };

    port = mkOption {
      description = "Kubernetes kubelet info server listening port.";
      default = 10250;
      type = int;
    };

    seedDockerImages = mkOption {
      description = "List of docker images to preload on system.";
      default = [];
      type = listOf package;
    };

    taints = mkOption {
      description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).";
      default = {};
      type = attrsOf (submodule [ taintOptions ]);
    };

    tlsCertFile = mkOption {
      description = "File containing x509 Certificate for HTTPS.";
      default = null;
      type = nullOr path;
    };

    tlsKeyFile = mkOption {
      description = "File containing x509 private key matching tlsCertFile.";
      default = null;
      type = nullOr path;
    };

    unschedulable = mkOption {
      description = "Whether to set node taint to unschedulable=true, as is the case for a node that has only the master role.";
      default = false;
      type = bool;
    };

    verbosity = mkOption {
      description = ''
        Optional glog verbosity level for logging statements. See
        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
      '';
      default = null;
      type = nullOr int;
    };

  };

  ###### implementation
  config = mkMerge [
    (mkIf cfg.enable {
      services.kubernetes.kubelet.seedDockerImages = [infraContainer];

      systemd.services.kubelet = {
        description = "Kubernetes Kubelet Service";
        wantedBy = [ "kubernetes.target" ];
        after = [ "network.target" "docker.service" "kube-apiserver.service" ];
        path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
        preStart = ''
          ${concatMapStrings (img: ''
            echo "Seeding docker image: ${img}"
            docker load <${img}
          '') cfg.seedDockerImages}

          rm /opt/cni/bin/* || true
          ${concatMapStrings (package: ''
            echo "Linking cni package: ${package}"
            ln -fs ${package}/bin/* /opt/cni/bin
          '') cfg.cni.packages}
        '';
        serviceConfig = {
          Slice = "kubernetes.slice";
          CPUAccounting = true;
          MemoryAccounting = true;
          Restart = "on-failure";
          RestartSec = "1000ms";
          ExecStart = ''${top.package}/bin/kubelet \
            --address=${cfg.address} \
            --allow-privileged=${boolToString cfg.allowPrivileged} \
            --authentication-token-webhook \
            --authentication-token-webhook-cache-ttl="10s" \
            --authorization-mode=Webhook \
            ${optionalString (cfg.clientCaFile != null)
              "--client-ca-file=${cfg.clientCaFile}"} \
            ${optionalString (cfg.clusterDns != "")
              "--cluster-dns=${cfg.clusterDns}"} \
            ${optionalString (cfg.clusterDomain != "")
              "--cluster-domain=${cfg.clusterDomain}"} \
            --cni-conf-dir=${cniConfig} \
            ${optionalString (cfg.featureGates != [])
              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
            --hairpin-mode=hairpin-veth \
            --healthz-bind-address=${cfg.healthz.bind} \
            --healthz-port=${toString cfg.healthz.port} \
            --hostname-override=${cfg.hostname} \
            --kubeconfig=${kubeconfig} \
            ${optionalString (cfg.networkPlugin != null)
              "--network-plugin=${cfg.networkPlugin}"} \
            ${optionalString (cfg.nodeIp != null)
              "--node-ip=${cfg.nodeIp}"} \
            --pod-infra-container-image=pause \
            ${optionalString (cfg.manifests != {})
              "--pod-manifest-path=/etc/${manifestPath}"} \
            --port=${toString cfg.port} \
            --register-node=${boolToString cfg.registerNode} \
            ${optionalString (taints != "")
              "--register-with-taints=${taints}"} \
            --root-dir=${top.dataDir} \
            ${optionalString (cfg.tlsCertFile != null)
              "--tls-cert-file=${cfg.tlsCertFile}"} \
            ${optionalString (cfg.tlsKeyFile != null)
              "--tls-private-key-file=${cfg.tlsKeyFile}"} \
            ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
            ${cfg.extraOpts}
          '';
          WorkingDirectory = top.dataDir;
        };
      };

      # Always include cni plugins
      services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];

      boot.kernelModules = ["br_netfilter"];

      services.kubernetes.kubelet.hostname = with config.networking;
        mkDefault (hostName + optionalString (!isNull domain) ".${domain}");

      services.kubernetes.pki.certs = with top.lib; {
        kubelet = mkCert {
          name = "kubelet";
          CN = top.kubelet.hostname;
          action = "systemctl restart kubelet.service";
        };
        kubeletClient = mkCert {
          name = "kubelet-client";
          CN = "system:node:${top.kubelet.hostname}";
          fields = {
            O = "system:nodes";
          };
          action = "systemctl restart kubelet.service";
        };
      };

      services.kubernetes.kubelet.kubeconfig.server = mkDefault top.apiserverAddress;
    })

    (mkIf (cfg.enable && cfg.manifests != {}) {
      environment.etc = mapAttrs' (name: manifest:
        nameValuePair "${manifestPath}/${name}.json" {
          text = builtins.toJSON manifest;
          mode = "0755";
        }
      ) cfg.manifests;
    })

    (mkIf (cfg.unschedulable && cfg.enable) {
      services.kubernetes.kubelet.taints.unschedulable = {
        value = "true";
        effect = "NoSchedule";
      };
    })

  ];
}
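A sketch of the taints submodule declared above; the taint key defaults to the attribute name, so this registers the kubelet with master=true:NoSchedule (names and values are illustrative):

  {
    services.kubernetes.kubelet = {
      enable = true;
      taints.master = {        # key defaults to "master"
        value = "true";
        effect = "NoSchedule";
      };
    };
  }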
@ -0,0 +1,388 @@
{ config, lib, pkgs, ... }:

with lib;

let
  top = config.services.kubernetes;
  cfg = top.pki;

  csrCA = pkgs.writeText "kube-pki-cacert-csr.json" (builtins.toJSON {
    key = {
      algo = "rsa";
      size = 2048;
    };
    names = singleton cfg.caSpec;
  });

  csrCfssl = pkgs.writeText "kube-pki-cfssl-csr.json" (builtins.toJSON {
    key = {
      algo = "rsa";
      size = 2048;
    };
    CN = top.masterAddress;
  });

  cfsslAPITokenBaseName = "apitoken.secret";
  cfsslAPITokenPath = "${config.services.cfssl.dataDir}/${cfsslAPITokenBaseName}";
  certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
  cfsslAPITokenLength = 32;

  clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
    top.lib.mkKubeConfig "cluster-admin" {
      server = top.apiserverAddress;
      certFile = cert;
      keyFile = key;
    };

  remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
in
{
  ###### interface
  options.services.kubernetes.pki = with lib.types; {

    enable = mkEnableOption "easyCert issuer service";

    certs = mkOption {
      description = "List of certificate specs to feed to cert generator.";
      default = {};
      type = attrs;
    };

    genCfsslCACert = mkOption {
      description = ''
        Whether to automatically generate cfssl CA certificate and key,
        if they don't exist.
      '';
      default = true;
      type = bool;
    };

    genCfsslAPICerts = mkOption {
      description = ''
        Whether to automatically generate cfssl API webserver TLS cert and key,
        if they don't exist.
      '';
      default = true;
      type = bool;
    };

    genCfsslAPIToken = mkOption {
      description = ''
        Whether to automatically generate the cfssl API-token secret,
        if it doesn't exist.
      '';
      default = true;
      type = bool;
    };

    pkiTrustOnBootstrap = mkOption {
      description = "Whether to always trust remote cfssl server upon initial PKI bootstrap.";
      default = true;
      type = bool;
    };

    caCertPathPrefix = mkOption {
      description = ''
        Path prefix for the CA certificate to be used for cfssl signing.
        Suffixes ".pem" and "-key.pem" will be automatically appended for
        the public and private keys respectively.
      '';
      default = "${config.services.cfssl.dataDir}/ca";
      type = str;
    };

    caSpec = mkOption {
      description = "Certificate specification for the auto-generated CAcert.";
      default = {
        CN = "kubernetes-cluster-ca";
        O = "NixOS";
        OU = "services.kubernetes.pki.caSpec";
        L = "auto-generated";
      };
      type = attrs;
    };

    etcClusterAdminKubeconfig = mkOption {
      description = ''
        Symlink a kubeconfig with cluster-admin privileges to environment path
        (/etc/<path>).
      '';
      default = null;
      type = nullOr str;
    };

  };

  ###### implementation
  config = mkIf cfg.enable
  (let
    cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
    cfsslCert = "${cfsslCertPathPrefix}.pem";
    cfsslKey = "${cfsslCertPathPrefix}-key.pem";
  in
  {

    services.cfssl = mkIf (top.apiserver.enable) {
      enable = true;
      address = "0.0.0.0";
      tlsCert = cfsslCert;
      tlsKey = cfsslKey;
      configFile = toString (pkgs.writeText "cfssl-config.json" (builtins.toJSON {
        signing = {
          profiles = {
            default = {
              usages = ["digital signature"];
              auth_key = "default";
              expiry = "720h";
            };
          };
        };
        auth_keys = {
          default = {
            type = "standard";
            key = "file:${cfsslAPITokenPath}";
          };
        };
      }));
    };

    systemd.services.cfssl.preStart = with pkgs; with config.services.cfssl; mkIf (top.apiserver.enable)
      (concatStringsSep "\n" [
        "set -e"
        (optionalString cfg.genCfsslCACert ''
          if [ ! -f "${cfg.caCertPathPrefix}.pem" ]; then
            ${cfssl}/bin/cfssl genkey -initca ${csrCA} | \
              ${cfssl}/bin/cfssljson -bare ${cfg.caCertPathPrefix}
          fi
        '')
        (optionalString cfg.genCfsslAPICerts ''
          if [ ! -f "${dataDir}/cfssl.pem" ]; then
            ${cfssl}/bin/cfssl gencert -ca "${cfg.caCertPathPrefix}.pem" -ca-key "${cfg.caCertPathPrefix}-key.pem" ${csrCfssl} | \
              ${cfssl}/bin/cfssljson -bare ${cfsslCertPathPrefix}
          fi
        '')
        (optionalString cfg.genCfsslAPIToken ''
          if [ ! -f "${cfsslAPITokenPath}" ]; then
            head -c ${toString (cfsslAPITokenLength / 2)} /dev/urandom | od -An -t x | tr -d ' ' >"${cfsslAPITokenPath}"
          fi
          chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
        '')]);

    systemd.services.kube-certmgr-bootstrap = {
      description = "Kubernetes certmgr bootstrapper";
      wantedBy = [ "certmgr.service" ];
      after = [ "cfssl.target" ];
      script = concatStringsSep "\n" [''
        set -e

        # If there's a cfssl (cert issuer) running locally, then don't rely on the user to
        # manually paste it in place. Just symlink.
        # Otherwise, create the target file, ready for users to insert the token.

        if [ -f "${cfsslAPITokenPath}" ]; then
          ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}"
        else
          touch "${certmgrAPITokenPath}" && chmod 600 "${certmgrAPITokenPath}"
        fi
      ''
      (optionalString (cfg.pkiTrustOnBootstrap) ''
        if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
          ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
            ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
        fi
      '')
      ];
      serviceConfig = {
        RestartSec = "10s";
        Restart = "on-failure";
      };
    };

    services.certmgr = {
      enable = true;
      package = pkgs.certmgr-selfsigned;
      svcManager = "command";
      specs =
        let
          mkSpec = _: cert: {
            inherit (cert) action;
            authority = {
              inherit remote;
              file.path = cert.caCert;
              root_ca = cert.caCert;
              profile = "default";
              auth_key_file = certmgrAPITokenPath;
            };
            certificate = {
              path = cert.cert;
            };
            private_key = cert.privateKeyOptions;
            request = {
              inherit (cert) CN hosts;
              key = {
                algo = "rsa";
                size = 2048;
              };
              names = [ cert.fields ];
            };
          };
        in
          mapAttrs mkSpec cfg.certs;
    };

    #TODO: Get rid of kube-addon-manager in the future for the following reasons
    # - it is basically just a shell script wrapped around kubectl
    # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
    # - it is designed to be used with k8s system components only
    # - it would be better with a more Nix-oriented way of managing addons
    systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
      environment.KUBECONFIG = with cfg.certs.addonManager;
        top.lib.mkKubeConfig "addon-manager" {
          server = top.apiserverAddress;
          certFile = cert;
          keyFile = key;
        };
      }

      (optionalAttrs (top.addonManager.bootstrapAddons != {}) {
        serviceConfig.PermissionsStartOnly = true;
        preStart = with pkgs;
          let
            files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
              top.addonManager.bootstrapAddons;
          in
          ''
            export KUBECONFIG=${clusterAdminKubeconfig}
            ${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
          '';
      })]);

    environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
      clusterAdminKubeconfig;

    environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
    (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
      set -e
      exec 1>&2

      if [ $# -gt 0 ]; then
        echo "Usage: $(basename $0)"
        echo ""
        echo "No args. Apitoken must be provided on stdin."
        echo "To get the apitoken, execute: 'sudo cat ${certmgrAPITokenPath}' on the master node."
        exit 1
      fi

      if [ $(id -u) != 0 ]; then
        echo "Run as root please."
        exit 1
      fi

      read -r token
      if [ ''${#token} != ${toString cfsslAPITokenLength} ]; then
        echo "Token must be of length ${toString cfsslAPITokenLength}."
        exit 1
      fi

      echo $token > ${certmgrAPITokenPath}
      chmod 600 ${certmgrAPITokenPath}

      echo "Restarting certmgr..." >&1
      systemctl restart certmgr

      echo "Waiting for certs to appear..." >&1

      ${optionalString top.kubelet.enable ''
        while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
        echo "Restarting kubelet..." >&1
        systemctl restart kubelet
      ''}

      ${optionalString top.proxy.enable ''
        while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
        echo "Restarting kube-proxy..." >&1
        systemctl restart kube-proxy
      ''}

      ${optionalString top.flannel.enable ''
        while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
        echo "Restarting flannel..." >&1
        systemctl restart flannel
      ''}

      echo "Node joined successfully"
    '')];

    # isolate etcd on loopback at the master node
    # easyCerts doesn't support multimaster clusters anyway atm.
    services.etcd = with cfg.certs.etcd; {
      listenClientUrls = ["https://127.0.0.1:2379"];
      listenPeerUrls = ["https://127.0.0.1:2380"];
      advertiseClientUrls = ["https://etcd.local:2379"];
      initialCluster = ["${top.masterAddress}=https://etcd.local:2380"];
      initialAdvertisePeerUrls = ["https://etcd.local:2380"];
      certFile = mkDefault cert;
      keyFile = mkDefault key;
      trustedCaFile = mkDefault caCert;
    };
    networking.extraHosts = mkIf (config.services.etcd.enable) ''
      127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
    '';

    services.flannel = with cfg.certs.flannelClient; {
      kubeconfig = top.lib.mkKubeConfig "flannel" {
        server = top.apiserverAddress;
        certFile = cert;
        keyFile = key;
      };
    };

    services.kubernetes = {

      apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
        etcd = with cfg.certs.apiserverEtcdClient; {
          servers = ["https://etcd.local:2379"];
          certFile = mkDefault cert;
          keyFile = mkDefault key;
          caFile = mkDefault caCert;
        };
        clientCaFile = mkDefault caCert;
        tlsCertFile = mkDefault cert;
        tlsKeyFile = mkDefault key;
        serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.cert;
        kubeletClientCaFile = mkDefault caCert;
        kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
        kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
      });
      controllerManager = mkIf top.controllerManager.enable {
        serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
        rootCaFile = cfg.certs.controllerManagerClient.caCert;
        kubeconfig = with cfg.certs.controllerManagerClient; {
          certFile = mkDefault cert;
          keyFile = mkDefault key;
        };
      };
      scheduler = mkIf top.scheduler.enable {
        kubeconfig = with cfg.certs.schedulerClient; {
          certFile = mkDefault cert;
          keyFile = mkDefault key;
        };
      };
      kubelet = mkIf top.kubelet.enable {
        clientCaFile = mkDefault cfg.certs.kubelet.caCert;
        tlsCertFile = mkDefault cfg.certs.kubelet.cert;
        tlsKeyFile = mkDefault cfg.certs.kubelet.key;
        kubeconfig = with cfg.certs.kubeletClient; {
          certFile = mkDefault cert;
          keyFile = mkDefault key;
        };
      };
      proxy = mkIf top.proxy.enable {
        kubeconfig = with cfg.certs.kubeProxyClient; {
          certFile = mkDefault cert;
          keyFile = mkDefault key;
        };
      };
    };
  });
}
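A sketch of enabling the PKI module above on a master node; only options declared in this file are used, and the kubeconfig path is a hypothetical /etc-relative value:

  {
    services.kubernetes.pki = {
      enable = true;
      etcClusterAdminKubeconfig = "kubernetes/cluster-admin.kubeconfig";  # assumed path
    };
  }

Joining nodes would then pipe the master's API token (printed by 'sudo cat' on the certmgrAPITokenPath above) into the nixos-kubernetes-node-join script this module installs.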
@ -0,0 +1,82 @@
{ config, lib, pkgs, ... }:

with lib;

let
  top = config.services.kubernetes;
  cfg = top.proxy;
in
{

  ###### interface
  options.services.kubernetes.proxy = with lib.types; {

    bindAddress = mkOption {
      description = "Kubernetes proxy listening address.";
      default = "0.0.0.0";
      type = str;
    };

    enable = mkEnableOption "Kubernetes proxy";

    extraOpts = mkOption {
      description = "Kubernetes proxy extra command line options.";
      default = "";
      type = str;
    };

    featureGates = mkOption {
      description = "List of feature gates.";
      default = top.featureGates;
      type = listOf str;
    };

    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes proxy";

    verbosity = mkOption {
      description = ''
        Optional glog verbosity level for logging statements. See
        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
      '';
      default = null;
      type = nullOr int;
    };

  };

  ###### implementation
  config = mkIf cfg.enable {
    systemd.services.kube-proxy = {
      description = "Kubernetes Proxy Service";
      wantedBy = [ "kubernetes.target" ];
      after = [ "kube-apiserver.service" ];
      path = with pkgs; [ iptables conntrack_tools ];
      serviceConfig = {
        Slice = "kubernetes.slice";
        ExecStart = ''${top.package}/bin/kube-proxy \
          --bind-address=${cfg.bindAddress} \
          ${optionalString (top.clusterCidr != null)
            "--cluster-cidr=${top.clusterCidr}"} \
          ${optionalString (cfg.featureGates != [])
            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
          --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
          ${cfg.extraOpts}
        '';
        WorkingDirectory = top.dataDir;
        Restart = "on-failure";
        RestartSec = 5;
      };
    };

    services.kubernetes.pki.certs = {
      kubeProxyClient = top.lib.mkCert {
        name = "kube-proxy-client";
        CN = "system:kube-proxy";
        action = "systemctl restart kube-proxy.service";
      };
    };

    services.kubernetes.proxy.kubeconfig.server = mkDefault top.apiserverAddress;
  };
}
@ -0,0 +1,94 @@
{ config, lib, pkgs, ... }:

with lib;

let
  top = config.services.kubernetes;
  cfg = top.scheduler;
in
{
  ###### interface
  options.services.kubernetes.scheduler = with lib.types; {

    address = mkOption {
      description = "Kubernetes scheduler listening address.";
      default = "127.0.0.1";
      type = str;
    };

    enable = mkEnableOption "Kubernetes scheduler";

    extraOpts = mkOption {
      description = "Kubernetes scheduler extra command line options.";
      default = "";
      type = str;
    };

    featureGates = mkOption {
      description = "List of feature gates.";
      default = top.featureGates;
      type = listOf str;
    };

    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes scheduler";

    leaderElect = mkOption {
      description = "Whether to start leader election before executing main loop.";
      type = bool;
      default = true;
    };

    port = mkOption {
      description = "Kubernetes scheduler listening port.";
      default = 10251;
      type = int;
    };

    verbosity = mkOption {
      description = ''
        Optional glog verbosity level for logging statements. See
        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
      '';
      default = null;
      type = nullOr int;
    };

  };

  ###### implementation
  config = mkIf cfg.enable {
    systemd.services.kube-scheduler = {
      description = "Kubernetes Scheduler Service";
      wantedBy = [ "kubernetes.target" ];
      after = [ "kube-apiserver.service" ];
      serviceConfig = {
        Slice = "kubernetes.slice";
        ExecStart = ''${top.package}/bin/kube-scheduler \
          --address=${cfg.address} \
          ${optionalString (cfg.featureGates != [])
            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
          --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
          --leader-elect=${boolToString cfg.leaderElect} \
          --port=${toString cfg.port} \
          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
          ${cfg.extraOpts}
        '';
        WorkingDirectory = top.dataDir;
        User = "kubernetes";
        Group = "kubernetes";
        Restart = "on-failure";
        RestartSec = 5;
      };
    };

    services.kubernetes.pki.certs = {
      schedulerClient = top.lib.mkCert {
        name = "kube-scheduler-client";
        CN = "system:kube-scheduler";
        action = "systemctl restart kube-scheduler.service";
      };
    };

    services.kubernetes.scheduler.kubeconfig.server = mkDefault top.apiserverAddress;
  };
}
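The proxy and scheduler modules above follow the same shape; a minimal sketch enabling both with the defaults they declare:

  {
    services.kubernetes.proxy.enable = true;
    services.kubernetes.scheduler.enable = true;
  }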
@ -24,7 +24,7 @@ let
        EOF
        chmod 755 $out/${name}
      '';
-  in pkgs.runCommand "buildkite-agent-hooks" {} ''
+  in pkgs.runCommand "buildkite-agent-hooks" { preferLocalBuild = true; } ''
     mkdir $out
     ${concatStringsSep "\n" (mapAttrsToList mkHookEntry (filterAttrs (n: v: v != null) cfg.hooks))}
   '';
@ -8,6 +8,7 @@ let
     if (cfg.configFile == null) then
       (pkgs.runCommand "config.toml" {
         buildInputs = [ pkgs.remarshal ];
+        preferLocalBuild = true;
       } ''
         remarshal -if json -of toml \
           < ${pkgs.writeText "config.json" (builtins.toJSON cfg.configOptions)} \
@ -18,7 +18,7 @@ let
     </configuration>
   '';

-  configDir = pkgs.runCommand "hbase-config-dir" {} ''
+  configDir = pkgs.runCommand "hbase-config-dir" { preferLocalBuild = true; } ''
     mkdir -p $out
     cp ${cfg.package}/conf/* $out/
     rm $out/hbase-site.xml
@ -98,6 +98,7 @@ let

   configFile = pkgs.runCommand "config.toml" {
     buildInputs = [ pkgs.remarshal ];
+    preferLocalBuild = true;
   } ''
     remarshal -if json -of toml \
       < ${pkgs.writeText "config.json" (builtins.toJSON configOptions)} \
@ -146,7 +146,7 @@ in
       chown -R "${cfg.user}:${cfg.group}" "${cfg.dataDir}"
     '';
     serviceConfig.ExecStart =
-      "${openldap.out}/libexec/slapd -d ${cfg.logLevel} " +
+      "${openldap.out}/libexec/slapd -d '${cfg.logLevel}' " +
       "-u '${cfg.user}' -g '${cfg.group}' " +
       "-h '${concatStringsSep " " cfg.urlList}' " +
       "${configOpts}";
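The recurring change in these last hunks is the same one-liner: trivial config-file derivations gain preferLocalBuild = true, so Nix builds them on the local machine rather than shipping them to a remote builder. A generic sketch of the pattern (name and contents are placeholders):

  pkgs.runCommand "example-config" { preferLocalBuild = true; } ''
    mkdir -p $out
    echo "key = value" > $out/example.conf
  ''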