Merge branch 'master' into fix/man-in-outputsToInstall

Matthew Bauer 2019-01-26 10:51:12 -05:00 committed by GitHub
commit 8babcc3d44
13823 changed files with 610678 additions and 340640 deletions


@ -13,8 +13,8 @@ charset = utf-8
# see https://nixos.org/nixpkgs/manual/#chap-conventions
# Match nix/ruby files, set indent to spaces with width of two
[*.{nix,rb}]
# Match nix/ruby/docbook files, set indent to spaces with width of two
[*.{nix,rb,xml}]
indent_style = space
indent_size = 2

.gitattributes

@ -0,0 +1,16 @@
**/deps.nix linguist-generated
**/node-packages.nix linguist-generated
pkgs/applications/editors/emacs-modes/*-generated.nix linguist-generated
pkgs/development/r-modules/*-packages.nix linguist-generated
pkgs/development/haskell-modules/hackage-packages.nix linguist-generated
pkgs/development/beam-modules/hex-packages.nix linguist-generated
doc/** linguist-documentation
doc/default.nix linguist-documentation=false
nixos/doc/** linguist-documentation
nixos/doc/default.nix linguist-documentation=false
nixos/modules/module-list.nix merge=union
# pkgs/top-level/all-packages.nix merge=union

.github/CODEOWNERS

@ -12,14 +12,19 @@
# Libraries
/lib @edolstra @nbp
/lib/systems @nbp @ericson2314
/lib/systems @nbp @ericson2314 @matthewbauer
/lib/generators.nix @edolstra @nbp @Profpatsch
/lib/debug.nix @edolstra @nbp @Profpatsch
# Nixpkgs Internals
/default.nix @nbp
/pkgs/top-level/default.nix @nbp @Ericson2314
/pkgs/top-level/impure.nix @nbp @Ericson2314
/pkgs/top-level/stage.nix @nbp @Ericson2314
/pkgs/stdenv
/pkgs/top-level/stage.nix @nbp @Ericson2314 @matthewbauer
/pkgs/top-level/splice.nix @Ericson2314 @matthewbauer
/pkgs/top-level/release-cross.nix @Ericson2314 @matthewbauer
/pkgs/stdenv/generic @Ericson2314 @matthewbauer
/pkgs/stdenv/cross @Ericson2314 @matthewbauer
/pkgs/build-support/cc-wrapper @Ericson2314 @orivej
/pkgs/build-support/bintools-wrapper @Ericson2314 @orivej
/pkgs/build-support/setup-hooks @Ericson2314
@ -42,31 +47,51 @@
/nixos/doc/manual/man-nixos-option.xml @nbp
/nixos/modules/installer/tools/nixos-option.sh @nbp
# NixOS modules
/nixos/modules @Infinisil
# Python-related code and docs
/maintainers/scripts/update-python-libraries @FRidh
/pkgs/top-level/python-packages.nix @FRidh
/pkgs/development/interpreters/python @FRidh
/pkgs/development/python-modules @FRidh
/doc/languages-frameworks/python.md @FRidh
/doc/languages-frameworks/python.section.md @FRidh
# Haskell
/pkgs/development/compilers/ghc @peti
/pkgs/development/haskell-modules @peti
/pkgs/development/haskell-modules/default.nix @peti
/pkgs/development/haskell-modules/generic-builder.nix @peti
/pkgs/development/haskell-modules/hoogle.nix @peti
/pkgs/development/compilers/ghc @peti @ryantm @basvandijk
/pkgs/development/haskell-modules @peti @ryantm @basvandijk
/pkgs/development/haskell-modules/default.nix @peti @ryantm @basvandijk
/pkgs/development/haskell-modules/generic-builder.nix @peti @ryantm @basvandijk
/pkgs/development/haskell-modules/hoogle.nix @peti @ryantm @basvandijk
# Perl
/pkgs/development/interpreters/perl @volth
/pkgs/top-level/perl-packages.nix @volth
/pkgs/development/perl-modules @volth
# R
/pkgs/applications/science/math/R @peti
/pkgs/development/r-modules @peti
# Ruby
/pkgs/development/interpreters/ruby @zimbatm
/pkgs/development/ruby-modules @zimbatm
/pkgs/development/interpreters/ruby @alyssais @zimbatm
/pkgs/development/ruby-modules @alyssais @zimbatm
# Rust
/pkgs/development/compilers/rust @Mic92 @LnL7
# Darwin-related
/pkgs/stdenv/darwin @NixOS/darwin-maintainers
/pkgs/os-specific/darwin @NixOS/darwin-maintainers
# C compilers
/pkgs/development/compilers/gcc @matthewbauer
/pkgs/development/compilers/llvm @matthewbauer
# Compatibility stuff
/pkgs/top-level/unix-tools.nix @matthewbauer
/pkgs/development/tools/xcbuild @matthewbauer
# Beam-related (Erlang, Elixir, LFE, etc)
/pkgs/development/beam-modules @gleber
/pkgs/development/interpreters/erlang @gleber
@ -83,7 +108,6 @@
/pkgs/applications/editors/eclipse @rycee
# https://github.com/NixOS/nixpkgs/issues/31401
/lib/maintainers.nix @ghost
/lib/licenses.nix @ghost
# Qt / KDE
@ -91,3 +115,19 @@
/pkgs/desktops/plasma-5 @ttuegel
/pkgs/development/libraries/kde-frameworks @ttuegel
/pkgs/development/libraries/qt-5 @ttuegel
# PostgreSQL and related stuff
/pkgs/servers/sql/postgresql @thoughtpolice
/nixos/modules/services/databases/postgresql.xml @thoughtpolice
/nixos/modules/services/databases/postgresql.nix @thoughtpolice
/nixos/tests/postgresql.nix @thoughtpolice
# Dhall
/pkgs/development/dhall-modules @Gabriel439 @Profpatsch
/pkgs/development/interpreters/dhall @Gabriel439 @Profpatsch
# Idris
/pkgs/development/idris-modules @Infinisil
# Bazel
/pkgs/development/tools/build-managers/bazel @mboes @Profpatsch


@ -20,6 +20,8 @@ under the terms of [COPYING](../COPYING), which is an MIT-like license.
(Motivation for change. Additional information.)
```
For consistency, there should not be a period at the end of the commit message's summary line (the first line of the commit message).
Examples:
* nginx: init at 2.0.1
@ -43,7 +45,7 @@ See the nixpkgs manual for more details on [standard meta-attributes](https://ni
## Writing good commit messages
In addition to writing properly formatted commit messages, it's important to include relevant information so other developers can later understand *why* a change was made. While this information usually can be found by digging code, mailing list archives, pull request discussions or upstream changes, it may require a lot of work.
In addition to writing properly formatted commit messages, it's important to include relevant information so other developers can later understand *why* a change was made. While this information usually can be found by digging code, mailing list/Discourse archives, pull request discussions or upstream changes, it may require a lot of work.
For package version upgrades and such a one-line commit message is usually sufficient.


@ -5,7 +5,7 @@
<!-- Please check what applies. Note that these are not hard requirements but merely serve as information for reviewers. -->
- [ ] Tested using sandboxing ([nix.useSandbox](http://nixos.org/nixos/manual/options.html#opt-nix.useSandbox) on NixOS, or option `build-use-sandbox` in [`nix.conf`](http://nixos.org/nix/manual/#sec-conf-file) on non-NixOS)
- [ ] Tested using sandboxing ([nix.useSandbox](http://nixos.org/nixos/manual/options.html#opt-nix.useSandbox) on NixOS, or option `sandbox` in [`nix.conf`](http://nixos.org/nix/manual/#sec-conf-file) on non-NixOS)
- Built on platform(s)
- [ ] NixOS
- [ ] macOS
@ -13,6 +13,8 @@
- [ ] Tested via one or more NixOS test(s) if existing and applicable for the change (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
- [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nox --run "nox-review wip"`
- [ ] Tested execution of all binary files (usually in `./result/bin/`)
- [ ] Determined the impact on package closure size (by running `nix path-info -S` before and after)
- [ ] Assured whether relevant documentation is up to date
- [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md).
---

.gitignore

@ -13,4 +13,5 @@ result-*
.DS_Store
/pkgs/development/libraries/qt-5/*/tmp/
/pkgs/desktops/kde-5/*/tmp/
/pkgs/desktops/kde-5/*/tmp/
/pkgs/development/mobile/androidenv/xml/*


@ -1 +1 @@
18.03
19.03


@ -18,12 +18,3 @@ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
======================================================================
Note: the license above does not apply to the packages built by the
Nix Packages collection, merely to the package descriptions (i.e., Nix
expressions, build scripts, etc.). It also might not apply to patches
included in Nixpkgs, which may be derivative works of the packages to
which they apply. The aforementioned artifacts are all covered by the
licenses of the respective packages.


@ -8,19 +8,19 @@ build daemon as so-called channels. To get channel information via git, add
[nixpkgs-channels](https://github.com/NixOS/nixpkgs-channels.git) as a remote:
```
% git remote add channels git://github.com/NixOS/nixpkgs-channels.git
% git remote add channels https://github.com/NixOS/nixpkgs-channels.git
```
For stability and maximum binary package support, it is recommended to maintain
custom changes on top of one of the channels, e.g. `nixos-17.09` for the latest
custom changes on top of one of the channels, e.g. `nixos-18.09` for the latest
release and `nixos-unstable` for the latest successful build of master:
```
% git remote update channels
% git rebase channels/nixos-17.09
% git rebase channels/nixos-18.09
```
For pull-requests, please rebase onto nixpkgs `master`.
For pull requests, please rebase onto nixpkgs `master`.
[NixOS](https://nixos.org/nixos/) Linux distribution source code is located inside
`nixos/` folder.
@ -31,11 +31,17 @@ For pull-requests, please rebase onto nixpkgs `master`.
* [Manual (NixOS)](https://nixos.org/nixos/manual/)
* [Community maintained wiki](https://nixos.wiki/)
* [Continuous package builds for unstable/master](https://hydra.nixos.org/jobset/nixos/trunk-combined)
* [Continuous package builds for 17.09 release](https://hydra.nixos.org/jobset/nixos/release-17.09)
* [Continuous package builds for 18.09 release](https://hydra.nixos.org/jobset/nixos/release-18.09)
* [Tests for unstable/master](https://hydra.nixos.org/job/nixos/trunk-combined/tested#tabs-constituents)
* [Tests for 17.09 release](https://hydra.nixos.org/job/nixos/release-17.09/tested#tabs-constituents)
* [Tests for 18.09 release](https://hydra.nixos.org/job/nixos/release-18.09/tested#tabs-constituents)
Communication:
* [Mailing list](https://groups.google.com/forum/#!forum/nix-devel)
* [Discourse Forum](https://discourse.nixos.org/)
* [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
Note: MIT license does not apply to the packages built by Nixpkgs, merely to
the package descriptions (Nix expressions, build scripts, and so on). It also
might not apply to patches included in Nixpkgs, which may be derivative works
of the packages to which they apply. The aforementioned artifacts are all
covered by the licenses of the respective packages.


@ -6,12 +6,21 @@ if ! builtins ? nixVersion || builtins.compareVersions requiredVersion builtins.
This version of Nixpkgs requires Nix >= ${requiredVersion}, please upgrade:
- If you are running NixOS, use `nixos-rebuild' to upgrade your system.
- If you are running NixOS, `nixos-rebuild' can be used to upgrade your system.
- Alternatively, with Nix > 2.0 `nix upgrade-nix' can be used to imperatively
upgrade Nix. You may use `nix-env --version' to check which version you have.
- If you installed Nix using the install script (https://nixos.org/nix/install),
it is safe to upgrade by running it again:
curl https://nixos.org/nix/install | sh
For more information, please see the NixOS release notes at
https://nixos.org/nixos/manual or locally at
${toString ./nixos/doc/manual/release-notes}.
If you need further help, see https://nixos.org/nixos/support.html
''
else

doc/.gitignore

@ -0,0 +1,7 @@
*.chapter.xml
*.section.xml
.version
out
manual-full.xml
highlightjs
functions/library/locations.xml

doc/Makefile

@ -0,0 +1,106 @@
MD_TARGETS=$(addsuffix .xml, $(basename $(wildcard ./*.md ./**/*.md)))
.PHONY: all
all: validate format out/html/index.html out/epub/manual.epub
.PHONY: debug
debug:
nix-shell --run "xmloscopy --docbook5 ./manual.xml ./manual-full.xml"
.PHONY: format
format:
find . -iname '*.xml' -type f | while read f; do \
echo $$f ;\
xmlformat --config-file "$$XMLFORMAT_CONFIG" -i $$f ;\
done
.PHONY: fix-misc-xml
fix-misc-xml:
find . -iname '*.xml' -type f \
-exec ../nixos/doc/varlistentry-fixer.rb {} ';'
.PHONY: clean
clean:
rm -f ${MD_TARGETS} .version manual-full.xml functions/library/locations.xml
rm -rf ./out/ ./highlightjs
.PHONY: validate
validate: manual-full.xml
jing "$$RNG" manual-full.xml
out/html/index.html: manual-full.xml style.css highlightjs
mkdir -p out/html
xsltproc ${xsltFlags} \
--nonet --xinclude \
--output $@ \
"$$XSL/docbook/xhtml/docbook.xsl" \
./manual-full.xml
mkdir -p out/html/highlightjs/
cp -r highlightjs out/html/
cp ./overrides.css out/html/
cp ./style.css out/html/style.css
mkdir -p out/html/images/callouts
cp "$$XSL/docbook/images/callouts/"*.svg out/html/images/callouts/
chmod u+w -R out/html/
out/epub/manual.epub: manual-full.xml
mkdir -p out/epub/scratch
xsltproc ${xsltFlags} --nonet \
--output out/epub/scratch/ \
"$$XSL/docbook/epub/docbook.xsl" \
./manual-full.xml
cp ./overrides.css out/epub/scratch/OEBPS
cp ./style.css out/epub/scratch/OEBPS
mkdir -p out/epub/scratch/OEBPS/images/callouts/
cp "$$XSL/docbook/images/callouts/"*.svg out/epub/scratch/OEBPS/images/callouts/
echo "application/epub+zip" > mimetype
zip -0Xq "out/epub/manual.epub" mimetype
rm mimetype
cd "out/epub/scratch/" && zip -Xr9D "../manual.epub" *
rm -rf "out/epub/scratch/"
highlightjs:
mkdir -p highlightjs
cp -r "$$HIGHLIGHTJS/highlight.pack.js" highlightjs/
cp -r "$$HIGHLIGHTJS/LICENSE" highlightjs/
cp -r "$$HIGHLIGHTJS/mono-blue.css" highlightjs/
cp -r "$$HIGHLIGHTJS/loader.js" highlightjs/
manual-full.xml: ${MD_TARGETS} .version functions/library/locations.xml *.xml **/*.xml **/**/*.xml
xmllint --nonet --xinclude --noxincludenode manual.xml --output manual-full.xml
.version:
nix-instantiate --eval \
-E '(import ../lib).version' > .version
functions/library/locations.xml:
nix-build ./lib-function-locations.nix \
--out-link ./functions/library/locations.xml
%.section.xml: %.section.md
pandoc $^ -w docbook+smart \
-f markdown+smart \
| sed -e 's|<ulink url=|<link xlink:href=|' \
-e 's|</ulink>|</link>|' \
-e 's|<sect. id=|<section xml:id=|' \
-e 's|</sect[0-9]>|</section>|' \
-e '1s| id=| xml:id=|' \
-e '1s|\(<[^ ]* \)|\1xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" |' \
| cat > $@
%.chapter.xml: %.chapter.md
pandoc $^ -w docbook+smart \
--top-level-division=chapter \
-f markdown+smart \
| sed -e 's|<ulink url=|<link xlink:href=|' \
-e 's|</ulink>|</link>|' \
-e 's|<sect. id=|<section xml:id=|' \
-e 's|</sect[0-9]>|</section>|' \
-e '1s| id=| xml:id=|' \
-e '1s|\(<[^ ]* \)|\1|' \
| cat > $@

File diff suppressed because it is too large


@ -1,40 +1,51 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-packageconfig">
<title>Global configuration</title>
<para>Nix comes with certain defaults about what packages can and
cannot be installed, based on a package's metadata. By default, Nix
will prevent installation if any of the following criteria are
true:</para>
<itemizedlist>
<listitem><para>The package is thought to be broken, and has had
its <literal>meta.broken</literal> set to
<literal>true</literal>.</para></listitem>
<listitem><para>The package's <literal>meta.license</literal> is set
to a license which is considered to be unfree.</para></listitem>
<listitem><para>The package has known security vulnerabilities but
has not or can not be updated for some reason, and a list of issues
has been entered in to the package's
<literal>meta.knownVulnerabilities</literal>.</para></listitem>
</itemizedlist>
<para>Note that all this is checked during evaluation already,
and the check includes any package that is evaluated.
In particular, all build-time dependencies are checked.
<literal>nix-env -qa</literal> will (attempt to) hide any packages
that would be refused.
</para>
<para>Each of these criteria can be altered in the nixpkgs
configuration.</para>
<para>The nixpkgs configuration for a NixOS system is set in the
<literal>configuration.nix</literal>, as in the following example:
<title>Global configuration</title>
<para>
Nix comes with certain defaults about what packages can and cannot be
installed, based on a package's metadata. By default, Nix will prevent
installation if any of the following criteria are true:
</para>
<itemizedlist>
<listitem>
<para>
The package is thought to be broken, and has had its
<literal>meta.broken</literal> set to <literal>true</literal>.
</para>
</listitem>
<listitem>
<para>
The package isn't intended to run on the given system, as none of its
<literal>meta.platforms</literal> match the given system.
</para>
</listitem>
<listitem>
<para>
The package's <literal>meta.license</literal> is set to a license which is
considered to be unfree.
</para>
</listitem>
<listitem>
<para>
The package has known security vulnerabilities but has not or can not be
updated for some reason, and a list of issues has been entered in to the
package's <literal>meta.knownVulnerabilities</literal>.
</para>
</listitem>
</itemizedlist>
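<para>
  As a sketch (not a real package; the attribute values are only
  illustrative), all of the fields referred to above live in a derivation's
  <literal>meta</literal> attribute:
<programlisting>
{ stdenv }:

stdenv.mkDerivation {
  name = "example-1.0";
  # a trivial build so the sketch stays self-contained
  buildCommand = "touch $out";
  meta = {
    # any one of these can cause installation to be refused by default
    broken = true;
    platforms = [ "x86_64-linux" ];
    license = stdenv.lib.licenses.unfree;
    knownVulnerabilities = [ "hypothetical advisory text" ];
  };
}
</programlisting>
</para>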
<para>
Note that all this is checked during evaluation already, and the check
includes any package that is evaluated. In particular, all build-time
dependencies are checked. <literal>nix-env -qa</literal> will (attempt to)
hide any packages that would be refused.
</para>
<para>
Each of these criteria can be altered in the nixpkgs configuration.
</para>
<para>
The nixpkgs configuration for a NixOS system is set in the
<literal>configuration.nix</literal>, as in the following example:
<programlisting>
{
nixpkgs.config = {
@ -42,151 +53,202 @@ configuration.</para>
};
}
</programlisting>
However, this does not allow unfree software for individual users.
Their configurations are managed separately.</para>
<para>A user's of nixpkgs configuration is stored in a user-specific
configuration file located at
<filename>~/.config/nixpkgs/config.nix</filename>. For example:
However, this does not allow unfree software for individual users. Their
configurations are managed separately.
</para>
<para>
A user's nixpkgs configuration is stored in a user-specific configuration
file located at <filename>~/.config/nixpkgs/config.nix</filename>. For
example:
<programlisting>
{
allowUnfree = true;
}
</programlisting>
</para>
<para>Note that we are not able to test or build unfree software on Hydra
due to policy. Most unfree licenses prohibit us from either executing or
distributing the software.</para>
<section xml:id="sec-allow-broken">
</para>
<para>
Note that we are not able to test or build unfree software on Hydra due to
policy. Most unfree licenses prohibit us from either executing or
distributing the software.
</para>
<section xml:id="sec-allow-broken">
<title>Installing broken packages</title>
<para>There are two ways to try compiling a package which has been
marked as broken.</para>
<para>
There are two ways to try compiling a package which has been marked as
broken.
</para>
<itemizedlist>
<listitem><para>
For allowing the build of a broken package once, you can use an
environment variable for a single invocation of the nix tools:
<programlisting>$ export NIXPKGS_ALLOW_BROKEN=1</programlisting>
</para></listitem>
<listitem><para>
For permanently allowing broken packages to be built, you may
add <literal>allowBroken = true;</literal> to your user's
configuration file, like this:
<listitem>
<para>
For allowing the build of a broken package once, you can use an
environment variable for a single invocation of the nix tools:
<programlisting>$ export NIXPKGS_ALLOW_BROKEN=1</programlisting>
</para>
</listitem>
<listitem>
<para>
For permanently allowing broken packages to be built, you may add
<literal>allowBroken = true;</literal> to your user's configuration file,
like this:
<programlisting>
{
allowBroken = true;
}
</programlisting>
</para></listitem>
</para>
</listitem>
</itemizedlist>
</section>
</section>
<section xml:id="sec-allow-unsupported-system">
<title>Installing packages on unsupported systems</title>
<section xml:id="sec-allow-unfree">
<title>Installing unfree packages</title>
<para>There are several ways to tweak how Nix handles a package
which has been marked as unfree.</para>
<para>
There are also two ways to try compiling a package which has been marked as
unsupported for the given system.
</para>
<itemizedlist>
<listitem><para>
To temporarily allow all unfree packages, you can use an
environment variable for a single invocation of the nix tools:
<listitem>
<para>
For allowing the build of an unsupported package once, you can use an
environment variable for a single invocation of the nix tools:
<programlisting>$ export NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM=1</programlisting>
</para>
</listitem>
<listitem>
<para>
For permanently allowing unsupported packages to be built, you may add
<literal>allowUnsupportedSystem = true;</literal> to your user's
configuration file, like this:
<programlisting>
{
allowUnsupportedSystem = true;
}
</programlisting>
</para>
</listitem>
</itemizedlist>
<programlisting>$ export NIXPKGS_ALLOW_UNFREE=1</programlisting>
</para></listitem>
<para>
The difference between a package being unsupported on some system and
being broken is admittedly a bit fuzzy. If a program
<emphasis>ought</emphasis> to work on a certain platform, but doesn't, the
platform should be included in <literal>meta.platforms</literal>, but marked
as broken with e.g. <literal>meta.broken =
!hostPlatform.isWindows</literal>. Of course, this begs the question of what
"ought" means exactly. That is left to the package maintainer.
</para>
</section>
<section xml:id="sec-allow-unfree">
<title>Installing unfree packages</title>
<listitem><para>
It is possible to permanently allow individual unfree packages,
while still blocking unfree packages by default using the
<literal>allowUnfreePredicate</literal> configuration
option in the user configuration file.</para>
<para>
There are several ways to tweak how Nix handles a package which has been
marked as unfree.
</para>
<para>This option is a function which accepts a package as a
parameter, and returns a boolean. The following example
configuration accepts a package and always returns false:
<itemizedlist>
<listitem>
<para>
To temporarily allow all unfree packages, you can use an environment
variable for a single invocation of the nix tools:
<programlisting>$ export NIXPKGS_ALLOW_UNFREE=1</programlisting>
</para>
</listitem>
<listitem>
<para>
It is possible to permanently allow individual unfree packages, while
still blocking unfree packages by default using the
<literal>allowUnfreePredicate</literal> configuration option in the user
configuration file.
</para>
<para>
This option is a function which accepts a package as a parameter, and
returns a boolean. The following example configuration accepts a package
and always returns false:
<programlisting>
{
allowUnfreePredicate = (pkg: false);
}
</programlisting>
</para>
<para>A more useful example, the following configuration allows
only allows flash player and visual studio code:
</para>
<para>
For a more useful example, try the following. This configuration
only allows unfree packages named flash player and visual studio
code:
<programlisting>
{
allowUnfreePredicate = (pkg: elem (builtins.parseDrvName pkg.name).name [ "flashplayer" "vscode" ]);
allowUnfreePredicate = (pkg: builtins.elem
(builtins.parseDrvName pkg.name).name [
"flashplayer"
"vscode"
]);
}
</programlisting>
</para></listitem>
<listitem>
<para>It is also possible to whitelist and blacklist licenses
that are specifically acceptable or not acceptable, using
<literal>whitelistedLicenses</literal> and
<literal>blacklistedLicenses</literal>, respectively.
</para>
<para>The following example configuration whitelists the
licenses <literal>amd</literal> and <literal>wtfpl</literal>:
</para>
</listitem>
<listitem>
<para>
It is also possible to whitelist and blacklist licenses that are
specifically acceptable or not acceptable, using
<literal>whitelistedLicenses</literal> and
<literal>blacklistedLicenses</literal>, respectively.
</para>
<para>
The following example configuration whitelists the licenses
<literal>amd</literal> and <literal>wtfpl</literal>:
<programlisting>
{
whitelistedLicenses = with stdenv.lib.licenses; [ amd wtfpl ];
}
</programlisting>
</para>
<para>The following example configuration blacklists the
<literal>gpl3</literal> and <literal>agpl3</literal> licenses:
</para>
<para>
The following example configuration blacklists the <literal>gpl3</literal>
and <literal>agpl3</literal> licenses:
<programlisting>
{
blacklistedLicenses = with stdenv.lib.licenses; [ agpl3 gpl3 ];
}
</programlisting>
</para>
</listitem>
</para>
</listitem>
</itemizedlist>
<para>A complete list of licenses can be found in the file
<filename>lib/licenses.nix</filename> of the nixpkgs tree.</para>
</section>
<para>
A complete list of licenses can be found in the file
<filename>lib/licenses.nix</filename> of the nixpkgs tree.
</para>
</section>
<section xml:id="sec-allow-insecure">
<title>Installing insecure packages</title>
<section xml:id="sec-allow-insecure">
<title>
Installing insecure packages
</title>
<para>There are several ways to tweak how Nix handles a package
which has been marked as insecure.</para>
<para>
There are several ways to tweak how Nix handles a package which has been
marked as insecure.
</para>
<itemizedlist>
<listitem><para>
To temporarily allow all insecure packages, you can use an
environment variable for a single invocation of the nix tools:
<programlisting>$ export NIXPKGS_ALLOW_INSECURE=1</programlisting>
</para></listitem>
<listitem><para>
It is possible to permanently allow individual insecure
packages, while still blocking other insecure packages by
default using the <literal>permittedInsecurePackages</literal>
configuration option in the user configuration file.</para>
<para>The following example configuration permits the
installation of the hypothetically insecure package
<literal>hello</literal>, version <literal>1.2.3</literal>:
<listitem>
<para>
To temporarily allow all insecure packages, you can use an environment
variable for a single invocation of the nix tools:
<programlisting>$ export NIXPKGS_ALLOW_INSECURE=1</programlisting>
</para>
</listitem>
<listitem>
<para>
It is possible to permanently allow individual insecure packages, while
still blocking other insecure packages by default using the
<literal>permittedInsecurePackages</literal> configuration option in the
user configuration file.
</para>
<para>
The following example configuration permits the installation of the
hypothetically insecure package <literal>hello</literal>, version
<literal>1.2.3</literal>:
<programlisting>
{
permittedInsecurePackages = [
@ -194,47 +256,44 @@ distributing the software.</para>
];
}
</programlisting>
</para>
</listitem>
<listitem><para>
It is also possible to create a custom policy around which
insecure packages to allow and deny, by overriding the
<literal>allowInsecurePredicate</literal> configuration
option.</para>
<para>The <literal>allowInsecurePredicate</literal> option is a
function which accepts a package and returns a boolean, much
like <literal>allowUnfreePredicate</literal>.</para>
<para>The following configuration example only allows insecure
packages with very short names:
</para>
</listitem>
<listitem>
<para>
It is also possible to create a custom policy around which insecure
packages to allow and deny, by overriding the
<literal>allowInsecurePredicate</literal> configuration option.
</para>
<para>
The <literal>allowInsecurePredicate</literal> option is a function which
accepts a package and returns a boolean, much like
<literal>allowUnfreePredicate</literal>.
</para>
<para>
The following configuration example only allows insecure packages with
very short names:
<programlisting>
{
allowInsecurePredicate = (pkg: (builtins.stringLength (builtins.parseDrvName pkg.name).name) &lt;= 5);
}
</programlisting>
</para>
<para>Note that <literal>permittedInsecurePackages</literal> is
only checked if <literal>allowInsecurePredicate</literal> is not
specified.
</para></listitem>
</para>
<para>
Note that <literal>permittedInsecurePackages</literal> is only checked if
<literal>allowInsecurePredicate</literal> is not specified.
</para>
</listitem>
</itemizedlist>
</section>
</section>
<!--============================================================-->
<section xml:id="sec-modify-via-packageOverrides">
<title>Modify packages via <literal>packageOverrides</literal></title>
<section xml:id="sec-modify-via-packageOverrides"><title>Modify
packages via <literal>packageOverrides</literal></title>
<para>You can define a function called
<varname>packageOverrides</varname> in your local
<filename>~/.config/nixpkgs/config.nix</filename> to override nix packages. It
must be a function that takes pkgs as an argument and return modified
set of packages.
<para>
You can define a function called <varname>packageOverrides</varname> in your
local <filename>~/.config/nixpkgs/config.nix</filename> to override Nix
packages. It must be a function that takes pkgs as an argument and returns a
modified set of packages.
<programlisting>
{
packageOverrides = pkgs: rec {
@ -242,119 +301,146 @@ set of packages.
};
}
</programlisting>
</para>
</section>
<section xml:id="sec-declarative-package-management">
</para>
</section>
<section xml:id="sec-declarative-package-management">
<title>Declarative Package Management</title>
<section xml:id="sec-building-environment">
<title>Build an environment</title>
<title>Build an environment</title>
<para>
Using <literal>packageOverrides</literal>, it is possible to manage
packages declaratively. This means that we can list all of our desired
packages within a declarative Nix expression. For example, to have
<literal>aspell</literal>, <literal>bc</literal>,
<literal>ffmpeg</literal>, <literal>coreutils</literal>,
<literal>gdb</literal>, <literal>nixUnstable</literal>,
<literal>emscripten</literal>, <literal>jq</literal>,
<literal>nox</literal>, and <literal>silver-searcher</literal>, we could
use the following in <filename>~/.config/nixpkgs/config.nix</filename>:
</para>
<para>
Using <literal>packageOverrides</literal>, it is possible to manage
packages declaratively. This means that we can list all of our desired
packages within a declarative Nix expression. For example, to have
<literal>aspell</literal>, <literal>bc</literal>,
<literal>ffmpeg</literal>, <literal>coreutils</literal>,
<literal>gdb</literal>, <literal>nixUnstable</literal>,
<literal>emscripten</literal>, <literal>jq</literal>,
<literal>nox</literal>, and <literal>silver-searcher</literal>, we could
use the following in <filename>~/.config/nixpkgs/config.nix</filename>:
</para>
<screen>
<screen>
{
packageOverrides = pkgs: with pkgs; {
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [ aspell bc coreutils gdb ffmpeg nixUnstable emscripten jq nox silver-searcher ];
paths = [
aspell
bc
coreutils
gdb
ffmpeg
nixUnstable
emscripten
jq
nox
silver-searcher
];
};
};
}
</screen>
</screen>
<para>
To install it into our environment, you can just run <literal>nix-env -iA
nixpkgs.myPackages</literal>. If you want to load the packages to be built
from a working copy of <literal>nixpkgs</literal> you just run
<literal>nix-env -f. -iA myPackages</literal>. To explore what's been
installed, just look through <filename>~/.nix-profile/</filename>. You can
see that a lot of stuff has been installed. Some of this stuff is useful
some of it isn't. Let's tell Nixpkgs to only link the stuff that we want:
</para>
<para>
To install it into our environment, you can just run <literal>nix-env -iA
nixpkgs.myPackages</literal>. If you want to load the packages to be built
from a working copy of <literal>nixpkgs</literal> you just run
<literal>nix-env -f. -iA myPackages</literal>. To explore what's been
installed, just look through <filename>~/.nix-profile/</filename>. You can
see that a lot of stuff has been installed. Some of this stuff is useful;
some of it isn't. Let's tell Nixpkgs to only link the stuff that we want:
</para>
<screen>
<screen>
{
packageOverrides = pkgs: with pkgs; {
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [ aspell bc coreutils gdb ffmpeg nixUnstable emscripten jq nox silver-searcher ];
paths = [
aspell
bc
coreutils
gdb
ffmpeg
nixUnstable
emscripten
jq
nox
silver-searcher
];
pathsToLink = [ "/share" "/bin" ];
};
};
}
</screen>
<para>
<literal>pathsToLink</literal> tells Nixpkgs to only link the paths listed
which gets rid of the extra stuff in the profile.
<filename>/bin</filename> and <filename>/share</filename> are good
defaults for a user environment, getting rid of the clutter. If you are
running on Nix on MacOS, you may want to add another path as well,
<filename>/Applications</filename>, that makes GUI apps available.
</para>
</screen>
<para>
<literal>pathsToLink</literal> tells Nixpkgs to only link the paths listed,
which gets rid of the extra stuff in the profile. <filename>/bin</filename>
and <filename>/share</filename> are good defaults for a user environment,
getting rid of the clutter. If you are running Nix on macOS, you may
want to add another path as well, <filename>/Applications</filename>, that
makes GUI apps available.
</para>
</section>
<section xml:id="sec-getting-documentation">
<title>Getting documentation</title>
<title>Getting documentation</title>
<para>
After building that new environment, look through
<filename>~/.nix-profile</filename> to make sure everything is there that
we wanted. Discerning readers will note that some files are missing. Look
inside <filename>~/.nix-profile/share/man/man1/</filename> to verify this.
There are no man pages for any of the Nix tools! This is because some
packages like Nix have multiple outputs for things like documentation (see
section 4). Let's make Nix install those as well.
</para>
<para>
After building that new environment, look through
<filename>~/.nix-profile</filename> to make sure everything is there that
we wanted. Discerning readers will note that some files are missing. Look
inside <filename>~/.nix-profile/share/man/man1/</filename> to verify this.
There are no man pages for any of the Nix tools! This is because some
packages like Nix have multiple outputs for things like documentation (see
section 4). Let's make Nix install those as well.
</para>
<screen>
<screen>
{
packageOverrides = pkgs: with pkgs; {
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [ aspell bc coreutils ffmpeg nixUnstable emscripten jq nox silver-searcher ];
pathsToLink = [ "/share/man" "/share/doc" /bin" ];
paths = [
aspell
bc
coreutils
ffmpeg
nixUnstable
emscripten
jq
nox
silver-searcher
];
pathsToLink = [ "/share/man" "/share/doc" "/bin" ];
extraOutputsToInstall = [ "man" "doc" ];
};
};
}
</screen>
</screen>
<para>
This provides us with some useful documentation for using our packages.
However, if we actually want those manpages to be detected by man, we need
to set up our environment. This can also be managed within Nix
expressions.
</para>
<para>
This provides us with some useful documentation for using our packages.
However, if we actually want those manpages to be detected by man, we need
to set up our environment. This can also be managed within Nix expressions.
</para>
<screen>
<screen>
{
packageOverrides = pkgs: with pkgs; rec {
myProfile = writeText "my-profile" ''
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
'';
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
(runCommand "profile" {} ''
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
'')
aspell
bc
@ -367,20 +453,20 @@ cp ${myProfile} $out/etc/profile.d/my-profile.sh
nox
silver-searcher
];
pathsToLink = [ "/share/man" "/share/doc" /bin" "/etc" ];
pathsToLink = [ "/share/man" "/share/doc" "/bin" "/etc" ];
extraOutputsToInstall = [ "man" "doc" ];
};
};
}
</screen>
</screen>
<para>
For this to work fully, you must also have this script sourced when you
are logged in. Try adding something like this to your
<filename>~/.profile</filename> file:
</para>
<para>
For this to work fully, you must also have this script sourced when you are
logged in. Try adding something like this to your
<filename>~/.profile</filename> file:
</para>
<screen>
<screen>
#!/bin/sh
if [ -d $HOME/.nix-profile/etc/profile.d ]; then
for i in $HOME/.nix-profile/etc/profile.d/*.sh; do
@ -389,38 +475,37 @@ if [ -d $HOME/.nix-profile/etc/profile.d ]; then
fi
done
fi
</screen>
<para>
Now just run <literal>source $HOME/.profile</literal> and you can starting
loading man pages from your environent.
</para>
</screen>
<para>
Now just run <literal>source $HOME/.profile</literal> and you can start
loading man pages from your environment.
</para>
</section>
<section xml:id="sec-gnu-info-setup">
<title>GNU info setup</title>
<title>GNU info setup</title>
<para>
Configuring GNU info is a little bit trickier than man pages. To work
correctly, info needs a database to be generated. This can be done with
some small modifications to our environment scripts.
</para>
<para>
Configuring GNU info is a little bit trickier than man pages. To work
correctly, info needs a database to be generated. This can be done with
some small modifications to our environment scripts.
</para>
<screen>
<screen>
{
packageOverrides = pkgs: with pkgs; rec {
myProfile = writeText "my-profile" ''
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
export INFOPATH=$HOME/.nix-profile/share/info:/nix/var/nix/profiles/default/share/info:/usr/share/info
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
export INFOPATH=$HOME/.nix-profile/share/info:/nix/var/nix/profiles/default/share/info:/usr/share/info
'';
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
(runCommand "profile" {} ''
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
'')
aspell
bc
@ -437,28 +522,25 @@ cp ${myProfile} $out/etc/profile.d/my-profile.sh
pathsToLink = [ "/share/man" "/share/doc" "/share/info" "/bin" "/etc" ];
extraOutputsToInstall = [ "man" "doc" "info" ];
postBuild = ''
if [ -x $out/bin/install-info -a -w $out/share/info ]; then
shopt -s nullglob
for i in $out/share/info/*.info $out/share/info/*.info.gz; do
$out/bin/install-info $i $out/share/info/dir
done
fi
if [ -x $out/bin/install-info -a -w $out/share/info ]; then
shopt -s nullglob
for i in $out/share/info/*.info $out/share/info/*.info.gz; do
$out/bin/install-info $i $out/share/info/dir
done
fi
'';
};
};
}
</screen>
<para>
<literal>postBuild</literal> tells Nixpkgs to run a command after building
the environment. In this case, <literal>install-info</literal> adds the
installed info pages to <literal>dir</literal> which is GNU info's default
root node. Note that <literal>texinfoInteractive</literal> is added to the
environment to give the <literal>install-info</literal> command.
</para>
</screen>
<para>
<literal>postBuild</literal> tells Nixpkgs to run a command after building
the environment. In this case, <literal>install-info</literal> adds the
installed info pages to <literal>dir</literal> which is GNU info's default
root node. Note that <literal>texinfoInteractive</literal> is added to the
environment to give the <literal>install-info</literal> command.
</para>
</section>
</section>
</section>
</chapter>


@ -1,20 +1,35 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-contributing">
<title>Contributing to this documentation</title>
<para>The DocBook sources of the Nixpkgs manual are in the <filename
<title>Contributing to this documentation</title>
<para>
The DocBook sources of the Nixpkgs manual are in the
<filename
xlink:href="https://github.com/NixOS/nixpkgs/tree/master/doc">doc</filename>
subdirectory of the Nixpkgs repository. If you make modifications to
the manual, it's important to build it before committing. You can do that as follows:
subdirectory of the Nixpkgs repository.
</para>
<para>
You can quickly check your edits with <command>make</command>:
</para>
<screen>
$ cd /path/to/nixpkgs
$ nix-build doc
$ cd /path/to/nixpkgs/doc
$ nix-shell
[nix-shell]$ make
</screen>
If the build succeeds, the manual will be in
<filename>./result/share/doc/nixpkgs/manual.html</filename>.</para>
<para>
If you experience problems, run <command>make debug</command> to help
understand the docbook errors.
</para>
<para>
After making modifications to the manual, it's important to build it before
committing. You can do that as follows:
<screen>
$ cd /path/to/nixpkgs/doc
$ nix-shell
[nix-shell]$ make clean
[nix-shell]$ nix-build .
</screen>
If the build succeeds, the manual will be in
<filename>./result/share/doc/nixpkgs/manual.html</filename>.
</para>
</chapter>


@ -1,308 +1,468 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-cross">
<title>Cross-compilation</title>
<section xml:id="sec-cross-intro">
<title>Cross-compilation</title>
<section xml:id="sec-cross-intro">
<title>Introduction</title>
<para>
"Cross-compilation" means compiling a program on one machine for another type of machine.
For example, a typical use of cross compilation is to compile programs for embedded devices.
These devices often don't have the computing power and memory to compile their own programs.
One might think that cross-compilation is a fairly niche concern, but there are advantages to being rigorous about distinguishing build-time vs run-time environments even when one is developing and deploying on the same machine.
Nixpkgs is increasingly adopting the opinion that packages should be written with cross-compilation in mind, and nixpkgs should evaluate in a similar way (by minimizing cross-compilation-specific special cases) whether or not one is cross-compiling.
"Cross-compilation" means compiling a program on one machine for another type
of machine. For example, a typical use of cross-compilation is to compile
programs for embedded devices. These devices often don't have the computing
power and memory to compile their own programs. One might think that
cross-compilation is a fairly niche concern. However, there are significant
advantages to rigorously distinguishing between build-time and run-time
environments! This applies even when one is developing and deploying on the
same machine. Nixpkgs is increasingly adopting the opinion that packages
should be written with cross-compilation in mind, and nixpkgs should evaluate
in a similar way (by minimizing cross-compilation-specific special cases)
whether or not one is cross-compiling.
</para>
<para>
This chapter will be organized in three parts.
First, it will describe the basics of how to package software in a way that supports cross-compilation.
Second, it will describe how to use Nixpkgs when cross-compiling.
Third, it will describe the internal infrastructure supporting cross-compilation.
This chapter will be organized in three parts. First, it will describe the
basics of how to package software in a way that supports cross-compilation.
Second, it will describe how to use Nixpkgs when cross-compiling. Third, it
will describe the internal infrastructure supporting cross-compilation.
</para>
</section>
</section>
<!--============================================================-->
<section xml:id="sec-cross-packaging">
<section xml:id="sec-cross-packaging">
<title>Packaging in a cross-friendly manner</title>
<section>
<title>Platform parameters</title>
<para>
Nixpkgs follows the <link xlink:href="https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html">common historical convention of GNU autoconf</link> of distinguishing between 3 types of platform: <wordasword>build</wordasword>, <wordasword>host</wordasword>, and <wordasword>target</wordasword>.
<section xml:id="sec-cross-platform-parameters">
<title>Platform parameters</title>
In summary, <wordasword>build</wordasword> is the platform on which a package is being built, <wordasword>host</wordasword> is the platform on which it is to run. The third attribute, <wordasword>target</wordasword>, is relevant only for certain specific compilers and build tools.
</para>
<para>
Nixpkgs follows the <link
xlink:href="https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html">conventions
of GNU autoconf</link>. We distinguish between 3 types of platforms when
building a derivation: <wordasword>build</wordasword>,
<wordasword>host</wordasword>, and <wordasword>target</wordasword>. In
summary, <wordasword>build</wordasword> is the platform on which a package
is being built, <wordasword>host</wordasword> is the platform on which it
will run. The third attribute, <wordasword>target</wordasword>, is relevant
only for certain specific compilers and build tools.
</para>
<para>
In Nixpkgs, these three platforms are defined as attribute sets under the names <literal>buildPlatform</literal>, <literal>hostPlatform</literal>, and <literal>targetPlatform</literal>.
All three are always defined as attributes in the standard environment, and at the top level. That means one can get at them just like a dependency in a function that is imported with <literal>callPackage</literal>:
<programlisting>{ stdenv, buildPlatform, hostPlatform, fooDep, barDep, .. }: ...buildPlatform...</programlisting>, or just off <varname>stdenv</varname>:
<programlisting>{ stdenv, fooDep, barDep, .. }: ...stdenv.buildPlatform...</programlisting>.
</para>
<variablelist>
<varlistentry>
<term><varname>buildPlatform</varname></term>
<listitem><para>
The "build platform" is the platform on which a package is built.
Once someone has a built package, or pre-built binary package, the build platform should not matter and be safe to ignore.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>hostPlatform</varname></term>
<listitem><para>
The "host platform" is the platform on which a package will be run.
This is the simplest platform to understand, but also the one with the worst name.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>targetPlatform</varname></term>
<listitem>
<para>
The "target platform" attribute is, unlike the other two attributes, not actually fundamental to the process of building software.
Instead, it is only relevant for compatibility with building certain specific compilers and build tools.
It can be safely ignored for all other packages.
</para>
<para>
The build process of certain compilers is written in such a way that the compiler resulting from a single build can itself only produce binaries for a single platform.
The task specifying this single "target platform" is thus pushed to build time of the compiler.
The root cause of this mistake is often that the compiler (which will be run on the host) and the the standard library/runtime (which will be run on the target) are built by a single build process.
</para>
<para>
There is no fundamental need to think about a single target ahead of time like this.
If the tool supports modular or pluggable backends, both the need to specify the target at build time and the constraint of having only a single target disappear.
An example of such a tool is LLVM.
</para>
<para>
Although the existance of a "target platfom" is arguably a historical mistake, it is a common one: examples of tools that suffer from it are GCC, Binutils, GHC and Autoconf.
Nixpkgs tries to avoid sharing in the mistake where possible.
Still, because the concept of a target platform is so ingrained, it is best to support it as is.
</para>
</listitem>
</varlistentry>
</variablelist>
<para>
The exact schema these fields follow is a bit ill-defined due to a long and convoluted evolution, but this is slowly being cleaned up.
You can see examples of ones used in practice in <literal>lib.systems.examples</literal>; note how they are not all very consistent.
For now, here are few fields can count on them containing:
</para>
<variablelist>
<varlistentry>
<term><varname>system</varname></term>
<listitem>
<para>
This is a two-component shorthand for the platform.
Examples of this would be "x86_64-darwin" and "i686-linux"; see <literal>lib.systems.doubles</literal> for more.
This format isn't very standard, but has built-in support in Nix, such as the <varname>builtins.currentSystem</varname> impure string.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>config</varname></term>
<listitem>
<para>
This is a 3- or 4- component shorthand for the platform.
Examples of this would be "x86_64-unknown-linux-gnu" and "aarch64-apple-darwin14".
This is a standard format called the "LLVM target triple", as they are pioneered by LLVM and traditionally just used for the <varname>targetPlatform</varname>.
This format is strictly more informative than the "Nix host double", as the previous format could analogously be termed.
This needs a better name than <varname>config</varname>!
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>parsed</varname></term>
<listitem>
<para>
This is a nix representation of a parsed LLVM target triple with white-listed components.
This can be specified directly, or actually parsed from the <varname>config</varname>.
[Technically, only one need be specified and the others can be inferred, though the precision of inference may not be very good.]
See <literal>lib.systems.parse</literal> for the exact representation.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>libc</varname></term>
<listitem>
<para>
This is a string identifying the standard C library used.
Valid identifiers include "glibc" for GNU libc, "libSystem" for Darwin's Libsystem, and "uclibc" for µClibc.
It should probably be refactored to use the module system, like <varname>parse</varname>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>is*</varname></term>
<listitem>
<para>
These predicates are defined in <literal>lib.systems.inspect</literal>, and slapped on every platform.
They are superior to the ones in <varname>stdenv</varname> as they force the user to be explicit about which platform they are inspecting.
Please use these instead of those.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>platform</varname></term>
<listitem>
<para>
This is, quite frankly, a dumping ground of ad-hoc settings (it's an attribute set).
See <literal>lib.systems.platforms</literal> for examples—there's hopefully one in there that will work verbatim for each platform that is working.
Please help us triage these flags and give them better homes!
</para>
</listitem>
</varlistentry>
</variablelist>
<para>
In Nixpkgs, these three platforms are defined as attribute sets under the
names <literal>buildPlatform</literal>, <literal>hostPlatform</literal>,
and <literal>targetPlatform</literal>. They are always defined as
attributes in the standard environment. That means one can access them
like:
<programlisting>{ stdenv, fooDep, barDep, .. }: ...stdenv.buildPlatform...</programlisting>
.
</para>
<variablelist>
<varlistentry>
<term>
<varname>buildPlatform</varname>
</term>
<listitem>
<para>
The "build platform" is the platform on which a package is built. Once
someone has a built package, or pre-built binary package, the build
platform should not matter and can be ignored.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>hostPlatform</varname>
</term>
<listitem>
<para>
The "host platform" is the platform on which a package will be run. This
is the simplest platform to understand, but also the one with the worst
name.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>targetPlatform</varname>
</term>
<listitem>
<para>
The "target platform" attribute is, unlike the other two attributes, not
actually fundamental to the process of building software. Instead, it is
only relevant for compatibility with building certain specific compilers
and build tools. It can be safely ignored for all other packages.
</para>
<para>
The build process of certain compilers is written in such a way that the
compiler resulting from a single build can itself only produce binaries
for a single platform. The task of specifying this single "target
platform" is thus pushed to build time of the compiler. The root cause of
this is that the compiler (which will be run on the host) and the standard
library/runtime (which will be run on the target) are built by a single
build process.
</para>
<para>
There is no fundamental need to think about a single target ahead of
time like this. If the tool supports modular or pluggable backends, both
the need to specify the target at build time and the constraint of
having only a single target disappear. An example of such a tool is
LLVM.
</para>
<para>
Although the existence of a "target platform" is arguably a historical
mistake, it is a common one: examples of tools that suffer from it are
GCC, Binutils, GHC and Autoconf. Nixpkgs tries to avoid sharing in the
mistake where possible. Still, because the concept of a target platform
is so ingrained, it is best to support it as is.
</para>
</listitem>
</varlistentry>
</variablelist>
<para>
The exact schema these fields follow is a bit ill-defined due to a long and
convoluted evolution, but this is slowly being cleaned up. You can see
examples of ones used in practice in
<literal>lib.systems.examples</literal>; note how they are not all very
consistent. For now, here are a few fields you can count on them containing:
</para>
<variablelist>
<varlistentry>
<term>
<varname>system</varname>
</term>
<listitem>
<para>
This is a two-component shorthand for the platform. Examples of this
would be "x86_64-darwin" and "i686-linux"; see
<literal>lib.systems.doubles</literal> for more. The first component
corresponds to the CPU architecture of the platform and the second to the
operating system of the platform (<literal>[cpu]-[os]</literal>). This
format has built-in support in Nix, such as the
<varname>builtins.currentSystem</varname> impure string.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>config</varname>
</term>
<listitem>
<para>
This is a 3- or 4- component shorthand for the platform. Examples of this
would be <literal>x86_64-unknown-linux-gnu</literal> and
<literal>aarch64-apple-darwin14</literal>. This is a standard format
called the "LLVM target triple", as such triples were pioneered by LLVM. In the
4-part form, this corresponds to
<literal>[cpu]-[vendor]-[os]-[abi]</literal>. This format is strictly
more informative than the "Nix host double", as the previous format could
analogously be termed. This needs a better name than
<varname>config</varname>!
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>parsed</varname>
</term>
<listitem>
<para>
This is a Nix representation of a parsed LLVM target triple
with white-listed components. This can be specified directly,
or actually parsed from the <varname>config</varname>. See
<literal>lib.systems.parse</literal> for the exact
representation.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>libc</varname>
</term>
<listitem>
<para>
This is a string identifying the standard C library used. Valid
identifiers include "glibc" for GNU libc, "libSystem" for Darwin's
Libsystem, and "uclibc" for µClibc. It should probably be refactored to
use the module system, like <varname>parse</varname>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>is*</varname>
</term>
<listitem>
<para>
These predicates are defined in <literal>lib.systems.inspect</literal>,
and slapped onto every platform. They are superior to the ones in
<varname>stdenv</varname> as they force the user to be explicit about
which platform they are inspecting. Please use these instead of those.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>platform</varname>
</term>
<listitem>
<para>
This is, quite frankly, a dumping ground of ad-hoc settings (it's an
attribute set). See <literal>lib.systems.platforms</literal> for
examples—there is hopefully one in there that will work verbatim for
each supported platform. Please help us triage these flags and
give them better homes!
</para>
</listitem>
</varlistentry>
</variablelist>
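  <para>
   As a rough illustration (a sketch only; the field names are the ones
   described above, but the values are examples and not a complete or
   authoritative schema), a platform attribute set might look like:
<programlisting>
{
  system = "x86_64-linux";              # the Nix double
  config = "x86_64-unknown-linux-gnu";  # the LLVM-style triple
  parsed = { /* see lib.systems.parse */ };
  libc = "glibc";
  isLinux = true;                       # one of the is* predicates
  platform = { /* ad-hoc settings, see lib.systems.platforms */ };
}
</programlisting>
  </para>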
</section>
<section xml:id="sec-cross-specifying-dependencies">
<title>Specifying Dependencies</title>
<para>
In this section we explore the relationship between both runtime and
build-time dependencies and the 3 Autoconf platforms.
</para>
<para>
A runtime dependency between 2 packages implies that between them both the
host and target platforms match. This is directly implied by the meaning of
"host platform" and "runtime dependency": The package dependency exists
while both packages are running on a single host platform.
</para>
<para>
A build time dependency, however, implies a shift in platforms between the
depending package and the depended-on package. The meaning of a build time
dependency is that to build the depending package we need to be able to run
the depended-on's package. The depending package's build platform is
therefore equal to the depended-on package's host platform. Analogously,
the depending package's host platform is equal to the depended-on package's
target platform.
</para>
<para>
In this manner, given the 3 platforms for one package, we can determine the
three platforms for all its transitive dependencies. This is the most
important guiding principle behind cross-compilation with Nixpkgs, and will
be called the <wordasword>sliding window principle</wordasword>.
</para>
<para>
Some examples will make this clearer. If a package is being built with a
<literal>(build, host, target)</literal> platform triple of <literal>(foo,
bar, bar)</literal>, then its build-time dependencies would have a triple of
<literal>(foo, foo, bar)</literal>, and <emphasis>those packages'</emphasis>
build-time dependencies would have a triple of <literal>(foo, foo,
foo)</literal>. In other words, it should take two "rounds" of following
build-time dependency edges before one reaches a fixed point where, by the
sliding window principle, the platform triple no longer changes. Indeed,
this happens with cross-compilation, where only rounds of native
dependencies starting with the second necessarily coincide with native
packages.
</para>
<note>
<para>
The depending package's target platform is unconstrained by the sliding
window principle, which makes sense in that one can in principle build
cross compilers targeting arbitrary platforms.
</para>
</note>
<para>
How does this work in practice? Nixpkgs is now structured so that build-time
dependencies are taken from <varname>buildPackages</varname>, whereas
run-time dependencies are taken from the top level attribute set. For
example, <varname>buildPackages.gcc</varname> should be used at build-time,
while <varname>gcc</varname> should be used at run-time. Now, for most of
Nixpkgs's history, there was no <varname>buildPackages</varname>, and most
packages have not been refactored to use it explicitly. Instead, one can use
the six (<emphasis>gasp</emphasis>) attributes used for specifying
dependencies as documented in <xref linkend="ssec-stdenv-dependencies"/>. We
"splice" together the run-time and build-time package sets with
<varname>callPackage</varname>, and then <varname>mkDerivation</varname> for
each of four attributes pulls the right derivation out. This splicing can be
skipped when not cross-compiling as the package sets are the same, but is a
bit slow for cross-compiling. Because of this, a best-of-both-worlds
solution is in the works with no splicing or explicit access of
<varname>buildPackages</varname> needed. For now, feel free to use either
method.
</para>
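  <para>
   To make this concrete, here is a small sketch using <command>nix
   repl</command> (assuming an x86_64-linux build machine; the example
   platform is one of those in <literal>lib.systems.examples</literal>):
<programlisting>
nix-repl> pkgs = import &lt;nixpkgs&gt; { crossSystem = (import &lt;nixpkgs/lib&gt;).systems.examples.aarch64-multiplatform; }
nix-repl> pkgs.stdenv.hostPlatform.config
"aarch64-unknown-linux-gnu"
nix-repl> pkgs.buildPackages.stdenv.hostPlatform.config
"x86_64-unknown-linux-gnu"
nix-repl> pkgs.buildPackages.buildPackages.stdenv.hostPlatform.config
"x86_64-unknown-linux-gnu"
</programlisting>
   Following <varname>buildPackages</varname> twice reaches the fixed point
   described by the sliding window principle.
  </para>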
<note>
<para>
There is also a "backlink" <varname>targetPackages</varname>, yielding a
package set whose <varname>buildPackages</varname> is the current package
set. This is a hack, though, to accommodate compilers with lousy build
systems. Please do not use this unless you are absolutely sure you are
packaging such a compiler and there is no other way.
</para>
</note>
</section>
<section xml:id="sec-cross-cookbook">
<title>Cross packaging cookbook</title>
<para>
Some frequently encountered problems when packaging for cross-compilation
should be answered here. Ideally, the information above is exhaustive, so
this section cannot provide any new information, but it is ludicrous and
cruel to expect everyone to spend effort working through the interaction of
many features just to figure out the same answer to the same common problem.
Feel free to add to this list!
</para>
<qandaset>
<qandaentry xml:id="cross-qa-build-c-program-in-build-environment">
<question>
<para>
What if my package's build system needs to build a C program to be run
under the build environment?
</para>
</question>
<answer>
<para>
<programlisting>depsBuildBuild = [ buildPackages.stdenv.cc ];</programlisting>
Add it to your <function>mkDerivation</function> invocation.
</para>
</answer>
</qandaentry>
<qandaentry xml:id="cross-qa-fails-to-find-ar">
<question>
<para>
My package fails to find <command>ar</command>.
</para>
</question>
<answer>
<para>
Many packages assume that an unprefixed <command>ar</command> is
available, but Nix doesn't provide one. It only provides a prefixed one,
just as it only does for all the other binutils programs. It may be
necessary to patch the package to fix the build system to use a prefixed
<command>ar</command>.
</para>
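     <para>
      For example (a sketch; the exact substitution depends on the package's
      build system, and the <literal>Makefile</literal> shown here is
      hypothetical), one might point a hard-coded <command>ar</command> at the
      prefixed one from <varname>postPatch</varname>:
<programlisting>
postPatch = ''
  substituteInPlace Makefile \
    --replace "ar rcs" "${stdenv.cc.targetPrefix}ar rcs"
'';
</programlisting>
     </para>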
</answer>
</qandaentry>
<qandaentry xml:id="cross-testsuite-runs-host-code">
<question>
<para>
My package's testsuite needs to run host platform code.
</para>
</question>
<answer>
<para>
<programlisting>doCheck = stdenv.hostPlatform == stdenv.buildPlatform;</programlisting>
Add it to your <function>mkDerivation</function> invocation.
</para>
</answer>
</qandaentry>
</qandaset>
</section>
</section>
<!--============================================================-->
<section xml:id="sec-cross-usage">
<section xml:id="sec-cross-usage">
<title>Cross-building packages</title>
<note><para>
More information needs to be moved from the old wiki, especially <link xlink:href="https://nixos.org/wiki/CrossCompiling" />, for this section.
</para></note>
<para>
Nixpkgs can be instantiated with <varname>localSystem</varname> alone, in
which case there is no cross-compiling and everything is built by and for
that system, or also with <varname>crossSystem</varname>, in which case
packages run on the latter, but all building happens on the former. Both
parameters take the same schema as the 3 (build, host, and target) platforms
defined in the previous section. As mentioned above,
<literal>lib.systems.examples</literal> has some platforms which are used as
arguments for these parameters in practice. You can use them
programmatically, or on the command line:
<programlisting>
nix-build &lt;nixpkgs&gt; --arg crossSystem '(import &lt;nixpkgs/lib&gt;).systems.examples.fooBarBaz' -A whatever</programlisting>
</para>
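  <para>
   Programmatically, the equivalent (a sketch, using the same placeholder
   example name <literal>fooBarBaz</literal> as above) would be:
<programlisting>
import &lt;nixpkgs&gt; {
  crossSystem = (import &lt;nixpkgs/lib&gt;).systems.examples.fooBarBaz;
}
</programlisting>
  </para>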
<note>
<para>
Eventually we would like to make these platform examples an unnecessary
convenience so that
<programlisting>
nix-build &lt;nixpkgs&gt; --arg crossSystem '{ config = "&lt;arch&gt;-&lt;os&gt;-&lt;vendor&gt;-&lt;abi&gt;"; }' -A whatever</programlisting>
works in the vast majority of cases. The problem today is dependencies on
other sorts of configuration which aren't given proper defaults. We rely on
the examples to crudely set those configuration parameters in some
vaguely sane manner on the user's behalf. Issue
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/34274">#34274</link>
tracks this inconvenience along with its root cause in crufty configuration
options.
</para>
</note>
<para>
While one is free to pass both parameters in full, there's a lot of logic to
fill in missing fields. As discussed in the previous section, only one of
<varname>system</varname>, <varname>config</varname>, and
<varname>parsed</varname> is needed to infer the other two. Additionally,
<varname>libc</varname> will be inferred from <varname>parsed</varname>.
Finally, <literal>localSystem.system</literal> is also
<emphasis>impurely</emphasis> inferred based on the platform on which
evaluation occurs. This means it is often not necessary to pass
<varname>localSystem</varname> at all, as in the command-line example in the
previous paragraph.
</para>
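  <para>
   For instance (a sketch; the triple is illustrative, and subject to the
   caveat in the note above about missing defaults for some targets), passing
   only a <varname>config</varname> lets the remaining fields be inferred:
<programlisting>
import &lt;nixpkgs&gt; {
  # system, parsed and libc are inferred from config;
  # localSystem is inferred from the platform doing the evaluation.
  crossSystem = { config = "aarch64-unknown-linux-gnu"; };
}
</programlisting>
  </para>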
<note>
<para>
Many sources (manual, wiki, etc) probably mention passing
<varname>system</varname>, <varname>platform</varname>, along with the
optional <varname>crossSystem</varname> to nixpkgs: <literal>import
&lt;nixpkgs&gt; { system = ..; platform = ..; crossSystem = ..;
}</literal>. Passing those two instead of <varname>localSystem</varname> is
still supported for compatibility, but is discouraged. Indeed, much of the
inference we do for these parameters is motivated by compatibility as much
as convenience.
</para>
</note>
<para>
One would think that <varname>localSystem</varname> and
<varname>crossSystem</varname> overlap horribly with the three
<varname>*Platforms</varname> (<varname>buildPlatform</varname>,
<varname>hostPlatform,</varname> and <varname>targetPlatform</varname>; see
<varname>stage.nix</varname> or the manual). Actually, those identifiers are
purposefully not used here to draw a subtle but important distinction: while
the granularity of having 3 platforms is necessary to properly
<emphasis>build</emphasis> packages, it is overkill for specifying the user's
<emphasis>intent</emphasis> when making a build plan or package set. A simple
"build vs deploy" dichotomy is adequate: the sliding window principle
described in the previous section shows how to interpolate between these two
"end points" to get the 3 platform triple for each bootstrapping stage. That
means for any package in a given package set,
even those not bound on the top level but only reachable via dependencies or
<varname>buildPackages</varname>, the three platforms will be defined as one
of <varname>localSystem</varname> or <varname>crossSystem</varname>, with the
former replacing the latter as one traverses build-time dependencies. A last
simple difference is that <varname>crossSystem</varname> should be null when
one doesn't want to cross-compile, while the <varname>*Platform</varname>s
are always non-null. <varname>localSystem</varname> is always non-null.
</para>
</section>
<!--============================================================-->
<section xml:id="sec-cross-infra">
<section xml:id="sec-cross-infra">
<title>Cross-compilation infrastructure</title>
<para>
To be written.
</para>
<note>
<para>
If one explores Nixpkgs, they will see derivations with names like
<literal>gccCross</literal>. Such <literal>*Cross</literal> derivations are a
holdover from before we properly distinguished between the host and target
platforms—the derivation with "Cross" in the name covered the <literal>build
= host != target</literal> case, while the other covered the <literal>host =
target</literal> case, with the build platform the same or not depending on whether one
was using its <literal>.nativeDrv</literal> or <literal>.crossDrv</literal>.
This ugliness will disappear soon.
</para>
</note>
</section>
</chapter>

View File

@ -1,118 +1,51 @@
{ pkgs ? (import ./.. { }), nixpkgs ? { }}:
let
pkgs = import ./.. { };
lib = pkgs.lib;
sources = lib.sourceFilesBySuffices ./. [".xml"];
sources-langs = ./languages-frameworks;
in
pkgs.stdenv.mkDerivation {
locationsXml = import ./lib-function-locations.nix { inherit pkgs nixpkgs; };
functionDocs = import ./lib-function-docs.nix { inherit locationsXml pkgs; };
in pkgs.stdenv.mkDerivation {
name = "nixpkgs-manual";
buildInputs = with pkgs; [ pandoc libxml2 libxslt zip jing xmlformat ];
buildInputs = with pkgs; [ pandoc libxml2 libxslt zip ];
src = ./.;
xsltFlags = ''
--param section.autolabel 1
--param section.label.includes.component.label 1
--param html.stylesheet 'style.css'
--param xref.with.number.and.title 1
--param toc.section.depth 3
--param admon.style '''
--param callout.graphics.extension '.gif'
# Hacking on these variables? Make sure to close and open
# nix-shell between each test, maybe even:
# $ nix-shell --run "make clean all"
# otherwise they won't reapply :)
HIGHLIGHTJS = pkgs.documentation-highlighter;
XSL = "${pkgs.docbook_xsl_ns}/xml/xsl";
RNG = "${pkgs.docbook5}/xml/rng/docbook/docbook.rng";
XMLFORMAT_CONFIG = ../nixos/doc/xmlformat.conf;
xsltFlags = lib.concatStringsSep " " [
"--param section.autolabel 1"
"--param section.label.includes.component.label 1"
"--stringparam html.stylesheet 'style.css overrides.css highlightjs/mono-blue.css'"
"--stringparam html.script './highlightjs/highlight.pack.js ./highlightjs/loader.js'"
"--param xref.with.number.and.title 1"
"--param toc.section.depth 3"
"--stringparam admon.style ''"
"--stringparam callout.graphics.extension .svg"
];
postPatch = ''
rm -rf ./functions/library/locations.xml
ln -s ${locationsXml} ./functions/library/locations.xml
ln -s ${functionDocs} ./functions/library/generated
echo ${lib.version} > .version
'';
installPhase = ''
dest="$out/share/doc/nixpkgs"
mkdir -p "$(dirname "$dest")"
mv out/html "$dest"
mv "$dest/index.html" "$dest/manual.html"
buildCommand = let toDocbook = { useChapters ? false, inputFile, outputFile }:
let
extraHeader = lib.optionalString (!useChapters)
''xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" '';
in ''
{
pandoc '${inputFile}' -w docbook+smart ${lib.optionalString useChapters "--top-level-division=chapter"} \
-f markdown+smart \
| sed -e 's|<ulink url=|<link xlink:href=|' \
-e 's|</ulink>|</link>|' \
-e 's|<sect. id=|<section xml:id=|' \
-e 's|</sect[0-9]>|</section>|' \
-e '1s| id=| xml:id=|' \
-e '1s|\(<[^ ]* \)|\1${extraHeader}|'
} > '${outputFile}'
'';
in
mv out/epub/manual.epub "$dest/nixpkgs-manual.epub"
''
ln -s '${sources}/'*.xml .
mkdir ./languages-frameworks
cp -s '${sources-langs}'/* ./languages-frameworks
''
+ toDocbook {
inputFile = ./introduction.md;
outputFile = "introduction.xml";
useChapters = true;
}
+ toDocbook {
inputFile = ./shell.md;
outputFile = "shell.xml";
}
+ toDocbook {
inputFile = ./languages-frameworks/python.md;
outputFile = "./languages-frameworks/python.xml";
}
+ toDocbook {
inputFile = ./languages-frameworks/haskell.md;
outputFile = "./languages-frameworks/haskell.xml";
}
+ toDocbook {
inputFile = ../pkgs/development/idris-modules/README.md;
outputFile = "languages-frameworks/idris.xml";
}
+ toDocbook {
inputFile = ../pkgs/development/node-packages/README.md;
outputFile = "languages-frameworks/node.xml";
}
+ toDocbook {
inputFile = ../pkgs/development/r-modules/README.md;
outputFile = "languages-frameworks/r.xml";
}
+ toDocbook {
inputFile = ./languages-frameworks/rust.md;
outputFile = "./languages-frameworks/rust.xml";
}
+ toDocbook {
inputFile = ./languages-frameworks/vim.md;
outputFile = "./languages-frameworks/vim.xml";
}
+ ''
echo ${lib.nixpkgsVersion} > .version
# validate against relaxng schema
xmllint --nonet --xinclude --noxincludenode manual.xml --output manual-full.xml
${pkgs.jing}/bin/jing ${pkgs.docbook5}/xml/rng/docbook/docbook.rng manual-full.xml
dst=$out/share/doc/nixpkgs
mkdir -p $dst
xsltproc $xsltFlags --nonet --xinclude \
--output $dst/manual.html \
${pkgs.docbook5_xsl}/xml/xsl/docbook/xhtml/docbook.xsl \
./manual.xml
cp ${./style.css} $dst/style.css
mkdir -p $dst/images/callouts
cp "${pkgs.docbook5_xsl}/xml/xsl/docbook/images/callouts/"*.gif $dst/images/callouts/
mkdir -p $out/nix-support
echo "doc manual $dst manual.html" >> $out/nix-support/hydra-build-products
xsltproc $xsltFlags --nonet --xinclude \
--output $dst/epub/ \
${pkgs.docbook5_xsl}/xml/xsl/docbook/epub/docbook.xsl \
./manual.xml
cp -r $dst/images $dst/epub/OEBPS
echo "application/epub+zip" > mimetype
manual="$dst/nixpkgs-manual.epub"
zip -0Xq "$manual" mimetype
cd $dst/epub && zip -Xr9D "$manual" *
rm -rf $dst/epub
mkdir -p $out/nix-support/
echo "doc manual $dest manual.html" >> $out/nix-support/hydra-build-products
echo "doc manual $dest nixpkgs-manual.epub" >> $out/nix-support/hydra-build-products
'';
}

View File

@ -1,706 +1,18 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-functions">
<title>Functions reference</title>
<para>
The nixpkgs repository has several utility functions to manipulate Nix expressions.
</para>
<section xml:id="sec-overrides">
<title>Overriding</title>
<para>
Sometimes one wants to override parts of
<literal>nixpkgs</literal>, e.g. derivation attributes, the results of
derivations or even the whole package set.
</para>
<section xml:id="sec-pkg-override">
<title>&lt;pkg&gt;.override</title>
<para>
The function <varname>override</varname> is usually available for all the
derivations in the nixpkgs expression (<varname>pkgs</varname>).
</para>
<para>
It is used to override the arguments passed to a function.
</para>
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<programlisting>import pkgs.path { overlays = [ (self: super: {
foo = super.foo.override { barSupport = true ; };
})]};</programlisting>
<programlisting>mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
}</programlisting>
</para>
<para>
In the first example, <varname>pkgs.foo</varname> is the result of a function call
with some default arguments, usually a derivation.
Using <varname>pkgs.foo.override</varname> will call the same function with
the given new arguments.
</para>
</section>
<section xml:id="sec-pkg-overrideAttrs">
<title>&lt;pkg&gt;.overrideAttrs</title>
<para>
The function <varname>overrideAttrs</varname> allows overriding the
attribute set passed to a <varname>stdenv.mkDerivation</varname> call,
producing a new derivation based on the original one.
This function is available on all derivations produced by the
<varname>stdenv.mkDerivation</varname> function, which is most packages
in the nixpkgs expression <varname>pkgs</varname>.
</para>
<para>
Example usage:
<programlisting>helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});</programlisting>
</para>
<para>
In the above example, the <varname>separateDebugInfo</varname> attribute is
overridden to be true, thus building debug info for
<varname>helloWithDebug</varname>, while all other attributes will be
retained from the original <varname>hello</varname> package.
</para>
<para>
The argument <varname>oldAttrs</varname> is conventionally used to refer to
the attr set originally passed to <varname>stdenv.mkDerivation</varname>.
</para>
<note>
<para>
Note that <varname>separateDebugInfo</varname> is processed only by the
<varname>stdenv.mkDerivation</varname> function, not the generated, raw
Nix derivation. Thus, using <varname>overrideDerivation</varname> will
not work in this case, as it overrides only the attributes of the final
derivation. It is for this reason that <varname>overrideAttrs</varname>
should be preferred in (almost) all cases to
<varname>overrideDerivation</varname>, i.e. to allow using
<varname>stdenv.mkDerivation</varname> to process input arguments, as well
as the fact that it is easier to use (you can use the same attribute
names you see in your Nix code, instead of the generated ones, e.g.
<varname>buildInputs</varname> vs <varname>nativeBuildInputs</varname>),
and it involves less typing.
</para>
</note>
</section>
<section xml:id="sec-pkg-overrideDerivation">
<title>&lt;pkg&gt;.overrideDerivation</title>
<warning>
<para>You should prefer <varname>overrideAttrs</varname> in almost all
cases, see its documentation for the reasons why.
<varname>overrideDerivation</varname> is not deprecated and will continue
to work, but is less nice to use and does not have as many abilities as
<varname>overrideAttrs</varname>.
</para>
</warning>
<warning>
<para>Do not use this function in Nixpkgs as it evaluates a Derivation
before modifying it, which breaks package abstraction and removes
error-checking of function arguments. In addition, this
evaluation-per-function application incurs a performance penalty,
which can become a problem if many overrides are used.
It is only intended for ad-hoc customisation, such as in
<filename>~/.config/nixpkgs/config.nix</filename>.
</para>
</warning>
<para>
The function <varname>overrideDerivation</varname> creates a new derivation
based on an existing one by overriding the original's attributes with
the attribute set produced by the specified function.
This function is available on all
derivations defined using the <varname>makeOverridable</varname> function.
Most standard derivation-producing functions, such as
<varname>stdenv.mkDerivation</varname>, are defined using this
function, which means most packages in the nixpkgs expression,
<varname>pkgs</varname>, have this function.
</para>
<para>
Example usage:
<programlisting>mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});</programlisting>
</para>
<para>
In the above example, the <varname>name</varname>, <varname>src</varname>,
and <varname>patches</varname> of the derivation will be overridden, while
all other attributes will be retained from the original derivation.
</para>
<para>
The argument <varname>oldAttrs</varname> is used to refer to the attribute set of
the original derivation.
</para>
<note>
<para>
A package's attributes are evaluated <emphasis>before</emphasis> being modified by
the <varname>overrideDerivation</varname> function.
For example, the <varname>name</varname> attribute reference
in <varname>url = "mirror://gnu/hello/${name}.tar.gz";</varname>
is filled-in <emphasis>before</emphasis> the <varname>overrideDerivation</varname> function
modifies the attribute set. This means that overriding the
<varname>name</varname> attribute, in this example, <emphasis>will not</emphasis> change the
value of the <varname>url</varname> attribute. Instead, we need to override
both the <varname>name</varname> <emphasis>and</emphasis> <varname>url</varname> attributes.
</para>
</note>
</section>
<section xml:id="sec-lib-makeOverridable">
<title>lib.makeOverridable</title>
<para>
The function <varname>lib.makeOverridable</varname> is used to make the result
of a function easily customizable. This utility only makes sense for functions
that accept an argument set and return an attribute set.
</para>
<para>
Example usage:
<programlisting>f = { a, b }: { result = a+b; }
c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</para>
<para>
The variable <varname>c</varname> is the value of the <varname>f</varname> function
applied with some default arguments. Hence the value of <varname>c.result</varname>
is <literal>3</literal>, in this example.
</para>
<para>
The variable <varname>c</varname> however also has some additional functions, like
<link linkend="sec-pkg-override">c.override</link> which can be used to
override the default arguments. In this example the value of
<varname>(c.override { a = 4; }).result</varname> is 6.
</para>
</section>
</section>
<section xml:id="sec-generators">
<title>Generators</title>
<para>
Generators are functions that create file formats from nix
data structures, e.g. for configuration files.
There are generators available for: <literal>INI</literal>,
<literal>JSON</literal> and <literal>YAML</literal>
</para>
<para>
All generators follow a similar call interface: <code>generatorName
configFunctions data</code>, where <literal>configFunctions</literal> is a
set of user-defined functions that format variable parts of the content.
They each have common defaults, so often they do not need to be set
manually. An example is <code>mkSectionName ? (name: libStr.escape [ "[" "]"
] name)</code> from the <literal>INI</literal> generator. It gets the name
of a section and returns a sanitized name. The default
<literal>mkSectionName</literal> escapes <literal>[</literal> and
<literal>]</literal> with a backslash.
</para>
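  <para>
    For example (a sketch; section and key names are illustrative), the
    <literal>INI</literal> generator can be called with an empty set of
    <literal>configFunctions</literal> to accept all the defaults:
    <programlisting>
lib.generators.toINI {} {
  # one section named "general" with two keys
  general = {
    browser = "firefox";
    terminal = "xterm";
  };
}
</programlisting>
    which evaluates to a string containing an INI
    <literal>[general]</literal> section with those two keys.
  </para>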
<note><para>Nix store paths can be converted to strings by enclosing a
derivation attribute like so: <code>"${drv}"</code>.</para></note>
<para>
Detailed documentation for each generator can be found in
<literal>lib/generators.nix</literal>.
</para>
</section>
<section xml:id="sec-fhs-environments">
<title>buildFHSUserEnv</title>
<para>
<function>buildFHSUserEnv</function> provides a way to build and run
FHS-compatible lightweight sandboxes. It creates an isolated root with
bound <filename>/nix/store</filename>, so its footprint in terms of disk
space needed is quite small. This allows one to run software which is hard or
unfeasible to patch for NixOS -- 3rd-party source trees with FHS assumptions,
games distributed as tarballs, software with integrity checking and/or external
self-updated binaries. It uses the Linux namespaces feature to create
temporary lightweight environments which are destroyed after all child
processes exit, without requiring root privileges. Accepted arguments are:
</para>
<variablelist>
<varlistentry>
<term><literal>name</literal></term>
<listitem><para>Environment name.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>targetPkgs</literal></term>
<listitem><para>Packages to be installed for the main host's architecture
(i.e. x86_64 on x86_64 installations). Along with libraries binaries are also
installed.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>multiPkgs</literal></term>
<listitem><para>Packages to be installed for all architectures supported by
a host (i.e. i686 and x86_64 on x86_64 installations). Only libraries are
installed by default.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>extraBuildCommands</literal></term>
<listitem><para>Additional commands to be executed for finalizing the
directory structure.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>extraBuildCommandsMulti</literal></term>
<listitem><para>Like <literal>extraBuildCommands</literal>, but
executed only on multilib architectures.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>extraOutputsToInstall</literal></term>
<listitem><para>Additional derivation outputs to be linked for both
target and multi-architecture packages.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>extraInstallCommands</literal></term>
<listitem><para>Additional commands to be executed for finalizing the
derivation with runner script.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>runScript</literal></term>
<listitem><para>A command that would be executed inside the sandbox and
passed all the command line arguments. It defaults to
<literal>bash</literal>.</para></listitem>
</varlistentry>
</variablelist>
<para>
One can create a simple environment using a <literal>shell.nix</literal>
like this:
</para>
<programlisting><![CDATA[
{ pkgs ? import <nixpkgs> {} }:
(pkgs.buildFHSUserEnv {
name = "simple-x11-env";
targetPkgs = pkgs: (with pkgs;
[ udev
alsaLib
]) ++ (with pkgs.xorg;
[ libX11
libXcursor
libXrandr
]);
multiPkgs = pkgs: (with pkgs;
[ udev
alsaLib
]);
runScript = "bash";
}).env
]]></programlisting>
<para>
Running <literal>nix-shell</literal> would then drop you into a shell with
these libraries and binaries available. You can use this to run
closed-source applications which expect FHS structure without hassles:
simply change <literal>runScript</literal> to the application path,
e.g. <filename>./bin/start.sh</filename> -- relative paths are supported.
</para>
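<para>
  For instance, matching the example path above:
  <programlisting>runScript = "./bin/start.sh";</programlisting>
</para>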
</section>
<section xml:id="sec-pkgs-dockerTools">
<title>pkgs.dockerTools</title>
<para>
<varname>pkgs.dockerTools</varname> is a set of functions for creating and
manipulating Docker images according to the
<link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#docker-image-specification-v120">
Docker Image Specification v1.2.0
</link>. Docker itself is not used to perform any of the operations done by these
functions.
</para>
<warning>
<para>
The <varname>dockerTools</varname> API is unstable and may be subject to
backwards-incompatible changes in the future.
</para>
</warning>
<section xml:id="ssec-pkgs-dockerTools-buildImage">
<title>buildImage</title>
<para>
This function is analogous to the <command>docker build</command> command,
in that it can be used to build a Docker-compatible repository tarball containing
a single image with one or multiple layers. As such, the result
is suitable for being loaded in Docker with <command>docker load</command>.
</para>
<para>
The parameters of <varname>buildImage</varname> with relative example values are
described below:
</para>
<example xml:id='ex-dockerTools-buildImage'><title>Docker build</title>
<programlisting>
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${stdenv.shell}
mkdir -p /data
'';
config = { <co xml:id='ex-dockerTools-buildImage-8' />
Cmd = [ "/bin/redis-server" ];
WorkingDir = "/data";
Volumes = {
"/data" = {};
};
};
}
</programlisting>
</example>
<para>The above example will build a Docker image <literal>redis/latest</literal>
from the given base image. Loading and running this image in Docker results in
<literal>redis-server</literal> being started automatically.
</para>
<calloutlist>
<callout arearefs='ex-dockerTools-buildImage-1'>
<para>
<varname>name</varname> specifies the name of the resulting image.
This is the only required argument for <varname>buildImage</varname>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-2'>
<para>
<varname>tag</varname> specifies the tag of the resulting image.
By default it's <literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-3'>
<para>
<varname>fromImage</varname> is the repository tarball containing the base image.
It must be a valid Docker image, such as exported by <command>docker save</command>.
By default it's <literal>null</literal>, which can be seen as equivalent
to <literal>FROM scratch</literal> of a <filename>Dockerfile</filename>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-4'>
<para>
<varname>fromImageName</varname> can be used to further specify
the base image within the repository, in case it contains multiple images.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first image available
in the repository.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-5'>
<para>
<varname>fromImageTag</varname> can be used to further specify the tag
of the base image within the repository, in case an image contains multiple tags.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first tag available for the base image.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-6'>
<para>
<varname>contents</varname> is a derivation that will be copied in the new
layer of the resulting image. This can be similarly seen as
<command>ADD contents/ /</command> in a <filename>Dockerfile</filename>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-runAsRoot'>
<para>
<varname>runAsRoot</varname> is a bash script that will run as root
in an environment that overlays the existing layers of the base image with
the new resulting layer, including the previously copied
<varname>contents</varname> derivation.
This can be similarly seen as
<command>RUN ...</command> in a <filename>Dockerfile</filename>.
<note>
<para>
Using this parameter requires the <literal>kvm</literal>
device to be available.
</para>
</note>
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-8'>
<para>
<varname>config</varname> is used to specify the configuration of the
containers that will be started off the built image in Docker.
The available options are listed in the
<link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions">
Docker Image Specification v1.2.0
</link>.
</para>
</callout>
</calloutlist>
<para>
After the new layer has been created, its closure
(to which <varname>contents</varname>, <varname>config</varname> and
<varname>runAsRoot</varname> contribute) will be copied in the layer itself.
Only new dependencies that are not already in the existing layers will be copied.
</para>
<para>
At the end of the process, only one new single layer will be produced and
added to the resulting image.
</para>
<para>
The resulting repository will only list the single image
<varname>image/tag</varname>. In the case of <xref linkend='ex-dockerTools-buildImage'/>
it would be <varname>redis/latest</varname>.
</para>
<para>
It is possible to inspect the arguments with which an image was built
using its <varname>buildArgs</varname> attribute.
</para>
<note>
<para>
If you see errors similar to <literal>getProtocolByName: does not exist (no such protocol name: tcp)</literal>
you may need to add <literal>pkgs.iana-etc</literal> to <varname>contents</varname>.
</para>
</note>
<note>
<para>
If you see errors similar to <literal>Error_Protocol ("certificate has unknown CA",True,UnknownCa)</literal>
you may need to add <literal>pkgs.cacert</literal> to <varname>contents</varname>.
</para>
</note>
</section>
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry">
<title>pullImage</title>
<para>
This function is analogous to the <command>docker pull</command> command,
in that it can be used to fetch a Docker image from a Docker registry.
Currently only registry <literal>v1</literal> is supported.
By default <link xlink:href="https://hub.docker.com/">Docker Hub</link>
is used to pull images.
</para>
<para>
Its parameters are described in the example below:
</para>
<example xml:id='ex-dockerTools-pullImage'><title>Docker pull</title>
<programlisting>
pullImage {
imageName = "debian"; <co xml:id='ex-dockerTools-pullImage-1' />
imageTag = "jessie"; <co xml:id='ex-dockerTools-pullImage-2' />
imageId = null; <co xml:id='ex-dockerTools-pullImage-3' />
sha256 = "1bhw5hkz6chrnrih0ymjbmn69hyfriza2lr550xyvpdrnbzr4gk2"; <co xml:id='ex-dockerTools-pullImage-4' />
indexUrl = "https://index.docker.io"; <co xml:id='ex-dockerTools-pullImage-5' />
registryVersion = "v1";
}
</programlisting>
</example>
<calloutlist>
<callout arearefs='ex-dockerTools-pullImage-1'>
<para>
<varname>imageName</varname> specifies the name of the image to be downloaded,
which can also include the registry namespace (e.g. <literal>library/debian</literal>).
This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-2'>
<para>
<varname>imageTag</varname> specifies the tag of the image to be downloaded.
By default it's <literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-3'>
<para>
<varname>imageId</varname>: if specified, this exact image will be fetched, instead
of <varname>imageName/imageTag</varname>. However, the resulting repository
will still be named <varname>imageName/imageTag</varname>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-4'>
<para>
<varname>sha256</varname> is the checksum of the whole fetched image.
This argument is required.
</para>
<note>
<para>The checksum is computed on the unpacked directory, not on the final tarball.</para>
</note>
</callout>
<callout arearefs='ex-dockerTools-pullImage-5'>
<para>
In the above example the default values are shown for the variables
<varname>indexUrl</varname> and <varname>registryVersion</varname>.
Hence by default the Docker.io registry is used to pull the images.
</para>
</callout>
</calloutlist>
</section>
<section xml:id="ssec-pkgs-dockerTools-exportImage">
<title>exportImage</title>
<para>
This function is analogous to the <command>docker export</command> command,
in that it can be used to flatten a Docker image that contains multiple layers.
It is in fact the result of the merge of all the layers of the image.
As such, the result is suitable for being imported in Docker
with <command>docker import</command>.
</para>
<note>
<para>
Using this function requires the <literal>kvm</literal>
device to be available.
</para>
</note>
<para>
The parameters of <varname>exportImage</varname> are the following:
</para>
<example xml:id='ex-dockerTools-exportImage'><title>Docker export</title>
<programlisting>
exportImage {
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
name = someLayeredImage.name;
}
</programlisting>
</example>
<para>
The parameters relative to the base image have the same synopsis as
described in <xref linkend='ssec-pkgs-dockerTools-buildImage'/>, except that
<varname>fromImage</varname> is the only required argument in this case.
</para>
<para>
The <varname>name</varname> argument is the name of the derivation output,
which defaults to <varname>fromImage.name</varname>.
</para>
</section>
<section xml:id="ssec-pkgs-dockerTools-shadowSetup">
<title>shadowSetup</title>
<para>
This constant string is a helper for setting up the base files for managing
users and groups, only if such files don't exist already.
It is suitable for being used in a
<varname>runAsRoot</varname> <xref linkend='ex-dockerTools-buildImage-runAsRoot'/> script for cases like
in the example below:
</para>
<example xml:id='ex-dockerTools-shadowSetup'><title>Shadow base files</title>
<programlisting>
buildImage {
name = "shadow-basic";
runAsRoot = ''
#!${stdenv.shell}
${shadowSetup}
groupadd -r redis
useradd -r -g redis redis
mkdir /data
chown redis:redis /data
'';
}
</programlisting>
</example>
<para>
Creating base files like <literal>/etc/passwd</literal> or
<literal>/etc/login.defs</literal> is necessary for shadow-utils to
manipulate users and groups.
</para>
</section>
</section>
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="chap-functions">
<title>Functions reference</title>
<para>
The nixpkgs repository has several utility functions to manipulate Nix
expressions.
</para>
<xi:include href="functions/library.xml" />
<xi:include href="functions/overrides.xml" />
<xi:include href="functions/generators.xml" />
<xi:include href="functions/debug.xml" />
<xi:include href="functions/fhs-environments.xml" />
<xi:include href="functions/shell.xml" />
<xi:include href="functions/dockertools.xml" />
<xi:include href="functions/prefer-remote-fetch.xml" />
</chapter>

21
doc/functions/debug.xml Normal file
View File

@ -0,0 +1,21 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-debug">
<title>Debugging Nix Expressions</title>
<para>
Nix is a unityped, dynamic language; this means every value can potentially
appear anywhere. Since it is also non-strict, evaluation order and what
ultimately is evaluated might surprise you. Therefore it is important to be
able to debug Nix expressions.
</para>
<para>
In the <literal>lib/debug.nix</literal> file you will find a number of
functions that help (pretty-)printing values while evaluation is running. You
can even specify how deep these values should be printed recursively, and
transform them on the fly. Please consult the docstrings in
<literal>lib/debug.nix</literal> for usage information.
</para>
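 <para>
  For instance (a sketch; the value being traced is illustrative),
  <varname>lib.debug.traceValSeq</varname> deeply evaluates a value, prints it
  to stderr, and returns it unchanged:
  <programlisting>
with import &lt;nixpkgs/lib&gt;;
debug.traceValSeq { a = 1; b = [ 2 3 ]; }
# trace: { a = 1; b = [ 2 3 ]; }
# => { a = 1; b = [ 2 3 ]; }
</programlisting>
 </para>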
</section>

View File

@ -0,0 +1,564 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-pkgs-dockerTools">
<title>pkgs.dockerTools</title>
<para>
<varname>pkgs.dockerTools</varname> is a set of functions for creating and
manipulating Docker images according to the
<link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#docker-image-specification-v120">
Docker Image Specification v1.2.0 </link>. Docker itself is not used to
perform any of the operations done by these functions.
</para>
<warning>
<para>
The <varname>dockerTools</varname> API is unstable and may be subject to
backwards-incompatible changes in the future.
</para>
</warning>
<section xml:id="ssec-pkgs-dockerTools-buildImage">
<title>buildImage</title>
<para>
This function is analogous to the <command>docker build</command> command,
in that it can be used to build a Docker-compatible repository tarball containing
a single image with one or multiple layers. As such, the result is suitable
for being loaded in Docker with <command>docker load</command>.
</para>
<para>
The parameters of <varname>buildImage</varname> with relative example values
are described below:
</para>
<example xml:id='ex-dockerTools-buildImage'>
<title>Docker build</title>
<programlisting>
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${stdenv.shell}
mkdir -p /data
'';
config = { <co xml:id='ex-dockerTools-buildImage-8' />
Cmd = [ "/bin/redis-server" ];
WorkingDir = "/data";
Volumes = {
"/data" = {};
};
};
}
</programlisting>
</example>
<para>
The above example will build a Docker image <literal>redis/latest</literal>
from the given base image. Loading and running this image in Docker results
in <literal>redis-server</literal> being started automatically.
</para>
<calloutlist>
<callout arearefs='ex-dockerTools-buildImage-1'>
<para>
<varname>name</varname> specifies the name of the resulting image. This is
the only required argument for <varname>buildImage</varname>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-2'>
<para>
<varname>tag</varname> specifies the tag of the resulting image. By
default it's <literal>null</literal>, which indicates that the nix output
hash will be used as tag.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-3'>
<para>
<varname>fromImage</varname> is the repository tarball containing the base
image. It must be a valid Docker image, such as exported by
<command>docker save</command>. By default it's <literal>null</literal>,
which can be seen as equivalent to <literal>FROM scratch</literal> of a
<filename>Dockerfile</filename>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-4'>
<para>
<varname>fromImageName</varname> can be used to further specify the base
image within the repository, in case it contains multiple images. By
default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first image available in the
repository.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-5'>
<para>
<varname>fromImageTag</varname> can be used to further specify the tag of
the base image within the repository, in case an image contains multiple
tags. By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first tag available for the
base image.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-6'>
<para>
<varname>contents</varname> is a derivation that will be copied in the new
layer of the resulting image. This can be similarly seen as <command>ADD
contents/ /</command> in a <filename>Dockerfile</filename>. By default
it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-runAsRoot'>
<para>
<varname>runAsRoot</varname> is a bash script that will run as root in an
environment that overlays the existing layers of the base image with the
new resulting layer, including the previously copied
<varname>contents</varname> derivation. This can be similarly seen as
<command>RUN ...</command> in a <filename>Dockerfile</filename>.
<note>
<para>
Using this parameter requires the <literal>kvm</literal> device to be
available.
</para>
</note>
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-8'>
<para>
<varname>config</varname> is used to specify the configuration of the
containers that will be started off the built image in Docker. The
available options are listed in the
<link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions">
Docker Image Specification v1.2.0 </link>.
</para>
</callout>
</calloutlist>
<para>
After the new layer has been created, its closure (to which
<varname>contents</varname>, <varname>config</varname> and
<varname>runAsRoot</varname> contribute) will be copied in the layer itself.
Only new dependencies that are not already in the existing layers will be
copied.
</para>
<para>
At the end of the process, only one new single layer will be produced and
added to the resulting image.
</para>
<para>
The resulting repository will only list the single image
<varname>image/tag</varname>. In the case of
<xref linkend='ex-dockerTools-buildImage'/> it would be
<varname>redis/latest</varname>.
</para>
<para>
It is possible to inspect the arguments with which an image was built using
its <varname>buildArgs</varname> attribute.
</para>
<note>
<para>
If you see errors similar to <literal>getProtocolByName: does not exist (no
such protocol name: tcp)</literal> you may need to add
<literal>pkgs.iana-etc</literal> to <varname>contents</varname>.
</para>
</note>
<note>
<para>
If you see errors similar to <literal>Error_Protocol ("certificate has
unknown CA",True,UnknownCa)</literal> you may need to add
<literal>pkgs.cacert</literal> to <varname>contents</varname>.
</para>
</note>
<example xml:id="example-pkgs-dockerTools-buildImage-creation-date">
<title>Impurely Defining a Docker Layer's Creation Date</title>
<para>
By default <function>buildImage</function> will use a static date of one
second past the UNIX Epoch. This allows <function>buildImage</function> to
produce binary reproducible images. When listing images with
<command>docker list images</command>, the newly created images will be
listed like this:
</para>
<screen><![CDATA[
$ docker image list
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest 08c791c7846e 48 years ago 25.2MB
]]></screen>
<para>
You can break binary reproducibility but have a sorted, meaningful
<literal>CREATED</literal> column by setting <literal>created</literal> to
<literal>now</literal>.
</para>
<programlisting><![CDATA[
pkgs.dockerTools.buildImage {
name = "hello";
tag = "latest";
created = "now";
contents = pkgs.hello;
config.Cmd = [ "/bin/hello" ];
}
]]></programlisting>
<para>
and now the Docker CLI will display a reasonable date and sort the images
as expected:
<screen><![CDATA[
$ docker image list
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest de2bf4786de6 About a minute ago 25.2MB
]]></screen>
however, the produced images will not be binary reproducible.
</para>
</example>
</section>
<section xml:id="ssec-pkgs-dockerTools-buildLayeredImage">
<title>buildLayeredImage</title>
<para>
Create a Docker image with many of the store paths being on their own layer
to improve sharing between images.
</para>
<variablelist>
<varlistentry>
<term>
<varname>name</varname>
</term>
<listitem>
<para>
The name of the resulting image.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>tag</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
Tag of the generated image.
</para>
<para>
<emphasis>Default:</emphasis> the output path's hash
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>contents</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
Top level paths in the container. Either a single derivation, or a list
of derivations.
</para>
<para>
<emphasis>Default:</emphasis> <literal>[]</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>config</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
       Run-time configuration of the container. A full list of the options is
       available in the
<link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions">
Docker Image Specification v1.2.0 </link>.
</para>
<para>
<emphasis>Default:</emphasis> <literal>{}</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>created</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
       Date and time the layers were created. The same
       <literal>now</literal> special value supported by
       <literal>buildImage</literal> can be used here.
</para>
<para>
<emphasis>Default:</emphasis> <literal>1970-01-01T00:00:01Z</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>maxLayers</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
Maximum number of layers to create.
</para>
<para>
<emphasis>Default:</emphasis> <literal>24</literal>
</para>
</listitem>
</varlistentry>
</variablelist>
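  <para>
   A sketch combining the parameters above; the name, command, and layer
   count are only illustrative:
  </para>
<programlisting>
pkgs.dockerTools.buildLayeredImage {
  name = "hello-layered";
  tag = "latest";
  contents = [ pkgs.hello ];
  config.Cmd = [ "${pkgs.hello}/bin/hello" ];
  created = "now";   # breaks binary reproducibility, see buildImage above
  maxLayers = 100;
}
</programlisting>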
<section xml:id="dockerTools-buildLayeredImage-arg-contents">
<title>Behavior of <varname>contents</varname> in the final image</title>
<para>
Each path directly listed in <varname>contents</varname> will have a
symlink in the root of the image.
</para>
<para>
For example:
<programlisting><![CDATA[
pkgs.dockerTools.buildLayeredImage {
name = "hello";
contents = [ pkgs.hello ];
}
]]></programlisting>
will create symlinks for all the paths in the <literal>hello</literal>
package:
<screen><![CDATA[
/bin/hello -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/bin/hello
/share/info/hello.info -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/info/hello.info
/share/locale/bg/LC_MESSAGES/hello.mo -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/locale/bg/LC_MESSAGES/hello.mo
]]></screen>
</para>
</section>
<section xml:id="dockerTools-buildLayeredImage-arg-config">
<title>Automatic inclusion of <varname>config</varname> references</title>
<para>
The closure of <varname>config</varname> is automatically included in the
closure of the final image.
</para>
<para>
This allows you to make very simple Docker images with very little code.
This container will start up and run <command>hello</command>:
<programlisting><![CDATA[
pkgs.dockerTools.buildLayeredImage {
name = "hello";
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
}
]]></programlisting>
</para>
</section>
<section xml:id="dockerTools-buildLayeredImage-arg-maxLayers">
<title>Adjusting <varname>maxLayers</varname></title>
<para>
Increasing the <varname>maxLayers</varname> increases the number of layers
which have a chance to be shared between different images.
</para>
<para>
    Modern Docker installations support up to 128 layers; however, older
    versions support as few as 42.
</para>
<para>
If the produced image will not be extended by other Docker builds, it is
    safe to set <varname>maxLayers</varname> to <literal>128</literal>.
    However, it will then be impossible to extend the image further.
</para>
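   <para>
    For example, a sketch of a leaf image (one that is not meant to serve as
    a base for further builds) taking advantage of the full layer budget:
   </para>
<programlisting>
pkgs.dockerTools.buildLayeredImage {
  name = "hello";
  contents = [ pkgs.hello ];
  # Safe only because this image will not be extended by other builds.
  maxLayers = 128;
}
</programlisting>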
<para>
The first (<literal>maxLayers-2</literal>) most "popular" paths will have
their own individual layers, then layer #<literal>maxLayers-1</literal>
will contain all the remaining "unpopular" paths, and finally layer
#<literal>maxLayers</literal> will contain the Image configuration.
</para>
<para>
    Docker's layers are not inherently ordered; they are content-addressable
    and are not explicitly layered until they are composed into an image.
</para>
</section>
</section>
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry">
<title>pullImage</title>
<para>
This function is analogous to the <command>docker pull</command> command, in
   that it can be used to pull a Docker image from a Docker registry. By default
<link xlink:href="https://hub.docker.com/">Docker Hub</link> is used to pull
images.
</para>
<para>
Its parameters are described in the example below:
</para>
<example xml:id='ex-dockerTools-pullImage'>
<title>Docker pull</title>
<programlisting>
pullImage {
imageName = "nixos/nix"; <co xml:id='ex-dockerTools-pullImage-1' />
imageDigest = "sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b"; <co xml:id='ex-dockerTools-pullImage-2' />
finalImageTag = "1.11"; <co xml:id='ex-dockerTools-pullImage-3' />
sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8"; <co xml:id='ex-dockerTools-pullImage-4' />
os = "linux"; <co xml:id='ex-dockerTools-pullImage-5' />
arch = "x86_64"; <co xml:id='ex-dockerTools-pullImage-6' />
}
</programlisting>
</example>
<calloutlist>
<callout arearefs='ex-dockerTools-pullImage-1'>
<para>
<varname>imageName</varname> specifies the name of the image to be
downloaded, which can also include the registry namespace (e.g.
<literal>nixos</literal>). This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-2'>
<para>
<varname>imageDigest</varname> specifies the digest of the image to be
downloaded. Skopeo can be used to get the digest of an image, with its
<varname>inspect</varname> subcommand. Since a given
<varname>imageName</varname> may transparently refer to a manifest list of
images which support multiple architectures and/or operating systems,
     supply the <option>--override-os</option> and
     <option>--override-arch</option> arguments to specify
exactly which image you want. By default it will match the OS and
architecture of the host the command is run on.
<programlisting>
$ nix-shell --packages skopeo jq --command "skopeo --override-os linux --override-arch x86_64 inspect docker://docker.io/nixos/nix:1.11 | jq -r '.Digest'"
sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b
</programlisting>
This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-3'>
<para>
     <varname>finalImageTag</varname>, if specified, is the tag of the image
     to be created. Note that it is never used to fetch the image, since we
prefer to rely on the immutable digest ID. By default it's
<literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-4'>
<para>
<varname>sha256</varname> is the checksum of the whole fetched image. This
argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-5'>
<para>
<varname>os</varname>, if specified, is the operating system of the
fetched image. By default it's <literal>linux</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-6'>
<para>
     <varname>arch</varname>, if specified, is the CPU architecture of the
fetched image. By default it's <literal>x86_64</literal>.
</para>
</callout>
</calloutlist>
</section>
<section xml:id="ssec-pkgs-dockerTools-exportImage">
<title>exportImage</title>
<para>
This function is analogous to the <command>docker export</command> command,
   in that it can be used to flatten a Docker image that contains multiple layers. It
is in fact the result of the merge of all the layers of the image. As such,
the result is suitable for being imported in Docker with <command>docker
import</command>.
</para>
<note>
<para>
Using this function requires the <literal>kvm</literal> device to be
available.
</para>
</note>
<para>
The parameters of <varname>exportImage</varname> are the following:
</para>
<example xml:id='ex-dockerTools-exportImage'>
<title>Docker export</title>
<programlisting>
exportImage {
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
name = someLayeredImage.name;
}
</programlisting>
</example>
<para>
   The parameters relating to the base image have the same meaning as
described in <xref linkend='ssec-pkgs-dockerTools-buildImage'/>, except that
<varname>fromImage</varname> is the only required argument in this case.
</para>
<para>
The <varname>name</varname> argument is the name of the derivation output,
which defaults to <varname>fromImage.name</varname>.
</para>
</section>
<section xml:id="ssec-pkgs-dockerTools-shadowSetup">
<title>shadowSetup</title>
<para>
This constant string is a helper for setting up the base files for managing
users and groups, only if such files don't exist already. It is suitable for
being used in a <varname>runAsRoot</varname>
   <xref linkend='ex-dockerTools-buildImage-runAsRoot'/> script, as in the
   example below:
</para>
<example xml:id='ex-dockerTools-shadowSetup'>
<title>Shadow base files</title>
<programlisting>
buildImage {
name = "shadow-basic";
runAsRoot = ''
#!${stdenv.shell}
${shadowSetup}
groupadd -r redis
useradd -r -g redis redis
mkdir /data
chown redis:redis /data
'';
}
</programlisting>
</example>
<para>
Creating base files like <literal>/etc/passwd</literal> or
   <literal>/etc/login.defs</literal> is necessary for shadow-utils to
manipulate users and groups.
</para>
</section>
</section>

View File

@ -0,0 +1,142 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-fhs-environments">
<title>buildFHSUserEnv</title>
<para>
<function>buildFHSUserEnv</function> provides a way to build and run
  FHS-compatible lightweight sandboxes. It creates an isolated root with a
  bind-mounted <filename>/nix/store</filename>, so its disk-space footprint is
  quite small. This allows one to run software which is hard or infeasible to
  patch for NixOS -- 3rd-party source trees with FHS assumptions, games
  distributed as tarballs, software with integrity checking and/or external
  self-updating binaries. It uses the Linux namespaces feature to create
  temporary lightweight environments which are destroyed after all child
  processes exit, without requiring root privileges. Accepted arguments are:
</para>
<variablelist>
<varlistentry>
<term>
<literal>name</literal>
</term>
<listitem>
<para>
Environment name.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>targetPkgs</literal>
</term>
<listitem>
<para>
Packages to be installed for the main host's architecture (i.e. x86_64 on
     x86_64 installations). Along with libraries, binaries are also installed.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>multiPkgs</literal>
</term>
<listitem>
<para>
Packages to be installed for all architectures supported by a host (i.e.
i686 and x86_64 on x86_64 installations). Only libraries are installed by
default.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>extraBuildCommands</literal>
</term>
<listitem>
<para>
Additional commands to be executed for finalizing the directory structure.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>extraBuildCommandsMulti</literal>
</term>
<listitem>
<para>
Like <literal>extraBuildCommands</literal>, but executed only on multilib
architectures.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>extraOutputsToInstall</literal>
</term>
<listitem>
<para>
Additional derivation outputs to be linked for both target and
multi-architecture packages.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>extraInstallCommands</literal>
</term>
<listitem>
<para>
Additional commands to be executed for finalizing the derivation with
runner script.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>runScript</literal>
</term>
<listitem>
<para>
     A command that is executed inside the sandbox and is passed all the
     command-line arguments. It defaults to <literal>bash</literal>.
</para>
</listitem>
</varlistentry>
</variablelist>
<para>
One can create a simple environment using a <literal>shell.nix</literal> like
  this:
</para>
<programlisting><![CDATA[
{ pkgs ? import <nixpkgs> {} }:
(pkgs.buildFHSUserEnv {
name = "simple-x11-env";
targetPkgs = pkgs: (with pkgs;
[ udev
alsaLib
]) ++ (with pkgs.xorg;
[ libX11
libXcursor
libXrandr
]);
multiPkgs = pkgs: (with pkgs;
[ udev
alsaLib
]);
runScript = "bash";
}).env
]]></programlisting>
<para>
  Running <command>nix-shell</command> would then drop you into a shell with
  these libraries and binaries available. You can use this to run
  closed-source applications which expect an FHS structure without hassle:
  simply change <literal>runScript</literal> to the application path, e.g.
  <filename>./bin/start.sh</filename> -- relative paths are supported.
</para>
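 <para>
  For instance, a hypothetical sketch for a vendored application that ships
  its own launcher script (the environment name and the
  <filename>./bin/start.sh</filename> path are assumptions):
 </para>
<programlisting><![CDATA[
{ pkgs ? import <nixpkgs> {} }:
(pkgs.buildFHSUserEnv {
  name = "vendor-app-env";
  targetPkgs = pkgs: with pkgs; [ zlib ];
  # Launch the application's own start script instead of bash;
  # relative paths are resolved from the invocation directory.
  runScript = "./bin/start.sh";
}).env
]]></programlisting>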
</section>

View File

@ -0,0 +1,89 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-generators">
<title>Generators</title>
<para>
  Generators are functions that create file formats from Nix data structures,
  e.g. for configuration files. There are generators available for:
  <literal>INI</literal>, <literal>JSON</literal> and <literal>YAML</literal>.
</para>
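 <para>
  For instance, a quick sketch of the JSON generator (the INI generator is
  shown in more detail below):
 </para>
<programlisting>
with lib;
# toJSON takes a (currently empty) attrset of options and then the value.
generators.toJSON {} {
  host = "localhost";
  port = 42;
}
# => "{\"host\":\"localhost\",\"port\":42}"
</programlisting>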
<para>
All generators follow a similar call interface: <code>generatorName
configFunctions data</code>, where <literal>configFunctions</literal> is an
attrset of user-defined functions that format nested parts of the content.
They each have common defaults, so often they do not need to be set manually.
An example is <code>mkSectionName ? (name: libStr.escape [ "[" "]" ]
name)</code> from the <literal>INI</literal> generator. It receives the name
of a section and sanitizes it. The default <literal>mkSectionName</literal>
escapes <literal>[</literal> and <literal>]</literal> with a backslash.
</para>
<para>
Generators can be fine-tuned to produce exactly the file format required by
your application/service. One example is an INI-file format which uses
  <literal>:</literal> as the separator, the strings
<literal>"yes"</literal>/<literal>"no"</literal> as boolean values and
requires all string values to be quoted:
</para>
<programlisting>
with lib;
let
customToINI = generators.toINI {
# specifies how to format a key/value pair
mkKeyValue = generators.mkKeyValueDefault {
# specifies the generated string for a subset of nix values
mkValueString = v:
if v == true then ''"yes"''
else if v == false then ''"no"''
else if isString v then ''"${v}"''
      # and delegates all other values to the default generator
else generators.mkValueStringDefault {} v;
} ":";
};
# the INI file can now be given as plain old nix values
in customToINI {
main = {
pushinfo = true;
autopush = false;
host = "localhost";
port = 42;
};
mergetool = {
merge = "diff3";
};
}
</programlisting>
<para>
  This will produce the following INI file as a Nix string:
</para>
<programlisting>
[main]
autopush:"no"
host:"localhost"
port:42
pushinfo:"yes"
str\:ange:"very::strange"
[mergetool]
merge:"diff3"
</programlisting>
<note>
<para>
   Nix store paths can be converted to strings by interpolating the derivation
   inside a string, like so: <code>"${drv}"</code>.
</para>
</note>
<para>
Detailed documentation for each generator can be found in
<literal>lib/generators.nix</literal>.
</para>
</section>

24
doc/functions/library.xml Normal file
View File

@ -0,0 +1,24 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-functions-library">
<title>Nixpkgs Library Functions</title>
<para>
Nixpkgs provides a standard library at <varname>pkgs.lib</varname>, or
through <code>import &lt;nixpkgs/lib&gt;</code>.
</para>
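 <para>
  A quick sketch showing both access paths; they evaluate to the same library:
 </para>
<programlisting>
let
  pkgs = import &lt;nixpkgs&gt; {};
  lib = import &lt;nixpkgs/lib&gt;;
in
  # Both expressions call the same function from the list library.
  [ (pkgs.lib.lists.last [ 1 2 3 ]) (lib.lists.last [ 1 2 3 ]) ]
# => [ 3 3 ]
</programlisting>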
<xi:include href="./library/asserts.xml" />
<xi:include href="./library/attrsets.xml" />
<!-- These docs are generated via nixdoc. To add another generated
library function file to this list, the file
`lib-function-docs.nix` must also be updated. -->
<xi:include href="./library/generated/strings.xml" />
<xi:include href="./library/generated/trivial.xml" />
<xi:include href="./library/generated/lists.xml" />
<xi:include href="./library/generated/debug.xml" />
<xi:include href="./library/generated/options.xml" />
</section>

View File

@ -0,0 +1,117 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-functions-library-asserts">
<title>Assert functions</title>
<section xml:id="function-library-lib.asserts.assertMsg">
<title><function>lib.asserts.assertMsg</function></title>
<subtitle><literal>assertMsg :: Bool -> String -> Bool</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.asserts.assertMsg" />
<para>
Print a trace message if <literal>pred</literal> is false.
</para>
<para>
Intended to be used to augment asserts with helpful error messages.
</para>
<variablelist>
<varlistentry>
<term>
<varname>pred</varname>
</term>
<listitem>
<para>
Condition under which the <varname>msg</varname> should
<emphasis>not</emphasis> be printed.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>msg</varname>
</term>
<listitem>
<para>
Message to print.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.asserts.assertMsg-example-false">
<title>Printing when the predicate is false</title>
<programlisting><![CDATA[
assert lib.asserts.assertMsg ("foo" == "bar") "foo is not bar, silly"
stderr> trace: foo is not bar, silly
stderr> assert failed
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.asserts.assertOneOf">
<title><function>lib.asserts.assertOneOf</function></title>
<subtitle><literal>assertOneOf :: String -> String ->
StringList -> Bool</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.asserts.assertOneOf" />
<para>
Specialized <function>asserts.assertMsg</function> for checking if
<varname>val</varname> is one of the elements of <varname>xs</varname>.
Useful for checking enums.
</para>
<variablelist>
<varlistentry>
<term>
<varname>name</varname>
</term>
<listitem>
<para>
The name of the variable the user entered <varname>val</varname> into,
for inclusion in the error message.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>val</varname>
</term>
<listitem>
<para>
      The value the user provided, to be compared against the values in
<varname>xs</varname>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>xs</varname>
</term>
<listitem>
<para>
The list of valid values.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.asserts.assertOneOf-example">
<title>Ensuring a user provided a possible value</title>
<programlisting><![CDATA[
let sslLibrary = "bearssl";
in lib.asserts.assertOneOf "sslLibrary" sslLibrary [ "openssl" "libressl" ];
=> false
stderr> trace: sslLibrary must be one of "openssl", "libressl", but is: "bearssl"
]]></programlisting>
</example>
</section>
</section>

File diff suppressed because it is too large Load Diff

212
doc/functions/overrides.xml Normal file
View File

@ -0,0 +1,212 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-overrides">
<title>Overriding</title>
<para>
Sometimes one wants to override parts of <literal>nixpkgs</literal>, e.g.
  derivation attributes or the results of derivations.
</para>
<para>
These functions are used to make changes to packages, returning only single
packages. <link xlink:href="#chap-overlays">Overlays</link>, on the other
hand, can be used to combine the overridden packages across the entire
package set of Nixpkgs.
</para>
<section xml:id="sec-pkg-override">
<title>&lt;pkg&gt;.override</title>
<para>
The function <varname>override</varname> is usually available for all the
derivations in the nixpkgs expression (<varname>pkgs</varname>).
</para>
<para>
It is used to override the arguments passed to a function.
</para>
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<!-- TODO: move below programlisting to a new section about extending and overlays
and reference it
-->
<programlisting>
import pkgs.path { overlays = [ (self: super: {
  foo = super.foo.override { barSupport = true; };
}) ]; }
</programlisting>
<programlisting>
mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
};
</programlisting>
</para>
<para>
In the first example, <varname>pkgs.foo</varname> is the result of a
function call with some default arguments, usually a derivation. Using
<varname>pkgs.foo.override</varname> will call the same function with the
given new arguments.
</para>
</section>
<section xml:id="sec-pkg-overrideAttrs">
<title>&lt;pkg&gt;.overrideAttrs</title>
<para>
The function <varname>overrideAttrs</varname> allows overriding the
attribute set passed to a <varname>stdenv.mkDerivation</varname> call,
producing a new derivation based on the original one. This function is
available on all derivations produced by the
<varname>stdenv.mkDerivation</varname> function, which is most packages in
the nixpkgs expression <varname>pkgs</varname>.
</para>
<para>
Example usage:
<programlisting>
helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});
</programlisting>
</para>
<para>
In the above example, the <varname>separateDebugInfo</varname> attribute is
overridden to be true, thus building debug info for
<varname>helloWithDebug</varname>, while all other attributes will be
retained from the original <varname>hello</varname> package.
</para>
<para>
The argument <varname>oldAttrs</varname> is conventionally used to refer to
the attr set originally passed to <varname>stdenv.mkDerivation</varname>.
</para>
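  <para>
   For instance, a sketch that uses <varname>oldAttrs</varname> to extend an
   existing attribute rather than replace it (the patch file is a
   hypothetical placeholder):
  </para>
<programlisting>
helloWithPatch = pkgs.hello.overrideAttrs (oldAttrs: {
  # Keep whatever patches the original derivation already had and
  # append a hypothetical local one.
  patches = (oldAttrs.patches or []) ++ [ ./hello-fix.patch ];
});
</programlisting>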
<note>
<para>
     Note that <varname>separateDebugInfo</varname> is processed only by the
     <varname>stdenv.mkDerivation</varname> function, not the generated, raw
     Nix derivation. Thus, using <varname>overrideDerivation</varname> will
     not work in this case, as it overrides only the attributes of the final
     derivation. This is one reason why <varname>overrideAttrs</varname>
     should be preferred in (almost) all cases to
     <varname>overrideDerivation</varname>: it allows
     <varname>stdenv.mkDerivation</varname> to process the input arguments,
     and it is easier to use, since you can refer to the same attribute names
     you see in your Nix code instead of the generated ones (e.g.
     <varname>buildInputs</varname> vs <varname>nativeBuildInputs</varname>),
     with less typing.
</para>
</note>
</section>
<section xml:id="sec-pkg-overrideDerivation">
<title>&lt;pkg&gt;.overrideDerivation</title>
<warning>
<para>
You should prefer <varname>overrideAttrs</varname> in almost all cases, see
its documentation for the reasons why.
<varname>overrideDerivation</varname> is not deprecated and will continue
to work, but is less nice to use and does not have as many abilities as
<varname>overrideAttrs</varname>.
</para>
</warning>
<warning>
<para>
Do not use this function in Nixpkgs as it evaluates a Derivation before
modifying it, which breaks package abstraction and removes error-checking
of function arguments. In addition, this evaluation-per-function
application incurs a performance penalty, which can become a problem if
many overrides are used. It is only intended for ad-hoc customisation, such
as in <filename>~/.config/nixpkgs/config.nix</filename>.
</para>
</warning>
<para>
The function <varname>overrideDerivation</varname> creates a new derivation
based on an existing one by overriding the original's attributes with the
attribute set produced by the specified function. This function is available
on all derivations defined using the <varname>makeOverridable</varname>
function. Most standard derivation-producing functions, such as
<varname>stdenv.mkDerivation</varname>, are defined using this function,
which means most packages in the nixpkgs expression,
<varname>pkgs</varname>, have this function.
</para>
<para>
Example usage:
<programlisting>
mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});
</programlisting>
</para>
<para>
In the above example, the <varname>name</varname>, <varname>src</varname>,
and <varname>patches</varname> of the derivation will be overridden, while
all other attributes will be retained from the original derivation.
</para>
<para>
The argument <varname>oldAttrs</varname> is used to refer to the attribute
set of the original derivation.
</para>
<note>
<para>
     A package's attributes are evaluated <emphasis>before</emphasis> being
     modified by the <varname>overrideDerivation</varname> function. For
     example, the <varname>name</varname> attribute reference in <varname>url =
     "mirror://gnu/hello/${name}.tar.gz";</varname> is filled-in
     <emphasis>before</emphasis> the <varname>overrideDerivation</varname>
     function modifies the attribute set. This means that overriding the
     <varname>name</varname> attribute, in this example, <emphasis>will
     not</emphasis> change the value of the <varname>url</varname> attribute.
     Instead, we need to override both the <varname>name</varname>
     <emphasis>and</emphasis> <varname>url</varname> attributes, as sketched
     below.
</para>
</note>
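   <para>
    A hypothetical sketch of that situation, for a package
    <varname>foo</varname> whose <varname>url</varname> attribute is computed
    from its <varname>name</varname>:
   </para>
<programlisting>
foo_1_1 = foo.overrideDerivation (oldAttrs: rec {
  # Both attributes must be overridden; overriding only `name` would
  # leave `url` pointing at the old tarball.
  name = "foo-1.1";
  url = "mirror://gnu/foo/${name}.tar.gz";
});
</programlisting>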
</section>
<section xml:id="sec-lib-makeOverridable">
<title>lib.makeOverridable</title>
<para>
The function <varname>lib.makeOverridable</varname> is used to make the
result of a function easily customizable. This utility only makes sense for
functions that accept an argument set and return an attribute set.
</para>
<para>
Example usage:
<programlisting>
f = { a, b }: { result = a+b; };
c = lib.makeOverridable f { a = 1; b = 2; };
</programlisting>
</para>
<para>
The variable <varname>c</varname> is the value of the <varname>f</varname>
function applied with some default arguments. Hence the value of
<varname>c.result</varname> is <literal>3</literal>, in this example.
</para>
<para>
    The variable <varname>c</varname>, however, also has some additional
    functions, like <link linkend="sec-pkg-override">c.override</link>, which
    can be used to override the default arguments. In this example, the value
    of <varname>(c.override { a = 4; }).result</varname> is
    <literal>6</literal>.
</para>
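   <para>
    A quick sketch of both evaluations side by side:
   </para>
<programlisting>
c.result                          # => 3
(c.override { a = 4; }).result    # => 6
</programlisting>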
</section>
</section>

View File

@ -0,0 +1,27 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
         xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-prefer-remote-fetch">
<title>prefer-remote-fetch overlay</title>
<para>
  <function>prefer-remote-fetch</function> is an overlay that downloads
  sources on remote builders. This is useful when the evaluating machine has
  a slow upload speed, while the builder can fetch faster directly from the
  source. To use it, put the following snippet as a new overlay:
<programlisting>
self: super:
(super.prefer-remote-fetch self super)
</programlisting>
  A full configuration example that sets up the overlay for your own account
  could look like this:
<programlisting>
$ mkdir ~/.config/nixpkgs/overlays/
$ cat &gt; ~/.config/nixpkgs/overlays/prefer-remote-fetch.nix &lt;&lt;EOF
self: super: super.prefer-remote-fetch self super
EOF
</programlisting>
</para>
</section>

26
doc/functions/shell.xml Normal file
View File

@ -0,0 +1,26 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-pkgs-mkShell">
<title>pkgs.mkShell</title>
<para>
<function>pkgs.mkShell</function> is a special kind of derivation that is
  only useful when used in combination with <command>nix-shell</command>. It will
in fact fail to instantiate when invoked with <command>nix-build</command>.
</para>
<section xml:id="sec-pkgs-mkShell-usage">
<title>Usage</title>
<programlisting><![CDATA[
{ pkgs ? import <nixpkgs> {} }:
pkgs.mkShell {
# this will make all the build inputs from hello and gnutar
# available to the shell environment
inputsFrom = with pkgs; [ hello gnutar ];
buildInputs = [ pkgs.gnumake ];
}
]]></programlisting>
</section>
</section>

View File

@ -0,0 +1,51 @@
---
title: Introduction
author: Frederik Rietdijk
date: 2015-11-25
---
# Introduction
The Nix Packages collection (Nixpkgs) is a set of thousands of packages for the
[Nix package manager](http://nixos.org/nix/), released under a
[permissive MIT/X11 license](https://github.com/NixOS/nixpkgs/blob/master/COPYING).
Packages are available for several platforms, and can be used with the Nix
package manager on most GNU/Linux distributions as well as NixOS.
This manual primarily describes how to write packages for the Nix Packages collection
(Nixpkgs). Thus it's mainly for packagers and developers who want to add packages to
Nixpkgs. If you would like to learn more about the Nix package manager and the Nix
expression language, then you are kindly referred to the [Nix manual](http://nixos.org/nix/manual/).
## Overview of Nixpkgs
Nix expressions describe how to build packages from source and are collected in
the [nixpkgs repository](https://github.com/NixOS/nixpkgs). Also included in the
collection are Nix expressions for
[NixOS modules](http://nixos.org/nixos/manual/index.html#sec-writing-modules).
With these expressions the Nix package manager can build binary packages.
Packages, including the Nix packages collection, are distributed through
[channels](http://nixos.org/nix/manual/#sec-channels). The collection is
distributed for users of Nix on non-NixOS distributions through the channel
`nixpkgs`. Users of NixOS generally use one of the `nixos-*` channels, e.g.
`nixos-16.03`, which includes all packages and modules for the stable NixOS
16.03. Stable NixOS releases are generally only given
security updates. More up to date packages and modules are available via the
`nixos-unstable` channel.
Both `nixos-unstable` and `nixpkgs` follow the `master` branch of the Nixpkgs
repository, although both do lag the `master` branch by generally
[a couple of days](http://howoldis.herokuapp.com/). Updates to a channel are
distributed as soon as all tests for that channel pass, e.g.
[this table](http://hydra.nixos.org/job/nixpkgs/trunk/unstable#tabs-constituents)
shows the status of tests for the `nixpkgs` channel.
The tests are conducted by a cluster called [Hydra](http://nixos.org/hydra/),
which also builds binary packages from the Nix expressions in Nixpkgs for
`x86_64-linux`, `i686-linux` and `x86_64-darwin`.
The binaries are made available via a [binary cache](https://cache.nixos.org).
The current Nix expressions of the channels are available in the
[`nixpkgs-channels`](https://github.com/NixOS/nixpkgs-channels) repository,
which has branches corresponding to the available channels.

View File

@ -1,51 +0,0 @@
---
title: Introduction
author: Frederik Rietdijk
date: 2015-11-25
---
# Introduction
The Nix Packages collection (Nixpkgs) is a set of thousands of packages for the
[Nix package manager](http://nixos.org/nix/), released under a
[permissive MIT/X11 license](https://github.com/NixOS/nixpkgs/blob/master/COPYING).
Packages are available for several platforms, and can be used with the Nix
package manager on most GNU/Linux distributions as well as NixOS.
This manual primarily describes how to write packages for the Nix Packages collection
(Nixpkgs). Thus its mainly for packagers and developers who want to add packages to
Nixpkgs. If you like to learn more about the Nix package manager and the Nix
expression language, then you are kindly referred to the [Nix manual](http://nixos.org/nix/manual/).
## Overview of Nixpkgs
Nix expressions describe how to build packages from source and are collected in
the [nixpkgs repository](https://github.com/NixOS/nixpkgs). Also included in the
collection are Nix expressions for
[NixOS modules](http://nixos.org/nixos/manual/index.html#sec-writing-modules).
With these expressions the Nix package manager can build binary packages.
Packages, including the Nix packages collection, are distributed through
[channels](http://nixos.org/nix/manual/#sec-channels). The collection is
distributed for users of Nix on non-NixOS distributions through the channel
`nixpkgs`. Users of NixOS generally use one of the `nixos-*` channels, e.g.
`nixos-16.03`, which includes all packages and modules for the stable NixOS
16.03. The purpose of stable NixOS releases are generally only given
security updates. More up to date packages and modules are available via the
`nixos-unstable` channel.
Both `nixos-unstable` and `nixpkgs` follow the `master` branch of the Nixpkgs
repository, although both do lag the `master` branch by generally
[a couple of days](http://howoldis.herokuapp.com/). Updates to a channel are
distributed as soon as all tests for that channel pass, e.g.
[this table](http://hydra.nixos.org/job/nixpkgs/trunk/unstable#tabs-constituents)
shows the status of tests for the `nixpkgs` channel.
The tests are conducted by a cluster called [Hydra](http://nixos.org/hydra/),
which also builds binary packages from the Nix expressions in Nixpkgs for
`x86_64-linux`, `i686-linux` and `x86_64-darwin`.
The binaries are made available via a [binary cache](https://cache.nixos.org).
The current Nix expressions of the channels are available in the
[`nixpkgs-channels`](https://github.com/NixOS/nixpkgs-channels) repository,
which has branches corresponding to the available channels.

View File

@ -0,0 +1,240 @@
---
title: Android
author: Sander van der Burg
date: 2018-11-18
---
# Android
The Android build environment provides three major features and a number of
supporting features.
Deploying an Android SDK installation with plugins
--------------------------------------------------
The first use case is deploying the SDK with a desired set of plugins or subsets
of an SDK.
```nix
with import <nixpkgs> {};
let
androidComposition = androidenv.composeAndroidPackages {
toolsVersion = "25.2.5";
platformToolsVersion = "27.0.1";
buildToolsVersions = [ "27.0.3" ];
includeEmulator = false;
emulatorVersion = "27.2.0";
platformVersions = [ "24" ];
includeSources = false;
includeDocs = false;
includeSystemImages = false;
systemImageTypes = [ "default" ];
abiVersions = [ "armeabi-v7a" ];
lldbVersions = [ "2.0.2558144" ];
cmakeVersions = [ "3.6.4111459" ];
includeNDK = false;
ndkVersion = "16.1.4479499";
useGoogleAPIs = false;
useGoogleTVAddOns = false;
includeExtras = [
"extras;google;gcm"
];
};
in
androidComposition.androidsdk
```
The above function invocation states that we want an Android SDK with the above
specified plugin versions. By default, most plugins are disabled. Notable
exceptions are the tools, platform-tools and build-tools sub packages.
The following parameters are supported:
* `toolsVersion`, specifies the version of the tools package to use
* `platformToolsVersion` specifies the version of the `platform-tools` plugin
* `buildToolsVersions` specifies the versions of the `build-tools` plugins to
use.
* `includeEmulator` specifies whether to deploy the emulator package (`false`
by default). When enabled, the version of the emulator to deploy can be
specified by setting the `emulatorVersion` parameter.
* `includeDocs` specifies whether the documentation catalog should be included.
* `lldbVersions` specifies what LLDB versions should be deployed.
* `cmakeVersions` specifies which CMake versions should be deployed.
* `includeNDK` specifies that the Android NDK bundle should be included.
Defaults to: `false`.
* `ndkVersion` specifies the NDK version that we want to use.
* `includeExtras` is an array of identifier strings referring to arbitrary
add-on packages that should be installed.
* `platformVersions` specifies which platform SDK versions should be included.
For each platform version that has been specified, we can apply the following
options:
* `includeSystemImages` specifies whether a system image for each platform SDK
should be included.
* `includeSources` specifies whether the sources for each SDK version should be
included.
* `useGoogleAPIs` specifies that for each selected platform version the
Google API should be included.
* `useGoogleTVAddOns` specifies that for each selected platform version the
Google TV add-on should be included.
For each requested system image we can specify the following options:
* `systemImageTypes` specifies what kind of system images should be included.
Defaults to: `default`.
* `abiVersions` specifies what kind of ABI version of each system image should
be included. Defaults to: `armeabi-v7a`.
Most of the function arguments have reasonable default settings.
When building the above expression with:
```bash
$ nix-build
```
The Android SDK gets deployed with all desired plugin versions.
We can also deploy subsets of the Android SDK. For example, to deploy only the
`platform-tools` package, you can evaluate the following expression:
```nix
with import <nixpkgs> {};
let
androidComposition = androidenv.composeAndroidPackages {
# ...
};
in
androidComposition.platform-tools
```
Using predefined Android package compositions
--------------------------------------------
In addition to composing an Android package set manually, it is also possible
to use a predefined composition that contains all basic packages for a specific
Android version, such as version 9.0 (API-level 28).
The following Nix expression can be used to deploy the entire SDK with all basic
plugins:
```nix
with import <nixpkgs> {};
androidenv.androidPkgs_9_0.androidsdk
```
It is also possible to use one plugin only:
```nix
with import <nixpkgs> {};
androidenv.androidPkgs_9_0.platform-tools
```
Building an Android application
-------------------------------
In addition to the SDK, it is also possible to build an Ant-based Android
project and automatically deploy all the Android plugins that a project
requires.
```nix
with import <nixpkgs> {};
androidenv.buildApp {
name = "MyAndroidApp";
src = ./myappsources;
release = true;
# If release is set to true, you need to specify the following parameters
keyStore = ./keystore;
keyAlias = "myfirstapp";
keyStorePassword = "mykeystore";
keyAliasPassword = "myfirstapp";
# Any Android SDK parameters that install all the relevant plugins that a
# build requires
platformVersions = [ "24" ];
# When we include the NDK, then ndk-build is invoked before Ant gets invoked
includeNDK = true;
}
```
Aside from the app-specific build parameters (`name`, `src`, `release` and
keystore parameters), the `buildApp {}` function supports all the function
parameters that the SDK composition function (the function shown in the
previous section) supports.
This build function is particularly useful when it is desired to use
[Hydra](http://nixos.org/hydra): the Nix-based continuous integration solution
to build Android apps. An Android APK gets exposed as a build product and can be
installed on any Android device with a web browser by navigating to the build
result page.
Spawning emulator instances
---------------------------
For testing purposes, it can also be quite convenient to automatically generate
scripts that spawn emulator instances with all desired configuration settings.
An emulator spawn script can be configured by invoking the `emulateApp {}`
function:
```nix
with import <nixpkgs> {};
androidenv.emulateApp {
name = "emulate-MyAndroidApp";
platformVersion = "24";
abiVersion = "armeabi-v7a"; # mips, x86 or x86_64
systemImageType = "default";
useGoogleAPIs = false;
}
```
It is also possible to specify an APK to deploy inside the emulator
and the package and activity names to launch it:
```nix
with import <nixpkgs> {};
androidenv.emulateApp {
name = "emulate-MyAndroidApp";
platformVersion = "24";
abiVersion = "armeabi-v7a"; # mips, x86 or x86_64
systemImageType = "default";
useGoogleAPIs = false;
app = ./MyApp.apk;
package = "MyApp";
activity = "MainActivity";
}
```
In addition to prebuilt APKs, you can also bind the APK parameter to a
`buildApp {}` function invocation shown in the previous example.
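
For instance, a sketch of that combination, reusing the hypothetical app
sources and names from the earlier examples:

```nix
with import <nixpkgs> {};

let
  # Built with the buildApp {} function shown earlier; release mode is
  # left at its default, so no keystore parameters are needed.
  myApp = androidenv.buildApp {
    name = "MyAndroidApp";
    src = ./myappsources;
    platformVersions = [ "24" ];
  };
in
androidenv.emulateApp {
  name = "emulate-MyAndroidApp";
  platformVersion = "24";
  abiVersion = "armeabi-v7a";
  systemImageType = "default";
  useGoogleAPIs = false;
  app = myApp;
  package = "MyApp";
  activity = "MainActivity";
}
```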
Querying the available versions of each plugin
----------------------------------------------
When using any of the previously shown functions, it may be a bit inconvenient
to find out what options are supported, since the Android SDK provides many
plugins.
A shell script in the `pkgs/development/mobile/androidenv/` subdirectory can be used to retrieve all
possible options:
```bash
sh ./querypackages.sh packages build-tools
```
The above command-line instruction queries all build-tools versions in the
generated `packages.nix` expression.
Updating the generated expressions
----------------------------------
Most of the Nix expressions are generated from XML files that the Android
package manager uses. To update the expressions run the `generate.sh` script
that is stored in the `pkgs/development/mobile/androidenv/` subdirectory:
```bash
sh ./generate.sh
```

View File

@ -1,124 +1,137 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-beam">
<title>BEAM Languages (Erlang, Elixir &amp; LFE)</title>
<title>BEAM Languages (Erlang, Elixir &amp; LFE)</title>
<section xml:id="beam-introduction">
<title>Introduction</title>
<para>
In this document and related Nix expressions, we use the term,
<emphasis>BEAM</emphasis>, to describe the environment. BEAM is the name
of the Erlang Virtual Machine and, as far as we're concerned, from a
packaging perspective, all languages that run on the BEAM are
interchangeable. That which varies, like the build system, is transparent
to users of any given BEAM package, so we make no distinction.
</para>
</section>
<section xml:id="beam-structure">
<title>Structure</title>
<para>
All BEAM-related expressions are available via the top-level
<literal>beam</literal> attribute, which includes:
</para>
<itemizedlist>
<listitem>
<para>
<literal>interpreters</literal>: a set of compilers running on the
BEAM, including multiple Erlang/OTP versions
(<literal>beam.interpreters.erlangR19</literal>, etc), Elixir
(<literal>beam.interpreters.elixir</literal>) and LFE
(<literal>beam.interpreters.lfe</literal>).
</para>
</listitem>
<listitem>
<para>
<literal>packages</literal>: a set of package sets, each compiled with
a specific Erlang/OTP version, e.g.
<literal>beam.packages.erlangR19</literal>.
</para>
</listitem>
</itemizedlist>
<para>
The default Erlang compiler, defined by
<literal>beam.interpreters.erlang</literal>, is aliased as
<literal>erlang</literal>. The default BEAM package set is defined by
<literal>beam.packages.erlang</literal> and aliased at the top level as
<literal>beamPackages</literal>.
</para>
<para>
To create a package set built with a custom Erlang version, use the
lambda, <literal>beam.packagesWith</literal>, which accepts an Erlang/OTP
derivation and produces a package set similar to
<literal>beam.packages.erlang</literal>.
</para>
<para>
Many Erlang/OTP distributions available in
<literal>beam.interpreters</literal> have versions with ODBC and/or Java
enabled. For example, there's
<literal>beam.interpreters.erlangR19_odbc_javac</literal>, which
corresponds to <literal>beam.interpreters.erlangR19</literal>.
</para>
<para xml:id="erlang-call-package">
We also provide the lambda,
<literal>beam.packages.erlang.callPackage</literal>, which simplifies
writing BEAM package definitions by injecting all packages from
<literal>beam.packages.erlang</literal> into the top-level context.
</para>
</section>
<section xml:id="build-tools">
<title>Build Tools</title>
<section xml:id="build-tools-rebar3">
<title>Rebar3</title>
<para>
By default, Rebar3 wants to manage its own dependencies. This is perfectly
acceptable in the normal, non-Nix setup, but in the Nix world, it is not.
To rectify this, we provide two versions of Rebar3:
<itemizedlist>
<listitem>
<para>
<literal>rebar3</literal>: patched to remove the ability to download
anything. When not running it via <literal>nix-shell</literal> or
<literal>nix-build</literal>, it's probably not going to work as
desired.
</para>
</listitem>
<listitem>
<para>
<literal>rebar3-open</literal>: the normal, unmodified Rebar3. It
should work exactly as would any other version of Rebar3. Any Erlang
package should rely on <literal>rebar3</literal> instead. See <xref
linkend="rebar3-packages"/>.
</para>
</listitem>
</itemizedlist>
</para>
</section>
<section xml:id="build-tools-other">
<title>Mix &amp; Erlang.mk</title>
<para>
Both Mix and Erlang.mk work exactly as expected. There is a bootstrap
process that needs to be run for both, however, which is supported by the
<literal>buildMix</literal> and <literal>buildErlangMk</literal>
derivations, respectively.
</para>
</section>
</section>
<section xml:id="beam-introduction">
<title>Introduction</title>
<section xml:id="how-to-install-beam-packages">
<title>How to Install BEAM Packages</title>
<para>
BEAM packages are not registered at the top level, simply because they are
not relevant to the vast majority of Nix users. They are installable using
the <literal>beam.packages.erlang</literal> attribute set (aliased as
<literal>beamPackages</literal>), which points to packages built by the
default Erlang/OTP version in Nixpkgs, as defined by
<literal>beam.interpreters.erlang</literal>.
In this document and related Nix expressions, we use the term,
<emphasis>BEAM</emphasis>, to describe the environment. BEAM is the name of
the Erlang Virtual Machine and, as far as we're concerned, from a packaging
perspective, all languages that run on the BEAM are interchangeable. That
which varies, like the build system, is transparent to users of any given
BEAM package, so we make no distinction.
</para>
</section>
To list the available packages in
<literal>beamPackages</literal>, use the following command:
<section xml:id="beam-structure">
<title>Structure</title>
<para>
All BEAM-related expressions are available via the top-level
<literal>beam</literal> attribute, which includes:
</para>
<programlisting>
<itemizedlist>
<listitem>
<para>
<literal>interpreters</literal>: a set of compilers running on the BEAM,
including multiple Erlang/OTP versions
(<literal>beam.interpreters.erlangR19</literal>, etc), Elixir
(<literal>beam.interpreters.elixir</literal>) and LFE
(<literal>beam.interpreters.lfe</literal>).
</para>
</listitem>
<listitem>
<para>
<literal>packages</literal>: a set of package sets, each compiled with a
specific Erlang/OTP version, e.g.
<literal>beam.packages.erlangR19</literal>.
</para>
</listitem>
</itemizedlist>
<para>
The default Erlang compiler, defined by
<literal>beam.interpreters.erlang</literal>, is aliased as
<literal>erlang</literal>. The default BEAM package set is defined by
<literal>beam.packages.erlang</literal> and aliased at the top level as
<literal>beamPackages</literal>.
</para>
<para>
To create a package set built with a custom Erlang version, use the lambda,
<literal>beam.packagesWith</literal>, which accepts an Erlang/OTP derivation
and produces a package set similar to
<literal>beam.packages.erlang</literal>.
</para>
<para>
Many Erlang/OTP distributions available in
<literal>beam.interpreters</literal> have versions with ODBC and/or Java
enabled. For example, there's
<literal>beam.interpreters.erlangR19_odbc_javac</literal>, which corresponds
to <literal>beam.interpreters.erlangR19</literal>.
</para>
<para xml:id="erlang-call-package">
We also provide the lambda,
<literal>beam.packages.erlang.callPackage</literal>, which simplifies
writing BEAM package definitions by injecting all packages from
<literal>beam.packages.erlang</literal> into the top-level context.
</para>
</section>
<section xml:id="build-tools">
<title>Build Tools</title>
<section xml:id="build-tools-rebar3">
<title>Rebar3</title>
<para>
By default, Rebar3 wants to manage its own dependencies. This is perfectly
acceptable in the normal, non-Nix setup, but in the Nix world, it is not.
To rectify this, we provide two versions of Rebar3:
<itemizedlist>
<listitem>
<para>
<literal>rebar3</literal>: patched to remove the ability to download
anything. When not running it via <literal>nix-shell</literal> or
<literal>nix-build</literal>, it's probably not going to work as
desired.
</para>
</listitem>
<listitem>
<para>
<literal>rebar3-open</literal>: the normal, unmodified Rebar3. It should
work exactly as would any other version of Rebar3. Any Erlang package
should rely on <literal>rebar3</literal> instead. See
<xref
linkend="rebar3-packages"/>.
</para>
</listitem>
</itemizedlist>
</para>
</section>
<section xml:id="build-tools-other">
<title>Mix &amp; Erlang.mk</title>
<para>
Both Mix and Erlang.mk work exactly as expected. There is a bootstrap
process that needs to be run for both, however, which is supported by the
<literal>buildMix</literal> and <literal>buildErlangMk</literal>
derivations, respectively.
</para>
</section>
</section>
<section xml:id="how-to-install-beam-packages">
<title>How to Install BEAM Packages</title>
<para>
BEAM packages are not registered at the top level, simply because they are
not relevant to the vast majority of Nix users. They are installable using
the <literal>beam.packages.erlang</literal> attribute set (aliased as
<literal>beamPackages</literal>), which points to packages built by the
default Erlang/OTP version in Nixpkgs, as defined by
<literal>beam.interpreters.erlang</literal>. To list the available packages
in <literal>beamPackages</literal>, use the following command:
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A beamPackages
beamPackages.esqlite esqlite-0.2.1
beamPackages.goldrush goldrush-0.1.7
@ -128,34 +141,43 @@ beamPackages.lager lager-3.0.2
beamPackages.meck meck-0.8.3
beamPackages.rebar3-pc pc-1.1.0
</programlisting>
<para>
To install any of those packages into your profile, refer to them by their
attribute path (first column):
To install any of those packages into your profile, refer to them by their
attribute path (first column):
</para>
<programlisting>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
</programlisting>
<para>
The attribute path of any BEAM package corresponds to the name of that
particular package in <link xlink:href="https://hex.pm">Hex</link> or its
OTP Application/Release name.
The attribute path of any BEAM package corresponds to the name of that
particular package in <link xlink:href="https://hex.pm">Hex</link> or its
OTP Application/Release name.
</para>
</section>
<section xml:id="packaging-beam-applications">
</section>
<section xml:id="packaging-beam-applications">
<title>Packaging BEAM Applications</title>
<section xml:id="packaging-erlang-applications">
<title>Erlang Applications</title>
<section xml:id="rebar3-packages">
<title>Rebar3 Packages</title>
<para>
The Nix function, <literal>buildRebar3</literal>, defined in
<literal>beam.packages.erlang.buildRebar3</literal> and aliased at the
top level, can be used to build a derivation that understands how to
build a Rebar3 project. For example, we can build <link
xlink:href="https://github.com/erlang-nix/hex2nix">hex2nix</link> as
follows:
</para>
<programlisting>
<title>Erlang Applications</title>
<section xml:id="rebar3-packages">
<title>Rebar3 Packages</title>
<para>
The Nix function, <literal>buildRebar3</literal>, defined in
<literal>beam.packages.erlang.buildRebar3</literal> and aliased at the top
level, can be used to build a derivation that understands how to build a
Rebar3 project. For example, we can build
<link
xlink:href="https://github.com/erlang-nix/hex2nix">hex2nix</link>
as follows:
</para>
<programlisting>
{ stdenv, fetchFromGitHub, buildRebar3, ibrowse, jsx, erlware_commons }:
buildRebar3 rec {
@ -172,33 +194,40 @@ $ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
beamDeps = [ ibrowse jsx erlware_commons ];
}
</programlisting>
<para>
Such derivations are callable with
<literal>beam.packages.erlang.callPackage</literal> (see <xref
linkend="erlang-call-package"/>). To call this package using the normal
<literal>callPackage</literal>, refer to dependency packages via
<literal>beamPackages</literal>, e.g.
<literal>beamPackages.ibrowse</literal>.
</para>
<para>
Notably, <literal>buildRebar3</literal> includes
<literal>beamDeps</literal>, while
<literal>stdenv.mkDerivation</literal> does not. BEAM dependencies added
there will be correctly handled by the system.
</para>
<para>
If a package needs to compile native code via Rebar3's port compilation
mechanism, add <literal>compilePort = true;</literal> to the derivation.
</para>
</section>
<section xml:id="erlang-mk-packages">
<title>Erlang.mk Packages</title>
<para>
Erlang.mk functions similarly to Rebar3, except we use
<literal>buildErlangMk</literal> instead of
<literal>buildRebar3</literal>.
</para>
<programlisting>
<para>
Such derivations are callable with
<literal>beam.packages.erlang.callPackage</literal> (see
<xref
linkend="erlang-call-package"/>). To call this package using
the normal <literal>callPackage</literal>, refer to dependency packages
via <literal>beamPackages</literal>, e.g.
<literal>beamPackages.ibrowse</literal>.
</para>
<para>
Notably, <literal>buildRebar3</literal> includes
<literal>beamDeps</literal>, while <literal>stdenv.mkDerivation</literal>
does not. BEAM dependencies added there will be correctly handled by the
system.
</para>
<para>
If a package needs to compile native code via Rebar3's port compilation
mechanism, add <literal>compilePort = true;</literal> to the derivation.
</para>
</section>
<section xml:id="erlang-mk-packages">
<title>Erlang.mk Packages</title>
<para>
Erlang.mk functions similarly to Rebar3, except we use
<literal>buildErlangMk</literal> instead of
<literal>buildRebar3</literal>.
</para>
<programlisting>
{ buildErlangMk, fetchHex, cowlib, ranch }:
buildErlangMk {
@ -222,14 +251,17 @@ $ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
};
}
</programlisting>
</section>
<section xml:id="mix-packages">
<title>Mix Packages</title>
<para>
Mix functions similarly to Rebar3, except we use
<literal>buildMix</literal> instead of <literal>buildRebar3</literal>.
</para>
<programlisting>
</section>
<section xml:id="mix-packages">
<title>Mix Packages</title>
<para>
Mix functions similarly to Rebar3, except we use
<literal>buildMix</literal> instead of <literal>buildRebar3</literal>.
</para>
<programlisting>
{ buildMix, fetchHex, plug, absinthe }:
buildMix {
@ -253,10 +285,12 @@ $ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
};
}
</programlisting>
<para>
Alternatively, we can use <literal>buildHex</literal> as a shortcut:
</para>
<programlisting>
<para>
Alternatively, we can use <literal>buildHex</literal> as a shortcut:
</para>
<programlisting>
{ buildHex, buildMix, plug, absinthe }:
buildHex {
@ -278,21 +312,25 @@ $ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
};
}
</programlisting>
</section>
</section>
</section>
</section>
<section xml:id="how-to-develop">
</section>
<section xml:id="how-to-develop">
<title>How to Develop</title>
<section xml:id="accessing-an-environment">
<title>Accessing an Environment</title>
<para>
Often, we simply want to access a valid environment that contains a
specific package and its dependencies. We can accomplish that with the
<literal>env</literal> attribute of a derivation. For example, let's say
we want to access an Erlang REPL with <literal>ibrowse</literal> loaded
up. We could do the following:
</para>
<programlisting>
<title>Accessing an Environment</title>
<para>
Often, we simply want to access a valid environment that contains a
specific package and its dependencies. We can accomplish that with the
<literal>env</literal> attribute of a derivation. For example, let's say we
want to access an Erlang REPL with <literal>ibrowse</literal> loaded up. We
could do the following:
</para>
<programlisting>
$ nix-shell -A beamPackages.ibrowse.env --run "erl"
Erlang/OTP 18 [erts-7.0] [source] [64-bit] [smp:4:4] [async-threads:10] [hipe] [kernel-poll:false]
@ -333,22 +371,25 @@ $ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
ok
2>
</programlisting>
<para>
Notice the <literal>-A beamPackages.ibrowse.env</literal>. That is the key
to this functionality.
</para>
<para>
Notice the <literal>-A beamPackages.ibrowse.env</literal>. That is the key
to this functionality.
</para>
</section>
<section xml:id="creating-a-shell">
<title>Creating a Shell</title>
<para>
Getting access to an environment often isn't enough to do real
development. Usually, we need to create a <literal>shell.nix</literal>
file and do our development inside of the environment specified therein.
This file looks a lot like the packaging described above, except that
<literal>src</literal> points to the project root and we call the package
directly.
</para>
<programlisting>
<title>Creating a Shell</title>
<para>
Getting access to an environment often isn't enough to do real development.
Usually, we need to create a <literal>shell.nix</literal> file and do our
development inside of the environment specified therein. This file looks a
lot like the packaging described above, except that <literal>src</literal>
points to the project root and we call the package directly.
</para>
<programlisting>
{ pkgs ? import &quot;&lt;nixpkgs&quot;&gt; {} }:
with pkgs;
@ -368,13 +409,16 @@ in
drv
</programlisting>
<section xml:id="building-in-a-shell">
<section xml:id="building-in-a-shell">
<title>Building in a Shell (for Mix Projects)</title>
<para>
We can leverage the support of the derivation, irrespective of the build
derivation, by calling the commands themselves.
We can leverage the support of the derivation, irrespective of the build
derivation, by calling the commands themselves.
</para>
<programlisting>
<programlisting>
# =============================================================================
# Variables
# =============================================================================
@ -431,44 +475,54 @@ analyze: build plt
$(NIX_SHELL) --run "mix dialyzer --no-compile"
</programlisting>
<para>
Using a <literal>shell.nix</literal> as described (see <xref
Using a <literal>shell.nix</literal> as described (see
<xref
linkend="creating-a-shell"/>) should just work. Aside from
<literal>test</literal>, <literal>plt</literal>, and
<literal>analyze</literal>, the Make targets work just fine for all of the
build derivations.
<literal>test</literal>, <literal>plt</literal>, and
<literal>analyze</literal>, the Make targets work just fine for all of the
build derivations.
</para>
</section>
</section>
</section>
</section>
<section xml:id="generating-packages-from-hex-with-hex2nix">
</section>
<section xml:id="generating-packages-from-hex-with-hex2nix">
<title>Generating Packages from Hex with <literal>hex2nix</literal></title>
<para>
Updating the <link xlink:href="https://hex.pm">Hex</link> package set
requires <link
xlink:href="https://github.com/erlang-nix/hex2nix">hex2nix</link>. Given the
path to the Erlang modules (usually
<literal>pkgs/development/erlang-modules</literal>), it will dump a file
called <literal>hex-packages.nix</literal>, containing all the packages that
use a recognized build system in <link
xlink:href="https://hex.pm">Hex</link>. It can't be determined, however,
whether every package is buildable.
</para>
<para>
To make life easier for our users, try to build every <link
xlink:href="https://hex.pm">Hex</link> package and remove those that fail.
To do that, simply run the following command in the root of your
<literal>nixpkgs</literal> repository:
</para>
<programlisting>
Updating the <link xlink:href="https://hex.pm">Hex</link> package set
requires
<link
xlink:href="https://github.com/erlang-nix/hex2nix">hex2nix</link>.
Given the path to the Erlang modules (usually
<literal>pkgs/development/erlang-modules</literal>), it will dump a file
called <literal>hex-packages.nix</literal>, containing all the packages that
use a recognized build system in
<link
xlink:href="https://hex.pm">Hex</link>. It can't be determined,
however, whether every package is buildable.
</para>
<para>
To make life easier for our users, try to build every
<link
xlink:href="https://hex.pm">Hex</link> package and remove those
that fail. To do that, simply run the following command in the root of your
<literal>nixpkgs</literal> repository:
</para>
<programlisting>
$ nix-build -A beamPackages
</programlisting>
<para>
That will attempt to build every package in
<literal>beamPackages</literal>. Then manually remove those that fail.
Hopefully, someone will improve <link
xlink:href="https://github.com/erlang-nix/hex2nix">hex2nix</link> in the
future to automate the process.
</para>
</section>
<para>
That will attempt to build every package in <literal>beamPackages</literal>.
Then manually remove those that fail. Hopefully, someone will improve
<link
xlink:href="https://github.com/erlang-nix/hex2nix">hex2nix</link>
in the future to automate the process.
</para>
</section>
</section>

@ -1,40 +1,37 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-bower">
<title>Bower</title>
<title>Bower</title>
<para>
<link xlink:href="http://bower.io">Bower</link> is a package manager for web
site front-end components. Bower packages (comprising of build artefacts and
sometimes sources) are stored in <command>git</command> repositories,
typically on Github. The package registry is run by the Bower team with
package metadata coming from the <filename>bower.json</filename> file within
each package.
</para>
<para>
<link xlink:href="http://bower.io">Bower</link> is a package manager
for web site front-end components. Bower packages (comprising of
build artefacts and sometimes sources) are stored in
<command>git</command> repositories, typically on Github. The
package registry is run by the Bower team with package metadata
coming from the <filename>bower.json</filename> file within each
package.
</para>
<para>
The end result of running Bower is a <filename>bower_components</filename>
directory which can be included in the web app's build process.
</para>
<para>
The end result of running Bower is a
<filename>bower_components</filename> directory which can be included
in the web app's build process.
</para>
<para>
<para>
Bower can be run interactively, by installing
<varname>nodePackages.bower</varname>. More interestingly, the Bower
components can be declared in a Nix derivation, with the help of
<varname>nodePackages.bower2nix</varname>.
</para>
</para>
<section xml:id="ssec-bower2nix-usage">
<section xml:id="ssec-bower2nix-usage">
<title><command>bower2nix</command> usage</title>
<para>
Suppose you have a <filename>bower.json</filename> with the following contents:
<example xml:id="ex-bowerJson"><title><filename>bower.json</filename></title>
<para>
Suppose you have a <filename>bower.json</filename> with the following
contents:
<example xml:id="ex-bowerJson">
<title><filename>bower.json</filename></title>
<programlisting language="json">
<![CDATA[{
"name": "my-web-app",
@ -44,14 +41,12 @@
}
}]]>
</programlisting>
</example>
</para>
<para>
Running <command>bower2nix</command> will produce something like the
following output:
</example>
</para>
<para>
Running <command>bower2nix</command> will produce something like the
following output:
<programlisting language="nix">
<![CDATA[{ fetchbower, buildEnv }:
buildEnv { name = "bower-env"; ignoreCollisions = true; paths = [
@ -60,31 +55,31 @@ buildEnv { name = "bower-env"; ignoreCollisions = true; paths = [
(fetchbower "jquery" "2.2.2" "1.9.1 - 2" "10sp5h98sqwk90y4k6hbdviwqzvzwqf47r3r51pakch5ii2y7js1")
]; }]]>
</programlisting>
</para>
<para>
Using the <command>bower2nix</command> command line arguments, the
output can be redirected to a file. A name like
<filename>bower-packages.nix</filename> would be fine.
</para>
<para>
The resulting derivation is a union of all the downloaded Bower
packages (and their dependencies). To use it, they still need to be
linked together by Bower, which is where
<varname>buildBowerComponents</varname> is useful.
</para>
</section>
<section xml:id="ssec-build-bower-components"><title><varname>buildBowerComponents</varname> function</title>
</para>
<para>
The function is implemented in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/bower-modules/generic/default.nix">
<filename>pkgs/development/bower-modules/generic/default.nix</filename></link>.
Example usage:
Using the <command>bower2nix</command> command line arguments, the output
can be redirected to a file. A name like
<filename>bower-packages.nix</filename> would be fine.
</para>
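<para>
For example (the exact arguments are an assumption; consult the
<command>bower2nix</command> help for the precise invocation), something like
the following writes the generated expression to a file:
</para>
<screen>
$ bower2nix bower.json bower-packages.nix
</screen>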
<example xml:id="ex-buildBowerComponents"><title>buildBowerComponents</title>
<para>
The resulting derivation is a union of all the downloaded Bower packages
(and their dependencies). To use it, they still need to be linked together
by Bower, which is where <varname>buildBowerComponents</varname> is useful.
</para>
</section>
<section xml:id="ssec-build-bower-components">
<title><varname>buildBowerComponents</varname> function</title>
<para>
The function is implemented in
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/bower-modules/generic/default.nix">
<filename>pkgs/development/bower-modules/generic/default.nix</filename></link>.
Example usage:
<example xml:id="ex-buildBowerComponents">
<title>buildBowerComponents</title>
<programlisting language="nix">
bowerComponents = buildBowerComponents {
name = "my-web-app";
@ -92,42 +87,42 @@ bowerComponents = buildBowerComponents {
src = myWebApp; <co xml:id="ex-buildBowerComponents-2" />
};
</programlisting>
</example>
</example>
</para>
<para>
In <xref linkend="ex-buildBowerComponents" />, the following arguments
are of special significance to the function:
<para>
In <xref linkend="ex-buildBowerComponents" />, the following arguments are
of special significance to the function:
<calloutlist>
<callout arearefs="ex-buildBowerComponents-1">
<para>
<varname>generated</varname> specifies the file which was created by
<command>bower2nix</command>.
</para>
</callout>
<callout arearefs="ex-buildBowerComponents-2">
<para>
<varname>src</varname> is your project's sources. It needs to contain a
<filename>bower.json</filename> file.
</para>
</callout>
</calloutlist>
</para>
<calloutlist>
<callout arearefs="ex-buildBowerComponents-1">
<para>
<varname>generated</varname> specifies the file which was created by <command>bower2nix</command>.
</para>
</callout>
<para>
<varname>buildBowerComponents</varname> will run Bower to link together the
output of <command>bower2nix</command>, resulting in a
<filename>bower_components</filename> directory which can be used.
</para>
<callout arearefs="ex-buildBowerComponents-2">
<para>
<varname>src</varname> is your project's sources. It needs to
contain a <filename>bower.json</filename> file.
</para>
</callout>
</calloutlist>
</para>
<para>
Here is an example of a web frontend build process using
<command>gulp</command>. You might use <command>grunt</command>, or anything
else.
</para>
<para>
<varname>buildBowerComponents</varname> will run Bower to link
together the output of <command>bower2nix</command>, resulting in a
<filename>bower_components</filename> directory which can be used.
</para>
<para>
Here is an example of a web frontend build process using
<command>gulp</command>. You might use <command>grunt</command>, or
anything else.
</para>
<example xml:id="ex-bowerGulpFile"><title>Example build script (<filename>gulpfile.js</filename>)</title>
<example xml:id="ex-bowerGulpFile">
<title>Example build script (<filename>gulpfile.js</filename>)</title>
<programlisting language="javascript">
<![CDATA[var gulp = require('gulp');
@ -142,10 +137,10 @@ gulp.task('build', [], function () {
.pipe(gulp.dest("./gulpdist/"));
});]]>
</programlisting>
</example>
</example>
<example xml:id="ex-buildBowerComponentsDefaultNix">
<title>Full example — <filename>default.nix</filename></title>
<example xml:id="ex-buildBowerComponentsDefaultNix">
<title>Full example — <filename>default.nix</filename></title>
<programlisting language="nix">
{ myWebApp ? { outPath = ./.; name = "myWebApp"; }
, pkgs ? import &lt;nixpkgs&gt; {}
@ -172,73 +167,63 @@ pkgs.stdenv.mkDerivation {
installPhase = "mv gulpdist $out";
}
</programlisting>
</example>
</example>
<para>
A few notes about <xref linkend="ex-buildBowerComponentsDefaultNix" />:
<calloutlist>
<callout arearefs="ex-buildBowerComponentsDefault-1">
<para>
The result of <varname>buildBowerComponents</varname> is an
input to the frontend build.
</para>
</callout>
<callout arearefs="ex-buildBowerComponentsDefault-2">
<para>
Whether to symlink or copy the
<filename>bower_components</filename> directory depends on the
build tool in use. In this case a copy is used to avoid
<command>gulp</command> silliness with permissions.
</para>
</callout>
<callout arearefs="ex-buildBowerComponentsDefault-3">
<para>
<command>gulp</command> requires <varname>HOME</varname> to
refer to a writeable directory.
</para>
</callout>
<callout arearefs="ex-buildBowerComponentsDefault-4">
<para>
<para>
A few notes about <xref linkend="ex-buildBowerComponentsDefaultNix" />:
<calloutlist>
<callout arearefs="ex-buildBowerComponentsDefault-1">
<para>
The result of <varname>buildBowerComponents</varname> is an input to the
frontend build.
</para>
</callout>
<callout arearefs="ex-buildBowerComponentsDefault-2">
<para>
Whether to symlink or copy the <filename>bower_components</filename>
directory depends on the build tool in use. In this case a copy is used
to avoid <command>gulp</command> silliness with permissions.
</para>
</callout>
<callout arearefs="ex-buildBowerComponentsDefault-3">
<para>
<command>gulp</command> requires <varname>HOME</varname> to refer to a
writeable directory.
</para>
</callout>
<callout arearefs="ex-buildBowerComponentsDefault-4">
<para>
The actual build command. Other tools could be used.
</para>
</callout>
</calloutlist>
</para>
</section>
</para>
</callout>
</calloutlist>
</para>
</section>
<section xml:id="ssec-bower2nix-troubleshooting">
<section xml:id="ssec-bower2nix-troubleshooting">
<title>Troubleshooting</title>
<variablelist>
<varlistentry>
<variablelist>
<varlistentry>
<term>
<literal>ENOCACHE</literal> errors from
<varname>buildBowerComponents</varname>
<literal>ENOCACHE</literal> errors from <varname>buildBowerComponents</varname>
</term>
<listitem>
<para>
This means that Bower was looking for a package version which
doesn't exist in the generated
<filename>bower-packages.nix</filename>.
</para>
<para>
If <filename>bower.json</filename> has been updated, then run
<command>bower2nix</command> again.
</para>
<para>
It could also be a bug in <command>bower2nix</command> or
<command>fetchbower</command>. If possible, try reformulating
the version specification in <filename>bower.json</filename>.
</para>
<para>
This means that Bower was looking for a package version which doesn't
exist in the generated <filename>bower-packages.nix</filename>.
</para>
<para>
If <filename>bower.json</filename> has been updated, then run
<command>bower2nix</command> again.
</para>
<para>
It could also be a bug in <command>bower2nix</command> or
<command>fetchbower</command>. If possible, try reformulating the version
specification in <filename>bower.json</filename>.
</para>
</listitem>
</varlistentry>
</variablelist>
</section>
</varlistentry>
</variablelist>
</section>
</section>

@ -1,36 +1,37 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-coq">
<title>Coq</title>
<title>Coq</title>
<para>
Coq libraries should be installed in
<literal>$(out)/lib/coq/${coq.coq-version}/user-contrib/</literal>.
Such directories are automatically added to the
<literal>$COQPATH</literal> environment variable by the hook defined
in the Coq derivation.
</para>
<para>
Some libraries require OCaml and sometimes also Camlp5 or findlib.
The exact versions that were used to build Coq are saved in the
<literal>coq.ocaml</literal> and <literal>coq.camlp5</literal>
and <literal>coq.findlib</literal> attributes.
</para>
<para>
Coq libraries may be compatible with some specific versions of Coq only.
The <literal>compatibleCoqVersions</literal> attribute is used to
precisely select those versions of Coq that are compatible with this
derivation.
</para>
<para>
Here is a simple package example. It is a pure Coq library, thus it
depends on Coq. It builds on the Mathematical Components library, thus it
also takes <literal>mathcomp</literal> as <literal>buildInputs</literal>.
Its <literal>Makefile</literal> has been generated using
<literal>coq_makefile</literal> so we only have to
set the <literal>$COQLIB</literal> variable at install time.
</para>
<programlisting>
<para>
Coq libraries should be installed in
<literal>$(out)/lib/coq/${coq.coq-version}/user-contrib/</literal>. Such
directories are automatically added to the <literal>$COQPATH</literal>
environment variable by the hook defined in the Coq derivation.
</para>
<para>
Some extensions (plugins) might require OCaml and sometimes other OCaml
packages. The <literal>coq.ocamlPackages</literal> attribute can be used to
depend on the same package set Coq was built against.
</para>
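<para>
For instance (a sketch, assuming a plugin that needs findlib and camlp5 from
that same package set):
</para>
<programlisting>
buildInputs = [ coq ] ++ (with coq.ocamlPackages; [ ocaml findlib camlp5 ]);
</programlisting>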
<para>
Coq libraries may be compatible with some specific versions of Coq only. The
<literal>compatibleCoqVersions</literal> attribute is used to precisely
select those versions of Coq that are compatible with this derivation.
</para>
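<para>
As a sketch (the version list is hypothetical), a library that only builds
with Coq 8.6 and 8.7 could declare:
</para>
<programlisting>
compatibleCoqVersions = v: builtins.elem v [ "8.6" "8.7" ];
</programlisting>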
<para>
Here is a simple package example. It is a pure Coq library, thus it depends
on Coq. It builds on the Mathematical Components library, thus it also takes
<literal>mathcomp</literal> as <literal>buildInputs</literal>. Its
<literal>Makefile</literal> has been generated using
<literal>coq_makefile</literal> so we only have to set the
<literal>$COQLIB</literal> variable at install time.
</para>
<programlisting>
{ stdenv, fetchFromGitHub, coq, mathcomp }:
stdenv.mkDerivation rec {

@ -0,0 +1,185 @@
# User's Guide to Emscripten in Nixpkgs
[Emscripten](https://github.com/kripken/emscripten): An LLVM-to-JavaScript Compiler
This section of the manual covers how to use `emscripten` in nixpkgs.
Minimal requirements:
* nix
* nixpkgs
Modes of use of `emscripten`:
* **Imperative usage** (on the command line):
If you want to work with `emcc`, `emconfigure` and `emmake` as you are used to from Ubuntu and similar distributions, you can use these commands:
* `nix-env -i emscripten`
* `nix-shell -p emscripten`
* **Declarative usage**:
This mode is far more powerful, since it uses `nix` for dependency management of emscripten libraries and targets, building on the `mkDerivation` implemented by `pkgs.emscriptenStdenv` and `pkgs.buildEmscriptenPackage`. The source for the packages is in `pkgs/top-level/emscripten-packages.nix` and the abstraction behind it in `pkgs/development/em-modules/generic/default.nix`.
* build and install all packages:
* `nix-env -iA emscriptenPackages`
* dev-shell for zlib implementation hacking:
* `nix-shell -A emscriptenPackages.zlib`
## Imperative usage
A few things to note:
* `export EMCC_DEBUG=2` is nice for debugging
* `~/.emscripten`, the build artifact cache, sometimes creates issues and needs to be removed from time to time (see the sketch below)
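A minimal sketch of both hints, using the paths and variable values exactly as described above:

```bash
# verbose emcc diagnostics for the current shell
export EMCC_DEBUG=2

# remove the build-artifact cache if stale artifacts cause problems
rm -rf ~/.emscripten
```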
## Declarative usage
Let's see two different examples from `pkgs/top-level/emscripten-packages.nix`:
* `pkgs.zlib.override`
* `pkgs.buildEmscriptenPackage`
Both are interesting concepts.
A special requirement of `pkgs.buildEmscriptenPackage` is that `doCheck = true` is the default, meaning every emscriptenPackage needs an implemented `checkPhase`.
* Use `export EMCC_DEBUG=2` from within an emscriptenPackage's `phase` to get more detailed debug output about what is going wrong.
* The `~/.emscripten` cache requires us to set `HOME=$TMPDIR` in individual phases. This makes compilation slower but also more deterministic.
### Usage 1: pkgs.zlib.override
This example uses `zlib` from nixpkgs, but instead of compiling **C** to **ELF** it compiles **C** to **JS**, since we used `pkgs.zlib.override` and changed the stdenv to `pkgs.emscriptenStdenv`. A few adaptations and hacks were put in place to make it work. One advantage is that when `pkgs.zlib` is updated, it will automatically update this package as well. However, this can also be a downside...
See the `zlib` example:
zlib = (pkgs.zlib.override {
stdenv = pkgs.emscriptenStdenv;
}).overrideDerivation
(old: rec {
buildInputs = old.buildInputs ++ [ pkgconfig ];
# we need to reset this setting!
NIX_CFLAGS_COMPILE="";
configurePhase = ''
# FIXME: Some tests require writing at $HOME
HOME=$TMPDIR
runHook preConfigure
#export EMCC_DEBUG=2
emconfigure ./configure --prefix=$out --shared
runHook postConfigure
'';
dontStrip = true;
outputs = [ "out" ];
buildPhase = ''
emmake make
'';
installPhase = ''
emmake make install
'';
checkPhase = ''
echo "================= testing zlib using node ================="
echo "Compiling a custom test"
set -x
emcc -O2 -s EMULATE_FUNCTION_POINTER_CASTS=1 test/example.c -DZ_SOLO \
libz.so.${old.version} -I . -o example.js
echo "Using node to execute the test"
${pkgs.nodejs}/bin/node ./example.js
set +x
if [ $? -ne 0 ]; then
echo "test failed for some reason"
exit 1;
else
echo "it seems to work! very good."
fi
echo "================= /testing zlib using node ================="
'';
postPatch = pkgs.stdenv.lib.optionalString pkgs.stdenv.isDarwin ''
substituteInPlace configure \
--replace '/usr/bin/libtool' 'ar' \
--replace 'AR="libtool"' 'AR="ar"' \
--replace 'ARFLAGS="-o"' 'ARFLAGS="-r"'
'';
});
### Usage 2: pkgs.buildEmscriptenPackage
This `xmlmirror` example features an emscriptenPackage which is defined completely in this context; no `pkgs.zlib.override` is used.
xmlmirror = pkgs.buildEmscriptenPackage rec {
name = "xmlmirror";
buildInputs = [ pkgconfig autoconf automake libtool gnumake libxml2 nodejs openjdk json_c ];
nativeBuildInputs = [ pkgconfig zlib ];
src = pkgs.fetchgit {
url = "https://gitlab.com/odfplugfest/xmlmirror.git";
rev = "4fd7e86f7c9526b8f4c1733e5c8b45175860a8fd";
sha256 = "1jasdqnbdnb83wbcnyrp32f36w3xwhwp0wq8lwwmhqagxrij1r4b";
};
configurePhase = ''
rm -f fastXmlLint.js*
# a fix for ERROR:root:For asm.js, TOTAL_MEMORY must be a multiple of 16MB, was 234217728
# https://gitlab.com/odfplugfest/xmlmirror/issues/8
sed -e "s/TOTAL_MEMORY=234217728/TOTAL_MEMORY=268435456/g" -i Makefile.emEnv
# https://github.com/kripken/emscripten/issues/6344
# https://gitlab.com/odfplugfest/xmlmirror/issues/9
sed -e "s/\$(JSONC_LDFLAGS) \$(ZLIB_LDFLAGS) \$(LIBXML20_LDFLAGS)/\$(JSONC_LDFLAGS) \$(LIBXML20_LDFLAGS) \$(ZLIB_LDFLAGS) /g" -i Makefile.emEnv
# https://gitlab.com/odfplugfest/xmlmirror/issues/11
sed -e "s/-o fastXmlLint.js/-s EXTRA_EXPORTED_RUNTIME_METHODS='[\"ccall\", \"cwrap\"]' -o fastXmlLint.js/g" -i Makefile.emEnv
'';
buildPhase = ''
HOME=$TMPDIR
make -f Makefile.emEnv
'';
outputs = [ "out" "doc" ];
installPhase = ''
mkdir -p $out/share
mkdir -p $doc/share/${name}
cp Demo* $out/share
cp -R codemirror-5.12 $out/share
cp fastXmlLint.js* $out/share
cp *.xsd $out/share
cp *.js $out/share
cp *.xhtml $out/share
cp *.html $out/share
cp *.json $out/share
cp *.rng $out/share
cp README.md $doc/share/${name}
'';
checkPhase = ''
'';
};
### Declarative debugging
Use `nix-shell -I nixpkgs=/some/dir/nixpkgs -A emscriptenPackages.libz` and from there you can go through the individual steps. This makes it easy to build a good `unit test` or list the files of the project.
1. `nix-shell -I nixpkgs=/some/dir/nixpkgs -A emscriptenPackages.libz`
2. `cd /tmp/`
3. `unpackPhase`
4. `cd libz-1.2.3`
5. `configurePhase`
6. `buildPhase`
7. ... happy hacking...
## Summary
Using this toolchain makes it easy to leverage `nix` from NixOS, macOS or even Windows (WSL+Ubuntu+nix). This toolchain is reproducible, behaves like the rest of the packages from nixpkgs, and contains a set of well-working examples to learn and adapt from.
If in trouble, ask the maintainers.

@ -1,14 +1,14 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-go">
<title>Go</title>
<title>Go</title>
<para>
The function <varname>buildGoPackage</varname> builds standard Go programs.
</para>
<para>The function <varname>buildGoPackage</varname> builds
standard Go programs.
</para>
<example xml:id='ex-buildGoPackage'><title>buildGoPackage</title>
<example xml:id='ex-buildGoPackage'>
<title>buildGoPackage</title>
<programlisting>
deis = buildGoPackage rec {
name = "deis-${version}";
@ -29,55 +29,56 @@ deis = buildGoPackage rec {
buildFlags = "--tags release"; <co xml:id='ex-buildGoPackage-4' />
}
</programlisting>
</example>
</example>
<para><xref linkend='ex-buildGoPackage'/> is an example expression using buildGoPackage,
the following arguments are of special significance to the function:
<calloutlist>
<callout arearefs='ex-buildGoPackage-1'>
<para>
<xref linkend='ex-buildGoPackage'/> is an example expression using
buildGoPackage, the following arguments are of special significance to the
function:
<calloutlist>
<callout arearefs='ex-buildGoPackage-1'>
<para>
<varname>goPackagePath</varname> specifies the package's canonical Go import path.
<varname>goPackagePath</varname> specifies the package's canonical Go
import path.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-2'>
</callout>
<callout arearefs='ex-buildGoPackage-2'>
<para>
<varname>subPackages</varname> limits the builder from building child packages that
have not been listed. If <varname>subPackages</varname> is not specified, all child
packages will be built.
<varname>subPackages</varname> limits the builder from building child
packages that have not been listed. If <varname>subPackages</varname> is
not specified, all child packages will be built.
</para>
<para>
In this example only <literal>github.com/deis/deis/client</literal> will be built.
In this example only <literal>github.com/deis/deis/client</literal> will
be built.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-3'>
</callout>
<callout arearefs='ex-buildGoPackage-3'>
<para>
<varname>goDeps</varname> is where the Go dependencies of a Go program are listed
as a list of package source identified by Go import path.
It could be imported as a separate <varname>deps.nix</varname> file for
readability. The dependency data structure is described below.
<varname>goDeps</varname> is where the Go dependencies of a Go program are
listed as a list of package source identified by Go import path. It could
be imported as a separate <varname>deps.nix</varname> file for
readability. The dependency data structure is described below.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-4'>
</callout>
<callout arearefs='ex-buildGoPackage-4'>
<para>
<varname>buildFlags</varname> is a list of flags passed to the go build command.
<varname>buildFlags</varname> is a list of flags passed to the go build
command.
</para>
</callout>
</callout>
</calloutlist>
</para>
</calloutlist>
<para>
The <varname>goDeps</varname> attribute can be imported from a separate
<varname>nix</varname> file that defines which Go libraries are needed and
should be included in <varname>GOPATH</varname> for
<varname>buildPhase</varname>.
</para>
</para>
<para>The <varname>goDeps</varname> attribute can be imported from a separate
<varname>nix</varname> file that defines which Go libraries are needed and should
be included in <varname>GOPATH</varname> for <varname>buildPhase</varname>.
</para>
<example xml:id='ex-goDeps'><title>deps.nix</title>
<example xml:id='ex-goDeps'>
<title>deps.nix</title>
<programlisting>
[ <co xml:id='ex-goDeps-1' />
{
@ -100,67 +101,60 @@ the following arguments are of special significance to the function:
}
]
</programlisting>
</example>
</example>
<para>
<calloutlist>
<callout arearefs='ex-goDeps-1'>
<para>
<calloutlist>
<callout arearefs='ex-goDeps-1'>
<para>
<varname>goDeps</varname> is a list of Go dependencies.
<varname>goDeps</varname> is a list of Go dependencies.
</para>
</callout>
<callout arearefs='ex-goDeps-2'>
</callout>
<callout arearefs='ex-goDeps-2'>
<para>
<varname>goPackagePath</varname> specifies Go package import path.
<varname>goPackagePath</varname> specifies Go package import path.
</para>
</callout>
<callout arearefs='ex-goDeps-3'>
</callout>
<callout arearefs='ex-goDeps-3'>
<para>
<varname>fetch type</varname> that needs to be used to get package source. If <varname>git</varname>
is used there should be <varname>url</varname>, <varname>rev</varname> and <varname>sha256</varname>
defined next to it.
<varname>fetch type</varname> that needs to be used to get package source.
If <varname>git</varname> is used there should be <varname>url</varname>,
<varname>rev</varname> and <varname>sha256</varname> defined next to it.
</para>
</callout>
</callout>
</calloutlist>
</para>
</calloutlist>
<para>
To extract dependency information from a Go package in automated way use
<link xlink:href="https://github.com/kamilchm/go2nix">go2nix</link>. It can
produce complete derivation and <varname>goDeps</varname> file for Go
programs.
</para>
</para>
<para>To extract dependency information from a Go package in automated way use <link xlink:href="https://github.com/kamilchm/go2nix">go2nix</link>.
It can produce complete derivation and <varname>goDeps</varname> file for Go programs.</para>
<para>
<varname>buildGoPackage</varname> produces <xref linkend='chap-multiple-output' xrefstyle="select: title" />
where <varname>bin</varname> includes program binaries. You can test build a Go binary as follows:
<screen>
<para>
<varname>buildGoPackage</varname> produces
<xref linkend='chap-multiple-output' xrefstyle="select: title" /> where
<varname>bin</varname> includes program binaries. You can test build a Go
binary as follows:
<screen>
$ nix-build -A deis.bin
</screen>
or build all outputs with:
<screen>
<screen>
$ nix-build -A deis.all
</screen>
<varname>bin</varname> output will be installed by default with
<varname>nix-env -i</varname> or <varname>systemPackages</varname>.
</para>
<varname>bin</varname> output will be installed by default with <varname>nix-env -i</varname>
or <varname>systemPackages</varname>.
</para>
<para>
You may use Go packages installed into the active Nix profiles by adding
the following to your ~/.bashrc:
<para>
You may use Go packages installed into the active Nix profiles by adding the
following to your ~/.bashrc:
<screen>
for p in $NIX_PROFILES; do
GOPATH="$p/share/go:$GOPATH"
done
</screen>
</para>
</para>
</section>

@ -1,955 +0,0 @@
---
title: User's Guide for Haskell in Nixpkgs
author: Peter Simons
date: 2015-06-01
---
# User's Guide to the Haskell Infrastructure
## How to install Haskell packages
Nixpkgs distributes build instructions for all Haskell packages registered on
[Hackage](http://hackage.haskell.org/), but strangely enough normal Nix package
lookups don't seem to discover any of them, except for the default version of ghc, cabal-install, and stack:
```
$ nix-env -i alex
error: selector alex matches no derivations
$ nix-env -qa ghc
ghc-7.10.2
```
The Haskell package set is not registered in the top-level namespace because it
is *huge*. If all Haskell packages were visible to these commands, then
name-based search/install operations would be much slower than they are now. We
avoided that by keeping all Haskell-related packages in a separate attribute
set called `haskellPackages`, which the following command will list:
```
$ nix-env -f "<nixpkgs>" -qaP -A haskellPackages
haskellPackages.a50 a50-0.5
haskellPackages.abacate haskell-abacate-0.0.0.0
haskellPackages.abcBridge haskell-abcBridge-0.12
haskellPackages.afv afv-0.1.1
haskellPackages.alex alex-3.1.4
haskellPackages.Allure Allure-0.4.101.1
haskellPackages.alms alms-0.6.7
[... some 8000 entries omitted ...]
```
To install any of those packages into your profile, refer to them by their
attribute path (first column):
```shell
nix-env -f "<nixpkgs>" -iA haskellPackages.Allure ...
```
The attribute path of any Haskell package corresponds to the name of that
particular package on Hackage: the package `cabal-install` has the attribute
`haskellPackages.cabal-install`, and so on. (Actually, this convention causes
trouble with packages like `3dmodels` and `4Blocks`, because these names are
invalid identifiers in the Nix language. The issue of how to deal with these
rare corner cases is currently unresolved.)
Haskell packages whose Nix name (second column) begins with a `haskell-` prefix
are packages that provide a library whereas packages without that prefix
provide just executables. Libraries may provide executables too, though: the
package `haskell-pandoc`, for example, installs both a library and an
application. You can install and use Haskell executables just like any other
program in Nixpkgs, but using Haskell libraries for development is a bit
trickier and we'll address that subject in great detail in section [How to
create a development environment].
Attribute paths are deterministic inside of Nixpkgs, but the path necessary to
reach Nixpkgs varies from system to system. We dodged that problem by giving
`nix-env` an explicit `-f "<nixpkgs>"` parameter, but if you call `nix-env`
without that flag, then chances are the invocation fails:
```
$ nix-env -iA haskellPackages.cabal-install
error: attribute haskellPackages in selection path
haskellPackages.cabal-install not found
```
On NixOS, for example, Nixpkgs does *not* exist in the top-level namespace by
default. To figure out the proper attribute path, it's easiest to query for the
path of a well-known Nixpkgs package, i.e.:
```
$ nix-env -qaP coreutils
nixos.coreutils coreutils-8.23
```
If your system responds like that (most NixOS installations will), then the
attribute path to `haskellPackages` is `nixos.haskellPackages`. Thus, if you
want to use `nix-env` without giving an explicit `-f` flag, then that's the way
to do it:
```shell
nix-env -qaP -A nixos.haskellPackages
nix-env -iA nixos.haskellPackages.cabal-install
```
Our current default compiler is GHC 7.10.x and the `haskellPackages` set
contains packages built with that particular version. Nixpkgs contains the
latest major release of every GHC since 6.10.4, however, and there is a whole
family of package sets available that defines Hackage packages built with each
of those compilers, too:
```shell
nix-env -f "<nixpkgs>" -qaP -A haskell.packages.ghc6123
nix-env -f "<nixpkgs>" -qaP -A haskell.packages.ghc763
```
The name `haskellPackages` is really just a synonym for
`haskell.packages.ghc7102`, because we prefer that package set internally and
recommend it to our users as their default choice, but ultimately you are free
to compile your Haskell packages with any GHC version you please. The following
command displays the complete list of available compilers:
```
$ nix-env -f "<nixpkgs>" -qaP -A haskell.compiler
haskell.compiler.ghc6104 ghc-6.10.4
haskell.compiler.ghc6123 ghc-6.12.3
haskell.compiler.ghc704 ghc-7.0.4
haskell.compiler.ghc722 ghc-7.2.2
haskell.compiler.ghc742 ghc-7.4.2
haskell.compiler.ghc763 ghc-7.6.3
haskell.compiler.ghc784 ghc-7.8.4
haskell.compiler.ghc7102 ghc-7.10.2
haskell.compiler.ghcHEAD ghc-7.11.20150402
haskell.compiler.ghcNokinds ghc-nokinds-7.11.20150704
haskell.compiler.ghcjs ghcjs-0.1.0
haskell.compiler.jhc jhc-0.8.2
haskell.compiler.uhc uhc-1.1.9.0
```
We have no package sets for `jhc` or `uhc` yet, unfortunately, but for every
version of GHC listed above, there exists a package set based on that compiler.
Also, the attributes `haskell.compiler.ghcXYC` and
`haskell.packages.ghcXYC.ghc` are synonymous for the sake of convenience.
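For instance, these two commands install the same compiler (a small sketch of
the equivalence described above):

```shell
nix-env -f "<nixpkgs>" -iA haskell.compiler.ghc7102
nix-env -f "<nixpkgs>" -iA haskell.packages.ghc7102.ghc
```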
## How to create a development environment
### How to install a compiler
A simple development environment consists of a Haskell compiler and one or both
of the tools `cabal-install` and `stack`. We saw in section
[How to install Haskell packages] how you can install those programs into your
user profile:
```shell
nix-env -f "<nixpkgs>" -iA haskellPackages.ghc haskellPackages.cabal-install
```
Instead of the default package set `haskellPackages`, you can also use the more
precise name `haskell.compiler.ghc7102`, which has the advantage that it refers
to the same GHC version regardless of what Nixpkgs considers "default" at any
given time.
Once you've made those tools available in `$PATH`, it's possible to build
Hackage packages the same way people without access to Nix do it all the time:
```shell
cabal get lens-4.11 && cd lens-4.11
cabal install -j --dependencies-only
cabal configure
cabal build
```
If you enjoy working with Cabal sandboxes, then that's entirely possible too:
just execute the command
```shell
cabal sandbox init
```
before installing the required dependencies.
The `nix-shell` utility makes it easy to switch to a different compiler
version; just enter the Nix shell environment with the command
```shell
nix-shell -p haskell.compiler.ghc784
```
to bring GHC 7.8.4 into `$PATH`. Alternatively, you can use Stack instead of
`nix-shell` directly to select compiler versions and other build tools
per-project. It uses `nix-shell` under the hood when Nix support is turned on.
See [How to build a Haskell project using Stack].
If you're using `cabal-install`, re-running `cabal configure` inside the spawned
shell switches your build to use that compiler instead. If you're working on
a project that doesn't depend on any additional system libraries outside of GHC,
then it's even sufficient to just run the `cabal configure` command inside of
the shell:
```shell
nix-shell -p haskell.compiler.ghc784 --command "cabal configure"
```
Afterwards, all other commands like `cabal build` work just fine in any shell
environment, because the configure phase recorded the absolute paths to all
required tools like GHC in its build configuration inside of the `dist/`
directory. Please note, however, that `nix-collect-garbage` can break such an
environment because the Nix store paths created by `nix-shell` aren't "alive"
anymore once `nix-shell` has terminated. If you find that your Haskell builds
no longer work after garbage collection, then you'll have to re-run `cabal
configure` inside of a new `nix-shell` environment.
### How to install a compiler with libraries
GHC expects to find all installed libraries inside of its own `lib` directory.
This approach works fine on traditional Unix systems, but it doesn't work for
Nix, because GHC's store path is immutable once it's built. We cannot install
additional libraries into that location. As a consequence, our copies of GHC
don't know any packages except their own core libraries, like `base`,
`containers`, `Cabal`, etc.
We can register additional libraries to GHC, however, using a special build
function called `ghcWithPackages`. That function expects one argument: a
function that maps from an attribute set of Haskell packages to a list of
packages, which determines the libraries known to that particular version of
GHC. For example, the Nix expression `ghcWithPackages (pkgs: [pkgs.mtl])`
generates a copy of GHC that has the `mtl` library registered in addition to
its normal core packages:
```
$ nix-shell -p "haskellPackages.ghcWithPackages (pkgs: [pkgs.mtl])"
[nix-shell:~]$ ghc-pkg list mtl
/nix/store/zy79...-ghc-7.10.2/lib/ghc-7.10.2/package.conf.d:
mtl-2.2.1
```
This function allows users to define their own development environment by means
of an override. After adding the following snippet to `~/.config/nixpkgs/config.nix`,
```nix
{
packageOverrides = super: let self = super.pkgs; in
{
myHaskellEnv = self.haskell.packages.ghc7102.ghcWithPackages
(haskellPackages: with haskellPackages; [
# libraries
arrows async cgi criterion
# tools
cabal-install haskintex
]);
};
}
```
it's possible to install that compiler with `nix-env -f "<nixpkgs>" -iA
myHaskellEnv`. If you'd like to switch that development environment to a
different version of GHC, just replace the `ghc7102` bit in the previous
definition with the appropriate name. Of course, it's also possible to define
any number of these development environments! (You can't install two of them
into the same profile at the same time, though, because that would result in
file conflicts.)
The generated `ghc` program is a wrapper script that re-directs the real
GHC executable to use a new `lib` directory --- one that we specifically
constructed to contain all those packages the user requested:
```
$ cat $(type -p ghc)
#! /nix/store/xlxj...-bash-4.3-p33/bin/bash -e
export NIX_GHC=/nix/store/19sm...-ghc-7.10.2/bin/ghc
export NIX_GHCPKG=/nix/store/19sm...-ghc-7.10.2/bin/ghc-pkg
export NIX_GHC_DOCDIR=/nix/store/19sm...-ghc-7.10.2/share/doc/ghc/html
export NIX_GHC_LIBDIR=/nix/store/19sm...-ghc-7.10.2/lib/ghc-7.10.2
exec /nix/store/j50p...-ghc-7.10.2/bin/ghc "-B$NIX_GHC_LIBDIR" "$@"
```
The variables `$NIX_GHC`, `$NIX_GHCPKG`, etc. point to the *new* store path
`ghcWithPackages` constructed specifically for this environment. The last line
of the wrapper script then executes the real `ghc`, but passes the path to the
new `lib` directory using GHC's `-B` flag.
The purpose of those environment variables is to work around an impurity in the
popular [ghc-paths](http://hackage.haskell.org/package/ghc-paths) library. That
library promises to give its users access to GHC's installation paths. Only,
the library can't possibly know that path when it's compiled, because the path
GHC considers its own is determined only much later, when the user configures
it through `ghcWithPackages`. So we [patched
ghc-paths](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/haskell-modules/patches/ghc-paths-nix.patch)
to return the paths found in those environment variables at run-time rather
than trying to guess them at compile-time.
To make sure that mechanism works properly all the time, we recommend that you
set those variables to meaningful values in your shell environment, too, i.e.
by adding the following code to your `~/.bashrc`:
```bash
if type >/dev/null 2>&1 -p ghc; then
eval "$(egrep ^export "$(type -p ghc)")"
fi
```
If you are certain that you'll use only one GHC environment which is located in
your user profile, then you can use the following code, too, which has the
advantage that it doesn't contain any paths from the Nix store, i.e. those
settings always remain valid even if a `nix-env -u` operation updates the GHC
environment in your profile:
```bash
if [ -e ~/.nix-profile/bin/ghc ]; then
export NIX_GHC="$HOME/.nix-profile/bin/ghc"
export NIX_GHCPKG="$HOME/.nix-profile/bin/ghc-pkg"
export NIX_GHC_DOCDIR="$HOME/.nix-profile/share/doc/ghc/html"
export NIX_GHC_LIBDIR="$HOME/.nix-profile/lib/ghc-$($NIX_GHC --numeric-version)"
fi
```
### How to install a compiler with libraries, hoogle and documentation indexes
If you plan to use your environment for interactive programming, not just
compiling random Haskell code, you might want to replace `ghcWithPackages` in
all the listings above with `ghcWithHoogle`.
This environment generator not only produces an environment with GHC and all
the specified libraries, but also generates `hoogle` and `haddock` indexes
for all the packages, and provides a wrapper script around the `hoogle` binary that
uses all those things. A precise name for this thing would be
"`ghcWithPackagesAndHoogleAndDocumentationIndexes`", which is, regrettably, too
long and scary.
For example, installing the following environment
```nix
{
packageOverrides = super: let self = super.pkgs; in
{
myHaskellEnv = self.haskellPackages.ghcWithHoogle
(haskellPackages: with haskellPackages; [
# libraries
arrows async cgi criterion
# tools
cabal-install haskintex
]);
};
}
```
allows one to browse a module documentation index [not too dissimilar to
this](https://downloads.haskell.org/~ghc/latest/docs/html/libraries/index.html)
for all the specified packages and their dependencies by directing a browser of
choice to `~/.nix-profile/share/doc/hoogle/index.html` (or
`/run/current-system/sw/share/doc/hoogle/index.html` in case you put it in
`environment.systemPackages` in NixOS).
After you've marveled enough at that, try adding the following to your
`~/.ghc/ghci.conf`
```
:def hoogle \s -> return $ ":! hoogle search -cl --count=15 \"" ++ s ++ "\""
:def doc \s -> return $ ":! hoogle search -cl --info \"" ++ s ++ "\""
```
and test it by typing into `ghci`:
```
:hoogle a -> a
:doc a -> a
```
Be sure to note the links to `haddock` files in the output. With any modern and
properly configured terminal emulator you can just click those links to
navigate there.
Finally, you can run
```shell
hoogle server -p 8080 --local
```
and navigate to http://localhost:8080/ for your own local
[Hoogle](https://www.haskell.org/hoogle/).
### How to build a Haskell project using Stack
[Stack](http://haskellstack.org) is a popular build tool for Haskell projects.
It has first-class support for Nix. Stack can optionally use Nix to
automatically select the right version of GHC and other build tools to build,
test and execute apps in an existing project downloaded from somewhere on the
Internet. Pass the `--nix` flag to any `stack` command to do so, e.g.
```shell
git clone --recursive http://github.com/yesodweb/wai
cd wai
stack --nix build
```
If you want `stack` to use Nix by default, you can add a `nix` section to the
`stack.yaml` file, as explained in the [Stack documentation][stack-nix-doc]. For
example:
```yaml
nix:
enable: true
packages: [pkgconfig zeromq zlib]
```
The example configuration snippet above tells Stack to create an ad hoc
environment for `nix-shell` as in the below section, in which the `pkgconfig`,
`zeromq` and `zlib` packages from Nixpkgs are available. All `stack` commands
will implicitly be executed inside this ad hoc environment.
Some projects have more sophisticated needs. For example, some ad hoc
environments might need to expose Nixpkgs packages compiled in a certain way, or
with extra environment variables. In these cases, you'll need a `shell` field
instead of `packages`:
```yaml
nix:
enable: true
shell-file: shell.nix
```
For more on how to write a `shell.nix` file see the below section. You'll need
to express a derivation. Note that Nixpkgs ships with a convenience wrapper
function around `mkDerivation` called `haskell.lib.buildStackProject` to help you
create this derivation in exactly the way Stack expects. All of the same inputs
as `mkDerivation` can be provided. For example, to build a Stack project
that includes packages linking against a version of the R library compiled with
special options turned on:
```nix
with (import <nixpkgs> { });
let R = pkgs.R.override { enableStrictBarrier = true; };
in
haskell.lib.buildStackProject {
name = "HaskellR";
buildInputs = [ R zeromq zlib ];
}
```
You can select a particular GHC version to compile with by setting the
`ghc` attribute as an argument to `buildStackProject`. Better yet, let
Stack choose what GHC version it wants based on the snapshot specified
in `stack.yaml` (only works with Stack >= 1.1.3):
```nix
{nixpkgs ? import <nixpkgs> { }, ghc ? nixpkgs.ghc}:
with nixpkgs;
let R = pkgs.R.override { enableStrictBarrier = true; };
in
haskell.lib.buildStackProject {
name = "HaskellR";
buildInputs = [ R zeromq zlib ];
inherit ghc;
}
```
[stack-nix-doc]: http://docs.haskellstack.org/en/stable/nix_integration.html
### How to create ad hoc environments for `nix-shell`
The easiest way to create an ad hoc development environment is to run
`nix-shell` with the appropriate GHC environment given on the command-line:
```shell
nix-shell -p "haskellPackages.ghcWithPackages (pkgs: with pkgs; [mtl pandoc])"
```
For more sophisticated use-cases, however, it's more convenient to save the
desired configuration in a file called `shell.nix` that looks like this:
```nix
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
let
inherit (nixpkgs) pkgs;
ghc = pkgs.haskell.packages.${compiler}.ghcWithPackages (ps: with ps; [
monad-par mtl
]);
in
pkgs.stdenv.mkDerivation {
name = "my-haskell-env-0";
buildInputs = [ ghc ];
shellHook = "eval $(egrep ^export ${ghc}/bin/ghc)";
}
```
Now run `nix-shell` --- or even `nix-shell --pure` --- to enter a shell
environment that has the appropriate compiler in `$PATH`. If you use `--pure`,
then add all other packages that your development environment needs into the
`buildInputs` attribute. If you'd like to switch to a different compiler
version, then pass an appropriate `compiler` argument to the expression, i.e.
`nix-shell --argstr compiler ghc784`.
If you need such an environment because you'd like to compile a Hackage package
outside of Nix --- i.e. because you're hacking on the latest version from Git
---, then the package set provides suitable nix-shell environments for you
already! Every Haskell package has an `env` attribute that provides a shell
environment suitable for compiling that particular package. If you'd like to
hack the `lens` library, for example, then you just have to check out the
source code and enter the appropriate environment:
```
$ cabal get lens-4.11 && cd lens-4.11
Downloading lens-4.11...
Unpacking to lens-4.11/
$ nix-shell "<nixpkgs>" -A haskellPackages.lens.env
[nix-shell:/tmp/lens-4.11]$
```
At this point, you can run `cabal configure`, `cabal build`, and all the other
development commands. Note that you need `cabal-install` installed in your
`$PATH` already to use it here --- the `nix-shell` environment does not provide
it.
## How to create Nix builds for your own private Haskell packages
If your own Haskell packages have build instructions for Cabal, then you can
convert those automatically into build instructions for Nix using the
`cabal2nix` utility, which you can install into your profile by running
`nix-env -i cabal2nix`.
### How to build a stand-alone project
For example, let's assume that you're working on a private project called
`foo`. To generate a Nix build expression for it, change into the project's
top-level directory and run the command:
```shell
cabal2nix . > foo.nix
```
Then write the following snippet into a file called `default.nix`:
```nix
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
nixpkgs.pkgs.haskell.packages.${compiler}.callPackage ./foo.nix { }
```
Finally, store the following code in a file called `shell.nix`:
```nix
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
(import ./default.nix { inherit nixpkgs compiler; }).env
```
At this point, you can run `nix-build` to have Nix compile your project and
install it into a Nix store path. After `nix-build` returns, the local
directory will contain a symlink called `result` that points to that location. Of
course, passing the flag `--argstr compiler ghc763` allows switching the build
to any version of GHC currently supported.
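For example (a sketch of the two invocations just described):

```shell
nix-build                            # build with the default compiler
nix-build --argstr compiler ghc763   # build the same project with GHC 7.6.3
```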
Furthermore, you can call `nix-shell` to enter an interactive development
environment in which you can use `cabal configure` and `cabal build` to develop
your code. That environment will automatically contain a proper GHC derivation
with all the required libraries registered as well as all the system-level
libraries your package might need.
If your package does not depend on any system-level libraries, then it's
sufficient to run
```shell
nix-shell --command "cabal configure"
```
once to set up your build. `cabal-install` determines the absolute paths to all
resources required for the build and writes them into a config file in the
`dist/` directory. Once that's done, you can run `cabal build` and any other
command for that project even outside of the `nix-shell` environment. This
feature is particularly nice for those of us who like to edit their code with
an IDE, like Emacs' `haskell-mode`, because it's not necessary to start Emacs
inside of nix-shell just to make it find out the necessary settings for
building the project; `cabal-install` has already done that for us.
If you want to do some quick-and-dirty hacking and don't want to bother setting
up a `default.nix` and `shell.nix` file manually, then you can use the
`--shell` flag offered by `cabal2nix` to have it generate a stand-alone
`nix-shell` environment for you. With that feature, running
```shell
cabal2nix --shell . > shell.nix
nix-shell --command "cabal configure"
```
is usually enough to set up a build environment for any given Haskell package.
You can even use that generated file to run `nix-build`, too:
```shell
nix-build shell.nix
```
### How to build projects that depend on each other
If you have multiple private Haskell packages that depend on each other, then
you'll have to register those packages in the Nixpkgs set to make them visible
for the dependency resolution performed by `callPackage`. First of all, change
into each of your projects' top-level directories and generate a `default.nix`
file with `cabal2nix`:
```shell
cd ~/src/foo && cabal2nix . > default.nix
cd ~/src/bar && cabal2nix . > default.nix
```
Then edit your `~/.config/nixpkgs/config.nix` file to register those builds in the
default Haskell package set:
```nix
{
packageOverrides = super: let self = super.pkgs; in
{
haskellPackages = super.haskellPackages.override {
overrides = self: super: {
foo = self.callPackage ../src/foo {};
bar = self.callPackage ../src/bar {};
};
};
};
}
```
Once that's accomplished, `nix-env -f "<nixpkgs>" -qA haskellPackages` will
show your packages like any other package from Hackage, and you can build them
```shell
nix-build "<nixpkgs>" -A haskellPackages.foo
```
or enter an interactive shell environment suitable for building them:
```shell
nix-shell "<nixpkgs>" -A haskellPackages.bar.env
```
## Miscellaneous Topics
### How to build with profiling enabled
Every Haskell package set takes a function called `overrides` that you can use
to manipulate the package as much as you please. One useful application of this
feature is to replace the default `mkDerivation` function with one that enables
library profiling for all packages. To accomplish that add the following
snippet to your `~/.config/nixpkgs/config.nix` file:
```nix
{
packageOverrides = super: let self = super.pkgs; in
{
profiledHaskellPackages = self.haskellPackages.override {
overrides = self: super: {
mkDerivation = args: super.mkDerivation (args // {
enableLibraryProfiling = true;
});
};
};
};
}
```
Then, replace instances of `haskellPackages` in the `cabal2nix`-generated
`default.nix` or `shell.nix` files with `profiledHaskellPackages`.
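A hedged sketch of what the substituted `default.nix` might look like for the
`foo` project from earlier:

```nix
{ nixpkgs ? import <nixpkgs> {} }:

nixpkgs.pkgs.profiledHaskellPackages.callPackage ./foo.nix { }
```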
### How to override package versions in a compiler-specific package set
Nixpkgs provides the latest version of
[`ghc-events`](http://hackage.haskell.org/package/ghc-events), which is 0.4.4.0
at the time of this writing. This is fine for users of GHC 7.10.x, but GHC
7.8.4 cannot compile that binary. Now, one way to solve that problem is to
register an older version of `ghc-events` in the 7.8.x-specific package set.
The first step is to generate Nix build instructions with `cabal2nix`:
```shell
cabal2nix cabal://ghc-events-0.4.3.0 > ~/.nixpkgs/ghc-events-0.4.3.0.nix
```
Then add the override in `~/.config/nixpkgs/config.nix`:
```nix
{
packageOverrides = super: let self = super.pkgs; in
{
haskell = super.haskell // {
packages = super.haskell.packages // {
ghc784 = super.haskell.packages.ghc784.override {
overrides = self: super: {
ghc-events = self.callPackage ./ghc-events-0.4.3.0.nix {};
};
};
};
};
};
}
```
This code is a little crazy, no doubt, but it's necessary because the intuitive
version
```nix
{ # ...
haskell.packages.ghc784 = super.haskell.packages.ghc784.override {
overrides = self: super: {
ghc-events = self.callPackage ./ghc-events-0.4.3.0.nix {};
};
};
}
```
doesn't do what we want it to: that code replaces the `haskell` package set in
Nixpkgs with one that contains only one entry, `packages`, which contains only
one entry `ghc784`. This override loses the `haskell.compiler` set, and it
loses the `haskell.packages.ghcXYZ` sets for all compilers but GHC 7.8.4. To
avoid that problem, we have to perform the convoluted little dance from above,
iterating over each step in the hierarchy.
Once it's accomplished, however, we can install a variant of `ghc-events`
that's compiled with GHC 7.8.4:
```shell
nix-env -f "<nixpkgs>" -iA haskell.packages.ghc784.ghc-events
```
Unfortunately, it turns out that this build fails again while executing the
test suite! Apparently, the release archive on Hackage is missing some data
files that the test suite requires, so we cannot run it. We can disable the test suite by
re-generating the Nix expression with the `--no-check` flag:
```shell
cabal2nix --no-check cabal://ghc-events-0.4.3.0 > ~/.nixpkgs/ghc-events-0.4.3.0.nix
```
Now the build succeeds.
Of course, in the concrete example of `ghc-events` this whole exercise is not
an ideal solution, because `ghc-events` can analyze the output emitted by any
version of GHC later than 6.12 regardless of the compiler version that was used
to build the `ghc-events` executable, so strictly speaking there's no reason to
prefer one built with GHC 7.8.x in the first place. However, for users who
cannot use GHC 7.10.x at all for some reason, the approach of downgrading to an
older version might be useful.
### How to recover from GHC's infamous non-deterministic library ID bug
GHC and distributed build farms don't get along well:
- https://ghc.haskell.org/trac/ghc/ticket/4012
When you see an error like this one
```
package foo-0.7.1.0 is broken due to missing package
text-1.2.0.4-98506efb1b9ada233bb5c2b2db516d91
```
then you have to download and re-install `foo` and all its dependents from
scratch:
```shell
nix-store -q --referrers /nix/store/*-haskell-text-1.2.0.4 \
| xargs -L 1 nix-store --repair-path
```
If you're using additional Hydra servers other than `hydra.nixos.org`, then it
might be necessary to purge the local caches that store data from those
machines, so that those binary channels are disabled for the duration of the
previous command:
```shell
rm /nix/var/nix/binary-cache-v3.sqlite
rm /nix/var/nix/manifests/*
rm /nix/var/nix/channel-cache/*
```
### Builds on Darwin fail with `math.h` not found
Users of GHC on Darwin have occasionally reported that builds fail, because the
compiler complains about a missing include file:
```
fatal error: 'math.h' file not found
```
The issue has been discussed at length in [ticket
6390](https://github.com/NixOS/nixpkgs/issues/6390), and so far no good
solution has been proposed. As a work-around, users who run into this problem
can configure the environment variables
```shell
export NIX_CFLAGS_COMPILE="-idirafter /usr/include"
export NIX_CFLAGS_LINK="-L/usr/lib"
```
in their `~/.bashrc` file to avoid the compiler error.
### Builds using Stack complain about missing system libraries
```
-- While building package zlib-0.5.4.2 using:
runhaskell -package=Cabal-1.22.4.0 -clear-package-db [... lots of flags ...]
Process exited with code: ExitFailure 1
Logs have been written to: /home/foo/src/stack-ide/.stack-work/logs/zlib-0.5.4.2.log
Configuring zlib-0.5.4.2...
Setup.hs: Missing dependency on a foreign library:
* Missing (or bad) header file: zlib.h
This problem can usually be solved by installing the system package that
provides this library (you may need the "-dev" version). If the library is
already installed but in a non-standard location then you can use the flags
--extra-include-dirs= and --extra-lib-dirs= to specify where it is.
If the header file does exist, it may contain errors that are caught by the C
compiler at the preprocessing stage. In this case you can re-run configure
with the verbosity flag -v3 to see the error messages.
```
When you run the build inside of the nix-shell environment, the system
is configured to find `libz.so` without any special flags -- the compiler
and linker "just know" how to find it. Consequently, Cabal won't record
any search paths for `libz.so` in the package description, which means
that the package works fine inside of nix-shell, but once you leave the
shell the shared object can no longer be found. That issue is by no
means specific to Stack: you'll have that problem with any other
Haskell package that's built inside of nix-shell but run outside of that
environment.
You can remedy this issue in several ways. The easiest is to add a `nix` section
to the `stack.yaml` like the following:
```yaml
nix:
enable: true
packages: [ zlib ]
```
Stack's Nix support knows to add `${zlib.out}/lib` and `${zlib.dev}/include`
as `--extra-lib-dirs` and `--extra-include-dirs` options, respectively.
Alternatively, you can achieve the same effect by hand. First of all, run
```
$ nix-build --no-out-link "<nixpkgs>" -A zlib
/nix/store/alsvwzkiw4b7ip38l4nlfjijdvg3fvzn-zlib-1.2.8
```
to find out the store path of the system's zlib library. Now, you can
1. Add that path (plus a "/lib" suffix) to your `$LD_LIBRARY_PATH`
environment variable to make sure your system linker finds `libz.so`
automatically. It's not a pretty solution, but it will work.
2. As a variant of (1), you can also install any number of system
libraries into your user's profile (or some other profile) and point
`$LD_LIBRARY_PATH` to that profile instead, so that you don't have to
list dozens of those store paths all over the place.
3. The solution I prefer is to call stack with an appropriate
`--extra-lib-dirs` flag like so:
```shell
stack --extra-lib-dirs=/nix/store/alsvwzkiw4b7ip38l4nlfjijdvg3fvzn-zlib-1.2.8/lib build
```
Typically, you'll need `--extra-include-dirs` as well. It's possible
to add those flags to the project's `stack.yaml` or your user's
global `~/.stack/global/stack.yaml` file so that you don't have to
specify them manually every time. But again, you're likely better off
using Stack's Nix support instead.
The same thing applies to `cabal configure`, of course, if you're
building with `cabal-install` instead of Stack.
### Creating statically linked binaries
There are two levels of static linking. The first option is to configure the
build with the Cabal flag `--disable-executable-dynamic`. In Nix expressions,
this can be achieved by setting the attribute:
```
enableSharedExecutables = false;
```
That gives you a binary with statically linked Haskell libraries and
dynamically linked system libraries.
To link both Haskell libraries and system libraries statically, the additional
flags `--ghc-option=-optl=-static --ghc-option=-optl=-pthread` need to be used.
In Nix, this is accomplished with:
```
configureFlags = [ "--ghc-option=-optl=-static" "--ghc-option=-optl=-pthread" ];
```
It's important to realize, however, that most system libraries in Nix are
built as shared libraries only, i.e. there is just no static library
available that Cabal could link!
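Putting both settings together, here is a hedged sketch of how such an
override might look; `myapp` is a placeholder for the package you actually
want to link statically, and `haskell.lib.overrideCabal` is used merely as a
convenient way to set the builder attributes mentioned above:
```nix
# Sketch only: whether the final link succeeds depends on static variants of
# the required system libraries actually being available.
with import <nixpkgs> {};

haskell.lib.overrideCabal haskellPackages.myapp (drv: {
  enableSharedExecutables = false;
  configureFlags = (drv.configureFlags or []) ++ [
    "--ghc-option=-optl=-static"
    "--ghc-option=-optl=-pthread"
  ];
})
```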
### Building GHC with integer-simple
By default GHC implements the Integer type using the
[GNU Multiple Precision Arithmetic (GMP) library](https://gmplib.org/).
The implementation can be found in the
[integer-gmp](http://hackage.haskell.org/package/integer-gmp) package.
A potential problem with this is that GMP is licensed under the
[GNU Lesser General Public License (LGPL)](http://www.gnu.org/copyleft/lesser.html),
a kind of "copyleft" license. According to the terms of the LGPL, paragraph 5,
you may distribute a program that is designed to be compiled and dynamically
linked with the library under the terms of your choice (i.e., commercially) but
if your program incorporates portions of the library, if it is linked
statically, then your program is a "derivative"--a "work based on the
library"--and according to paragraph 2, section c, you "must cause the whole of
the work to be licensed" under the terms of the LGPL (including for free).
The LGPL licensing for GMP is a problem for the overall licensing of binary
programs compiled with GHC because most distributions (and builds) of GHC use
static libraries. (Dynamic libraries are currently distributed only for macOS.)
The LGPL licensing situation may be worse: even though
[The Glasgow Haskell Compiler License](https://www.haskell.org/ghc/license)
is essentially a "free software" license (BSD3), according to
paragraph 2 of the LGPL, GHC must be distributed under the terms of the LGPL!
To work around these problems, GHC can be built with a slower but LGPL-free
alternative implementation of Integer called
[integer-simple](http://hackage.haskell.org/package/integer-simple).
To get a GHC compiler built with `integer-simple` instead of `integer-gmp`, use
the attribute `haskell.compiler.integer-simple."${ghcVersion}"`.
For example:
```
$ nix-build -E '(import <nixpkgs> {}).haskell.compiler.integer-simple.ghc802'
...
$ result/bin/ghc-pkg list | grep integer
integer-simple-0.1.1.1
```
The following command displays the complete list of GHC compilers built with `integer-simple`:
```
$ nix-env -f "<nixpkgs>" -qaP -A haskell.compiler.integer-simple
haskell.compiler.integer-simple.ghc7102 ghc-7.10.2
haskell.compiler.integer-simple.ghc7103 ghc-7.10.3
haskell.compiler.integer-simple.ghc722 ghc-7.2.2
haskell.compiler.integer-simple.ghc742 ghc-7.4.2
haskell.compiler.integer-simple.ghc783 ghc-7.8.3
haskell.compiler.integer-simple.ghc784 ghc-7.8.4
haskell.compiler.integer-simple.ghc801 ghc-8.0.1
haskell.compiler.integer-simple.ghc802 ghc-8.0.2
haskell.compiler.integer-simple.ghcHEAD ghc-8.1.20170106
```
To get a package set supporting `integer-simple` use the attribute:
`haskell.packages.integer-simple."${ghcVersion}"`. For example,
use the following to get the `scientific` package built with `integer-simple`:
```shell
nix-build -A haskell.packages.integer-simple.ghc802.scientific
```
### Quality assurance
The `haskell.lib` library includes a number of functions for checking for
various imperfections in Haskell packages. It's useful to apply these functions
to your own Haskell packages and to integrate them into a Continuous Integration
server like [hydra](https://nixos.org/hydra/) to ensure your packages maintain a
minimum level of quality. This section discusses some of these functions.
#### failOnAllWarnings
Applying `haskell.lib.failOnAllWarnings` to a Haskell package enables the
`-Wall` and `-Werror` GHC options to turn all warnings into build failures.
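For example, a minimal expression (sketch only; `mypackage` stands for one of
your own packages) that can be built as part of a CI job looks like this:
```nix
# Fail the build of `mypackage` if GHC emits any warnings.
let
  pkgs = import <nixpkgs> {};
in
pkgs.haskell.lib.failOnAllWarnings pkgs.haskellPackages.mypackage
```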
#### buildStrictly
Applying `haskell.lib.buildStrictly` to a Haskell package calls
`failOnAllWarnings` on the given package to turn all warnings into build
failures. Additionally, the source of your package is obtained by first invoking
`cabal sdist`, which ensures that all needed files are listed in the Cabal file.
#### checkUnusedPackages
Applying `haskell.lib.checkUnusedPackages` to a Haskell package invokes
the [packunused](http://hackage.haskell.org/package/packunused) tool on the
package. `packunused` complains when it finds packages listed as build-depends
in the Cabal file which are redundant. For example:
```
$ nix-build -E 'let pkgs = import <nixpkgs> {}; in pkgs.haskell.lib.checkUnusedPackages {} pkgs.haskellPackages.scientific'
these derivations will be built:
/nix/store/3lc51cxj2j57y3zfpq5i69qbzjpvyci1-scientific-0.3.5.1.drv
...
detected package components
~~~~~~~~~~~~~~~~~~~~~~~~~~~
- library
- testsuite(s): test-scientific
- benchmark(s): bench-scientific*
(component names suffixed with '*' are not configured to be built)
library
~~~~~~~
The following package dependencies seem redundant:
- ghc-prim-0.5.0.0
testsuite(test-scientific)
~~~~~~~~~~~~~~~~~~~~~~~~~~
no redundant packages dependencies found
builder for /nix/store/3lc51cxj2j57y3zfpq5i69qbzjpvyci1-scientific-0.3.5.1.drv failed with exit code 1
error: build of /nix/store/3lc51cxj2j57y3zfpq5i69qbzjpvyci1-scientific-0.3.5.1.drv failed
```
As you can see, `packunused` finds that although the testsuite component has
no redundant dependencies, the library component of `scientific-0.3.5.1` depends
on `ghc-prim`, which is unused in the library.
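To tie this back to the Continuous Integration use case mentioned at the start
of this section, a hedged sketch of a small Hydra jobset (again, `mypackage` is
a stand-in for your own package) that runs these checks could look like:
```nix
# release.nix -- build the package strictly and check for unused
# build-depends; each attribute fails to build if its check does not pass.
let
  pkgs = import <nixpkgs> {};
  hlib = pkgs.haskell.lib;
in
{
  mypackage-strict = hlib.buildStrictly pkgs.haskellPackages.mypackage;
  mypackage-unused = hlib.checkUnusedPackages { } pkgs.haskellPackages.mypackage;
}
```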
## Other resources
- The Youtube video [Nix Loves Haskell](https://www.youtube.com/watch?v=BsBhi_r-OeE)
provides an introduction into Haskell NG aimed at beginners. The slides are
available at http://cryp.to/nixos-meetup-3-slides.pdf and also -- in a form
ready for cut & paste -- at
https://github.com/NixOS/cabal2nix/blob/master/doc/nixos-meetup-3-slides.md.
- Another Youtube video is [Escaping Cabal Hell with Nix](https://www.youtube.com/watch?v=mQd3s57n_2Y),
which discusses the subject of Haskell development with Nix and also provides
a basic introduction to Nix itself, i.e. it's suitable for viewers with
almost no prior Nix experience.
- Oliver Charles wrote a very nice [Tutorial how to develop Haskell packages with Nix](http://wiki.ocharles.org.uk/Nix).
- The *Journey into the Haskell NG infrastructure* series of postings
describe the new Haskell infrastructure in great detail:
- [Part 1](https://nixos.org/nix-dev/2015-January/015591.html)
explains the differences between the old and the new code and gives
instructions how to migrate to the new setup.
- [Part 2](https://nixos.org/nix-dev/2015-January/015608.html)
looks in-depth at how to tweak and configure your setup by means of
overrides.
- [Part 3](https://nixos.org/nix-dev/2015-April/016912.html)
describes the infrastructure that keeps the Haskell package set in Nixpkgs
up-to-date.
File diff suppressed because it is too large
@ -0,0 +1,115 @@
# Idris packages
## Installing Idris
The easiest way to get a working Idris version is to install the `idris` attribute:
```
$ # On NixOS
$ nix-env -i nixos.idris
$ # On non-NixOS
$ nix-env -i nixpkgs.idris
```
This, however, only provides the `prelude` and `base` libraries. To install additional libraries:
```
$ nix-env -iE 'pkgs: pkgs.idrisPackages.with-packages (with pkgs.idrisPackages; [ contrib pruviloj ])'
```
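If you want such an environment to be easily reinstallable, a sketch like the
following (the attribute name `myIdris` is arbitrary) can go into
`~/.config/nixpkgs/config.nix` instead:
```nix
{
  packageOverrides = super: {
    # Idris together with the libraries you want available by default.
    myIdris = super.idrisPackages.with-packages
      (with super.idrisPackages; [ contrib pruviloj ]);
  };
}
```
It can then be installed with `nix-env -f "<nixpkgs>" -iA myIdris`.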
To see all available Idris packages:
```
$ # On NixOS
$ nix-env -qaPA nixos.idrisPackages
$ # On non-NixOS
$ nix-env -qaPA nixpkgs.idrisPackages
```
Similarly, entering a `nix-shell`:
```
$ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
```
## Starting Idris with library support
To have access to these libraries in idris, call it with an argument `-p <library name>` for each library:
```
$ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
[nix-shell:~]$ idris -p contrib -p pruviloj
```
A listing of all available packages the Idris binary has access to is available via `--listlibs`:
```
$ idris --listlibs
00prelude-idx.ibc
pruviloj
base
contrib
prelude
00pruviloj-idx.ibc
00base-idx.ibc
00contrib-idx.ibc
```
## Building an Idris project with Nix
As an example of how a Nix expression for an Idris package can be created, here is the one for `idrisPackages.yaml`:
```nix
{ build-idris-package
, fetchFromGitHub
, contrib
, lightyear
, lib
}:
build-idris-package {
name = "yaml";
version = "2018-01-25";
# This is the .ipkg file that should be built, defaults to the package name
# In this case it should build `Yaml.ipkg` instead of `yaml.ipkg`
# This is only necessary because the yaml package's ipkg file is
# different from its package name here.
ipkgName = "Yaml";
# Idris dependencies to provide for the build
idrisDeps = [ contrib lightyear ];
src = fetchFromGitHub {
owner = "Heather";
repo = "Idris.Yaml";
rev = "5afa51ffc839844862b8316faba3bafa15656db4";
sha256 = "1g4pi0swmg214kndj85hj50ccmckni7piprsxfdzdfhg87s0avw7";
};
meta = {
description = "Idris YAML lib";
homepage = https://github.com/Heather/Idris.Yaml;
license = lib.licenses.mit;
maintainers = [ lib.maintainers.brainrape ];
};
}
```
Assuming this file is saved as `yaml.nix`, it's buildable using
```
$ nix-build -E '(import <nixpkgs> {}).idrisPackages.callPackage ./yaml.nix {}'
```
Or it's possible to use
```nix
with import <nixpkgs> {};
{
yaml = idrisPackages.callPackage ./yaml.nix {};
}
```
in another file (say `default.nix`) to be able to build it with
```
$ nix-build -A yaml
```
@ -1,35 +1,35 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="chap-language-support">
<title>Support for specific programming languages and frameworks</title>
<para>The <link linkend="chap-stdenv">standard build
environment</link> makes it easy to build typical Autotools-based
packages with very little code. Any other kind of package can be
accommodated by overriding the appropriate phases of
<literal>stdenv</literal>. However, there are specialised functions
in Nixpkgs to easily build packages for other programming languages,
such as Perl or Haskell. These are described in this chapter.</para>
<xi:include href="beam.xml" />
<xi:include href="bower.xml" />
<xi:include href="coq.xml" />
<xi:include href="go.xml" />
<xi:include href="haskell.xml" />
<xi:include href="idris.xml" /> <!-- generated from ../../pkgs/development/idris-modules/README.md -->
<xi:include href="java.xml" />
<xi:include href="lua.xml" />
<xi:include href="node.xml" /> <!-- generated from ../../pkgs/development/node-packages/README.md -->
<xi:include href="perl.xml" />
<xi:include href="python.xml" />
<xi:include href="qt.xml" />
<xi:include href="r.xml" /> <!-- generated from ../../pkgs/development/r-modules/README.md -->
<xi:include href="ruby.xml" />
<xi:include href="rust.xml" />
<xi:include href="texlive.xml" />
<xi:include href="vim.xml" />
<title>Support for specific programming languages and frameworks</title>
<para>
The <link linkend="chap-stdenv">standard build environment</link> makes it
easy to build typical Autotools-based packages with very little code. Any
other kind of package can be accommodated by overriding the appropriate phases
of <literal>stdenv</literal>. However, there are specialised functions in
Nixpkgs to easily build packages for other programming languages, such as
Perl or Haskell. These are described in this chapter.
</para>
<xi:include href="android.section.xml" />
<xi:include href="beam.xml" />
<xi:include href="bower.xml" />
<xi:include href="coq.xml" />
<xi:include href="go.xml" />
<xi:include href="haskell.section.xml" />
<xi:include href="idris.section.xml" />
<xi:include href="ios.section.xml" />
<xi:include href="java.xml" />
<xi:include href="lua.xml" />
<xi:include href="node.section.xml" />
<xi:include href="ocaml.xml" />
<xi:include href="perl.xml" />
<xi:include href="python.section.xml" />
<xi:include href="qt.xml" />
<xi:include href="r.section.xml" />
<xi:include href="ruby.xml" />
<xi:include href="rust.section.xml" />
<xi:include href="texlive.xml" />
<xi:include href="titanium.section.xml" />
<xi:include href="vim.section.xml" />
<xi:include href="emscripten.section.xml" />
</chapter>
@ -0,0 +1,219 @@
---
title: iOS
author: Sander van der Burg
date: 2018-11-18
---
# iOS
This component is basically a wrapper/workaround that makes it possible to
expose an Xcode installation as a Nix package by means of symlinking to the
relevant executables on the host system.
Since Xcode can't be packaged with Nix, nor can we publish it as a Nix package
(because of its license), this is basically the only integration strategy
that makes it possible to do iOS application builds that integrate with other
components of the Nix ecosystem.
The primary objective of this project is to use the Nix expression language to
specify how iOS apps can be built from source code, and to automatically spawn
iOS simulator instances for testing.
This component also makes it possible to use [Hydra](http://nixos.org/hydra),
the Nix-based continuous integration server to regularly build iOS apps and to
do wireless ad-hoc installations of enterprise IPAs on iOS devices through
Hydra.
The Xcode build environment implements a number of features.
Deploying a proxy component wrapper exposing Xcode
--------------------------------------------------
The first use case is deploying a Nix package that provides symlinks to the Xcode
installation on the host system. This package can be used as a build input to
any build function implemented in the Nix expression language that requires
Xcode.
```nix
let
pkgs = import <nixpkgs> {};
xcodeenv = import ./xcodeenv {
inherit (pkgs) stdenv;
};
in
xcodeenv.composeXcodeWrapper {
version = "9.2";
xcodeBaseDir = "/Applications/Xcode.app";
}
```
By deploying the above expression with `nix-build` and inspecting its content
you will notice that several Xcode-related executables are exposed as a Nix
package:
```bash
$ ls result/bin
lrwxr-xr-x 1 sander staff 94 1 jan 1970 Simulator -> /Applications/Xcode.app/Contents/Developer/Applications/Simulator.app/Contents/MacOS/Simulator
lrwxr-xr-x 1 sander staff 17 1 jan 1970 codesign -> /usr/bin/codesign
lrwxr-xr-x 1 sander staff 17 1 jan 1970 security -> /usr/bin/security
lrwxr-xr-x 1 sander staff 21 1 jan 1970 xcode-select -> /usr/bin/xcode-select
lrwxr-xr-x 1 sander staff 61 1 jan 1970 xcodebuild -> /Applications/Xcode.app/Contents/Developer/usr/bin/xcodebuild
lrwxr-xr-x 1 sander staff 14 1 jan 1970 xcrun -> /usr/bin/xcrun
```
Building an iOS application
---------------------------
We can build an iOS app executable for the simulator, or an IPA/xcarchive file
for release purposes, e.g. ad-hoc, enterprise or store installations, by
executing the `xcodeenv.buildApp {}` function:
```nix
let
pkgs = import <nixpkgs> {};
xcodeenv = import ./xcodeenv {
inherit (pkgs) stdenv;
};
in
xcodeenv.buildApp {
name = "MyApp";
src = ./myappsources;
sdkVersion = "11.2";
target = null; # Corresponds to the name of the app by default
configuration = null; # Release for release builds, Debug for debug builds
scheme = null; # -scheme will correspond to the app name by default
sdk = null; # null will set it to 'iphonesimulator` for simulator builds or `iphoneos` to real builds
xcodeFlags = "";
release = true;
certificateFile = ./mycertificate.p12;
certificatePassword = "secret";
provisioningProfile = ./myprovisioning.profile;
signMethod = "ad-hoc"; # 'enterprise' or 'store'
generateIPA = true;
generateXCArchive = false;
enableWirelessDistribution = true;
installURL = "/installipa.php";
bundleId = "mycompany.myapp";
appVersion = "1.0";
# Supports all xcodewrapper parameters as well
xcodeBaseDir = "/Applications/Xcode.app";
}
```
The above function takes a variety of parameters:
* The `name` and `src` parameters are mandatory and specify the name of the app
and the location where the source code resides.
* `sdkVersion` specifies which version of the iOS SDK to use.
It is also possible to adjust the `xcodebuild` parameters. This is only needed in
rare circumstances. In most cases the default values should suffice:
* The `target` parameter specifies which `xcodebuild` target to build. By
default it takes the target that has the same name as the app.
* The `configuration` parameter can be overridden if desired. By default, it
will do a debug build for the simulator and a release build for real devices.
* The `scheme` parameter specifies which `-scheme` parameter to propagate to
`xcodebuild`. By default, it corresponds to the app name.
* The `sdk` parameter specifies which SDK to use. By default, it picks
`iphonesimulator` for simulator builds and `iphoneos` for release builds.
* The `xcodeFlags` parameter specifies arbitrary command line parameters that
should be propagated to `xcodebuild`.
By default, builds are carried out for the iOS simulator. To do release builds
(builds for real iOS devices), you must set the `release` parameter to `true`.
In addition, you need to set the following parameters:
* `certificateFile` refers to a P12 certificate file.
* `certificatePassword` specifies the password of the P12 certificate.
* `provisioningProfile` refers to the provisioning profile needed to sign the app.
* `signMethod` should refer to `ad-hoc` for signing the app with an ad-hoc
certificate, `enterprise` for enterprise certificates and `app-store` for App
store certificates.
* `generateIPA` specifies that we want to produce an IPA file (this is probably
what you want).
* `generateXCArchive` specifies that we want to produce an xcarchive file.
When building IPA files on Hydra and when it is desired to allow iOS devices to
install IPAs by browsing to the Hydra build products page, you can enable the
`enableWirelessDistribution` parameter.
When enabled, you need to configure the following options:
* The `installURL` parameter refers to the URL of a PHP script that composes the
`itms-services://` URL allowing iOS devices to install the IPA file.
* `bundleId` refers to the bundle ID value of the app.
* `appVersion` refers to the app's version number.
To use wireless ad-hoc distributions, you must also install the corresponding
PHP script on a web server (see the section 'Installing the PHP script for wireless
ad hoc installations from Hydra' for more information).
In addition to the build parameters, you can also specify any parameters that
the `xcodeenv.composeXcodeWrapper {}` function takes. For example, the
`xcodeBaseDir` parameter can be overridden to refer to a different Xcode
version.
Spawning simulator instances
----------------------------
In addition to building iOS apps, we can also automatically spawn simulator
instances:
```nix
let
pkgs = import <nixpkgs> {};
xcodeenv = import ./xcodeenv {
inherit (pkgs) stdenv;
};
in
xcodeenv.simulateApp {
name = "simulate";
# Supports all xcodewrapper parameters as well
xcodeBaseDir = "/Applications/Xcode.app";
}
```
The above expression produces a script that starts the simulator from the
provided Xcode installation. The script can be started as follows:
```bash
./result/bin/run-test-simulator
```
By default, the script will show an overview of the UDIDs of all available simulator
instances and ask you to pick one. You can also provide a UDID as a
command-line parameter to launch an instance automatically:
```bash
./result/bin/run-test-simulator 5C93129D-CF39-4B1A-955F-15180C3BD4B8
```
You can also extend the simulator script to automatically deploy and launch an
app in the requested simulator instance:
```nix
let
pkgs = import <nixpkgs> {};
xcodeenv = import ./xcodeenv {
inherit (pkgs) stdenv;
};
in
xcodeenv.simulateApp {
name = "simulate";
bundleId = "mycompany.myapp";
app = xcodeenv.buildApp {
# ...
};
# Supports all xcodewrapper parameters as well
xcodeBaseDir = "/Applications/Xcode.app";
}
```
By providing the result of an `xcodeenv.buildApp {}` function and configuring the
app bundle ID, the app gets deployed and started automatically.

<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-java">
<title>Java</title>
<title>Java</title>
<para>Ant-based Java packages are typically built from source as follows:
<para>
Ant-based Java packages are typically built from source as follows:
<programlisting>
stdenv.mkDerivation {
name = "...";
@ -16,33 +15,36 @@ stdenv.mkDerivation {
buildPhase = "ant";
}
</programlisting>
Note that <varname>jdk</varname> is an alias for the OpenJDK (self-built
where available, or pre-built via Zulu). Platforms with OpenJDK not (yet) in
Nixpkgs (<literal>Aarch32</literal>, <literal>Aarch64</literal>) point to the
(unfree) <literal>oraclejdk</literal>.
</para>
Note that <varname>jdk</varname> is an alias for the OpenJDK.</para>
<para>JAR files that are intended to be used by other packages should
be installed in <filename>$out/share/java</filename>. The OpenJDK has
a stdenv setup hook that adds any JARs in the
<filename>share/java</filename> directories of the build inputs to the
<envar>CLASSPATH</envar> environment variable. For instance, if the
package <literal>libfoo</literal> installs a JAR named
<filename>foo.jar</filename> in its <filename>share/java</filename>
directory, and another package declares the attribute
<para>
JAR files that are intended to be used by other packages should be installed
in <filename>$out/share/java</filename>. JDKs have a stdenv setup hook that
add any JARs in the <filename>share/java</filename> directories of the build
inputs to the <envar>CLASSPATH</envar> environment variable. For instance, if
the package <literal>libfoo</literal> installs a JAR named
<filename>foo.jar</filename> in its <filename>share/java</filename>
directory, and another package declares the attribute
<programlisting>
buildInputs = [ jdk libfoo ];
</programlisting>
then <envar>CLASSPATH</envar> will be set to
<filename>/nix/store/...-libfoo/share/java/foo.jar</filename>.
</para>
then <envar>CLASSPATH</envar> will be set to
<filename>/nix/store/...-libfoo/share/java/foo.jar</filename>.</para>
<para>Private JARs
should be installed in a location like
<filename>$out/share/<replaceable>package-name</replaceable></filename>.</para>
<para>If your Java package provides a program, you need to generate a
wrapper script to run it using the OpenJRE. You can use
<literal>makeWrapper</literal> for this:
<para>
Private JARs should be installed in a location like
<filename>$out/share/<replaceable>package-name</replaceable></filename>.
</para>
<para>
If your Java package provides a program, you need to generate a wrapper
script to run it using the OpenJRE. You can use
<literal>makeWrapper</literal> for this:
<programlisting>
buildInputs = [ makeWrapper ];
@ -53,32 +55,30 @@ installPhase =
--add-flags "-cp $out/share/java/foo.jar org.foo.Main"
'';
</programlisting>
Note the use of <literal>jre</literal>, which is the part of the OpenJDK
package that contains the Java Runtime Environment. By using
<literal>${jre}/bin/java</literal> instead of
<literal>${jdk}/bin/java</literal>, you prevent your package from depending
on the JDK at runtime.
</para>
Note the use of <literal>jre</literal>, which is the part of the
OpenJDK package that contains the Java Runtime Environment. By using
<literal>${jre}/bin/java</literal> instead of
<literal>${jdk}/bin/java</literal>, you prevent your package from
depending on the JDK at runtime.</para>
<para>It is possible to use a different Java compiler than
<command>javac</command> from the OpenJDK. For instance, to use the
Eclipse Java Compiler:
<para>
Note that all JDKs expose a <literal>home</literal> attribute, so if your application
requires environment variables like <envar>JAVA_HOME</envar> to be set, that
can be done in a generic fashion with the <literal>--set</literal> argument
of <literal>makeWrapper</literal>:
<programlisting>
buildInputs = [ jre ant ecj ];
--set JAVA_HOME ${jdk.home}
</programlisting>
</para>
(Note that here you don't need the full JDK as an input, but just the
JRE.) The ECJ has a stdenv setup hook that sets some environment
variables to cause Ant to use ECJ, but this doesn't work with all Ant
files. Similarly, you can use the GNU Java Compiler:
<para>
It is possible to use a different Java compiler than <command>javac</command>
from the OpenJDK. For instance, to use the GNU Java Compiler:
<programlisting>
buildInputs = [ gcj ant ];
</programlisting>
Here, Ant will automatically use <command>gij</command> (the GNU Java
Runtime) instead of the OpenJRE.</para>
Here, Ant will automatically use <command>gij</command> (the GNU Java
Runtime) instead of the OpenJRE.
</para>
</section>

<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-lua">
<title>Lua</title>
<title>Lua</title>
<para>
Lua packages are built by the <varname>buildLuaPackage</varname> function. This function is
implemented
in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/lua-modules/generic/default.nix">
<para>
Lua packages are built by the <varname>buildLuaPackage</varname> function.
This function is implemented in
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/lua-modules/generic/default.nix">
<filename>pkgs/development/lua-modules/generic/default.nix</filename></link>
and works similarly to <varname>buildPerlPackage</varname>. (See
<xref linkend="sec-language-perl"/> for details.)
</para>
</para>
<para>
Lua packages are defined
in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/lua-packages.nix"><filename>pkgs/top-level/lua-packages.nix</filename></link>.
<para>
Lua packages are defined in
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/lua-packages.nix"><filename>pkgs/top-level/lua-packages.nix</filename></link>.
Most of them are simple. For example:
<programlisting>
<programlisting>
fileSystem = buildLuaPackage {
name = "filesystem-1.6.2";
src = fetchurl {
@ -32,20 +30,19 @@ fileSystem = buildLuaPackage {
};
};
</programlisting>
</para>
</para>
<para>
<para>
Though, more complicated packages should be placed in a separate file in
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/lua-modules"><filename>pkgs/development/lua-modules</filename></link>.
</para>
<para>
Lua packages accept additional parameter <varname>disabled</varname>, which defines
the condition of disabling package from luaPackages. For example, if package has
<varname>disabled</varname> assigned to <literal>lua.luaversion != "5.1"</literal>,
it will not be included in any luaPackages except lua51Packages, making it
only be built for lua 5.1.
</para>
</para>
<para>
Lua packages accept an additional parameter <varname>disabled</varname>, which
defines the condition for excluding a package from luaPackages. For example, if
a package has <varname>disabled</varname> set to <literal>lua.luaversion
!= "5.1"</literal>, it will not be included in any luaPackages set except
lua51Packages, i.e. it will only be built for Lua 5.1.
</para>
</section>

Node.js packages
================
The `pkgs/development/node-packages` folder contains a generated collection of
[NPM packages](https://npmjs.com/) that can be installed with the Nix package
manager.
As a rule of thumb, the package set should only provide *end user* software
packages, such as command-line utilities. Libraries should only be added to the
package set if there is a non-NPM package that requires it.
When it is desired to use NPM libraries in a development project, use the
`node2nix` generator directly on the `package.json` configuration file of the
project.
The package set also provides support for multiple Node.js versions. The policy
is that a new package should be added to the collection for the latest stable LTS
release (which is currently 10.x), unless there is an explicit reason to support
a different release.
If your package uses native addons, you need to examine what kind of native
build system it uses. Here are some examples:
* `node-gyp`
* `node-gyp-builder`
* `node-pre-gyp`
After you have identified the correct system, you need to override your package
expression, adding the build system as a build input. For example, `dat`
requires `node-gyp-build`, so we override its expression in `default-v10.nix`:
```nix
dat = nodePackages.dat.override (oldAttrs: {
buildInputs = oldAttrs.buildInputs ++ [ nodePackages.node-gyp-build ];
});
```
To add a package from NPM to nixpkgs:
1. Modify `pkgs/development/node-packages/node-packages-v10.json` to add, update
or remove package entries. (Or `pkgs/development/node-packages/node-packages-v8.json`
for packages depending on Node.js 8.x)
2. Run the script: `(cd pkgs/development/node-packages && ./generate.sh)`.
3. Build your new package to test your changes:
`cd /path/to/nixpkgs && nix-build -A nodePackages.<new-or-updated-package>`.
To build against a specific Node.js version (e.g. 10.x):
`nix-build -A nodePackages_10_x.<new-or-updated-package>`
4. Add and commit all modified and generated files.
For more information about the generation process, consult the
[README.md](https://github.com/svanderburg/node2nix) file of the `node2nix`
tool.

<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-ocaml">
<title>OCaml</title>
<para>
OCaml libraries should be installed in
<literal>$(out)/lib/ocaml/${ocaml.version}/site-lib/</literal>. Such
directories are automatically added to the <literal>$OCAMLPATH</literal>
environment variable when building another package that depends on them
or when opening a <literal>nix-shell</literal>.
</para>
<para>
Given that most of the OCaml ecosystem is now built with dune,
nixpkgs includes a convenience build support function called
<literal>buildDunePackage</literal> that will build an OCaml package
using dune, OCaml and findlib and any additional dependencies provided
as <literal>buildInputs</literal> or <literal>propagatedBuildInputs</literal>.
</para>
<para>
Here is a simple package example. It defines an (optional) attribute
<literal>minimumOCamlVersion</literal> that will be used to throw a
descriptive evaluation error if building with an older OCaml is attempted.
It uses the <literal>fetchFromGitHub</literal> fetcher to get its source.
It sets the <literal>doCheck</literal> (optional) attribute to
<literal>true</literal> which means that tests will be run with
<literal>dune runtest -p angstrom</literal> after the build
(<literal>dune build -p angstrom</literal>) is complete.
It uses <literal>alcotest</literal> as a build input (because it is needed
to run the tests) and <literal>bigstringaf</literal> and
<literal>result</literal> as propagated build inputs (thus they will also
be available to libraries depending on this library).
The library will be installed using the <literal>angstrom.install</literal>
file that dune generates.
</para>
<programlisting>
{ stdenv, fetchFromGitHub, buildDunePackage, alcotest, result, bigstringaf }:
buildDunePackage rec {
pname = "angstrom";
version = "0.10.0";
minimumOCamlVersion = "4.03";
src = fetchFromGitHub {
owner = "inhabitedtype";
repo = pname;
rev = version;
sha256 = "0lh6024yf9ds0nh9i93r9m6p5psi8nvrqxl5x7jwl13zb0r9xfpw";
};
buildInputs = [ alcotest ];
propagatedBuildInputs = [ bigstringaf result ];
doCheck = true;
meta = {
homepage = https://github.com/inhabitedtype/angstrom;
description = "OCaml parser combinators built for speed and memory efficiency";
license = stdenv.lib.licenses.bsd3;
maintainers = with stdenv.lib.maintainers; [ sternenseemann ];
};
}
</programlisting>
<para>
Here is a second example, this time using a source archive generated with
<literal>dune-release</literal>. It is a good idea to use this archive when
it is available as it will usually contain substituted variables such as a
<literal>%%VERSION%%</literal> field. This library does not depend
on any other OCaml library and no tests are run after building it.
</para>
<programlisting>
{ stdenv, fetchurl, buildDunePackage }:
buildDunePackage rec {
pname = "wtf8";
version = "1.0.1";
minimumOCamlVersion = "4.01";
src = fetchurl {
url = "https://github.com/flowtype/ocaml-${pname}/releases/download/v${version}/${pname}-${version}.tbz";
sha256 = "1msg3vycd3k8qqj61sc23qks541cxpb97vrnrvrhjnqxsqnh6ygq";
};
meta = with stdenv.lib; {
homepage = https://github.com/flowtype/ocaml-wtf8;
description = "WTF-8 is a superset of UTF-8 that allows unpaired surrogates.";
license = licenses.mit;
maintainers = [ maintainers.eqyiel ];
};
}
</programlisting>
</section>

<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-perl">
<title>Perl</title>
<title>Perl</title>
<para>
Nixpkgs provides a function <varname>buildPerlPackage</varname>, a generic
package builder function for any Perl package that has a standard
<varname>Makefile.PL</varname>. It's implemented in
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/perl-modules/generic"><filename>pkgs/development/perl-modules/generic</filename></link>.
</para>
<para>Nixpkgs provides a function <varname>buildPerlPackage</varname>,
a generic package builder function for any Perl package that has a
standard <varname>Makefile.PL</varname>. It's implemented in <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/perl-modules/generic"><filename>pkgs/development/perl-modules/generic</filename></link>.</para>
<para>Perl packages from CPAN are defined in <link
<para>
Perl packages from CPAN are defined in
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/perl-packages.nix"><filename>pkgs/top-level/perl-packages.nix</filename></link>,
rather than <filename>pkgs/all-packages.nix</filename>. Most Perl
packages are so straight-forward to build that they are defined here
directly, rather than having a separate function for each package
called from <filename>perl-packages.nix</filename>. However, more
complicated packages should be put in a separate file, typically in
<filename>pkgs/development/perl-modules</filename>. Here is an
example of the former:
rather than <filename>pkgs/all-packages.nix</filename>. Most Perl packages
are so straight-forward to build that they are defined here directly, rather
than having a separate function for each package called from
<filename>perl-packages.nix</filename>. However, more complicated packages
should be put in a separate file, typically in
<filename>pkgs/development/perl-modules</filename>. Here is an example of the
former:
<programlisting>
ClassC3 = buildPerlPackage rec {
name = "Class-C3-0.21";
@ -28,74 +31,72 @@ ClassC3 = buildPerlPackage rec {
};
};
</programlisting>
Note the use of <literal>mirror://cpan/</literal>, and the
<literal>${name}</literal> in the URL definition to ensure that the
name attribute is consistent with the source that we're actually
downloading. Perl packages are made available in
<filename>all-packages.nix</filename> through the variable
<varname>perlPackages</varname>. For instance, if you have a package
that needs <varname>ClassC3</varname>, you would typically write
Note the use of <literal>mirror://cpan/</literal>, and the
<literal>${name}</literal> in the URL definition to ensure that the name
attribute is consistent with the source that we're actually downloading.
Perl packages are made available in <filename>all-packages.nix</filename>
through the variable <varname>perlPackages</varname>. For instance, if you
have a package that needs <varname>ClassC3</varname>, you would typically
write
<programlisting>
foo = import ../path/to/foo.nix {
inherit stdenv fetchurl ...;
inherit (perlPackages) ClassC3;
};
</programlisting>
in <filename>all-packages.nix</filename>. You can test building a
Perl package as follows:
in <filename>all-packages.nix</filename>. You can test building a Perl
package as follows:
<screen>
$ nix-build -A perlPackages.ClassC3
</screen>
<varname>buildPerlPackage</varname> adds <literal>perl-</literal> to
the start of the name attribute, so the package above is actually
called <literal>perl-Class-C3-0.21</literal>. So to install it, you
can say:
<varname>buildPerlPackage</varname> adds <literal>perl-</literal> to the
start of the name attribute, so the package above is actually called
<literal>perl-Class-C3-0.21</literal>. So to install it, you can say:
<screen>
$ nix-env -i perl-Class-C3
</screen>
(Of course you can also install using the attribute name: <literal>nix-env -i
-A perlPackages.ClassC3</literal>.)
</para>
(Of course you can also install using the attribute name:
<literal>nix-env -i -A perlPackages.ClassC3</literal>.)</para>
<para>So what does <varname>buildPerlPackage</varname> do? It does
the following:
<orderedlist>
<listitem><para>In the configure phase, it calls <literal>perl
Makefile.PL</literal> to generate a Makefile. You can set the
variable <varname>makeMakerFlags</varname> to pass flags to
<filename>Makefile.PL</filename></para></listitem>
<listitem><para>It adds the contents of the <envar>PERL5LIB</envar>
environment variable to <literal>#! .../bin/perl</literal> line of
Perl scripts as <literal>-I<replaceable>dir</replaceable></literal>
flags. This ensures that a script can find its
dependencies.</para></listitem>
<listitem><para>In the fixup phase, it writes the propagated build
inputs (<varname>propagatedBuildInputs</varname>) to the file
<filename>$out/nix-support/propagated-user-env-packages</filename>.
<command>nix-env</command> recursively installs all packages listed
in this file when you install a package that has it. This ensures
that a Perl package can find its dependencies.</para></listitem>
</orderedlist>
</para>
<para><varname>buildPerlPackage</varname> is built on top of
<varname>stdenv</varname>, so everything can be customised in the
usual way. For instance, the <literal>BerkeleyDB</literal> module has
a <varname>preConfigure</varname> hook to generate a configuration
file used by <filename>Makefile.PL</filename>:
<para>
So what does <varname>buildPerlPackage</varname> do? It does the following:
<orderedlist>
<listitem>
<para>
In the configure phase, it calls <literal>perl Makefile.PL</literal> to
generate a Makefile. You can set the variable
<varname>makeMakerFlags</varname> to pass flags to
<filename>Makefile.PL</filename>
</para>
</listitem>
<listitem>
<para>
It adds the contents of the <envar>PERL5LIB</envar> environment variable
to <literal>#! .../bin/perl</literal> line of Perl scripts as
<literal>-I<replaceable>dir</replaceable></literal> flags. This ensures
that a script can find its dependencies.
</para>
</listitem>
<listitem>
<para>
In the fixup phase, it writes the propagated build inputs
(<varname>propagatedBuildInputs</varname>) to the file
<filename>$out/nix-support/propagated-user-env-packages</filename>.
<command>nix-env</command> recursively installs all packages listed in
this file when you install a package that has it. This ensures that a Perl
package can find its dependencies.
</para>
</listitem>
</orderedlist>
</para>
<para>
<varname>buildPerlPackage</varname> is built on top of
<varname>stdenv</varname>, so everything can be customised in the usual way.
For instance, the <literal>BerkeleyDB</literal> module has a
<varname>preConfigure</varname> hook to generate a configuration file used by
<filename>Makefile.PL</filename>:
<programlisting>
{ buildPerlPackage, fetchurl, db }:
@ -108,23 +109,20 @@ buildPerlPackage rec {
};
preConfigure = ''
echo "LIB = ${db}/lib" > config.in
echo "INCLUDE = ${db}/include" >> config.in
echo "LIB = ${db.out}/lib" > config.in
echo "INCLUDE = ${db.dev}/include" >> config.in
'';
}
</programlisting>
</para>
</para>
<para>Dependencies on other Perl packages can be specified in the
<varname>buildInputs</varname> and
<varname>propagatedBuildInputs</varname> attributes. If something is
exclusively a build-time dependency, use
<varname>buildInputs</varname>; if it's (also) a runtime dependency,
use <varname>propagatedBuildInputs</varname>. For instance, this
builds a Perl module that has runtime dependencies on a bunch of other
modules:
<para>
Dependencies on other Perl packages can be specified in the
<varname>buildInputs</varname> and <varname>propagatedBuildInputs</varname>
attributes. If something is exclusively a build-time dependency, use
<varname>buildInputs</varname>; if it's (also) a runtime dependency, use
<varname>propagatedBuildInputs</varname>. For instance, this builds a Perl
module that has runtime dependencies on a bunch of other modules:
<programlisting>
ClassC3Componentised = buildPerlPackage rec {
name = "Class-C3-Componentised-1.0004";
@ -137,24 +135,26 @@ ClassC3Componentised = buildPerlPackage rec {
];
};
</programlisting>
</para>
</para>
<section xml:id="ssec-generation-from-CPAN">
<title>Generation from CPAN</title>
<section xml:id="ssec-generation-from-CPAN"><title>Generation from CPAN</title>
<para>Nix expressions for Perl packages can be generated (almost)
automatically from CPAN. This is done by the program
<command>nix-generate-from-cpan</command>, which can be installed
as follows:</para>
<para>
Nix expressions for Perl packages can be generated (almost) automatically
from CPAN. This is done by the program
<command>nix-generate-from-cpan</command>, which can be installed as
follows:
</para>
<screen>
$ nix-env -i nix-generate-from-cpan
</screen>
<para>This program takes a Perl module name, looks it up on CPAN,
fetches and unpacks the corresponding package, and prints a Nix
expression on standard output. For example:
<para>
This program takes a Perl module name, looks it up on CPAN, fetches and
unpacks the corresponding package, and prints a Nix expression on standard
output. For example:
<screen>
$ nix-generate-from-cpan XML::Simple
XMLSimple = buildPerlPackage rec {
@ -170,12 +170,23 @@ $ nix-generate-from-cpan XML::Simple
};
};
</screen>
The output can be pasted into
<filename>pkgs/top-level/perl-packages.nix</filename> or wherever else you
need it.
</para>
</section>
The output can be pasted into
<filename>pkgs/top-level/perl-packages.nix</filename> or wherever else
you need it.</para>
<section xml:id="ssec-perl-cross-compilation">
<title>Cross-compiling modules</title>
<para>
Nixpkgs has experimental support for cross-compiling Perl modules. In many
cases, it will just work out of the box, even for modules with native
extensions. Sometimes, however, the Makefile.PL for a module may
(indirectly) import a native module. In that case, you will need to make a
stub for that module that will satisfy the Makefile.PL and install it into
<filename>lib/perl5/site_perl/cross_perl/${perl.version}</filename>. See the
<varname>postInstall</varname> for <varname>DBI</varname> for an example.
</para>
</section>
</section>
</section>
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,58 +1,74 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-qt">
<title>Qt</title>
<title>Qt</title>
<para>
Qt is a comprehensive desktop and mobile application development toolkit for
C++. Legacy support is available for Qt 3 and Qt 4, but all current
development uses Qt 5. The Qt 5 packages in Nixpkgs are updated frequently to
take advantage of new features, but older versions are typically retained
until their support window ends. The most important consideration in
packaging Qt-based software is ensuring that each package and all its
dependencies use the same version of Qt 5; this consideration motivates most
of the tools described below.
</para>
<para>
Qt is a comprehensive desktop and mobile application development toolkit for C++.
Legacy support is available for Qt 3 and Qt 4, but all current development uses Qt 5.
The Qt 5 packages in Nixpkgs are updated frequently to take advantage of new features,
but older versions are typically retained until their support window ends.
The most important consideration in packaging Qt-based software is ensuring that each package and all its dependencies use the same version of Qt 5;
this consideration motivates most of the tools described below.
</para>
<section xml:id="ssec-qt-libraries">
<title>Packaging Libraries for Nixpkgs</title>
<section xml:id="ssec-qt-libraries"><title>Packaging Libraries for Nixpkgs</title>
<para>
Whenever possible, libraries that use Qt 5 should be built with each
available version. Packages providing libraries should be added to the
top-level function <varname>mkLibsForQt5</varname>, which is used to build a
set of libraries for every Qt 5 version. A special
<varname>callPackage</varname> function is used in this scope to ensure that
the entire dependency tree uses the same Qt 5 version. Import dependencies
unqualified, i.e., <literal>qtbase</literal> not
<literal>qt5.qtbase</literal>. <emphasis>Do not</emphasis> import a package
set such as <literal>qt5</literal> or <literal>libsForQt5</literal>.
</para>
<para>
Whenever possible, libraries that use Qt 5 should be built with each available version.
Packages providing libraries should be added to the top-level function <varname>mkLibsForQt5</varname>,
which is used to build a set of libraries for every Qt 5 version.
A special <varname>callPackage</varname> function is used in this scope to ensure that the entire dependency tree uses the same Qt 5 version.
Import dependencies unqualified, i.e., <literal>qtbase</literal> not <literal>qt5.qtbase</literal>.
<emphasis>Do not</emphasis> import a package set such as <literal>qt5</literal> or <literal>libsForQt5</literal>.
</para>
<para>
If a library does not support a particular version of Qt 5, it is best to
mark it as broken by setting its <literal>meta.broken</literal> attribute. A
package may be marked broken for certain versions by testing the
<literal>qtbase.version</literal> attribute, which will always give the
current Qt 5 version.
</para>
</section>
<para>
If a library does not support a particular version of Qt 5, it is best to mark it as broken by setting its <literal>meta.broken</literal> attribute.
A package may be marked broken for certain versions by testing the <literal>qtbase.version</literal> attribute, which will always give the current Qt 5 version.
</para>
<section xml:id="ssec-qt-applications">
<title>Packaging Applications for Nixpkgs</title>
<para>
Call your application expression using
<literal>libsForQt5.callPackage</literal> instead of
<literal>callPackage</literal>. Import dependencies unqualified, i.e.,
<literal>qtbase</literal> not <literal>qt5.qtbase</literal>. <emphasis>Do
not</emphasis> import a package set such as <literal>qt5</literal> or
<literal>libsForQt5</literal>.
</para>
<para>
Qt 5 maintains strict backward compatibility, so it is generally best to
build an application package against the latest version using the
<varname>libsForQt5</varname> library set. In case a package does not build
with the latest Qt version, it is possible to pick a set pinned to a
particular version, e.g. <varname>libsForQt55</varname> for Qt 5.5, if that
is the latest version the package supports. If a package must be pinned to
an older Qt version, be sure to file a bug upstream; because Qt is strictly
backwards-compatible, any incompatibility is by definition a bug in the
application.
</para>
<para>
When testing applications in Nixpkgs, it is a common practice to build the
package with <literal>nix-build</literal> and run it using the created
symbolic link. This will not work with Qt applications, however, because
they have many hard runtime requirements that can only be guaranteed if the
package is actually installed. To test a Qt application, install it with
<literal>nix-env</literal> or run it inside <literal>nix-shell</literal>.
</para>
</section>
</section>
<section xml:id="ssec-qt-applications"><title>Packaging Applications for Nixpkgs</title>
<para>
Call your application expression using <literal>libsForQt5.callPackage</literal> instead of <literal>callPackage</literal>.
Import dependencies unqualified, i.e., <literal>qtbase</literal> not <literal>qt5.qtbase</literal>.
<emphasis>Do not</emphasis> import a package set such as <literal>qt5</literal> or <literal>libsForQt5</literal>.
</para>
<para>
Qt 5 maintains strict backward compatibility, so it is generally best to build an application package against the latest version using the <varname>libsForQt5</varname> library set.
In case a package does not build with the latest Qt version, it is possible to pick a set pinned to a particular version, e.g. <varname>libsForQt55</varname> for Qt 5.5, if that is the latest version the package supports.
If a package must be pinned to an older Qt version, be sure to file a bug upstream;
because Qt is strictly backwards-compatible, any incompatibility is by definition a bug in the application.
</para>
<para>
When testing applications in Nixpkgs, it is a common practice to build the package with <literal>nix-build</literal> and run it using the created symbolic link.
This will not work with Qt applications, however, because they have many hard runtime requirements that can only be guaranteed if the package is actually installed.
To test a Qt application, install it with <literal>nix-env</literal> or run it inside <literal>nix-shell</literal>.
</para>
</section>
</section>

R packages
==========
## Installation
Define an environment for R that contains all the libraries that you'd like to
use by adding the following snippet to your `$HOME/.config/nixpkgs/config.nix` file:
```nix
{
packageOverrides = super: let self = super.pkgs; in
{
rEnv = super.rWrapper.override {
packages = with self.rPackages; [
devtools
ggplot2
reshape2
yaml
optparse
];
};
};
}
```
Then you can use `nix-env -f "<nixpkgs>" -iA rEnv` to install it into your user
profile. The set of available libraries can be discovered by running the
command `nix-env -f "<nixpkgs>" -qaP -A rPackages`. The first column from that
output is the name that has to be passed to `rWrapper` in the code snippet above.
However, if you'd like to add a file to your project source to make the
environment available for other contributors, you can create a `default.nix`
file like so:
```nix
let
pkgs = import <nixpkgs> {};
stdenv = pkgs.stdenv;
in with pkgs; {
myProject = stdenv.mkDerivation {
name = "myProject";
version = "1";
src = if pkgs.lib.inNixShell then null else nix;
buildInputs = with rPackages; [
R
ggplot2
knitr
];
};
}
```
and then run `nix-shell .` to be dropped into a shell with those packages
available.
## RStudio
RStudio uses a standard set of packages and ignores any custom R
environments or installed packages you may have. To create a custom
environment, see `rstudioWrapper`, which functions similarly to
`rWrapper`:
```nix
{
packageOverrides = super: let self = super.pkgs; in
{
rstudioEnv = super.rstudioWrapper.override {
packages = with self.rPackages; [
dplyr
ggplot2
reshape2
];
};
};
}
```
Then like above, `nix-env -f "<nixpkgs>" -iA rstudioEnv` will install
this into your user profile.
Alternatively, you can create a self-contained `shell.nix` without the need to
modify any configuration files:
```nix
{ pkgs ? import <nixpkgs> {}
}:
pkgs.rstudioWrapper.override {
packages = with pkgs.rPackages; [ dplyr ggplot2 reshape2 ];
}
```
Executing `nix-shell` will then drop you into an environment equivalent to the
one above. If you need additional packages just add them to the list and
re-enter the shell.
## Updating the package set
```bash
nix-shell generate-shell.nix
Rscript generate-r-packages.R cran > cran-packages.nix.new
mv cran-packages.nix.new cran-packages.nix
Rscript generate-r-packages.R bioc > bioc-packages.nix.new
mv bioc-packages.nix.new bioc-packages.nix
```
`generate-r-packages.R <repo>` reads `<repo>-packages.nix`, hence the renaming.
## Testing if the Nix-expression could be evaluated
```bash
nix-build test-evaluation.nix --dry-run
```
If this exits fine, the expression is ok. If not, you have to edit `default.nix`.

<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-ruby">
<title>Ruby</title>
<title>Ruby</title>
<para>
There currently is support to bundle applications that are packaged as Ruby
gems. The utility "bundix" allows you to write a
<filename>Gemfile</filename>, let bundler create a
<filename>Gemfile.lock</filename>, and then convert this into a nix
expression that contains all Gem dependencies automatically.
</para>
<para>There currently is support to bundle applications that are packaged as
Ruby gems. The utility "bundix" allows you to write a
<filename>Gemfile</filename>, let bundler create a
<filename>Gemfile.lock</filename>, and then convert this into a nix
expression that contains all Gem dependencies automatically.
</para>
<para>For example, to package sensu, we did:</para>
<para>
For example, to package sensu, we did:
</para>
<screen>
<![CDATA[$ cd pkgs/servers/monitoring
@ -42,17 +44,29 @@ bundlerEnv rec {
}]]>
</screen>
<para>Please check in the <filename>Gemfile</filename>,
<filename>Gemfile.lock</filename> and the
<filename>gemset.nix</filename> so future updates can be run easily.
</para>
<para>
Please check in the <filename>Gemfile</filename>,
<filename>Gemfile.lock</filename> and the <filename>gemset.nix</filename> so
future updates can be run easily.
</para>
<para>For tools written in Ruby - i.e. where the desire is to install
a package and then execute e.g. <command>rake</command> at the command
line, there is an alternative builder called <literal>bundlerApp</literal>.
Set up the <filename>gemset.nix</filename> the same way, and then, for
example:
</para>
<para>
Updating Ruby packages can then be done like this:
</para>
<screen>
<![CDATA[$ cd pkgs/servers/monitoring/sensu
$ nix-shell -p bundler --run 'bundle lock --update'
$ nix-shell -p bundix --run 'bundix'
]]>
</screen>
<para>
For tools written in Ruby - i.e. where the desire is to install a package and
then execute e.g. <command>rake</command> at the command line, there is an
alternative builder called <literal>bundlerApp</literal>. Set up the
<filename>gemset.nix</filename> the same way, and then, for example:
</para>
<screen>
<![CDATA[{ lib, bundlerApp }:
@ -72,31 +86,31 @@ bundlerApp {
}]]>
</screen>
<para>The chief advantage of <literal>bundlerApp</literal> over
<literal>bundlerEnv</literal> is the executables introduced in the
environment are precisely those selected in the <literal>exes</literal>
list, as opposed to <literal>bundlerEnv</literal> which adds all the
executables made available by gems in the gemset, which can mean e.g.
<command>rspec</command> or <command>rake</command> in unpredictable
versions available from various packages.
</para>
<para>
The chief advantage of <literal>bundlerApp</literal> over
<literal>bundlerEnv</literal> is the executables introduced in the
environment are precisely those selected in the <literal>exes</literal> list,
as opposed to <literal>bundlerEnv</literal> which adds all the executables
made available by gems in the gemset, which can mean e.g.
<command>rspec</command> or <command>rake</command> in unpredictable versions
available from various packages.
</para>
<para>Resulting derivations for both builders also have two helpful
attributes, <literal>env</literal> and <literal>wrappedRuby</literal>.
The first one allows one to quickly drop into
<command>nix-shell</command> with the specified environment present.
E.g. <command>nix-shell -A sensu.env</command> would give you an
environment with Ruby preset so it has all the libraries necessary
for <literal>sensu</literal> in its paths. The second one can be
used to make derivations from custom Ruby scripts which have
<filename>Gemfile</filename>s with their dependencies specified. It is
a derivation with <command>ruby</command> wrapped so it can find all
the needed dependencies. For example, to make a derivation
<literal>my-script</literal> for a <filename>my-script.rb</filename>
(which should be placed in <filename>bin</filename>) you should run
<command>bundix</command> as specified above and then use
<literal>bundlerEnv</literal> like this:
</para>
<para>
Resulting derivations for both builders also have two helpful attributes,
<literal>env</literal> and <literal>wrappedRuby</literal>. The first one
allows one to quickly drop into <command>nix-shell</command> with the
specified environment present. E.g. <command>nix-shell -A sensu.env</command>
would give you an environment with Ruby preset so it has all the libraries
necessary for <literal>sensu</literal> in its paths. The second one can be
used to make derivations from custom Ruby scripts which have
<filename>Gemfile</filename>s with their dependencies specified. It is a
derivation with <command>ruby</command> wrapped so it can find all the needed
dependencies. For example, to make a derivation <literal>my-script</literal>
for a <filename>my-script.rb</filename> (which should be placed in
<filename>bin</filename>) you should run <command>bundix</command> as
specified above and then use <literal>bundlerEnv</literal> like this:
</para>
<programlisting>
<![CDATA[let env = bundlerEnv {
@ -118,5 +132,4 @@ in stdenv.mkDerivation {
'';
}]]>
</programlisting>
</section>

View File

@ -1,309 +0,0 @@
---
title: Rust
author: Matthias Beyer
date: 2017-03-05
---
# User's Guide to the Rust Infrastructure
To install the rust compiler and cargo put
```
rustc
cargo
```
into the `environment.systemPackages` or bring them into
scope with `nix-shell -p rustc cargo`.
For daily builds (beta and nightly) use either rustup from
nixpkgs or use the [Rust nightlies
overlay](#using-the-rust-nightlies-overlay).
## Compiling Rust applications with Cargo
Rust applications are packaged by using the `buildRustPackage` helper from `rustPlatform`:
```
rustPlatform.buildRustPackage rec {
name = "ripgrep-${version}";
version = "0.4.0";
src = fetchFromGitHub {
owner = "BurntSushi";
repo = "ripgrep";
rev = "${version}";
sha256 = "0y5d1n6hkw85jb3rblcxqas2fp82h3nghssa4xqrhqnz25l799pj";
};
cargoSha256 = "0q68qyl2h6i0qsz82z840myxlnjay8p1w5z7hfyr8fqp7wgwa9cx";
meta = with stdenv.lib; {
description = "A fast line-oriented regex search tool, similar to ag and ack";
homepage = https://github.com/BurntSushi/ripgrep;
license = licenses.unlicense;
maintainers = [ maintainers.tailhook ];
platforms = platforms.all;
};
}
```
`buildRustPackage` requires a `cargoSha256` attribute which is computed over
all crate sources of this package. Currently it is obtained by inserting a
fake checksum into the expression and building the package once. The correct
checksum can then be taken from the failed build.
To install crates with nix there is also an experimental project called
[nixcrates](https://github.com/fractalide/nixcrates).
## Compiling Rust crates using Nix instead of Cargo
### Simple operation
When run, `cargo build` produces a file called `Cargo.lock`,
containing pinned versions of all dependencies. Nixpkgs contains a
tool called `carnix` (`nix-env -iA nixos.carnix`), which can be used
to turn a `Cargo.lock` into a Nix expression.
That Nix expression calls `rustc` directly (hence bypassing Cargo),
and can be used to compile a crate and all its dependencies. Here is
an example for a minimal `hello` crate:
$ cargo new hello
$ cd hello
$ cargo build
Compiling hello v0.1.0 (file:///tmp/hello)
Finished dev [unoptimized + debuginfo] target(s) in 0.20 secs
$ carnix -o hello.nix --src ./. Cargo.lock --standalone
$ nix-build hello.nix
Now, the file produced by the call to `carnix`, called `hello.nix`, looks like:
```
# Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
{ lib, buildPlatform, buildRustCrate, fetchgit }:
let kernel = buildPlatform.parsed.kernel.name;
# ... (content skipped)
in
rec {
hello = f: hello_0_1_0 { features = hello_0_1_0_features { hello_0_1_0 = f; }; };
hello_0_1_0_ = { dependencies?[], buildDependencies?[], features?[] }: buildRustCrate {
crateName = "hello";
version = "0.1.0";
authors = [ "pe@pijul.org <pe@pijul.org>" ];
src = ./.;
inherit dependencies buildDependencies features;
};
hello_0_1_0 = { features?(hello_0_1_0_features {}) }: hello_0_1_0_ {};
hello_0_1_0_features = f: updateFeatures f (rec {
hello_0_1_0.default = (f.hello_0_1_0.default or true);
}) [ ];
}
```
In particular, note that the argument given as `--src` is copied
verbatim to the source. If we look at more complicated
dependencies, for instance by adding a single line `libc="*"` to our
`Cargo.toml`, we first need to run `cargo build` to update the
`Cargo.lock`. Then, `carnix` needs to be run again, and produces the
following nix file:
```
# Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
{ lib, buildPlatform, buildRustCrate, fetchgit }:
let kernel = buildPlatform.parsed.kernel.name;
# ... (content skipped)
in
rec {
hello = f: hello_0_1_0 { features = hello_0_1_0_features { hello_0_1_0 = f; }; };
hello_0_1_0_ = { dependencies?[], buildDependencies?[], features?[] }: buildRustCrate {
crateName = "hello";
version = "0.1.0";
authors = [ "pe@pijul.org <pe@pijul.org>" ];
src = ./.;
inherit dependencies buildDependencies features;
};
libc_0_2_36_ = { dependencies?[], buildDependencies?[], features?[] }: buildRustCrate {
crateName = "libc";
version = "0.2.36";
authors = [ "The Rust Project Developers" ];
sha256 = "01633h4yfqm0s302fm0dlba469bx8y6cs4nqc8bqrmjqxfxn515l";
inherit dependencies buildDependencies features;
};
hello_0_1_0 = { features?(hello_0_1_0_features {}) }: hello_0_1_0_ {
dependencies = mapFeatures features ([ libc_0_2_36 ]);
};
hello_0_1_0_features = f: updateFeatures f (rec {
hello_0_1_0.default = (f.hello_0_1_0.default or true);
libc_0_2_36.default = true;
}) [ libc_0_2_36_features ];
libc_0_2_36 = { features?(libc_0_2_36_features {}) }: libc_0_2_36_ {
features = mkFeatures (features.libc_0_2_36 or {});
};
libc_0_2_36_features = f: updateFeatures f (rec {
libc_0_2_36.default = (f.libc_0_2_36.default or true);
libc_0_2_36.use_std =
(f.libc_0_2_36.use_std or false) ||
(f.libc_0_2_36.default or false) ||
(libc_0_2_36.default or false);
}) [];
}
```
Here, the `libc` crate has no `src` attribute, so `buildRustCrate`
will fetch it from [crates.io](https://crates.io). A `sha256`
attribute is still needed for Nix purity.
### Handling external dependencies
Some crates require external libraries. For crates from
[crates.io](https://crates.io), such libraries can be specified in
the `defaultCrateOverrides` package in nixpkgs itself.
Starting from that file, one can add more overrides, to add features
or build inputs by overriding the hello crate in a separate file.
```
with import <nixpkgs> {};
((import ./hello.nix).hello {}).override {
crateOverrides = defaultCrateOverrides // {
hello = attrs: { buildInputs = [ openssl ]; };
};
}
```
Here, `crateOverrides` is expected to be an attribute set, where the
key is the crate name without version number and the value is a function.
The function gets all attributes passed to `buildRustCrate` as its first
argument and returns a set that contains all attributes that should be
overwritten.
For more complicated cases, such as when parts of the crate's
derivation depend on the crate's version, the `attrs` argument of
the override above can be read, as in the following example, which
patches the derivation:
```
with import <nixpkgs> {};
((import ./hello.nix).hello {}).override {
crateOverrides = defaultCrateOverrides // {
hello = attrs: lib.optionalAttrs (lib.versionAtLeast attrs.version "1.0") {
postPatch = ''
substituteInPlace lib/zoneinfo.rs \
--replace "/usr/share/zoneinfo" "${tzdata}/share/zoneinfo"
'';
};
};
}
```
Another situation is when we want to override a nested
dependency. This actually works in the exact same way, since the
`crateOverrides` parameter is forwarded to the crate's
dependencies. For instance, to override the build inputs for crate
`libc` in the example above, where `libc` is a dependency of the main
crate, we could do:
```
with import <nixpkgs> {};
((import hello.nix).hello {}).override {
crateOverrides = defaultCrateOverrides // {
libc = attrs: { buildInputs = []; };
};
}
```
### Options and phases configuration
Actually, the overrides introduced in the previous section are more
general. A number of other parameters can be overridden:
- The version of rustc used to compile the crate:
```
(hello {}).override { rust = pkgs.rust; };
```
- Whether to build in release mode or debug mode (release mode by
default):
```
(hello {}).override { release = false; };
```
- Whether to print the commands sent to rustc when building
(equivalent to `--verbose` in cargo):
```
(hello {}).override { verbose = false; };
```
- Extra arguments to be passed to `rustc`:
```
(hello {}).override { extraRustcOpts = "-Z debuginfo=2"; };
```
- Phases, just like in any other derivation, can be specified using
the following attributes: `preUnpack`, `postUnpack`, `prePatch`,
`patches`, `postPatch`, `preConfigure` (in the case of a Rust crate,
this is run before calling the "build" script), `postConfigure`
(after the "build" script), `preBuild`, `postBuild`, `preInstall` and
`postInstall`. As an example, here is how to create a new module
before running the build script:
```
(hello {}).override {
preConfigure = ''
echo "pub const PATH=\"${hi.out}\";" >> src/path.rs
'';
};
```
### Features
One can also supply features switches. For example, if we want to
compile `diesel_cli` only with the `postgres` feature, and no default
features, we would write:
```
(callPackage ./diesel.nix {}).diesel {
default = false;
postgres = true;
}
```
Where `diesel.nix` is the file generated by Carnix, as explained above.
## Using the Rust nightlies overlay
Mozilla provides an overlay for nixpkgs to bring a nightly version of Rust into scope.
This overlay can _also_ be used to install recent unstable or stable versions
of Rust, if desired.
To use this overlay, clone
[nixpkgs-mozilla](https://github.com/mozilla/nixpkgs-mozilla),
and create a symbolic link to the file
[rust-overlay.nix](https://github.com/mozilla/nixpkgs-mozilla/blob/master/rust-overlay.nix)
in the `~/.config/nixpkgs/overlays` directory.
$ git clone https://github.com/mozilla/nixpkgs-mozilla.git
$ mkdir -p ~/.config/nixpkgs/overlays
$ ln -s $(pwd)/nixpkgs-mozilla/rust-overlay.nix ~/.config/nixpkgs/overlays/rust-overlay.nix
The latest version can be installed with the following command:
$ nix-env -Ai nixos.latest.rustChannels.stable.rust
Or using the attribute with nix-shell:
$ nix-shell -p nixos.latest.rustChannels.stable.rust
To install the beta or nightly channel, substitute "nightly" or "beta" for
"stable", or use the function provided by this overlay to pull a version based
on a build date.
The overlay automatically updates itself as it uses the same source as
[rustup](https://www.rustup.rs/).

View File

@ -0,0 +1,399 @@
---
title: Rust
author: Matthias Beyer
date: 2017-03-05
---
# User's Guide to the Rust Infrastructure
To install the rust compiler and cargo put
```
rustc
cargo
```
into the `environment.systemPackages` or bring them into
scope with `nix-shell -p rustc cargo`.
> If you are using NixOS and you want to use rust without a nix expression you
> probably want to add the following in your `configuration.nix` to build
> crates with C dependencies.
>
> environment.systemPackages = [binutils gcc gnumake openssl pkgconfig]
For daily builds (beta and nightly) use either rustup from
nixpkgs or use the [Rust nightlies
overlay](#using-the-rust-nightlies-overlay).
## Compiling Rust applications with Cargo
Rust applications are packaged by using the `buildRustPackage` helper from `rustPlatform`:
```
rustPlatform.buildRustPackage rec {
name = "ripgrep-${version}";
version = "0.4.0";
src = fetchFromGitHub {
owner = "BurntSushi";
repo = "ripgrep";
rev = "${version}";
sha256 = "0y5d1n6hkw85jb3rblcxqas2fp82h3nghssa4xqrhqnz25l799pj";
};
cargoSha256 = "0q68qyl2h6i0qsz82z840myxlnjay8p1w5z7hfyr8fqp7wgwa9cx";
meta = with stdenv.lib; {
description = "A fast line-oriented regex search tool, similar to ag and ack";
homepage = https://github.com/BurntSushi/ripgrep;
license = licenses.unlicense;
maintainers = [ maintainers.tailhook ];
platforms = platforms.all;
};
}
```
`buildRustPackage` requires a `cargoSha256` attribute which is computed over
all crate sources of this package. Currently it is obtained by inserting a
fake checksum into the expression and building the package once. The correct
checksum can then be taken from the failed build.
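One common approach, sketched below, is to start from a placeholder value (any
syntactically valid hash, e.g. 52 zeros) and replace it with the hash reported
by the first failed build:
```
rustPlatform.buildRustPackage rec {
  # ...
  # placeholder; replace with the "got: ..." hash printed by the failed build
  cargoSha256 = "0000000000000000000000000000000000000000000000000000";
}
```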
When the `Cargo.lock`, provided by upstream, is not in sync with the
`Cargo.toml`, it is possible to use `cargoPatches` to update it. All patches
added in `cargoPatches` will also be prepended to the patches in `patches` at
build-time.
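A sketch of what this might look like (the patch file name is hypothetical):
```
rustPlatform.buildRustPackage rec {
  # ...
  cargoPatches = [
    # a hypothetical patch that regenerates Cargo.lock to match Cargo.toml
    ./0001-update-cargo-lock.patch
  ];
}
```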
## Compiling Rust crates using Nix instead of Cargo
### Simple operation
When run, `cargo build` produces a file called `Cargo.lock`,
containing pinned versions of all dependencies. Nixpkgs contains a
tool called `carnix` (`nix-env -iA nixos.carnix`), which can be used
to turn a `Cargo.lock` into a Nix expression.
That Nix expression calls `rustc` directly (hence bypassing Cargo),
and can be used to compile a crate and all its dependencies. Here is
an example for a minimal `hello` crate:
$ cargo new hello
$ cd hello
$ cargo build
Compiling hello v0.1.0 (file:///tmp/hello)
Finished dev [unoptimized + debuginfo] target(s) in 0.20 secs
$ carnix -o hello.nix --src ./. Cargo.lock --standalone
$ nix-build hello.nix -A hello_0_1_0
Now, the file produced by the call to `carnix`, called `hello.nix`, looks like:
```
# Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
{ lib, stdenv, buildRustCrate, fetchgit }:
let kernel = stdenv.buildPlatform.parsed.kernel.name;
# ... (content skipped)
in
rec {
hello = f: hello_0_1_0 { features = hello_0_1_0_features { hello_0_1_0 = f; }; };
hello_0_1_0_ = { dependencies?[], buildDependencies?[], features?[] }: buildRustCrate {
crateName = "hello";
version = "0.1.0";
authors = [ "pe@pijul.org <pe@pijul.org>" ];
src = ./.;
inherit dependencies buildDependencies features;
};
hello_0_1_0 = { features?(hello_0_1_0_features {}) }: hello_0_1_0_ {};
hello_0_1_0_features = f: updateFeatures f (rec {
hello_0_1_0.default = (f.hello_0_1_0.default or true);
}) [ ];
}
```
In particular, note that the argument given as `--src` is copied
verbatim to the source. If we look at more complicated
dependencies, for instance by adding a single line `libc="*"` to our
`Cargo.toml`, we first need to run `cargo build` to update the
`Cargo.lock`. Then, `carnix` needs to be run again, and produces the
following nix file:
```
# Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
{ lib, stdenv, buildRustCrate, fetchgit }:
let kernel = stdenv.buildPlatform.parsed.kernel.name;
# ... (content skipped)
in
rec {
hello = f: hello_0_1_0 { features = hello_0_1_0_features { hello_0_1_0 = f; }; };
hello_0_1_0_ = { dependencies?[], buildDependencies?[], features?[] }: buildRustCrate {
crateName = "hello";
version = "0.1.0";
authors = [ "pe@pijul.org <pe@pijul.org>" ];
src = ./.;
inherit dependencies buildDependencies features;
};
libc_0_2_36_ = { dependencies?[], buildDependencies?[], features?[] }: buildRustCrate {
crateName = "libc";
version = "0.2.36";
authors = [ "The Rust Project Developers" ];
sha256 = "01633h4yfqm0s302fm0dlba469bx8y6cs4nqc8bqrmjqxfxn515l";
inherit dependencies buildDependencies features;
};
hello_0_1_0 = { features?(hello_0_1_0_features {}) }: hello_0_1_0_ {
dependencies = mapFeatures features ([ libc_0_2_36 ]);
};
hello_0_1_0_features = f: updateFeatures f (rec {
hello_0_1_0.default = (f.hello_0_1_0.default or true);
libc_0_2_36.default = true;
}) [ libc_0_2_36_features ];
libc_0_2_36 = { features?(libc_0_2_36_features {}) }: libc_0_2_36_ {
features = mkFeatures (features.libc_0_2_36 or {});
};
libc_0_2_36_features = f: updateFeatures f (rec {
libc_0_2_36.default = (f.libc_0_2_36.default or true);
libc_0_2_36.use_std =
(f.libc_0_2_36.use_std or false) ||
(f.libc_0_2_36.default or false) ||
(libc_0_2_36.default or false);
}) [];
}
```
Here, the `libc` crate has no `src` attribute, so `buildRustCrate`
will fetch it from [crates.io](https://crates.io). A `sha256`
attribute is still needed for Nix purity.
### Handling external dependencies
Some crates require external libraries. For crates from
[crates.io](https://crates.io), such libraries can be specified in
the `defaultCrateOverrides` package in nixpkgs itself.
Starting from that file, one can add more overrides, to add features
or build inputs by overriding the hello crate in a separate file.
```
with import <nixpkgs> {};
((import ./hello.nix).hello {}).override {
crateOverrides = defaultCrateOverrides // {
hello = attrs: { buildInputs = [ openssl ]; };
};
}
```
Here, `crateOverrides` is expected to be an attribute set, where the
key is the crate name without version number and the value is a function.
The function gets all attributes passed to `buildRustCrate` as its first
argument and returns a set that contains all attributes that should be
overwritten.
For more complicated cases, such as when parts of the crate's
derivation depend on the crate's version, the `attrs` argument of
the override above can be read, as in the following example, which
patches the derivation:
```
with import <nixpkgs> {};
((import ./hello.nix).hello {}).override {
crateOverrides = defaultCrateOverrides // {
hello = attrs: lib.optionalAttrs (lib.versionAtLeast attrs.version "1.0") {
postPatch = ''
substituteInPlace lib/zoneinfo.rs \
--replace "/usr/share/zoneinfo" "${tzdata}/share/zoneinfo"
'';
};
};
}
```
Another situation is when we want to override a nested
dependency. This actually works in the exact same way, since the
`crateOverrides` parameter is forwarded to the crate's
dependencies. For instance, to override the build inputs for crate
`libc` in the example above, where `libc` is a dependency of the main
crate, we could do:
```
with import <nixpkgs> {};
((import hello.nix).hello {}).override {
crateOverrides = defaultCrateOverrides // {
libc = attrs: { buildInputs = []; };
};
}
```
### Options and phases configuration
Actually, the overrides introduced in the previous section are more
general. A number of other parameters can be overridden:
- The version of rustc used to compile the crate:
```
(hello {}).override { rust = pkgs.rust; };
```
- Whether to build in release mode or debug mode (release mode by
default):
```
(hello {}).override { release = false; };
```
- Whether to print the commands sent to rustc when building
(equivalent to `--verbose` in cargo):
```
(hello {}).override { verbose = false; };
```
- Extra arguments to be passed to `rustc`:
```
(hello {}).override { extraRustcOpts = "-Z debuginfo=2"; };
```
- Phases, just like in any other derivation, can be specified using
the following attributes: `preUnpack`, `postUnpack`, `prePatch`,
`patches`, `postPatch`, `preConfigure` (in the case of a Rust crate,
this is run before calling the "build" script), `postConfigure`
(after the "build" script), `preBuild`, `postBuild`, `preInstall` and
`postInstall`. As an example, here is how to create a new module
before running the build script:
```
(hello {}).override {
preConfigure = ''
echo "pub const PATH=\"${hi.out}\";" >> src/path.rs
'';
};
```
### Features
One can also supply features switches. For example, if we want to
compile `diesel_cli` only with the `postgres` feature, and no default
features, we would write:
```
(callPackage ./diesel.nix {}).diesel {
default = false;
postgres = true;
}
```
Where `diesel.nix` is the file generated by Carnix, as explained above.
## Setting Up `nix-shell`
Oftentimes you want to develop code from within `nix-shell`. Unfortunately
`buildRustCrate` does not support common `nix-shell` operations directly
(see [this issue](https://github.com/NixOS/nixpkgs/issues/37945))
so we will use `stdenv.mkDerivation` instead.
Using the example `hello` project above, we want to do the following:
- Have access to `cargo` and `rustc`
- Have the `openssl` library available to a crate through its _normal_
compilation mechanism (`pkg-config`).
A typical `shell.nix` might look like:
```
with import <nixpkgs> {};
stdenv.mkDerivation {
name = "rust-env";
nativeBuildInputs = [
rustc cargo
# Example Build-time Additional Dependencies
pkgconfig
];
buildInputs = [
# Example Run-time Additional Dependencies
openssl
];
# Set Environment Variables
RUST_BACKTRACE = 1;
}
```
You should now be able to run the following:
```
$ nix-shell --pure
$ cargo build
$ cargo test
```
### Controlling Rust Version Inside `nix-shell`
To control your Rust version (i.e. use nightly) from within `shell.nix` (or
other Nix expressions) you can use the following `shell.nix`:
```
# Latest Nightly
with import <nixpkgs> {};
let src = fetchFromGitHub {
owner = "mozilla";
repo = "nixpkgs-mozilla";
# commit from: 2018-03-27
rev = "2945b0b6b2fd19e7d23bac695afd65e320efcebe";
sha256 = "034m1dryrzh2lmjvk3c0krgip652dql46w5yfwpvh7gavd3iypyw";
};
in
with import "${src.out}/rust-overlay.nix" pkgs pkgs;
stdenv.mkDerivation {
name = "rust-env";
buildInputs = [
# Note: to use stable, just replace `nightly` with `stable`
latest.rustChannels.nightly.rust
# Add some extra dependencies from `pkgs`
pkgconfig openssl
];
# Set Environment Variables
RUST_BACKTRACE = 1;
}
```
Now run:
```
$ rustc --version
rustc 1.26.0-nightly (188e693b3 2018-03-26)
```
to confirm that you are using the nightly toolchain.
## Using the Rust nightlies overlay
Mozilla provides an overlay for nixpkgs to bring a nightly version of Rust into scope.
This overlay can _also_ be used to install recent unstable or stable versions
of Rust, if desired.
To use this overlay, clone
[nixpkgs-mozilla](https://github.com/mozilla/nixpkgs-mozilla),
and create a symbolic link to the file
[rust-overlay.nix](https://github.com/mozilla/nixpkgs-mozilla/blob/master/rust-overlay.nix)
in the `~/.config/nixpkgs/overlays` directory.
$ git clone https://github.com/mozilla/nixpkgs-mozilla.git
$ mkdir -p ~/.config/nixpkgs/overlays
$ ln -s $(pwd)/nixpkgs-mozilla/rust-overlay.nix ~/.config/nixpkgs/overlays/rust-overlay.nix
The latest version can be installed with the following command:
$ nix-env -Ai nixos.latest.rustChannels.stable.rust
Or using the attribute with nix-shell:
$ nix-shell -p nixos.latest.rustChannels.stable.rust
To install the beta or nightly channel, substitute "nightly" or "beta" for
"stable", or use the function provided by this overlay to pull a version based
on a build date, as sketched below.
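A sketch of pinning a toolchain by date, assuming the overlay's
`rustChannelOf` helper (the date is illustrative):
```
with import <nixpkgs> {};
let
  # nightly toolchain from a specific build date
  channel = rustChannelOf { date = "2019-01-26"; channel = "nightly"; };
in
  channel.rust
```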
The overlay automatically updates itself as it uses the same source as
[rustup](https://www.rustup.rs/).

View File

@ -1,27 +1,42 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-texlive">
<title>TeX Live</title>
<title>TeX Live</title>
<para>
Since release 15.09 there is a new TeX Live packaging that lives entirely
under attribute <varname>texlive</varname>.
</para>
<section xml:id="sec-language-texlive-users-guide">
<title>User's guide</title>
<para>Since release 15.09 there is a new TeX Live packaging that lives entirely under attribute <varname>texlive</varname>.</para>
<section><title>User's guide</title>
<itemizedlist>
<listitem><para>
For basic usage just pull <varname>texlive.combined.scheme-basic</varname> for an environment with basic LaTeX support.</para></listitem>
<listitem><para>
It typically won't work to use separately installed packages together.
Instead, you can build a custom set of packages like this:
<programlisting>
<listitem>
<para>
For basic usage just pull <varname>texlive.combined.scheme-basic</varname>
for an environment with basic LaTeX support.
</para>
</listitem>
<listitem>
<para>
It typically won't work to use separately installed packages together.
Instead, you can build a custom set of packages like this:
<programlisting>
texlive.combine {
inherit (texlive) scheme-small collection-langkorean algorithms cm-super;
}
</programlisting>
There are all the schemes, collections and a few thousand packages, as defined upstream (perhaps with tiny differences).
</para></listitem>
<listitem><para>
By default you only get executables and files needed during runtime, and a little documentation for the core packages. To change that, you need to add <varname>pkgFilter</varname> function to <varname>combine</varname>.
<programlisting>
There are all the schemes, collections and a few thousand packages, as
defined upstream (perhaps with tiny differences).
</para>
</listitem>
<listitem>
<para>
By default you only get executables and files needed during runtime, and a
little documentation for the core packages. To change that, you need to
add <varname>pkgFilter</varname> function to <varname>combine</varname>.
<programlisting>
texlive.combine {
# inherit (texlive) whatever-you-want;
pkgFilter = pkg:
@ -30,31 +45,55 @@ texlive.combine {
# there are also other attributes: version, name
}
</programlisting>
</para></listitem>
<listitem><para>
You can list packages e.g. by <command>nix-repl</command>.
<programlisting>
$ nix-repl
nix-repl> :l &lt;nixpkgs>
nix-repl> texlive.collection-&lt;TAB>
</programlisting>
</para></listitem>
</para>
</listitem>
<listitem>
<para>
You can list packages e.g. by <command>nix repl</command>.
<programlisting><![CDATA[
$ nix repl
nix-repl> :l <nixpkgs>
nix-repl> texlive.collection-<TAB>
]]></programlisting>
</para>
</listitem>
<listitem>
<para>
Note that the wrapper assumes that the result has a chance to be useful.
For example, the core executables should be present, as well as some core
data files. The supported way of ensuring this is by including some
scheme, for example <varname>scheme-basic</varname>, into the combination.
</para>
</listitem>
</itemizedlist>
</section>
</section>
<section xml:id="sec-language-texlive-known-problems">
<title>Known problems</title>
<section><title>Known problems</title>
<itemizedlist>
<listitem><para>
Some tools are still missing, e.g. luajittex;</para></listitem>
<listitem><para>
some apps aren't packaged/tested yet (asymptote, biber, etc.);</para></listitem>
<listitem><para>
feature/bug: when a package is rejected by <varname>pkgFilter</varname>, its dependencies are still propagated;</para></listitem>
<listitem><para>
in case of any bugs or feature requests, file a github issue or, better, a pull request and /cc @vcunat.</para></listitem>
<listitem>
<para>
Some tools are still missing, e.g. luajittex;
</para>
</listitem>
<listitem>
<para>
some apps aren't packaged/tested yet (asymptote, biber, etc.);
</para>
</listitem>
<listitem>
<para>
feature/bug: when a package is rejected by <varname>pkgFilter</varname>,
its dependencies are still propagated;
</para>
</listitem>
<listitem>
<para>
in case of any bugs or feature requests, file a github issue or, better, a
pull request and /cc @vcunat.
</para>
</listitem>
</itemizedlist>
</section>
</section>
</section>

View File

@ -0,0 +1,115 @@
---
title: Titanium
author: Sander van der Burg
date: 2018-11-18
---
# Titanium
The Nixpkgs repository contains facilities to deploy a variety of versions of
the [Titanium SDK](https://www.appcelerator.com), a cross-platform
mobile app development framework that uses JavaScript as an implementation
language, and includes a function abstraction making it possible to build
Titanium applications for Android and iOS devices from source code.
Not all Titanium features are supported -- currently, it can only be used to
build Android and iOS apps.
Building a Titanium app
-----------------------
We can build a Titanium app from source for Android or iOS and for debugging or
release purposes by invoking the `titaniumenv.buildApp {}` function:
```nix
titaniumenv.buildApp {
name = "myapp";
src = ./myappsource;
preBuild = "";
target = "android"; # or 'iphone'
tiVersion = "7.1.0.GA";
release = true;
androidsdkArgs = {
platformVersions = [ "25" "26" ];
};
androidKeyStore = ./keystore;
androidKeyAlias = "myfirstapp";
androidKeyStorePassword = "secret";
xcodeBaseDir = "/Applications/Xcode.app";
xcodewrapperArgs = {
version = "9.3";
};
iosMobileProvisioningProfile = ./myprovisioning.profile;
iosCertificateName = "My Company";
iosCertificate = ./mycertificate.p12;
iosCertificatePassword = "secret";
iosVersion = "11.3";
iosBuildStore = false;
enableWirelessDistribution = true;
installURL = "/installipa.php";
}
```
The `titaniumenv.buildApp {}` function takes the following parameters:
* The `name` parameter refers to the name in the Nix store.
* The `src` parameter refers to the source code location of the app that needs
to be built.
* `preRebuild` contains optional build instructions that are carried out before
the build starts.
* `target` indicates for which device the app must be built. Currently only
'android' and 'iphone' (for iOS) are supported.
* `tiVersion` can be used to optionally override the requested Titanium version
in `tiapp.xml`. If not specified, it will use the version in `tiapp.xml`.
* `release` should be set to true when building an app for submission to the
Google Playstore or Apple Appstore. Otherwise, it should be false.
When the `target` has been set to `android`, we can configure the following
parameters:
* The `androidSdkArgs` parameter refers to an attribute set that propagates all
parameters to the `androidenv.composeAndroidPackages {}` function. This can
be used to install all relevant Android plugins that may be needed to perform
the Android build. If no parameters are given, it will deploy the platform
SDKs for API-levels 25 and 26 by default.
When the `release` parameter has been set to true, you need to provide
parameters to sign the app (a sketch of creating a keystore follows this list):
* `androidKeyStore` is the path to the keystore file
* `androidKeyAlias` is the key alias
* `androidKeyStorePassword` refers to the password to open the keystore file.
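The keystore referenced by `androidKeyStore` can be created with the standard
Java `keytool`; a sketch with illustrative values:
```bash
$ keytool -genkey -v -keystore keystore -alias myfirstapp \
    -keyalg RSA -keysize 2048 -validity 10000
```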
When the `target` has been set to `iphone`, we can configure the following
parameters:
* The `xcodeBaseDir` parameter refers to the location where Xcode has been
installed. When no value is given, the above value is used as the default.
* The `xcodewrapperArgs` parameter passes arbitrary parameters to the
`xcodeenv.composeXcodeWrapper {}` function. This can, for example, be used
to adjust the default version of Xcode.
When `release` has been set to true, you also need to provide the following
parameters:
* `iosMobileProvisioningProfile` refers to a mobile provisioning profile needed
for signing.
* `iosCertificateName` refers to the company name in the P12 certificate.
* `iosCertificate` refers to the path to the P12 file.
* `iosCertificatePassword` contains the password to open the P12 file.
* `iosVersion` refers to the iOS SDK version to use. It defaults to the latest
version.
* `iosBuildStore` should be set to `true` when building for submission to the
  Apple Appstore. For enterprise or ad-hoc builds it should be set to `false`.
When `enableWirelessDistribution` has been enabled, you must also provide
`installURL`, the path of the PHP script (included with the iOS build
environment) that enables wireless ad-hoc installations.
Emulating or simulating the app
-------------------------------
It is also possible to simulate the corresponding iOS build in the iOS
simulator by using `xcodeenv.simulateApp {}` and to emulate an Android APK by
using `androidenv.emulateApp {}`, as sketched below.
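A rough sketch of the emulator case; the attribute names below follow common
`androidenv.emulateApp` usage but should be treated as illustrative, not as a
definitive interface:
```nix
androidenv.emulateApp {
  name = "emulate-myapp";
  app = myapp;                      # hypothetical binding to the built APK
  platformVersion = "25";
  package = "com.example.myapp";    # illustrative Android package name
  activity = ".MyAppActivity";      # illustrative activity to launch
}
```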

View File

@ -1,135 +0,0 @@
---
title: User's Guide for Vim in Nixpkgs
author: Marc Weber
date: 2016-06-25
---
# User's Guide to Vim Plugins/Addons/Bundles/Scripts in Nixpkgs
You'll get a `vim(-your-suffix)` executable in `PATH` that also loads the
plugins you want. Loading can be deferred; see examples.
Vim packages, VAM (=vim-addon-manager) and Pathogen are supported to load
packages.
## Custom configuration
Adding custom .vimrc lines can be done using the following code:
```
vim_configurable.customize {
name = "vim-with-plugins";
vimrcConfig.customRC = ''
set hidden
'';
}
```
## Vim packages
To store your plugins in Vim packages, the following example can be used:
```
vim_configurable.customize {
vimrcConfig.packages.myVimPackage = with pkgs.vimPlugins; {
# loaded on launch
start = [ youcompleteme fugitive ];
# manually loadable by calling `:packadd $plugin-name`
opt = [ phpCompletion elm-vim ];
# To automatically load a plugin when opening a filetype, add vimrc lines like:
# autocmd FileType php :packadd phpCompletion
}
};
```
## VAM
### dependencies by Vim plugins
VAM introduced .json files that support dependencies without versioning,
assuming that "using the latest version" is ok most of the time.
### Example
First create a vim-scripts file having one plugin name per line. Example:
"tlib"
{'name': 'vim-addon-sql'}
{'filetype_regex': '\%(vim)$', 'names': ['reload', 'vim-dev-plugin']}
Such vim-scripts file can be read by VAM as well like this:
call vam#Scripts(expand('~/.vim-scripts'), {})
Create a default.nix file:
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
nixpkgs.vim_configurable.customize { name = "vim"; vimrcConfig.vam.pluginDictionaries = [ "vim-addon-vim2nix" ]; }
Create a generate.vim file:
ActivateAddons vim-addon-vim2nix
let vim_scripts = "vim-scripts"
call nix#ExportPluginsForNix({
\ 'path_to_nixpkgs': eval('{"'.substitute(substitute(substitute($NIX_PATH, ':', ',', 'g'), '=',':', 'g'), '\([:,]\)', '"\1"',"g").'"}')["nixpkgs"],
\ 'cache_file': '/tmp/vim2nix-cache',
\ 'try_catch': 0,
\ 'plugin_dictionaries': ["vim-addon-manager"]+map(readfile(vim_scripts), 'eval(v:val)')
\ })
Then run
nix-shell -p vimUtils.vim_with_vim2nix --command "vim -c 'source generate.vim'"
You should get a Vim buffer with the nix derivations (output1) and vam.pluginDictionaries (output2).
You can add your vim to your system's configuration file like this and start it by "vim-my":
my-vim =
let plugins = let inherit (vimUtils) buildVimPluginFrom2Nix; in {
copy paste output1 here
}; in vim_configurable.customize {
name = "vim-my";
vimrcConfig.vam.knownPlugins = plugins; # optional
vimrcConfig.vam.pluginDictionaries = [
copy paste output2 here
];
# Pathogen would be
# vimrcConfig.pathogen.knownPlugins = plugins; # plugins
# vimrcConfig.pathogen.pluginNames = ["tlib"];
};
Sample output1:
"reload" = buildVimPluginFrom2Nix { # created by nix#NixDerivation
name = "reload";
src = fetchgit {
url = "git://github.com/xolox/vim-reload";
rev = "0a601a668727f5b675cb1ddc19f6861f3f7ab9e1";
sha256 = "0vb832l9yxj919f5hfg6qj6bn9ni57gnjd3bj7zpq7d4iv2s4wdh";
};
dependencies = ["nim-misc"];
};
[...]
Sample output2:
[
''vim-addon-manager''
''tlib''
{ "name" = ''vim-addon-sql''; }
{ "filetype_regex" = ''\%(vim)$$''; "names" = [ ''reload'' ''vim-dev-plugin'' ]; }
]
## Important repositories
- [vim-pi](https://bitbucket.org/vimcommunity/vim-pi) is a plugin repository
  from the VAM plugin manager, meant to be usable by other plugin managers as
  well; it is used by
- [vim2nix](http://github.com/MarcWeber/vim-addon-vim2nix), which generates the
  .nix code

View File

@ -0,0 +1,255 @@
---
title: User's Guide for Vim in Nixpkgs
author: Marc Weber
date: 2016-06-25
---
# User's Guide to Vim Plugins/Addons/Bundles/Scripts in Nixpkgs
Both Neovim and Vim can be configured to include your favorite plugins
and additional libraries.
Loading can be deferred; see examples.
At the moment we support four different methods for managing plugins:
- Vim packages (*recommended*)
- VAM (=vim-addon-manager)
- Pathogen
- vim-plug
## Custom configuration
Adding custom .vimrc lines can be done using the following code:
```
vim_configurable.customize {
# `name` specifies the name of the executable and package
name = "vim-with-plugins";
vimrcConfig.customRC = ''
set hidden
'';
}
```
This configuration is used when vim is invoked with the command specified as name, in this case `vim-with-plugins`.
For Neovim the `configure` argument can be overridden to achieve the same:
```
neovim.override {
configure = {
customRC = ''
# here your custom configuration goes!
'';
};
}
```
If you want to use `neovim-qt` as a graphical editor, you can configure it by
overriding Neovim in an overlay or by passing it an overridden Neovim:
```
neovim-qt.override {
neovim = neovim.override {
configure = {
customRC = ''
# your custom configuration
'';
};
};
}
```
## Managing plugins with Vim packages
To store your plugins in Vim packages (the native Vim plugin manager, see `:help packages`), the following example can be used:
```
vim_configurable.customize {
vimrcConfig.packages.myVimPackage = with pkgs.vimPlugins; {
# loaded on launch
start = [ youcompleteme fugitive ];
# manually loadable by calling `:packadd $plugin-name`
# however, if a vim plugin has a dependency that is not explicitly listed in
# opt that dependency will always be added to start to avoid confusion.
opt = [ phpCompletion elm-vim ];
# To automatically load a plugin when opening a filetype, add vimrc lines like:
# autocmd FileType php :packadd phpCompletion
};
}
```
`myVimPackage` is an arbitrary name for the generated package. You can choose any name you like.
For Neovim the syntax is:
```
neovim.override {
configure = {
customRC = ''
# here your custom configuration goes!
'';
packages.myVimPackage = with pkgs.vimPlugins; {
# see examples below how to use custom packages
start = [ ];
# If a vim plugin has a dependency that is not explicitly listed in
# opt that dependency will always be added to start to avoid confusion.
opt = [ ];
};
};
}
```
The resulting package can be added to `packageOverrides` in `~/.nixpkgs/config.nix` to make it installable:
```
{
packageOverrides = pkgs: with pkgs; {
myVim = vim_configurable.customize {
# `name` specifies the name of the executable and package
name = "vim-with-plugins";
# add here code from the example section
};
myNeovim = neovim.override {
configure = {
# add here code from the example section
};
};
};
}
```
After that you can install your special grafted `myVim` or `myNeovim` packages.
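For example, with the `packageOverrides` above in place, installation could look like:
```
$ nix-env -f "<nixpkgs>" -iA myVim
$ nix-env -f "<nixpkgs>" -iA myNeovim
```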
## Managing plugins with vim-plug
To use [vim-plug](https://github.com/junegunn/vim-plug) to manage your Vim
plugins the following example can be used:
```
vim_configurable.customize {
vimrcConfig.packages.myVimPackage = with pkgs.vimPlugins; {
# loaded on launch
plug.plugins = [ youcompleteme fugitive phpCompletion elm-vim ];
};
}
```
For Neovim the syntax is:
```
neovim.override {
configure = {
customRC = ''
# here your custom configuration goes!
'';
plug.plugins = with pkgs.vimPlugins; [
vim-go
];
};
}
```
## Managing plugins with VAM
### Handling dependencies of Vim plugins
VAM introduced .json files that support dependencies without versioning,
assuming that "using the latest version" is ok most of the time.
### Example
First create a vim-scripts file having one plugin name per line. Example:
"tlib"
{'name': 'vim-addon-sql'}
{'filetype_regex': '\%(vim)$', 'names': ['reload', 'vim-dev-plugin']}
Such vim-scripts file can be read by VAM as well like this:
call vam#Scripts(expand('~/.vim-scripts'), {})
Create a default.nix file:
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
nixpkgs.vim_configurable.customize { name = "vim"; vimrcConfig.vam.pluginDictionaries = [ "vim-addon-vim2nix" ]; }
Create a generate.vim file:
ActivateAddons vim-addon-vim2nix
let vim_scripts = "vim-scripts"
call nix#ExportPluginsForNix({
\ 'path_to_nixpkgs': eval('{"'.substitute(substitute(substitute($NIX_PATH, ':', ',', 'g'), '=',':', 'g'), '\([:,]\)', '"\1"',"g").'"}')["nixpkgs"],
\ 'cache_file': '/tmp/vim2nix-cache',
\ 'try_catch': 0,
\ 'plugin_dictionaries': ["vim-addon-manager"]+map(readfile(vim_scripts), 'eval(v:val)')
\ })
Then run
nix-shell -p vimUtils.vim_with_vim2nix --command "vim -c 'source generate.vim'"
You should get a Vim buffer with the nix derivations (output1) and vam.pluginDictionaries (output2).
You can add your vim to your system's configuration file like this and start it by "vim-my":
my-vim =
let plugins = let inherit (vimUtils) buildVimPluginFrom2Nix; in {
copy paste output1 here
}; in vim_configurable.customize {
name = "vim-my";
vimrcConfig.vam.knownPlugins = plugins; # optional
vimrcConfig.vam.pluginDictionaries = [
copy paste output2 here
];
# Pathogen would be
# vimrcConfig.pathogen.knownPlugins = plugins; # plugins
# vimrcConfig.pathogen.pluginNames = ["tlib"];
};
Sample output1:
"reload" = buildVimPluginFrom2Nix { # created by nix#NixDerivation
name = "reload";
src = fetchgit {
url = "git://github.com/xolox/vim-reload";
rev = "0a601a668727f5b675cb1ddc19f6861f3f7ab9e1";
sha256 = "0vb832l9yxj919f5hfg6qj6bn9ni57gnjd3bj7zpq7d4iv2s4wdh";
};
dependencies = ["nim-misc"];
};
[...]
Sample output2:
[
''vim-addon-manager''
''tlib''
{ "name" = ''vim-addon-sql''; }
{ "filetype_regex" = ''\%(vim)$$''; "names" = [ ''reload'' ''vim-dev-plugin'' ]; }
]
## Adding new plugins to nixpkgs
In `pkgs/misc/vim-plugins/vim-plugin-names` we store the plugin names
for all Vim plugins we automatically package.
The format of this file is `github username/github repository`;
for example, https://github.com/scrooloose/nerdtree becomes `scrooloose/nerdtree`.
After adding your plugin to this file, run `./update.py` in the same folder.
This will update a file called `generated.nix` and make your plugin accessible in the
`vimPlugins` attribute set (`vimPlugins.nerdtree` in our example).
If the plugin requires additional build steps, add an override to
`pkgs/misc/vim-plugins/default.nix` in the same directory; a sketch of what such
an override might look like follows.
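A sketch of such an override, assuming the file uses the usual
`self`/`super` override pattern (the plugin and extra step are hypothetical):
```
nerdtree = super.nerdtree.overrideAttrs (old: {
  # hypothetical extra build step for the plugin
  postInstall = (old.postInstall or "") + ''
    echo "extra build step goes here"
  '';
});
```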
## Important repositories
- [vim-pi](https://bitbucket.org/vimcommunity/vim-pi) is a plugin repository
  from the VAM plugin manager, meant to be usable by other plugin managers as
  well; it is used by
- [vim2nix](http://github.com/MarcWeber/vim-addon-vim2nix), which generates the
  .nix code

26
doc/lib-function-docs.nix Normal file
View File

@ -0,0 +1,26 @@
# Generates the documentation for library functions via nixdoc. To add
# another library function file to this list, the include list in the
# file `doc/functions/library.xml` must also be updated.
{ pkgs ? import ./.. {}, locationsXml }:
with pkgs; stdenv.mkDerivation {
name = "nixpkgs-lib-docs";
src = ./../lib;
buildInputs = [ nixdoc ];
installPhase = ''
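# docgen <file> <description>: renders ../lib/<file>.nix into DocBook XML with nixdoc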
function docgen {
nixdoc -c "$1" -d "$2" -f "../lib/$1.nix" > "$out/$1.xml"
}
mkdir -p $out
ln -s ${locationsXml} $out/locations.xml
docgen strings 'String manipulation functions'
docgen trivial 'Miscellaneous functions'
docgen lists 'List manipulation functions'
docgen debug 'Debugging functions'
docgen options 'NixOS / nixpkgs option handling'
'';
}

View File

@ -0,0 +1,85 @@
{ pkgs ? (import ./.. { }), nixpkgs ? { }}:
let
revision = pkgs.lib.trivial.revisionWithDefault (nixpkgs.revision or "master");
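# For each attribute in a set, pair its name with the source position (file, line, column) where it was defined.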
libDefPos = set:
builtins.map
(name: {
name = name;
location = builtins.unsafeGetAttrPos name set;
})
(builtins.attrNames set);
libset = toplib:
builtins.map
(subsetname: {
subsetname = subsetname;
functions = libDefPos toplib."${subsetname}";
})
(builtins.filter
(name: builtins.isAttrs toplib."${name}")
(builtins.attrNames toplib));
nixpkgsLib = pkgs.lib;
flattenedLibSubset = { subsetname, functions }:
builtins.map
(fn: {
name = "lib.${subsetname}.${fn.name}";
value = fn.location;
})
functions;
locatedlibsets = libs: builtins.map flattenedLibSubset (libset libs);
removeFilenamePrefix = prefix: filename:
let
prefixLen = (builtins.stringLength prefix) + 1; # +1 to remove the leading /
filenameLen = builtins.stringLength filename;
substr = builtins.substring prefixLen filenameLen filename;
in substr;
removeNixpkgs = removeFilenamePrefix (builtins.toString pkgs.path);
liblocations =
builtins.filter
(elem: elem.value != null)
(nixpkgsLib.lists.flatten
(locatedlibsets nixpkgsLib));
fnLocationRelative = { name, value }:
{
inherit name;
value = value // { file = removeNixpkgs value.file; };
};
relativeLocs = (builtins.map fnLocationRelative liblocations);
sanitizeId = builtins.replaceStrings
[ "'" ]
[ "-prime" ];
urlPrefix = "https://github.com/NixOS/nixpkgs/blob/${revision}";
xmlstrings = (nixpkgsLib.strings.concatMapStrings
({ name, value }:
''
<section><title>${name}</title>
<para xml:id="${sanitizeId name}">
Located at
<link
xlink:href="${urlPrefix}/${value.file}#L${builtins.toString value.line}">${value.file}:${builtins.toString value.line}</link>
in <literal>&lt;nixpkgs&gt;</literal>.
</para>
</section>
'')
relativeLocs);
in pkgs.writeText
"locations.xml"
''
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5">
<title>All the locations for every lib function</title>
<para>This file is only for inclusion by other files.</para>
${xmlstrings}
</section>
''

View File

@ -1,29 +1,24 @@
<book xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude">
<info>
<title>Nixpkgs Contributors Guide</title>
<subtitle>Version <xi:include href=".version" parse="text" /></subtitle>
</info>
<xi:include href="introduction.xml" />
<xi:include href="quick-start.xml" />
<xi:include href="stdenv.xml" />
<xi:include href="multiple-output.xml" />
<xi:include href="cross-compilation.xml" />
<xi:include href="configuration.xml" />
<xi:include href="functions.xml" />
<xi:include href="meta.xml" />
<xi:include href="languages-frameworks/index.xml" />
<xi:include href="platform-notes.xml" />
<xi:include href="package-notes.xml" />
<xi:include href="overlays.xml" />
<xi:include href="coding-conventions.xml" />
<xi:include href="submitting-changes.xml" />
<xi:include href="reviewing-contributions.xml" />
<xi:include href="contributing.xml" />
<info>
<title>Nixpkgs Contributors Guide</title>
<subtitle>Version <xi:include href=".version" parse="text" />
</subtitle>
</info>
<xi:include href="introduction.chapter.xml" />
<xi:include href="quick-start.xml" />
<xi:include href="stdenv.xml" />
<xi:include href="multiple-output.xml" />
<xi:include href="cross-compilation.xml" />
<xi:include href="configuration.xml" />
<xi:include href="functions.xml" />
<xi:include href="meta.xml" />
<xi:include href="languages-frameworks/index.xml" />
<xi:include href="platform-notes.xml" />
<xi:include href="package-notes.xml" />
<xi:include href="overlays.xml" />
<xi:include href="coding-conventions.xml" />
<xi:include href="submitting-changes.xml" />
<xi:include href="reviewing-contributions.xml" />
<xi:include href="contributing.xml" />
</book>

View File

@ -1,44 +1,41 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-meta">
<title>Meta-attributes</title>
<para>Nix packages can declare <emphasis>meta-attributes</emphasis>
that contain information about a package such as a description, its
homepage, its license, and so on. For instance, the GNU Hello package
has a <varname>meta</varname> declaration like this:
<title>Meta-attributes</title>
<para>
Nix packages can declare <emphasis>meta-attributes</emphasis> that contain
information about a package such as a description, its homepage, its license,
and so on. For instance, the GNU Hello package has a <varname>meta</varname>
declaration like this:
<programlisting>
meta = {
meta = with stdenv.lib; {
description = "A program that produces a familiar, friendly greeting";
longDescription = ''
GNU Hello is a program that prints "Hello, world!" when you run it.
It is fully customizable.
'';
homepage = http://www.gnu.org/software/hello/manual/;
license = stdenv.lib.licenses.gpl3Plus;
maintainers = [ stdenv.lib.maintainers.eelco ];
platforms = stdenv.lib.platforms.all;
homepage = https://www.gnu.org/software/hello/manual/;
license = licenses.gpl3Plus;
maintainers = [ maintainers.eelco ];
platforms = platforms.all;
};
</programlisting>
</para>
<para>Meta-attributes are not passed to the builder of the package.
Thus, a change to a meta-attribute doesn't trigger a recompilation of
the package. The value of a meta-attribute must be a string.</para>
<para>The meta-attributes of a package can be queried from the
command-line using <command>nix-env</command>:
</para>
<para>
Meta-attributes are not passed to the builder of the package. Thus, a change
to a meta-attribute doesn't trigger a recompilation of the package. The
value of a meta-attribute must be a string.
</para>
<para>
The meta-attributes of a package can be queried from the command-line using
<command>nix-env</command>:
<screen>
$ nix-env -qa hello --json
{
"hello": {
"meta": {
"description": "A program that produces a familiar, friendly greeting",
"homepage": "http://www.gnu.org/software/hello/manual/",
"homepage": "https://www.gnu.org/software/hello/manual/",
"license": {
"fullName": "GNU General Public License version 3 or later",
"shortName": "GPLv3+",
@ -53,7 +50,7 @@ $ nix-env -qa hello --json
"x86_64-linux",
"armv5tel-linux",
"armv7l-linux",
"mips64el-linux",
"mips32-linux",
"x86_64-darwin",
"i686-cygwin",
"i686-freebsd",
@ -70,252 +67,365 @@ $ nix-env -qa hello --json
</screen>
<command>nix-env</command> knows about the
<varname>description</varname> field specifically:
<command>nix-env</command> knows about the <varname>description</varname>
field specifically:
<screen>
$ nix-env -qa hello --description
hello-2.3 A program that produces a familiar, friendly greeting
</screen>
</para>
<section xml:id="sec-standard-meta-attributes">
<title>Standard meta-attributes</title>
</para>
<para>
It is expected that each meta-attribute is one of the following:
</para>
<section xml:id="sec-standard-meta-attributes"><title>Standard
meta-attributes</title>
<para>It is expected that each meta-attribute is one of the following:</para>
<variablelist>
<varlistentry>
<term><varname>description</varname></term>
<listitem><para>A short (one-line) description of the package.
This is shown by <command>nix-env -q --description</command> and
also on the Nixpkgs release pages.</para>
<para>Don't include a period at the end. Don't include newline
characters. Capitalise the first character. For brevity, don't
repeat the name of the package — just describe what it does.</para>
<para>Wrong: <literal>"libpng is a library that allows you to decode PNG images."</literal></para>
<para>Right: <literal>"A library for decoding PNG images"</literal></para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>longDescription</varname></term>
<listitem><para>An arbitrarily long description of the
package.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>branch</varname></term>
<listitem><para>Release branch. Used to specify that a package is not
going to receive updates that are not in this branch; for example, Linux
kernel 3.0 is supposed to be updated to 3.0.X, not 3.1.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>homepage</varname></term>
<listitem><para>The package's homepage. Example:
<literal>http://www.gnu.org/software/hello/manual/</literal></para></listitem>
</varlistentry>
<varlistentry>
<term><varname>downloadPage</varname></term>
<listitem><para>The page where a link to the current version can be found. Example:
<literal>http://ftp.gnu.org/gnu/hello/</literal></para></listitem>
</varlistentry>
<varlistentry>
<term><varname>license</varname></term>
<variablelist>
<varlistentry>
<term>
<varname>description</varname>
</term>
<listitem>
<para>
The license, or licenses, for the package. One from the attribute set
defined in <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix">
<filename>nixpkgs/lib/licenses.nix</filename></link>. At this moment
using both a list of licenses and a single license is valid. If the
license field is in the form of a list representation, then it means
that parts of the package are licensed differently. Each license
should preferably be referenced by their attribute. The non-list
attribute value can also be a space delimited string representation of
the contained attribute shortNames or spdxIds. The following are all valid
examples:
<itemizedlist>
<listitem><para>Single license referenced by attribute (preferred)
<literal>stdenv.lib.licenses.gpl3</literal>.
</para></listitem>
<listitem><para>Single license referenced by its attribute shortName (frowned upon)
<literal>"gpl3"</literal>.
</para></listitem>
<listitem><para>Single license referenced by its attribute spdxId (frowned upon)
<literal>"GPL-3.0"</literal>.
</para></listitem>
<listitem><para>Multiple licenses referenced by attribute (preferred)
<literal>with stdenv.lib.licenses; [ asl20 free ofl ]</literal>.
</para></listitem>
<listitem><para>Multiple licenses referenced as a space delimited string of attribute shortNames (frowned upon)
<literal>"asl20 free ofl"</literal>.
</para></listitem>
</itemizedlist>
For details, see <xref linkend='sec-meta-license'/>.
</para>
<para>
A short (one-line) description of the package. This is shown by
<command>nix-env -q --description</command> and also on the Nixpkgs
release pages.
</para>
<para>
Don't include a period at the end. Don't include newline characters.
Capitalise the first character. For brevity, don't repeat the name of
the package — just describe what it does.
</para>
<para>
Wrong: <literal>"libpng is a library that allows you to decode PNG
images."</literal>
</para>
<para>
Right: <literal>"A library for decoding PNG images"</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>maintainers</varname></term>
<listitem><para>A list of names and e-mail addresses of the
maintainers of this Nix expression. If
you would like to be a maintainer of a package, you may want to add
yourself to <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/maintainers.nix"><filename>nixpkgs/lib/maintainers.nix</filename></link>
and write something like <literal>[ stdenv.lib.maintainers.alice
stdenv.lib.maintainers.bob ]</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>priority</varname></term>
<listitem><para>The <emphasis>priority</emphasis> of the package,
used by <command>nix-env</command> to resolve file name conflicts
between packages. See the Nix manual page for
<command>nix-env</command> for details. Example:
<literal>"10"</literal> (a low-priority
package).</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>platforms</varname></term>
<listitem><para>The list of Nix platform types on which the
package is supported. Hydra builds packages according to the
platform specified. If no platform is specified, the package does
not have prebuilt binaries. An example is:
</varlistentry>
<varlistentry>
<term>
<varname>longDescription</varname>
</term>
<listitem>
<para>
An arbitrarily long description of the package.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>branch</varname>
</term>
<listitem>
<para>
Release branch. Used to specify that a package is not going to receive
updates that are not in this branch; for example, Linux kernel 3.0 is
supposed to be updated to 3.0.X, not 3.1.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>homepage</varname>
</term>
<listitem>
<para>
       The packages homepage. Example:
<literal>https://www.gnu.org/software/hello/manual/</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>downloadPage</varname>
</term>
<listitem>
<para>
The page where a link to the current version can be found. Example:
<literal>https://ftp.gnu.org/gnu/hello/</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>license</varname>
</term>
<listitem>
<para>
The license, or licenses, for the package. One from the attribute set
defined in
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix">
       <filename>nixpkgs/lib/licenses.nix</filename></link>. At this moment
       both a list of licenses and a single license are valid. If the license
       field is a list, it means that parts of the package are licensed
       differently. Each license should preferably be referenced by its
       attribute. The non-list attribute value can also be a space-delimited
       string of the contained attribute shortNames or spdxIds. The following
       are all valid examples:
<itemizedlist>
<listitem>
<para>
Single license referenced by attribute (preferred)
<literal>stdenv.lib.licenses.gpl3</literal>.
</para>
</listitem>
<listitem>
<para>
Single license referenced by its attribute shortName (frowned upon)
<literal>"gpl3"</literal>.
</para>
</listitem>
<listitem>
<para>
Single license referenced by its attribute spdxId (frowned upon)
<literal>"GPL-3.0"</literal>.
</para>
</listitem>
<listitem>
<para>
Multiple licenses referenced by attribute (preferred) <literal>with
stdenv.lib.licenses; [ asl20 free ofl ]</literal>.
</para>
</listitem>
<listitem>
<para>
Multiple licenses referenced as a space delimited string of attribute
shortNames (frowned upon) <literal>"asl20 free ofl"</literal>.
</para>
</listitem>
</itemizedlist>
For details, see <xref linkend='sec-meta-license'/>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>maintainers</varname>
</term>
<listitem>
<para>
A list of names and e-mail addresses of the maintainers of this Nix
expression. If you would like to be a maintainer of a package, you may
want to add yourself to
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/maintainers/maintainer-list.nix"><filename>nixpkgs/maintainers/maintainer-list.nix</filename></link>
and write something like <literal>[ stdenv.lib.maintainers.alice
stdenv.lib.maintainers.bob ]</literal>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>priority</varname>
</term>
<listitem>
<para>
The <emphasis>priority</emphasis> of the package, used by
<command>nix-env</command> to resolve file name conflicts between
packages. See the Nix manual page for <command>nix-env</command> for
details. Example: <literal>"10"</literal> (a low-priority package).
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>platforms</varname>
</term>
<listitem>
<para>
The list of Nix platform types on which the package is supported. Hydra
builds packages according to the platform specified. If no platform is
specified, the package does not have prebuilt binaries. An example is:
<programlisting>
meta.platforms = stdenv.lib.platforms.linux;
</programlisting>
    The attribute set <varname>stdenv.lib.platforms</varname> defines
    <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/systems/doubles.nix">
    various common lists</link> of platform types.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>hydraPlatforms</varname></term>
<listitem><para>The list of Nix platform types for which the Hydra
instance at <literal>hydra.nixos.org</literal> will build the
package. (Hydra is the Nix-based continuous build system.) It
defaults to the value of <varname>meta.platforms</varname>. Thus,
the only reason to set <varname>meta.hydraPlatforms</varname> is
if you want <literal>hydra.nixos.org</literal> to build the
package on a subset of <varname>meta.platforms</varname>, or not
at all, e.g.
       The attribute set <varname>stdenv.lib.platforms</varname> defines
       <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/systems/doubles.nix">
       various common lists</link> of platform types.
</para>
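      <para>
       Platform lists can be combined with ordinary Nix list concatenation;
       for instance (a hedged sketch):
<programlisting>
meta.platforms = stdenv.lib.platforms.linux ++ stdenv.lib.platforms.darwin;
</programlisting>
      </para>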
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>tests</varname>
</term>
<listitem>
<warning>
<para>
This attribute is special in that it is not actually under the
<literal>meta</literal> attribute set but rather under the
<literal>passthru</literal> attribute set. This is due to a current
         limitation of Nix, and will change as soon as Nixpkgs is able to
         depend on a new enough version of Nix. See
<link xlink:href="https://github.com/NixOS/nix/issues/2532">the relevant
issue</link> for more details.
</para>
</warning>
<para>
       An attribute set whose values are tests. A test is a derivation that
       builds successfully when the test passes and fails to build otherwise.
       A derivation that is a test needs to have <literal>meta.timeout</literal>
       defined.
</para>
<para>
The NixOS tests are available as <literal>nixosTests</literal> in
parameters of derivations. For instance, the OpenSMTPD derivation
includes lines similar to:
<programlisting>
{ /* ... */, nixosTests }:
{
# ...
passthru.tests = {
basic-functionality-and-dovecot-integration = nixosTests.opensmtpd;
};
}
</programlisting>
</para>
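      <para>
       Assuming such an attribute is present, a test can be built like any
       other derivation; for example (hypothetical attribute path, run from a
       Nixpkgs checkout):
<screen>
$ nix-build -A opensmtpd.tests.basic-functionality-and-dovecot-integration</screen>
      </para>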
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>timeout</varname>
</term>
<listitem>
<para>
       A timeout (in seconds) for building the derivation. If the derivation
       takes longer than this to build, it can fail because the timeout is
       exceeded. However, not all machines have the same computing power, so
       some builders may apply a multiplicative factor to this value. When
       filling this value in, try to keep it approximately consistent with
       other values already present in <literal>nixpkgs</literal>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>hydraPlatforms</varname>
</term>
<listitem>
<para>
The list of Nix platform types for which the Hydra instance at
<literal>hydra.nixos.org</literal> will build the package. (Hydra is the
Nix-based continuous build system.) It defaults to the value of
<varname>meta.platforms</varname>. Thus, the only reason to set
<varname>meta.hydraPlatforms</varname> is if you want
<literal>hydra.nixos.org</literal> to build the package on a subset of
<varname>meta.platforms</varname>, or not at all, e.g.
<programlisting>
meta.platforms = stdenv.lib.platforms.linux;
meta.hydraPlatforms = [];
</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>broken</varname>
</term>
<listitem>
<para>
If set to <literal>true</literal>, the package is marked as “broken”,
meaning that it wont show up in <literal>nix-env -qa</literal>, and
cannot be built or installed. Such packages should be removed from
Nixpkgs eventually unless they are fixed.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>updateWalker</varname>
</term>
<listitem>
<para>
If set to <literal>true</literal>, the package is tested to be updated
correctly by the <literal>update-walker.sh</literal> script without
additional settings. Such packages have <varname>meta.version</varname>
set and their homepage (or the page specified by
<varname>meta.downloadPage</varname>) contains a direct link to the
package tarball.
</para>
</listitem>
</varlistentry>
</variablelist>
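  <para>
   To tie several of the attributes described above together, here is a hedged
   sketch of a <varname>meta</varname> set for a hypothetical package (the
   descriptions, homepage and maintainer handles are invented):
<programlisting>
meta = with stdenv.lib; {
  description = "A library for frobnicating widgets";
  longDescription = ''
    libfrob provides a C API for frobnicating widgets of various shapes.
  '';
  homepage = "https://example.org/libfrob/";
  license = licenses.gpl3;
  maintainers = [ maintainers.alice maintainers.bob ];
  platforms = platforms.linux;
};
</programlisting>
  </para>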
</section>
<section xml:id="sec-meta-license">
<title>Licenses</title>
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>broken</varname></term>
<listitem><para>If set to <literal>true</literal>, the package is
marked as “broken”, meaning that it wont show up in
<literal>nix-env -qa</literal>, and cannot be built or installed.
Such packages should be removed from Nixpkgs eventually unless
they are fixed.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>updateWalker</varname></term>
<listitem><para>If set to <literal>true</literal>, the package is
tested to be updated correctly by the <literal>update-walker.sh</literal>
script without additional settings. Such packages have
<varname>meta.version</varname> set and their homepage (or
the page specified by <varname>meta.downloadPage</varname>) contains
a direct link to the package tarball.</para></listitem>
</varlistentry>
</variablelist>
</section>
<section xml:id="sec-meta-license"><title>Licenses</title>
<para>The <varname>meta.license</varname> attribute should preferably contain
a value from <varname>stdenv.lib.licenses</varname> defined in
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix">
<filename>nixpkgs/lib/licenses.nix</filename></link>,
or an in-place license description of the same format if the license is
unlikely to be useful in another expression.</para>
<para>Although it's typically better to indicate the specific license,
a few generic options are available:
<variablelist>
<varlistentry>
<term><varname>stdenv.lib.licenses.free</varname>,
<varname>"free"</varname></term>
<listitem><para>Catch-all for free software licenses not listed
above.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>stdenv.lib.licenses.unfreeRedistributable</varname>,
<varname>"unfree-redistributable"</varname></term>
<listitem><para>Unfree package that can be redistributed in binary
form. That is, its legal to redistribute the
<emphasis>output</emphasis> of the derivation. This means that
the package can be included in the Nixpkgs
channel.</para>
<para>Sometimes proprietary software can only be redistributed
unmodified. Make sure the builder doesnt actually modify the
original binaries; otherwise were breaking the license. For
instance, the NVIDIA X11 drivers can be redistributed unmodified,
but our builder applies <command>patchelf</command> to make them
work. Thus, its license is <varname>"unfree"</varname> and it
cannot be included in the Nixpkgs channel.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>stdenv.lib.licenses.unfree</varname>,
<varname>"unfree"</varname></term>
<listitem><para>Unfree package that cannot be redistributed. You
can build it yourself, but you cannot redistribute the output of
the derivation. Thus it cannot be included in the Nixpkgs
channel.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>stdenv.lib.licenses.unfreeRedistributableFirmware</varname>,
<varname>"unfree-redistributable-firmware"</varname></term>
<listitem><para>This package supplies unfree, redistributable
firmware. This is a separate value from
<varname>unfree-redistributable</varname> because not everybody
cares whether firmware is free.</para></listitem>
</varlistentry>
</variablelist>
</para>
</section>
<para>
  The <varname>meta.license</varname> attribute should preferably contain a
  value from <varname>stdenv.lib.licenses</varname> defined in
  <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix">
  <filename>nixpkgs/lib/licenses.nix</filename></link>, or an in-place license
  description of the same format if the license is unlikely to be useful in
  another expression.
</para>
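 <para>
  For reference, an in-place license description is simply an attribute set in
  the same shape as the entries of <filename>licenses.nix</filename>; a hedged
  sketch (all values invented) could look like:
<programlisting>
meta.license = {
  fullName  = "Foo Public License 1.0";
  shortName = "fpl10";
  url       = "https://example.org/fpl-1.0.txt";
  free      = true;
};
</programlisting>
 </para>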
<para>
Although it's typically better to indicate the specific license, a few
generic options are available:
<variablelist>
<varlistentry>
<term>
<varname>stdenv.lib.licenses.free</varname>, <varname>"free"</varname>
</term>
<listitem>
<para>
Catch-all for free software licenses not listed above.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>stdenv.lib.licenses.unfreeRedistributable</varname>, <varname>"unfree-redistributable"</varname>
</term>
<listitem>
<para>
Unfree package that can be redistributed in binary form. That is, its
legal to redistribute the <emphasis>output</emphasis> of the derivation.
This means that the package can be included in the Nixpkgs channel.
</para>
<para>
Sometimes proprietary software can only be redistributed unmodified.
Make sure the builder doesnt actually modify the original binaries;
otherwise were breaking the license. For instance, the NVIDIA X11
drivers can be redistributed unmodified, but our builder applies
<command>patchelf</command> to make them work. Thus, its license is
<varname>"unfree"</varname> and it cannot be included in the Nixpkgs
channel.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>stdenv.lib.licenses.unfree</varname>, <varname>"unfree"</varname>
</term>
<listitem>
<para>
Unfree package that cannot be redistributed. You can build it yourself,
but you cannot redistribute the output of the derivation. Thus it cannot
be included in the Nixpkgs channel.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>stdenv.lib.licenses.unfreeRedistributableFirmware</varname>, <varname>"unfree-redistributable-firmware"</varname>
</term>
<listitem>
<para>
This package supplies unfree, redistributable firmware. This is a
separate value from <varname>unfree-redistributable</varname> because
not everybody cares whether firmware is free.
</para>
</listitem>
</varlistentry>
</variablelist>
</para>
</section>
</chapter>

View File

@ -5,99 +5,319 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-multiple-output">
<title>Multiple-output packages</title>
<section xml:id="sec-multiple-outputs-introduction">
<title>Introduction</title>
<title>Multiple-output packages</title>
<para>
The Nix language allows a derivation to produce multiple outputs, which is
similar to what is utilized by other Linux distribution packaging systems.
The outputs reside in separate Nix store paths, so they can be mostly
handled independently of each other, including passing to build inputs,
garbage collection or binary substitution. The exception is that building
from source always produces all the outputs.
</para>
<section><title>Introduction</title>
<para>The Nix language allows a derivation to produce multiple outputs, which is similar to what is utilized by other Linux distribution packaging systems. The outputs reside in separate nix store paths, so they can be mostly handled independently of each other, including passing to build inputs, garbage collection or binary substitution. The exception is that building from source always produces all the outputs.</para>
<para>The main motivation is to save disk space by reducing runtime closure sizes; consequently also sizes of substituted binaries get reduced. Splitting can be used to have more granular runtime dependencies, for example the typical reduction is to split away development-only files, as those are typically not needed during runtime. As a result, closure sizes of many packages can get reduced to a half or even much less.</para>
<note><para>The reduction effects could be instead achieved by building the parts in completely separate derivations. That would often additionally reduce build-time closures, but it tends to be much harder to write such derivations, as build systems typically assume all parts are being built at once. This compromise approach of single source package producing multiple binary packages is also utilized often by rpm and deb.</para></note>
</section>
<para>
The main motivation is to save disk space by reducing runtime closure sizes;
consequently also sizes of substituted binaries get reduced. Splitting can
be used to have more granular runtime dependencies, for example the typical
reduction is to split away development-only files, as those are typically
not needed during runtime. As a result, closure sizes of many packages can
get reduced to a half or even much less.
</para>
<note>
<para>
The reduction effects could be instead achieved by building the parts in
completely separate derivations. That would often additionally reduce
build-time closures, but it tends to be much harder to write such
derivations, as build systems typically assume all parts are being built at
once. This compromise approach of single source package producing multiple
binary packages is also utilized often by rpm and deb.
</para>
</note>
</section>
<section xml:id="sec-multiple-outputs-installing">
<title>Installing a split package</title>
<para>
When installing a package via <varname>systemPackages</varname> or
<command>nix-env</command> you have several options:
</para>
<section><title>Installing a split package</title>
<para>When installing a package via <varname>systemPackages</varname> or <command>nix-env</command> you have several options:</para>
<itemizedlist>
<listitem><para>You can install particular outputs explicitly, as each is available in the Nix language as an attribute of the package. The <varname>outputs</varname> attribute contains a list of output names.</para></listitem>
<listitem><para>You can let it use the default outputs. These are handled by <varname>meta.outputsToInstall</varname> attribute that contains a list of output names.</para>
<para>TODO: more about tweaking the attribute, etc.</para></listitem>
<listitem><para>NixOS provides configuration option <varname>environment.extraOutputsToInstall</varname> that allows adding extra outputs of <varname>environment.systemPackages</varname> atop the default ones. It's mainly meant for documentation and debug symbols, and it's also modified by specific options.</para>
<note><para>At this moment there is no similar configurability for packages installed by <command>nix-env</command>. You can still use approach from <xref linkend="sec-modify-via-packageOverrides" /> to override <varname>meta.outputsToInstall</varname> attributes, but that's a rather inconvenient way.</para></note>
</listitem>
<listitem>
<para>
You can install particular outputs explicitly, as each is available in the
Nix language as an attribute of the package. The
<varname>outputs</varname> attribute contains a list of output names.
</para>
</listitem>
<listitem>
<para>
You can let it use the default outputs. These are handled by
<varname>meta.outputsToInstall</varname> attribute that contains a list of
output names.
</para>
<para>
TODO: more about tweaking the attribute, etc.
</para>
</listitem>
<listitem>
<para>
        NixOS provides the configuration option
        <varname>environment.extraOutputsToInstall</varname> that allows adding
        extra outputs of <varname>environment.systemPackages</varname> atop the
        default ones. It's mainly meant for documentation and debug symbols, and
        it's also modified by specific options; a sketch follows this list.
</para>
<note>
<para>
At this moment there is no similar configurability for packages installed
by <command>nix-env</command>. You can still use approach from
<xref linkend="sec-modify-via-packageOverrides" /> to override
<varname>meta.outputsToInstall</varname> attributes, but that's a rather
inconvenient way.
</para>
</note>
</listitem>
</itemizedlist>
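  <para>
   A hedged sketch, in NixOS configuration style, combining the options above
   (the package and output names are only examples):
<programlisting>
# Default outputs of coreutils plus its "info" output, selected explicitly.
environment.systemPackages = [ pkgs.coreutils pkgs.coreutils.info ];
# Additionally request these outputs for every package in systemPackages.
environment.extraOutputsToInstall = [ "man" "doc" ];
</programlisting>
  </para>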
</section>
</section>
<section xml:id="sec-multiple-outputs-using-split-packages">
<title>Using a split package</title>
<section><title>Using a split package</title>
<para>In the Nix language the individual outputs can be reached explicitly as attributes, e.g. <varname>coreutils.info</varname>, but the typical case is just using packages as build inputs.</para>
<para>When a multiple-output derivation gets into a build input of another derivation, the <varname>dev</varname> output is added if it exists, otherwise the first output is added. In addition to that, <varname>propagatedBuildOutputs</varname> of that package which by default contain <varname>$outputBin</varname> and <varname>$outputLib</varname> are also added. (See <xref linkend="multiple-output-file-type-groups" />.)</para>
</section>
<para>
In the Nix language the individual outputs can be reached explicitly as
attributes, e.g. <varname>coreutils.info</varname>, but the typical case is
just using packages as build inputs.
</para>
<para>
When a multiple-output derivation gets into a build input of another
derivation, the <varname>dev</varname> output is added if it exists,
otherwise the first output is added. In addition to that,
<varname>propagatedBuildOutputs</varname> of that package which by default
contain <varname>$outputBin</varname> and <varname>$outputLib</varname> are
also added. (See <xref linkend="multiple-output-file-type-groups" />.)
</para>
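  <para>
   A brief, hedged sketch of both forms of use inside a derivation (the
   surrounding expression is omitted):
<programlisting>
# As a build input: the dev or first output of coreutils is added.
buildInputs = [ coreutils ];

postInstall = ''
  # Referencing a particular output explicitly as an attribute.
  mkdir -p $out/share
  cp -r ${coreutils.info}/share/info $out/share/
'';
</programlisting>
  </para>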
</section>
<section xml:id="sec-multiple-outputs-">
<title>Writing a split derivation</title>
<section><title>Writing a split derivation</title>
<para>Here you find how to write a derivation that produces multiple outputs.</para>
<para>In nixpkgs there is a framework supporting multiple-output derivations. It tries to cover most cases by default behavior. You can find the source separated in &lt;<filename>nixpkgs/pkgs/build-support/setup-hooks/multiple-outputs.sh</filename>&gt;; it's relatively well-readable. The whole machinery is triggered by defining the <varname>outputs</varname> attribute to contain the list of desired output names (strings).</para>
<programlisting>outputs = [ "bin" "dev" "out" "doc" ];</programlisting>
<para>Often such a single line is enough. For each output an equally named environment variable is passed to the builder and contains the path in nix store for that output. By convention, the first output should contain the executable programs provided by the package as that output is used by Nix in string conversions, allowing references to binaries like <literal>${pkgs.perl}/bin/perl</literal> to always work. Typically you also want to have the main <varname>out</varname> output, as it catches any files that didn't get elsewhere.</para>
<para>
Here you find how to write a derivation that produces multiple outputs.
</para>
<note><para>There is a special handling of the <varname>debug</varname> output, described at <xref linkend="stdenv-separateDebugInfo" />.</para></note>
<para>
   In nixpkgs there is a framework supporting multiple-output derivations. It
   tries to cover most cases with its default behavior. You can find the
   source in
   &lt;<filename>nixpkgs/pkgs/build-support/setup-hooks/multiple-outputs.sh</filename>&gt;;
   it's relatively readable. The whole machinery is triggered by defining the
   <varname>outputs</varname> attribute to contain the list of desired output
   names (strings).
</para>
<programlisting>outputs = [ "bin" "dev" "out" "doc" ];</programlisting>
<para>
   Often such a single line is enough. For each output, an environment
   variable of the same name is passed to the builder and contains that
   output's path in the Nix store. Typically you also want to have the main
   <varname>out</varname> output, as it catches any files that didn't end up
   elsewhere.
</para>
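  <para>
   As a hedged illustration, these variables can be used directly in custom
   phases; the header name below is invented:
<programlisting>
outputs = [ "bin" "dev" "out" "doc" ];

postInstall = ''
  # $bin, $dev, $doc and $out each hold the store path of that output.
  mkdir -p $dev/include
  cp src/libfoo.h $dev/include/
'';
</programlisting>
  </para>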
<note>
<para>
There is a special handling of the <varname>debug</varname> output,
described at <xref linkend="stdenv-separateDebugInfo" />.
</para>
</note>
<section xml:id="multiple-output-file-binaries-first-convention">
<title><quote>Binaries first</quote></title>
<para>
A commonly adopted convention in <literal>nixpkgs</literal> is that
executables provided by the package are contained within its first output.
This convention allows the dependent packages to reference the executables
     provided by packages in a uniform manner. For instance, knowing that the
     <literal>perl</literal> package contains a <literal>perl</literal>
     executable, it can be referenced as
     <literal>${pkgs.perl}/bin/perl</literal> within a Nix derivation that
     needs to execute a Perl script.
</para>
<para>
     The <literal>glibc</literal> package is a deliberate exception to the
     <quote>binaries first</quote> convention. It has <literal>libs</literal>
     as its first output, allowing the libraries provided by
     <literal>glibc</literal> to be referenced directly (e.g.
<literal>${stdenv.glibc}/lib/ld-linux-x86-64.so.2</literal>). The
executables provided by <literal>glibc</literal> can be accessed via its
<literal>bin</literal> attribute (e.g.
<literal>${stdenv.glibc.bin}/bin/ldd</literal>).
</para>
<para>
     The reason <literal>glibc</literal> deviates from the convention is that
     referencing a library provided by <literal>glibc</literal> is a very
     common operation among Nix packages. For instance, third-party
executables packaged by Nix are typically patched and relinked with the
relevant version of <literal>glibc</literal> libraries from Nix packages
(please see the documentation on
<link xlink:href="https://nixos.org/patchelf.html">patchelf</link> for more
details).
</para>
</section>
<section xml:id="multiple-output-file-type-groups">
<title>File type groups</title>
<para>The support code currently recognizes some particular kinds of outputs and either instructs the build system of the package to put files into their desired outputs or it moves the files during the fixup phase. Each group of file types has an <varname>outputFoo</varname> variable specifying the output name where they should go. If that variable isn't defined by the derivation writer, it is guessed &ndash; a default output name is defined, falling back to other possibilities if the output isn't defined.</para>
<variablelist>
<title>File type groups</title>
<varlistentry><term><varname>
$outputDev</varname></term><listitem><para>
is for development-only files. These include C(++) headers, pkg-config, cmake and aclocal files. They go to <varname>dev</varname> or <varname>out</varname> by default.
</para></listitem>
</varlistentry>
<para>
The support code currently recognizes some particular kinds of outputs and
either instructs the build system of the package to put files into their
desired outputs or it moves the files during the fixup phase. Each group of
file types has an <varname>outputFoo</varname> variable specifying the
output name where they should go. If that variable isn't defined by the
derivation writer, it is guessed &ndash; a default output name is defined,
falling back to other possibilities if the output isn't defined.
</para>
<varlistentry><term><varname>
$outputBin</varname></term><listitem><para>
is meant for user-facing binaries, typically residing in bin/. They go to <varname>bin</varname> or <varname>out</varname> by default.
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputLib</varname></term><listitem><para>
is meant for libraries, typically residing in <filename>lib/</filename> and <filename>libexec/</filename>. They go to <varname>lib</varname> or <varname>out</varname> by default.
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputDoc</varname></term><listitem><para>
is for user documentation, typically residing in <filename>share/doc/</filename>. It goes to <varname>doc</varname> or <varname>out</varname> by default.
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputDevdoc</varname></term><listitem><para>
is for <emphasis>developer</emphasis> documentation. Currently we count gtk-doc and devhelp books in there. It goes to <varname>devdoc</varname> or is removed (!) by default. This is because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputMan</varname></term><listitem><para>
is for man pages (except for section 3). They go to <varname>man</varname> or <varname>$outputBin</varname> by default.
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputDevman</varname></term><listitem><para>
is for section 3 man pages. They go to <varname>devman</varname> or <varname>$outputMan</varname> by default.
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputInfo</varname></term><listitem><para>
is for info pages. They go to <varname>info</varname> or <varname>$outputBin</varname> by default.
</para></listitem></varlistentry>
</variablelist>
<variablelist>
<varlistentry>
<term>
<varname> $outputDev</varname>
</term>
<listitem>
<para>
is for development-only files. These include C(++) headers, pkg-config,
cmake and aclocal files. They go to <varname>dev</varname> or
<varname>out</varname> by default.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname> $outputBin</varname>
</term>
<listitem>
<para>
is meant for user-facing binaries, typically residing in bin/. They go
to <varname>bin</varname> or <varname>out</varname> by default.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname> $outputLib</varname>
</term>
<listitem>
<para>
is meant for libraries, typically residing in <filename>lib/</filename>
and <filename>libexec/</filename>. They go to <varname>lib</varname> or
<varname>out</varname> by default.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname> $outputDoc</varname>
</term>
<listitem>
<para>
is for user documentation, typically residing in
<filename>share/doc/</filename>. It goes to <varname>doc</varname> or
<varname>out</varname> by default.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname> $outputDevdoc</varname>
</term>
<listitem>
<para>
is for <emphasis>developer</emphasis> documentation. Currently we count
gtk-doc and devhelp books in there. It goes to <varname>devdoc</varname>
or is removed (!) by default. This is because e.g. gtk-doc tends to be
rather large and completely unused by nixpkgs users.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname> $outputMan</varname>
</term>
<listitem>
<para>
is for man pages (except for section 3). They go to
<varname>man</varname> or <varname>$outputBin</varname> by default.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname> $outputDevman</varname>
</term>
<listitem>
<para>
is for section 3 man pages. They go to <varname>devman</varname> or
<varname>$outputMan</varname> by default.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname> $outputInfo</varname>
</term>
<listitem>
<para>
is for info pages. They go to <varname>info</varname> or
<varname>$outputBin</varname> by default.
</para>
</listitem>
</varlistentry>
</variablelist>
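   <para>
    A hedged sketch of overriding one of these defaults from a derivation
    (hypothetical package):
<programlisting>
outputs = [ "out" "dev" "doc" ];

# Route man pages into the "doc" output instead of the guessed default.
outputMan = "doc";
</programlisting>
   </para>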
</section>
<section><title>Common caveats</title>
<itemizedlist>
<listitem><para>Some configure scripts don't like some of the parameters passed by default by the framework, e.g. <literal>--docdir=/foo/bar</literal>. You can disable this by setting <literal>setOutputFlags = false;</literal>.</para></listitem>
<listitem><para>The outputs of a single derivation can retain references to each other, but note that circular references are not allowed. (And each strongly-connected component would act as a single output anyway.)</para></listitem>
  <listitem><para>Most split packages contain their core functionality in libraries. These libraries tend to refer to various kinds of data that typically get into <varname>out</varname>, e.g. locale strings, so there is often no advantage in separating the libraries into <varname>lib</varname>, as keeping them in <varname>out</varname> is easier.</para></listitem>
<listitem><para>Some packages have hidden assumptions on install paths, which complicates splitting.</para></listitem>
</itemizedlist>
<section xml:id="sec-multiple-outputs-caveats">
<title>Common caveats</title>
<itemizedlist>
<listitem>
<para>
Some configure scripts don't like some of the parameters passed by
default by the framework, e.g. <literal>--docdir=/foo/bar</literal>. You
can disable this by setting <literal>setOutputFlags = false;</literal>.
</para>
</listitem>
<listitem>
<para>
The outputs of a single derivation can retain references to each other,
but note that circular references are not allowed. (And each
strongly-connected component would act as a single output anyway.)
</para>
</listitem>
<listitem>
<para>
      Most split packages contain their core functionality in libraries. These
      libraries tend to refer to various kinds of data that typically get
      into <varname>out</varname>, e.g. locale strings, so there is often no
      advantage in separating the libraries into <varname>lib</varname>, as
      keeping them in <varname>out</varname> is easier.
</para>
</listitem>
<listitem>
<para>
Some packages have hidden assumptions on install paths, which complicates
splitting.
</para>
</listitem>
</itemizedlist>
</section>
</section><!--Writing a split derivation-->
</section>
<!--Writing a split derivation-->
</chapter>

View File

@ -64,7 +64,7 @@ stdenv.mkDerivation {
sha256 = "1ian3kwh2vg6hr3ymrv48s04gijs539vzrq62xr76bxbhbwnz2np";
};
inherit noSysDirs;
configureFlags = "--target=arm-linux";
configureFlags = [ "--target=arm-linux" ];
}
---
@ -78,7 +78,7 @@ Step 2: build kernel headers for the target architecture
---
{stdenv, fetchurl}:
assert stdenv.system == "i686-linux";
assert stdenv.buildPlatform.system == "i686-linux";
stdenv.mkDerivation {
name = "linux-headers-2.6.13.1-arm";

View File

@ -1,95 +1,148 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-overlays">
<title>Overlays</title>
<para>This chapter describes how to extend and change Nixpkgs packages using
overlays. Overlays are used to add layers in the fix-point used by Nixpkgs
to compose the set of all packages.</para>
<para>Nixpkgs can be configured with a list of overlays, which are
applied in order. This means that the order of the overlays can be significant
if multiple layers override the same package.</para>
<title>Overlays</title>
<para>
This chapter describes how to extend and change Nixpkgs using overlays.
Overlays are used to add layers in the fixed-point used by Nixpkgs to compose
the set of all packages.
</para>
<para>
Nixpkgs can be configured with a list of overlays, which are applied in
order. This means that the order of the overlays can be significant if
multiple layers override the same package.
</para>
<!--============================================================-->
<section xml:id="sec-overlays-install">
<title>Installing overlays</title>
<section xml:id="sec-overlays-install">
<title>Installing overlays</title>
<para>
The list of overlays can be set either explicitly in a Nix expression, or
through <literal>&lt;nixpkgs-overlays></literal> or user configuration
files.
</para>
<para>The list of overlays is determined as follows.</para>
<section xml:id="sec-overlays-argument">
<title>Set overlays in NixOS or Nix expressions</title>
<para>If the <varname>overlays</varname> argument is not provided explicitly, we look for overlays in a path. The path
is determined as follows:
<para>
On a NixOS system the value of the <literal>nixpkgs.overlays</literal>
option, if present, is passed to the system Nixpkgs directly as an
argument. Note that this does not affect the overlays for non-NixOS
operations (e.g. <literal>nix-env</literal>), which are
<link xlink:href="#sec-overlays-lookup">looked</link> up independently.
</para>
<orderedlist>
<para>
The list of overlays can be passed explicitly when importing nixpkgs, for
example <literal>import &lt;nixpkgs> { overlays = [ overlay1 overlay2 ];
}</literal>.
</para>
<listitem>
<para>First, if an <varname>overlays</varname> argument to the nixpkgs function itself is given,
then that is used.</para>
<para>
    Further overlays can be added by calling <literal>pkgs.extend</literal>
    or <literal>pkgs.appendOverlays</literal>, although it is often preferable
    to avoid these functions because they recompute the Nixpkgs fixpoint,
    which is somewhat expensive.
</para>
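   <para>
    A brief sketch of both styles; the overlays shown are empty placeholders:
<programlisting>
let
  overlay1 = self: super: { /* overridden or new packages */ };
  overlay2 = self: super: { /* ... */ };
  pkgs = import &lt;nixpkgs> { overlays = [ overlay1 ]; };
in
  pkgs.extend overlay2  # adds a further overlay, recomputing the fixpoint
</programlisting>
   </para>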
</section>
  <para>This can be passed explicitly when importing nixpkgs, for example
<literal>import &lt;nixpkgs> { overlays = [ overlay1 overlay2 ]; }</literal>.</para>
</listitem>
<section xml:id="sec-overlays-lookup">
<title>Install overlays via configuration lookup</title>
<listitem>
<para>Otherwise, if the Nix path entry <literal>&lt;nixpkgs-overlays></literal> exists, we look for overlays
at that path, as described below.</para>
<para>
The list of overlays is determined as follows.
</para>
<para>See the section on <literal>NIX_PATH</literal> in the Nix manual for more details on how to
set a value for <literal>&lt;nixpkgs-overlays>.</literal></para>
</listitem>
<para>
<orderedlist>
<listitem>
<para>
First, if an
<link xlink:href="#sec-overlays-argument"><varname>overlays</varname>
argument</link> to the Nixpkgs function itself is given, then that is
used and no path lookup will be performed.
</para>
</listitem>
<listitem>
<para>
Otherwise, if the Nix path entry
<literal>&lt;nixpkgs-overlays></literal> exists, we look for overlays at
that path, as described below.
</para>
<para>
See the section on <literal>NIX_PATH</literal> in the Nix manual for
more details on how to set a value for
<literal>&lt;nixpkgs-overlays>.</literal>
</para>
</listitem>
<listitem>
<para>
If one of <filename>~/.config/nixpkgs/overlays.nix</filename> and
<filename>~/.config/nixpkgs/overlays/</filename> exists, then we look
for overlays at that path, as described below. It is an error if both
exist.
</para>
</listitem>
</orderedlist>
</para>
<listitem>
<para>If one of <filename>~/.config/nixpkgs/overlays.nix</filename> and
<filename>~/.config/nixpkgs/overlays/</filename> exists, then we look for overlays at that path, as
described below. It is an error if both exist.</para>
</listitem>
</orderedlist>
</para>
<para>If we are looking for overlays at a path, then there are two cases:
<itemizedlist>
<listitem>
<para>If the path is a file, then the file is imported as a Nix expression and used as the list of
overlays.</para>
</listitem>
<listitem>
<para>If the path is a directory, then we take the content of the directory, order it
lexicographically, and attempt to interpret each as an overlay by:
<para>
If we are looking for overlays at a path, then there are two cases:
<itemizedlist>
<listitem>
<para>Importing the file, if it is a <literal>.nix</literal> file.</para>
</listitem>
<listitem>
<para>Importing a top-level <filename>default.nix</filename> file, if it is a directory.</para>
</listitem>
<listitem>
<para>
If the path is a file, then the file is imported as a Nix expression and
used as the list of overlays.
</para>
</listitem>
<listitem>
<para>
If the path is a directory, then we take the content of the directory,
order it lexicographically, and attempt to interpret each as an overlay
by:
<itemizedlist>
<listitem>
<para>
Importing the file, if it is a <literal>.nix</literal> file.
</para>
</listitem>
<listitem>
<para>
Importing a top-level <filename>default.nix</filename> file, if it is
a directory.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</para>
</para>
<para>On a NixOS system the value of the <literal>nixpkgs.overlays</literal> option, if present,
is passed to the system Nixpkgs directly as an argument. Note that this does not affect the overlays for
non-NixOS operations (e.g. <literal>nix-env</literal>), which are looked up independently.</para>
<para>The <filename>overlays.nix</filename> option therefore provides a convenient way to use the same
overlays for a NixOS system configuration and user configuration: the same file can be used
as <filename>overlays.nix</filename> and imported as the value of <literal>nixpkgs.overlays</literal>.</para>
</section>
<para>
Because overlays that are set in NixOS configuration do not affect
non-NixOS operations such as <literal>nix-env</literal>, the
<filename>overlays.nix</filename> option provides a convenient way to use
the same overlays for a NixOS system configuration and user configuration:
the same file can be used as <filename>overlays.nix</filename> and imported
as the value of <literal>nixpkgs.overlays</literal>.
</para>
<!-- TODO: Example of sharing overlays between NixOS configuration
and configuration lookup. Also reference the example
from the sec-overlays-argument paragraph about NixOS.
-->
</section>
</section>
<!--============================================================-->
<section xml:id="sec-overlays-definition">
<title>Defining overlays</title>
<section xml:id="sec-overlays-definition">
<title>Defining overlays</title>
<para>Overlays are Nix functions which accept two arguments,
conventionally called <varname>self</varname> and <varname>super</varname>,
and return a set of packages. For example, the following is a valid overlay.</para>
<para>
Overlays are Nix functions which accept two arguments, conventionally called
<varname>self</varname> and <varname>super</varname>, and return a set of
packages. For example, the following is a valid overlay.
</para>
<programlisting>
self: super:
@ -104,31 +157,39 @@ self: super:
}
</programlisting>
<para>The first argument (<varname>self</varname>) corresponds to the final package
set. You should use this set for the dependencies of all packages specified in your
overlay. For example, all the dependencies of <varname>rr</varname> in the example above come
from <varname>self</varname>, as well as the overridden dependencies used in the
<varname>boost</varname> override.</para>
<para>
The first argument (<varname>self</varname>) corresponds to the final
package set. You should use this set for the dependencies of all packages
specified in your overlay. For example, all the dependencies of
<varname>rr</varname> in the example above come from
<varname>self</varname>, as well as the overridden dependencies used in the
<varname>boost</varname> override.
</para>
<para>The second argument (<varname>super</varname>)
corresponds to the result of the evaluation of the previous stages of
Nixpkgs. It does not contain any of the packages added by the current
overlay, nor any of the following overlays. This set should be used either
to refer to packages you wish to override, or to access functions defined
in Nixpkgs. For example, the original recipe of <varname>boost</varname>
in the above example, comes from <varname>super</varname>, as well as the
<varname>callPackage</varname> function.</para>
<para>
The second argument (<varname>super</varname>) corresponds to the result of
the evaluation of the previous stages of Nixpkgs. It does not contain any of
the packages added by the current overlay, nor any of the following
overlays. This set should be used either to refer to packages you wish to
    override, or to access functions defined in Nixpkgs. For example, the
    original recipe of <varname>boost</varname> in the above example comes
    from <varname>super</varname>, as well as the
    <varname>callPackage</varname> function.
</para>
<para>The value returned by this function should be a set similar to
<filename>pkgs/top-level/all-packages.nix</filename>, containing
overridden and/or new packages.</para>
<para>Overlays are similar to other methods for customizing Nixpkgs, in particular
the <literal>packageOverrides</literal> attribute described in <xref linkend="sec-modify-via-packageOverrides"/>.
Indeed, <literal>packageOverrides</literal> acts as an overlay with only the
<varname>super</varname> argument. It is therefore appropriate for basic use,
but overlays are more powerful and easier to distribute.</para>
</section>
<para>
The value returned by this function should be a set similar to
<filename>pkgs/top-level/all-packages.nix</filename>, containing overridden
and/or new packages.
</para>
<para>
Overlays are similar to other methods for customizing Nixpkgs, in particular
the <literal>packageOverrides</literal> attribute described in
<xref linkend="sec-modify-via-packageOverrides"/>. Indeed,
<literal>packageOverrides</literal> acts as an overlay with only the
<varname>super</varname> argument. It is therefore appropriate for basic
use, but overlays are more powerful and easier to distribute.
</para>
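  <para>
   As a minimal, self-contained sketch (the attribute and modification shown
   are illustrative only):
<programlisting>
self: super:

{
  # A new attribute derived from an existing package.
  myHello = super.hello.overrideAttrs (oldAttrs: {
    postInstall = (oldAttrs.postInstall or "") + ''
      echo "built via an overlay" > $out/share/overlay-note
    '';
  });
}
</programlisting>
  </para>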
</section>
</chapter>

9
doc/overrides.css Normal file
View File

@ -0,0 +1,9 @@
.docbook .xref img[src^=images\/callouts\/],
.screen img,
.programlisting img {
width: 1em;
}
.calloutlist img {
width: 1.5em;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,27 +1,25 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-platform-nodes">
<title>Platform Notes</title>
<section xml:id="sec-darwin">
<title>Darwin (macOS)</title>
<title>Platform Notes</title>
<para>
Some common issues when packaging software for Darwin:
</para>
<section xml:id="sec-darwin">
<title>Darwin (macOS)</title>
<para>Some common issues when packaging software for darwin:</para>
<itemizedlist>
<listitem>
<itemizedlist>
<listitem>
<para>
The darwin <literal>stdenv</literal> uses clang instead of gcc.
When referring to the compiler <varname>$CC</varname> or <command>cc</command>
will work in both cases. Some builds hardcode gcc/g++ in their
build scripts, that can usually be fixed with using something
like <literal>makeFlags = [ "CC=cc" ];</literal> or by patching
the build scripts.
      The Darwin <literal>stdenv</literal> uses clang instead of gcc. When
      referring to the compiler, <varname>$CC</varname> or <command>cc</command>
      will work in both cases. Some builds hardcode gcc/g++ in their build
      scripts; that can usually be fixed by using something like
      <literal>makeFlags = [ "CC=cc" ];</literal> or by patching the build
      scripts.
</para>
<programlisting>
<programlisting>
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
@ -30,36 +28,63 @@
'';
}
</programlisting>
</listitem>
<listitem>
</listitem>
<listitem>
<para>
On darwin libraries are linked using absolute paths, libraries
are resolved by their <literal>install_name</literal> at link
time. Sometimes packages won't set this correctly causing the
library lookups to fail at runtime. This can be fixed by adding
extra linker flags or by running <command>install_name_tool -id</command>
during the <function>fixupPhase</function>.
      On Darwin, libraries are linked using absolute paths; libraries are
      resolved by their <literal>install_name</literal> at link time. Sometimes
      packages won't set this correctly, causing the library lookups to fail at
      runtime. This can be fixed by adding extra linker flags or by running
      <command>install_name_tool -id</command> during the
      <function>fixupPhase</function>.
</para>
<programlisting>
<programlisting>
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
makeFlags = stdenv.lib.optional stdenv.isDarwin "LDFLAGS=-Wl,-install_name,$(out)/lib/libfoo.dylib";
}
</programlisting>
</listitem>
<listitem>
</listitem>
<listitem>
<para>
Some packages assume xcode is available and use <command>xcrun</command>
to resolve build tools like <command>clang</command>, etc.
This causes errors like <code>xcode-select: error: no developer tools were found at '/Applications/Xcode.app'</code>
while the build doesn't actually depend on xcode.
Even if the libraries are linked using absolute paths and resolved via
their <literal>install_name</literal> correctly, tests can sometimes fail
to run binaries. This happens because the <varname>checkPhase</varname>
runs before the libraries are installed.
</para>
<programlisting>
<para>
This can usually be solved by running the tests after the
<varname>installPhase</varname> or alternatively by using
<varname>DYLD_LIBRARY_PATH</varname>. More information about this variable
can be found in the <citerefentry>
<refentrytitle>dyld</refentrytitle>
<manvolnum>1</manvolnum></citerefentry> manpage.
</para>
<programlisting>
dyld: Library not loaded: /nix/store/7hnmbscpayxzxrixrgxvvlifzlxdsdir-jq-1.5-lib/lib/libjq.1.dylib
Referenced from: /private/tmp/nix-build-jq-1.5.drv-0/jq-1.5/tests/../jq
Reason: image not found
./tests/jqtest: line 5: 75779 Abort trap: 6
</programlisting>
<programlisting>
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
doInstallCheck = true;
installCheckTarget = "check";
}
</programlisting>
</listitem>
<listitem>
<para>
      Some packages assume Xcode is available and use <command>xcrun</command>
      to resolve build tools like <command>clang</command>, etc. This causes
      errors like <code>xcode-select: error: no developer tools were found at
      '/Applications/Xcode.app'</code> even though the build doesn't actually
      depend on Xcode.
</para>
<programlisting>
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
@ -69,15 +94,12 @@
'';
}
</programlisting>
<para>
The package <literal>xcbuild</literal> can be used to build projects
that really depend on Xcode, however projects that build some kind of
graphical interface won't work without using Xcode in an impure way.
The package <literal>xcbuild</literal> can be used to build projects that
really depend on Xcode. However, this replacement is not 100%
compatible with Xcode and can occasionally cause issues.
</para>
</listitem>
</itemizedlist>
</section>
</listitem>
</itemizedlist>
</section>
</chapter>

View File

@ -1,223 +1,219 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-quick-start">
<title>Quick Start to Adding a Package</title>
<para>To add a package to Nixpkgs:
<orderedlist>
<listitem>
      <para>Check out the Nixpkgs source tree:
<title>Quick Start to Adding a Package</title>
<para>
To add a package to Nixpkgs:
<orderedlist>
<listitem>
<para>
     Check out the Nixpkgs source tree:
<screen>
$ git clone git://github.com/NixOS/nixpkgs.git
$ git clone https://github.com/NixOS/nixpkgs
$ cd nixpkgs</screen>
</para>
</listitem>
<listitem>
<para>Find a good place in the Nixpkgs tree to add the Nix
expression for your package. For instance, a library package
typically goes into
<filename>pkgs/development/libraries/<replaceable>pkgname</replaceable></filename>,
while a web browser goes into
<filename>pkgs/applications/networking/browsers/<replaceable>pkgname</replaceable></filename>.
See <xref linkend="sec-organisation" /> for some hints on the tree
organisation. Create a directory for your package, e.g.
</listitem>
<listitem>
<para>
Find a good place in the Nixpkgs tree to add the Nix expression for your
package. For instance, a library package typically goes into
<filename>pkgs/development/libraries/<replaceable>pkgname</replaceable></filename>,
while a web browser goes into
<filename>pkgs/applications/networking/browsers/<replaceable>pkgname</replaceable></filename>.
See <xref linkend="sec-organisation" /> for some hints on the tree
organisation. Create a directory for your package, e.g.
<screen>
$ mkdir pkgs/development/libraries/libfoo</screen>
</para>
</listitem>
<listitem>
<para>In the package directory, create a Nix expression — a piece
of code that describes how to build the package. In this case, it
should be a <emphasis>function</emphasis> that is called with the
package dependencies as arguments, and returns a build of the
package in the Nix store. The expression should usually be called
<filename>default.nix</filename>.
</listitem>
<listitem>
<para>
In the package directory, create a Nix expression — a piece of code that
describes how to build the package. In this case, it should be a
<emphasis>function</emphasis> that is called with the package dependencies
as arguments, and returns a build of the package in the Nix store. The
expression should usually be called <filename>default.nix</filename>.
<screen>
$ emacs pkgs/development/libraries/libfoo/default.nix
$ git add pkgs/development/libraries/libfoo/default.nix</screen>
</para>
<para>You can have a look at the existing Nix expressions under
<filename>pkgs/</filename> to see how its done. Here are some
good ones:
<itemizedlist>
<listitem>
<para>GNU Hello: <link
<para>
You can have a look at the existing Nix expressions under
<filename>pkgs/</filename> to see how its done. Here are some good
ones:
<itemizedlist>
<listitem>
<para>
GNU Hello:
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/misc/hello/default.nix"><filename>pkgs/applications/misc/hello/default.nix</filename></link>.
Trivial package, which specifies some <varname>meta</varname>
attributes which is good practice.</para>
</listitem>
<listitem>
<para>GNU cpio: <link
          A trivial package that specifies some <varname>meta</varname>
          attributes, which is good practice.
</para>
</listitem>
<listitem>
<para>
GNU cpio:
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/archivers/cpio/default.nix"><filename>pkgs/tools/archivers/cpio/default.nix</filename></link>.
Also a simple package. The generic builder in
<varname>stdenv</varname> does everything for you. It has
no dependencies beyond <varname>stdenv</varname>.</para>
</listitem>
<listitem>
<para>GNU Multiple Precision arithmetic library (GMP): <link
Also a simple package. The generic builder in <varname>stdenv</varname>
does everything for you. It has no dependencies beyond
<varname>stdenv</varname>.
</para>
</listitem>
<listitem>
<para>
GNU Multiple Precision arithmetic library (GMP):
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/gmp/5.1.x.nix"><filename>pkgs/development/libraries/gmp/5.1.x.nix</filename></link>.
Also done by the generic builder, but has a dependency on
<varname>m4</varname>.</para>
</listitem>
<listitem>
<para>Pan, a GTK-based newsreader: <link
Also done by the generic builder, but has a dependency on
<varname>m4</varname>.
</para>
</listitem>
<listitem>
<para>
Pan, a GTK-based newsreader:
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/newsreaders/pan/default.nix"><filename>pkgs/applications/networking/newsreaders/pan/default.nix</filename></link>.
Has an optional dependency on <varname>gtkspell</varname>,
which is only built if <varname>spellCheck</varname> is
<literal>true</literal>.</para>
</listitem>
<listitem>
<para>Apache HTTPD: <link
Has an optional dependency on <varname>gtkspell</varname>, which is
only built if <varname>spellCheck</varname> is <literal>true</literal>.
</para>
</listitem>
<listitem>
<para>
Apache HTTPD:
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/http/apache-httpd/2.4.nix"><filename>pkgs/servers/http/apache-httpd/2.4.nix</filename></link>.
A bunch of optional features, variable substitutions in the
configure flags, a post-install hook, and miscellaneous
hackery.</para>
</listitem>
<listitem>
<para>Thunderbird: <link
A bunch of optional features, variable substitutions in the configure
flags, a post-install hook, and miscellaneous hackery.
</para>
</listitem>
<listitem>
<para>
Thunderbird:
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/mailreaders/thunderbird/default.nix"><filename>pkgs/applications/networking/mailreaders/thunderbird/default.nix</filename></link>.
Lots of dependencies.</para>
</listitem>
<listitem>
<para>JDiskReport, a Java utility: <link
Lots of dependencies.
</para>
</listitem>
<listitem>
<para>
JDiskReport, a Java utility:
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/misc/jdiskreport/default.nix"><filename>pkgs/tools/misc/jdiskreport/default.nix</filename></link>
(and the <link
(and the
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/misc/jdiskreport/builder.sh">builder</link>).
Nixpkgs doesnt have a decent <varname>stdenv</varname> for
Java yet so this is pretty ad-hoc.</para>
</listitem>
<listitem>
<para>XML::Simple, a Perl module: <link
Nixpkgs doesnt have a decent <varname>stdenv</varname> for Java yet
so this is pretty ad-hoc.
</para>
</listitem>
<listitem>
<para>
XML::Simple, a Perl module:
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/perl-packages.nix"><filename>pkgs/top-level/perl-packages.nix</filename></link>
(search for the <varname>XMLSimple</varname> attribute).
Most Perl modules are so simple to build that they are
defined directly in <filename>perl-packages.nix</filename>;
no need to make a separate file for them.</para>
</listitem>
<listitem>
<para>Adobe Reader: <link
(search for the <varname>XMLSimple</varname> attribute). Most Perl
modules are so simple to build that they are defined directly in
<filename>perl-packages.nix</filename>; no need to make a separate file
for them.
</para>
</listitem>
<listitem>
<para>
Adobe Reader:
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/misc/adobe-reader/default.nix"><filename>pkgs/applications/misc/adobe-reader/default.nix</filename></link>.
Shows how binary-only packages can be supported. In
particular the <link
Shows how binary-only packages can be supported. In particular the
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/misc/adobe-reader/builder.sh">builder</link>
uses <command>patchelf</command> to set the RUNPATH and ELF
interpreter of the executables so that the right libraries
are found at runtime.</para>
</listitem>
</itemizedlist>
uses <command>patchelf</command> to set the RUNPATH and ELF interpreter
of the executables so that the right libraries are found at runtime.
</para>
</listitem>
</itemizedlist>
</para>
<para>Some notes:
<itemizedlist>
<listitem>
<para>All <varname linkend="chap-meta">meta</varname>
attributes are optional, but its still a good idea to
provide at least the <varname>description</varname>,
<varname>homepage</varname> and <varname
linkend="sec-meta-license">license</varname>.</para>
</listitem>
<listitem>
<para>You can use <command>nix-prefetch-url</command> (or similar nix-prefetch-git, etc)
<replaceable>url</replaceable> to get the SHA-256 hash of
source distributions. There are similar commands as <command>nix-prefetch-git</command> and
<command>nix-prefetch-hg</command> available in <literal>nix-prefetch-scripts</literal> package.</para>
</listitem>
<listitem>
<para>A list of schemes for <literal>mirror://</literal>
URLs can be found in <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/fetchurl/mirrors.nix"><filename>pkgs/build-support/fetchurl/mirrors.nix</filename></link>.</para>
</listitem>
</itemizedlist>
<para>
Some notes:
<itemizedlist>
<listitem>
<para>
All <varname linkend="chap-meta">meta</varname> attributes are
optional, but it's still a good idea to provide at least the
<varname>description</varname>, <varname>homepage</varname> and
<varname linkend="sec-meta-license">license</varname>; a minimal sketch
of such a block follows this list.
</para>
</listitem>
<listitem>
<para>
You can use <command>nix-prefetch-url</command>
<replaceable>url</replaceable> to get the SHA-256 hash of source
distributions (see the example after this list). Similar commands, such
as <command>nix-prefetch-git</command> and
<command>nix-prefetch-hg</command>, are available in the
<literal>nix-prefetch-scripts</literal> package.
</para>
</listitem>
<listitem>
<para>
A list of schemes for <literal>mirror://</literal> URLs can be found in
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/fetchurl/mirrors.nix"><filename>pkgs/build-support/fetchurl/mirrors.nix</filename></link>.
</para>
</listitem>
</itemizedlist>
</para>
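<para>
A minimal sketch of such a <varname>meta</varname> section (the values are
only illustrative, assuming the common <literal>with stdenv.lib;</literal>
idiom) could look like this:
<programlisting>
meta = with stdenv.lib; {
  description = "A short, one-line description of the package";
  homepage = "https://example.org/";
  license = licenses.gpl3;
};
</programlisting>
</para>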
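<para>
For example (the URL is only illustrative), prefetching a source tarball
could look like this:
<screen>
$ nix-prefetch-url https://ftp.gnu.org/gnu/hello/hello-2.10.tar.gz</screen>
The command downloads the file into the Nix store once and prints the
base-32 SHA-256 hash that can be copied into the fetcher expression.
</para>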
<para>The exact syntax and semantics of the Nix expression
language, including the built-in function, are described in the
Nix manual in the <link
<para>
The exact syntax and semantics of the Nix expression language, including
the built-in functions, are described in the Nix manual in the
<link
xlink:href="http://hydra.nixos.org/job/nix/trunk/tarball/latest/download-by-type/doc/manual/#chap-writing-nix-expressions">chapter
on writing Nix expressions</link>.</para>
</listitem>
<listitem>
<para>Add a call to the function defined in the previous step to
<link
on writing Nix expressions</link>.
</para>
</listitem>
<listitem>
<para>
Add a call to the function defined in the previous step to
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/all-packages.nix"><filename>pkgs/top-level/all-packages.nix</filename></link>
with some descriptive name for the variable,
e.g. <varname>libfoo</varname>.
<screen>
with some descriptive name for the variable, e.g.
<varname>libfoo</varname>.
<screen>
$ emacs pkgs/top-level/all-packages.nix</screen>
</para>
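<para>
The entry itself is typically a one-line <function>callPackage</function>
call; a sketch (the path is hypothetical) could look like:
<programlisting>
libfoo = callPackage ../development/libraries/libfoo { };
</programlisting>
</para>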
<para>The attributes in that file are sorted by category (like
“Development / Libraries”) that more-or-less correspond to the
directory structure of Nixpkgs, and then by attribute name.</para>
</listitem>
<listitem>
<para>To test whether the package builds, run the following command
from the root of the nixpkgs source tree:
<screen>
<para>
The attributes in that file are sorted by category (like “Development /
Libraries”) that more-or-less correspond to the directory structure of
Nixpkgs, and then by attribute name.
</para>
</listitem>
<listitem>
<para>
To test whether the package builds, run the following command from the
root of the nixpkgs source tree:
<screen>
$ nix-build -A libfoo</screen>
where <varname>libfoo</varname> should be the variable name
defined in the previous step. You may want to add the flag
<option>-K</option> to keep the temporary build directory in case
something fails. If the build succeeds, a symlink
<filename>./result</filename> to the package in the Nix store is
created.</para>
</listitem>
<listitem>
<para>If you want to install the package into your profile
(optional), do
<screen>
$ nix-env -f . -iA libfoo</screen>
where <varname>libfoo</varname> should be the variable name defined in the
previous step. You may want to add the flag <option>-K</option> to keep
the temporary build directory in case something fails. If the build
succeeds, a symlink <filename>./result</filename> to the package in the
Nix store is created.
</para>
</listitem>
<listitem>
<para>Optionally commit the new package and open a pull request, or send a patch to
<literal>https://groups.google.com/forum/#!forum/nix-devel</literal>.</para>
</listitem>
</orderedlist>
</para>
</listitem>
<listitem>
<para>
If you want to install the package into your profile (optional), do
<screen>
$ nix-env -f . -iA libfoo</screen>
</para>
</listitem>
<listitem>
<para>
Optionally commit the new package and open a pull request, or send a patch
to <literal>https://groups.google.com/forum/#!forum/nix-devel</literal>.
</para>
</listitem>
</orderedlist>
</para>
</chapter>

File diff suppressed because it is too large Load Diff


@ -3,93 +3,153 @@
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-reviewing-contributions">
<title>Reviewing contributions</title>
<warning>
<para>The following section is a draft and reviewing policy is still being
discussed.</para>
</warning>
<para>The nixpkgs projects receives a fairly high number of contributions via
GitHub pull-requests. Reviewing and approving these is an important task and a
way to contribute to the project.</para>
<para>The high change rate of nixpkgs make any pull request that is open for
long enough subject to conflicts that will require extra work from the
submitter or the merger. Reviewing pull requests in a timely manner and being
responsive to the comments is the key to avoid these. GitHub provides sort
filters that can be used to see the <link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most
recently</link> and the <link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least
recently</link> updated pull-requests.</para>
<para>When reviewing a pull request, please always be nice and polite.
<title>Reviewing contributions</title>
<warning>
<para>
The following section is a draft, and the policy for reviewing is still
being discussed in issues such as
<link
xlink:href="https://github.com/NixOS/nixpkgs/issues/11166">#11166
</link> and
<link
xlink:href="https://github.com/NixOS/nixpkgs/issues/20836">#20836
</link>.
</para>
</warning>
<para>
The Nixpkgs project receives a fairly high number of contributions via GitHub
pull requests. Reviewing and approving these is an important task and a way
to contribute to the project.
</para>
<para>
The high change rate of Nixpkgs makes any pull request that remains open for
too long subject to conflicts that will require extra work from the submitter
or the merger. Reviewing pull requests in a timely manner and being responsive
to the comments is the key to avoiding this issue. GitHub provides sort filters
that can be used to see the <link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most
recently</link> and the <link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least
recently</link> updated pull requests. We highly encourage looking at
<link xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+review%3Anone+status%3Asuccess+-label%3A%222.status%3A+work-in-progress%22+no%3Aproject+no%3Aassignee+no%3Amilestone">
this list of ready to merge, unreviewed pull requests</link>.
</para>
<para>
When reviewing a pull request, please always be nice and polite.
Controversial changes can lead to controversial opinions, but it is important
to respect every community members and their work.</para>
<para>GitHub provides reactions, they are a simple and quick way to provide
feedback to pull-requests or any comments. The thumb-down reaction should be
used with care and if possible accompanied with some explanations so the
submitter has directions to improve his contribution.</para>
<para>Pull-requests reviews should include a list of what has been reviewed in a
comment, so other reviewers and mergers can know the state of the
review.</para>
<para>All the review template samples provided in this section are generic and
to respect every community member and their work.
</para>
<para>
GitHub provides reactions as a simple and quick way to provide feedback to
pull requests or any comments. The thumb-down reaction should be used with
care and if possible accompanied with some explanation so the submitter has
directions to improve their contribution.
</para>
<para>
Pull request reviews should include a list of what has been reviewed in a
comment, so other reviewers and mergers can know the state of the review.
</para>
<para>
All the review template samples provided in this section are generic and
meant as examples. Their usage is optional and the reviewer is free to adapt
them to his liking.</para>
them to their liking.
</para>
<section xml:id="reviewing-contributions-package-updates">
<title>Package updates</title>
<section><title>Package updates</title>
<para>
A package update is the most trivial and common type of pull request. These
pull requests mainly consist of updating the version part of the package
name and the source hash.
</para>
<para>A package update is the most trivial and common type of pull-request.
These pull-requests mainly consist in updating the version part of the package
name and the source hash.</para>
<para>It can happen that non trivial updates include patches or more complex
changes.</para>
<para>
It can happen that non-trivial updates include patches or more complex
changes.
</para>
<para>Reviewing process:</para>
<para>
Reviewing process:
</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem>
<para>
Add labels to the pull request. (Requires commit rights)
</para>
<itemizedlist>
<listitem><para><literal>8.has: package (update)</literal> and any topic
label that fit the updated package.</para></listitem>
<listitem>
<para>
<literal>8.has: package (update)</literal> and any topic label that fits
the updated package.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the package versioning is fitting the
guidelines.</para></listitem>
<listitem><para>Ensure that the commit text is fitting the
guidelines.</para></listitem>
<listitem><para>Ensure that the package maintainers are notified.</para>
</listitem>
<listitem>
<para>
Ensure that the package versioning fits the guidelines.
</para>
</listitem>
<listitem>
<para>
Ensure that the commit text fits the guidelines.
</para>
</listitem>
<listitem>
<para>
Ensure that the package maintainers are notified.
</para>
<itemizedlist>
<listitem><para>mention-bot usually notify GitHub users based on the
submitted changes, but it can happen that it misses some of the
package maintainers.</para></listitem>
<listitem>
<para>
<link xlink:href="https://help.github.com/articles/about-codeowners/">CODEOWNERS</link>
will make GitHub notify users based on the submitted changes, but it can
happen that it misses some of the package maintainers.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the meta field contains correct
information.</para>
</listitem>
<listitem>
<para>
Ensure that the meta field information is correct.
</para>
<itemizedlist>
<listitem><para>License can change with version updates, so it should be
checked to be fitting upstream license.</para></listitem>
<listitem><para>If the package has no maintainer, a maintainer must be
set. This can be the update submitter or a community member that
accepts to take maintainership of the package.</para></listitem>
<listitem>
<para>
License can change with version updates, so it should be checked to
match the upstream license.
</para>
</listitem>
<listitem>
<para>
If the package has no maintainer, a maintainer must be set. This can be
the update submitter or a community member that accepts to take
maintainership of the package.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the code contains no typos.</para></listitem>
<listitem><para>Building the package locally.</para>
</listitem>
<listitem>
<para>
Ensure that the code contains no typos.
</para>
</listitem>
<listitem>
<para>
Building the package locally.
</para>
<itemizedlist>
<listitem><para>Pull-requests are often targeted to the master or staging
branch so building the pull-request locally as it is submitted can
trigger a large amount of source builds.</para>
<para>It is possible to rebase the changes on nixos-unstable or
nixpkgs-unstable for easier review by running the following commands
from a nixpkgs clone.
<listitem>
<para>
Pull requests are often targeted to the master or staging branch, and
building the pull request locally when it is submitted can trigger many
source builds.
</para>
<para>
It is possible to rebase the changes on nixos-unstable or
nixpkgs-unstable for easier review by running the following commands
from a nixpkgs clone.
<screen>
$ git remote add channels https://github.com/NixOS/nixpkgs-channels.git <co
xml:id='reviewing-rebase-1' />
@ -98,43 +158,56 @@ $ git fetch origin pull/PRNUMBER/head <co xml:id='reviewing-rebase-3' />
$ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD <co
xml:id='reviewing-rebase-4' />
</screen>
<calloutlist>
<callout arearefs='reviewing-rebase-1'>
<para>This should be done only once to be able to fetch channel
branches from the nixpkgs-channels repository.</para>
</callout>
<callout arearefs='reviewing-rebase-2'>
<para>Fetching the nixos-unstable branch.</para>
</callout>
<callout arearefs='reviewing-rebase-3'>
<para>Fetching the pull-request changes, <varname>PRNUMBER</varname>
is the number at the end of the pull-request title and
<varname>BASEBRANCH</varname> the base branch of the
pull-request.</para>
</callout>
<callout arearefs='reviewing-rebase-3'>
<para>Rebasing the pull-request changes to the nixos-unstable
branch.</para>
</callout>
</calloutlist>
</para>
</listitem>
<listitem>
<para>The <link xlink:href="https://github.com/madjar/nox">nox</link>
tool can be used to review a pull-request content in a single command.
It doesn't rebase on a channel branch so it might trigger multiple
source builds. <varname>PRNUMBER</varname> should be replaced by the
number at the end of the pull-request title.</para>
<calloutlist>
<callout arearefs='reviewing-rebase-1'>
<para>
This should be done only once to be able to fetch channel branches
from the nixpkgs-channels repository.
</para>
</callout>
<callout arearefs='reviewing-rebase-2'>
<para>
Fetching the nixos-unstable branch.
</para>
</callout>
<callout arearefs='reviewing-rebase-3'>
<para>
Fetching the pull request changes, <varname>PRNUMBER</varname> is the
number at the end of the pull request title and
<varname>BASEBRANCH</varname> the base branch of the pull request.
</para>
</callout>
<callout arearefs='reviewing-rebase-4'>
<para>
Rebasing the pull request changes to the nixos-unstable branch.
</para>
</callout>
</calloutlist>
</para>
</listitem>
<listitem>
<para>
The <link xlink:href="https://github.com/madjar/nox">nox</link> tool can
be used to review a pull request's content in a single command. It doesn't
rebase on a channel branch so it might trigger multiple source builds.
<varname>PRNUMBER</varname> should be replaced by the number at the end
of the pull request title.
</para>
<screen>
$ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
</screen>
</listitem>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Running every binary.</para></listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Running every binary.
</para>
</listitem>
</itemizedlist>
<example><title>Sample template for a package update review</title>
<example xml:id="reviewing-contributions-sample-package-update">
<title>Sample template for a package update review</title>
<screen>
##### Reviewed points
@ -148,55 +221,105 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
##### Comments
</screen></example>
</section>
</screen>
</example>
</section>
<section xml:id="reviewing-contributions-new-packages">
<title>New packages</title>
<section><title>New packages</title>
<para>
New packages are a common type of pull request. These pull requests
consist of adding a new Nix expression for a package.
</para>
<para>New packages are a common type of pull-requests. These pull requests
consists in adding a new nix-expression for a package.</para>
<para>
Reviewing process:
</para>
<para>Reviewing process:</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem>
<para>
Add labels to the pull request. (Requires commit rights)
</para>
<itemizedlist>
<listitem><para><literal>8.has: package (new)</literal> and any topic
label that fit the new package.</para></listitem>
<listitem>
<para>
<literal>8.has: package (new)</literal> and any topic label that fits the
new package.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the package versioning is fitting the
guidelines.</para></listitem>
<listitem><para>Ensure that the commit name is fitting the
guidelines.</para></listitem>
<listitem><para>Ensure that the meta field contains correct
information.</para>
</listitem>
<listitem>
<para>
Ensure that the package versioning fits the guidelines.
</para>
</listitem>
<listitem>
<para>
Ensure that the commit name fits the guidelines.
</para>
</listitem>
<listitem>
<para>
Ensure that the meta field contains correct information.
</para>
<itemizedlist>
<listitem><para>License must be checked to be fitting upstream
license.</para></listitem>
<listitem><para>Platforms should be set or the package will not get binary
substitutes.</para></listitem>
<listitem><para>A maintainer must be set, this can be the package
submitter or a community member that accepts to take maintainership of
the package.</para></listitem>
<listitem>
<para>
The license must be checked to match the upstream license.
</para>
</listitem>
<listitem>
<para>
Platforms should be set or the package will not get binary substitutes.
</para>
</listitem>
<listitem>
<para>
A maintainer must be set. This can be the package submitter or a
community member that accepts to take maintainership of the package.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the code contains no typos.</para></listitem>
<listitem><para>Ensure the package source.</para>
</listitem>
<listitem>
<para>
Ensure that the code contains no typos.
</para>
</listitem>
<listitem>
<para>
Ensure that the package source is appropriate.
</para>
<itemizedlist>
<listitem><para>Mirrors urls should be used when
available.</para></listitem>
<listitem><para>The most appropriate function should be used (e.g.
packages from GitHub should use
<literal>fetchFromGitHub</literal>).</para></listitem>
<listitem>
<para>
Mirror URLs should be used when available.
</para>
</listitem>
<listitem>
<para>
The most appropriate fetcher function should be used (e.g. packages from
GitHub should use <literal>fetchFromGitHub</literal>; see the sketch after
this list).
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Building the package locally.</para></listitem>
<listitem><para>Running every binary.</para></listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Building the package locally.
</para>
</listitem>
<listitem>
<para>
Running every binary.
</para>
</listitem>
</itemizedlist>
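<para>
As a rough sketch of what the source check looks for (owner, repository and
version are hypothetical), a GitHub-hosted package would typically be
fetched like this:
<programlisting>
src = fetchFromGitHub {
  owner = "someuser";   # hypothetical
  repo = "somepkg";     # hypothetical
  rev = "v1.2.3";
  # placeholder; replace with the real source hash
  sha256 = "0000000000000000000000000000000000000000000000000000";
};
</programlisting>
</para>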
<example><title>Sample template for a new package review</title>
<example xml:id="reviewing-contributions-sample-new-package">
<title>Sample template for a new package review</title>
<screen>
##### Reviewed points
@ -218,58 +341,108 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
##### Comments
</screen></example>
</section>
</screen>
</example>
</section>
<section xml:id="reviewing-contributions-module-updates">
<title>Module updates</title>
<section><title>Module updates</title>
<para>
Module updates are submissions changing modules in some way. These often
contain changes to the options or introduce new options.
</para>
<para>Module updates are submissions changing modules in some ways. These often
contains changes to the options or introduce new options.</para>
<para>
Reviewing process:
</para>
<para>Reviewing process</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem>
<para>
Add labels to the pull request. (Requires commit rights)
</para>
<itemizedlist>
<listitem><para><literal>8.has: module (update)</literal> and any topic
label that fit the module.</para></listitem>
<listitem>
<para>
<literal>8.has: module (update)</literal> and any topic label that fits
the module.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module maintainers are notified.</para>
</listitem>
<listitem>
<para>
Ensure that the module maintainers are notified.
</para>
<itemizedlist>
<listitem><para>Mention-bot notify GitHub users based on the submitted
changes, but it can happen that it miss some of the package
maintainers.</para></listitem>
<listitem>
<para>
<link xlink:href="https://help.github.com/articles/about-codeowners/">CODEOWNERS</link>
will make GitHub notify users based on the submitted changes, but it can
happen that it misses some of the package maintainers.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module tests, if any, are
succeeding.</para></listitem>
<listitem><para>Ensure that the introduced options are correct.</para>
</listitem>
<listitem>
<para>
Ensure that the module tests, if any, are succeeding.
</para>
</listitem>
<listitem>
<para>
Ensure that the introduced options are correct.
</para>
<itemizedlist>
<listitem><para>Type should be appropriate (string related types differs
in their merging capabilities, <literal>optionSet</literal> and
<literal>string</literal> types are deprecated).</para></listitem>
<listitem><para>Description, default and example should be
provided.</para></listitem>
<listitem>
<para>
Type should be appropriate (string-related types differ in their merging
capabilities, and the <literal>optionSet</literal> and
<literal>string</literal> types are deprecated). A sketch of a well-formed
option declaration follows this list.
</para>
</listitem>
<listitem>
<para>
Description, default and example should be provided.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that option changes are backward compatible.</para>
</listitem>
<listitem>
<para>
Ensure that option changes are backward compatible.
</para>
<itemizedlist>
<listitem><para><literal>mkRenamedOptionModule</literal> and
<literal>mkAliasOptionModule</literal> functions provide way to make
option changes backward compatible.</para></listitem>
<listitem>
<para>
The <literal>mkRenamedOptionModule</literal> and
<literal>mkAliasOptionModule</literal> functions provide a way to make
option changes backward compatible (see the sketch after this list).
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that removed options are declared with
<literal>mkRemovedOptionModule</literal></para></listitem>
<listitem><para>Ensure that changes that are not backward compatible are
mentioned in release notes.</para></listitem>
<listitem><para>Ensure that documentations affected by the change is
updated.</para></listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Ensure that removed options are declared with
<literal>mkRemovedOptionModule</literal>.
</para>
</listitem>
<listitem>
<para>
Ensure that changes that are not backward compatible are mentioned in
release notes.
</para>
</listitem>
<listitem>
<para>
Ensure that documentation affected by the change is updated.
</para>
</listitem>
</itemizedlist>
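<para>
As an illustration of what reviewers look for (the option path is
hypothetical, and <literal>mkOption</literal> and <literal>types</literal>
are assumed to come from <varname>lib</varname>), a well-formed option
declaration could look like this:
<programlisting>
options.services.foo.greeting = mkOption {
  type = types.str;
  default = "hello";
  example = "hi there";
  description = "Greeting printed by the hypothetical foo service.";
};
</programlisting>
</para>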
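<para>
A rough sketch of keeping renamed and removed options backward compatible
(the option paths are hypothetical, and the helper functions are assumed to
come from <varname>lib</varname>):
<programlisting>
imports = [
  (mkRenamedOptionModule [ "services" "foo" "enableBar" ] [ "services" "foo" "bar" "enable" ])
  (mkRemovedOptionModule [ "services" "foo" "obsolete" ] "This option no longer has any effect.")
];
</programlisting>
</para>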
<example><title>Sample template for a module update review</title>
<example xml:id="reviewing-contributions-sample-module-update">
<title>Sample template for a module update review</title>
<screen>
##### Reviewed points
@ -286,51 +459,89 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
##### Comments
</screen></example>
</section>
</screen>
</example>
</section>
<section xml:id="reviewing-contributions-new-modules">
<title>New modules</title>
<section><title>New modules</title>
<para>
New module submissions introduce a new module to NixOS.
</para>
<para>New modules submissions introduce a new module to NixOS.</para>
<itemizedlist>
<listitem>
<para>
Add labels to the pull request. (Requires commit rights)
</para>
<itemizedlist>
<listitem>
<para>
<literal>8.has: module (new)</literal> and any topic label that fits the
module.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Ensure that the module tests, if any, are succeeding.
</para>
</listitem>
<listitem>
<para>
Ensure that the introduced options are correct.
</para>
<itemizedlist>
<listitem>
<para>
Type should be appropriate (string-related types differ in their merging
capabilities, and the <literal>optionSet</literal> and
<literal>string</literal> types are deprecated).
</para>
</listitem>
<listitem>
<para>
Description, default and example should be provided.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Ensure that the module <literal>meta</literal> field is present (see the
sketch after this list).
</para>
<itemizedlist>
<listitem>
<para>
Maintainers should be declared in <literal>meta.maintainers</literal>.
</para>
</listitem>
<listitem>
<para>
Module documentation should be declared with
<literal>meta.doc</literal>.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Ensure that the module respects other modules' functionality.
</para>
<itemizedlist>
<listitem>
<para>
For example, enabling a module should not open firewall ports by
default.
</para>
</listitem>
</itemizedlist>
</listitem>
</itemizedlist>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem><para><literal>8.has: module (new)</literal> and any topic label
that fit the module.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module tests, if any, are
succeeding.</para></listitem>
<listitem><para>Ensure that the introduced options are correct.</para>
<itemizedlist>
<listitem><para>Type should be appropriate (string related types differs
in their merging capabilities, <literal>optionSet</literal> and
<literal>string</literal> types are deprecated).</para></listitem>
<listitem><para>Description, default and example should be
provided.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that module <literal>meta</literal> field is
present</para>
<itemizedlist>
<listitem><para>Maintainers should be declared in
<literal>meta.maintainers</literal>.</para></listitem>
<listitem><para>Module documentation should be declared with
<literal>meta.doc</literal>.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module respect other modules
functionality.</para>
<itemizedlist>
<listitem><para>For example, enabling a module should not open firewall
ports by default.</para></listitem>
</itemizedlist>
</listitem>
</itemizedlist>
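<para>
A minimal sketch of such a module <literal>meta</literal> section (the
maintainer name and documentation file are hypothetical):
<programlisting>
meta = {
  maintainers = with lib.maintainers; [ someone ];
  doc = ./foo.xml;
};
</programlisting>
</para>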
<example><title>Sample template for a new module review</title>
<example xml:id="reviewing-contributions-sample-new-module">
<title>Sample template for a new module review</title>
<screen>
##### Reviewed points
@ -348,32 +559,41 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
##### Comments
</screen></example>
</section>
</screen>
</example>
</section>
<section xml:id="reviewing-contributions-other-submissions">
<title>Other submissions</title>
<section><title>Other submissions</title>
<para>
Other types of submissions require different reviewing steps.
</para>
<para>Other type of submissions requires different reviewing steps.</para>
<para>
If you think you have enough knowledge and experience in a topic and would
like to become a long-term reviewer for related submissions, please contact the
current reviewers for that topic. They will give you information about the
reviewing process. The main reviewers for a topic can be hard to find as
there is no list, but checking past pull requests to see who reviewed or
git-blaming the code to see who committed to that topic can give some hints.
</para>
<para>If you consider having enough knowledge and experience in a topic and
would like to be a long-term reviewer for related submissions, please contact
the current reviewers for that topic. They will give you information about the
reviewing process.
The main reviewers for a topic can be hard to find as there is no list, but
checking past pull-requests to see who reviewed or git-blaming the code to see
who committed to that topic can give some hints.</para>
<para>
Container system, boot system and library changes are some examples of the
pull requests fitting this category.
</para>
</section>
<section xml:id="reviewing-contributions--merging-pull-requests">
<title>Merging pull requests</title>
<para>Container system, boot system and library changes are some examples of the
pull requests fitting this category.</para>
<para>
It is possible for community members who have enough knowledge and
experience in a specific topic to contribute by merging pull requests.
</para>
</section>
<section><title>Merging pull-requests</title>
<para>It is possible for community members that have enough knowledge and
experience on a special topic to contribute by merging pull requests.</para>
<para>TODO: add the procedure to request merging rights.</para>
<para>
TODO: add the procedure to request merging rights.
</para>
<!--
The following paragraph about how to deal with inactive contributors is just a
@ -384,10 +604,13 @@ policy.
three months will have their commit rights revoked.</para>
-->
<para>In a case a contributor leaves definitively the Nix community, he should
create an issue or notify the mailing list with references of packages and
modules he maintains so the maintainership can be taken over by other
contributors.</para>
</section>
<para>
In case a contributor leaves the Nix community for good, they should
create an issue or post on
<link
xlink:href="https://discourse.nixos.org">Discourse</link> with
references to the packages and modules they maintain so that maintainership
can be taken over by other contributors.
</para>
</section>
</chapter>


@ -1,20 +0,0 @@
---
title: pkgs.mkShell
author: zimbatm
date: 2017-10-30
---
pkgs.mkShell is a special kind of derivation that is only useful when using
it combined with nix-shell. It will in fact fail to instantiate when invoked
with nix-build.
## Usage
```nix
{ pkgs ? import <nixpkgs> {} }:
pkgs.mkShell {
# this will make all the build inputs from hello and gnutar available to the shell environment
inputsFrom = with pkgs; [ hello gnutar ];
buildInputs = [ pkgs.gnumake ];
}
```

5
doc/shell.nix Normal file

@ -0,0 +1,5 @@
{ pkgs ? import ../. {} }:
(import ./default.nix {}).overrideAttrs (x: {
buildInputs = x.buildInputs ++ [ pkgs.xmloscopy pkgs.ruby ];
})

File diff suppressed because it is too large Load Diff


@ -9,6 +9,7 @@
body
{
font-family: "Nimbus Sans L", sans-serif;
font-size: 1em;
background: white;
margin: 2em 1em 2em 1em;
}
@ -28,9 +29,28 @@ h2 /* chapters, appendices, subtitle */
font-size: 180%;
}
div.book
{
text-align: center;
}
div.book > div
{
/*
* based on https://medium.com/@zkareemz/golden-ratio-62b3b6d4282a
* we do 70 characters per line to fit code listings better
* 70 * (font-size / 1.618)
* expression for emacs:
* (* 70 (/ 1 1.618))
*/
max-width: 43.2em;
text-align: left;
margin: auto;
}
/* Extra space between chapters, appendices. */
div.chapter > div.titlepage h2, div.appendix > div.titlepage h2
{
div.chapter > div.titlepage h2, div.appendix > div.titlepage h2
{
margin-top: 1.5em;
}
@ -102,9 +122,9 @@ pre.screen, pre.programlisting
{
border: 1px solid #b0b0b0;
padding: 3px 3px;
margin-left: 1.5em;
margin-right: 1.5em;
color: #600000;
margin-left: 0.5em;
margin-right: 0.5em;
background: #f4f4f8;
font-family: monospace;
border-radius: 0.4em;
@ -118,7 +138,6 @@ div.example pre.programlisting
margin: 0 0 0 0;
}
/***************************************************************************
Notes, warnings etc:
***************************************************************************/
@ -172,7 +191,7 @@ div.navfooter *
/***************************************************************************
Links colors and highlighting:
Links colors and highlighting:
***************************************************************************/
a { text-decoration: none; }
@ -209,7 +228,7 @@ tt, code
.term
{
font-weight: bold;
}
div.variablelist dd p, div.glosslist dd p
@ -249,7 +268,24 @@ table
box-shadow: 0.4em 0.4em 0.5em #e0e0e0;
}
table.simplelist
{
text-align: left;
color: #005aa0;
border: 0;
padding: 5px;
background: #fffff5;
font-weight: normal;
font-style: italic;
box-shadow: none;
margin-bottom: 1em;
}
div.navheader table, div.navfooter table {
box-shadow: none;
}
div.affiliation
{
font-style: italic;
}
}


@ -1,447 +1,513 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-submitting-changes">
<title>Submitting changes</title>
<section xml:id="submitting-changes-making-patches">
<title>Making patches</title>
<title>Submitting changes</title>
<section>
<title>Making patches</title>
<itemizedlist>
<listitem>
<para>Read <link xlink:href="https://nixos.org/nixpkgs/manual/">Manual (How to write packages for Nix)</link>.</para>
</listitem>
<listitem>
<para>Fork the repository on GitHub.</para>
</listitem>
<listitem>
<para>Create a branch for your future fix.
<itemizedlist>
<listitem>
<para>You can make branch from a commit of your local <command>nixos-version</command>. That will help you to avoid additional local compilations. Because you will receive packages from binary cache.
<itemizedlist>
<listitem>
<para>For example: <command>nixos-version</command> returns <command>15.05.git.0998212 (Dingo)</command>. So you can do:</para>
</listitem>
</itemizedlist>
<itemizedlist>
<listitem>
<para>
Read <link xlink:href="https://nixos.org/nixpkgs/manual/">Manual (How to
write packages for Nix)</link>.
</para>
</listitem>
<listitem>
<para>
Fork the repository on GitHub.
</para>
</listitem>
<listitem>
<para>
Create a branch for your future fix.
<itemizedlist>
<listitem>
<para>
You can make a branch from a commit of your local
<command>nixos-version</command>. That will help you avoid additional
local compilation, because you will receive packages from the binary
cache.
<itemizedlist>
<listitem>
<para>
For example: <command>nixos-version</command> returns
<command>15.05.git.0998212 (Dingo)</command>. So you can do:
</para>
</listitem>
</itemizedlist>
<screen>
$ git checkout 0998212
$ git checkout -b 'fix/pkg-name-update'
</screen>
</para>
</listitem>
<listitem>
<para>Please avoid working directly on the <command>master</command> branch.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>Make commits of logical units.
<itemizedlist>
<listitem>
<para>If you removed pkgs, made some major NixOS changes etc., write about them in <command>nixos/doc/manual/release-notes/rl-unstable.xml</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>Check for unnecessary whitespace with <command>git diff --check</command> before committing.</para>
</listitem>
<listitem>
<para>Format the commit in a following way:</para>
</para>
</listitem>
<listitem>
<para>
Please avoid working directly on the <command>master</command> branch.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
Make commits of logical units.
<itemizedlist>
<listitem>
<para>
If you removed pkgs, made some major NixOS changes etc., write about
them in
<command>nixos/doc/manual/release-notes/rl-unstable.xml</command>.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
Check for unnecessary whitespace with <command>git diff --check</command>
before committing.
</para>
</listitem>
<listitem>
<para>
Format the commit message in the following way:
</para>
<programlisting>
(pkg-name | nixos/&lt;module>): (from -> to | init at version | refactor | etc)
Additional information.
</programlisting>
<itemizedlist>
<listitem>
<para>
Examples:
<itemizedlist>
<listitem>
<para>
<command>nginx: init at 2.0.1</command>
</para>
</listitem>
<listitem>
<para>
<command>firefox: 54.0.1 -> 55.0</command>
</para>
</listitem>
<listitem>
<para>
<command>nixos/hydra: add bazBaz option</command>
</para>
</listitem>
<listitem>
<para>
<command>nixos/nginx: refactor config generation</command>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Test your changes. If you work with
<itemizedlist>
<listitem>
<para>
nixpkgs:
<itemizedlist>
<listitem>
<para>
update pkg ->
<itemizedlist>
<listitem>
<para>
<command>nix-env -i pkg-name -f &lt;path to your local nixpkgs
folder&gt;</command>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
add pkg ->
<itemizedlist>
<listitem>
<para>
Make sure it's in
<command>pkgs/top-level/all-packages.nix</command>
</para>
</listitem>
<listitem>
<para>
<command>nix-env -i pkg-name -f &lt;path to your local nixpkgs
folder&gt;</command>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
<emphasis>If you don't want to install pkg in you
profile</emphasis>.
<itemizedlist>
<listitem>
<para>
<command>nix-build -A pkg-attribute-name &lt;path to your local
nixpkgs folder&gt;/default.nix</command> and check the results in the
<command>result</command> folder. It will appear in the same directory
where you ran <command>nix-build</command> (see the sketch after this
list).
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
If you did <command>nix-env -i pkg-name</command> you can do
<command>nix-env -e pkg-name</command> to uninstall it from your
system.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
NixOS and its modules:
<itemizedlist>
<listitem>
<para>
You can add new module to your NixOS configuration file (usually
it's <command>/etc/nixos/configuration.nix</command>). And do
<command>sudo nixos-rebuild test -I nixpkgs=&lt;path to your local
nixpkgs folder&gt; --fast</command>.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
If you have commits like <command>pkg-name: oh, forgot to insert
whitespace</command>, squash them with <command>git rebase
-i</command>.
</para>
</listitem>
<listitem>
<para>
Rebase your branch against the current <command>master</command>.
</para>
</listitem>
</itemizedlist>
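<para>
A minimal sketch of testing a changed package without installing it (the
attribute, clone path and binary name are hypothetical):
<screen>
$ nix-build -A libfoo ~/nixpkgs/default.nix
$ ./result/bin/foo --version</screen>
</para>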
</section>
<section xml:id="submitting-changes-submitting-changes">
<title>Submitting changes</title>
<itemizedlist>
<listitem>
<para>Examples:
<itemizedlist>
<listitem>
<para>
<command>nginx: init at 2.0.1</command>
</para>
</listitem>
<listitem>
<para>
<command>firefox: 54.0.1 -> 55.0</command>
</para>
</listitem>
<listitem>
<para>
<command>nixos/hydra: add bazBaz option</command>
</para>
</listitem>
<listitem>
<para>
<command>nixos/nginx: refactor config generation</command>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>Test your changes. If you work with
<itemizedlist>
<listitem>
<para>nixpkgs:
<itemizedlist>
<listitem>
<para>update pkg ->
<itemizedlist>
<listitem>
<para>
<command>nix-env -i pkg-name -f &lt;path to your local nixpkgs folder&gt;</command>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>add pkg ->
<itemizedlist>
<listitem>
<para>Make sure it's in <command>pkgs/top-level/all-packages.nix</command>
</para>
</listitem>
<listitem>
<para>
<command>nix-env -i pkg-name -f &lt;path to your local nixpkgs folder&gt;</command>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
<emphasis>If you don't want to install pkg in you profile</emphasis>.
<itemizedlist>
<listitem>
<para>
<command>nix-build -A pkg-attribute-name &lt;path to your local nixpkgs folder&gt;/default.nix</command> and check results in the folder <command>result</command>. It will appear in the same directory where you did <command>nix-build</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>If you did <command>nix-env -i pkg-name</command> you can do <command>nix-env -e pkg-name</command> to uninstall it from your system.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>NixOS and its modules:
<itemizedlist>
<listitem>
<para>You can add new module to your NixOS configuration file (usually it's <command>/etc/nixos/configuration.nix</command>).
And do <command>sudo nixos-rebuild test -I nixpkgs=&lt;path to your local nixpkgs folder&gt; --fast</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>If you have commits <command>pkg-name: oh, forgot to insert whitespace</command>: squash commits in this case. Use <command>git rebase -i</command>.</para>
</listitem>
<listitem>
<para>Rebase you branch against current <command>master</command>.</para>
</listitem>
</itemizedlist>
</section>
<section>
<title>Submitting changes</title>
<itemizedlist>
<listitem>
<para>Push your changes to your fork of nixpkgs.</para>
</listitem>
<listitem>
<para>Create pull request:
<itemizedlist>
<listitem>
<para>Write the title in format <command>(pkg-name | nixos/&lt;module>): improvement</command>.
<itemizedlist>
<listitem>
<para>If you update the pkg, write versions <command>from -> to</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>Write in comment if you have tested your patch. Do not rely much on <command>TravisCI</command>.</para>
</listitem>
<listitem>
<para>If you make an improvement, write about your motivation.</para>
</listitem>
<listitem>
<para>Notify maintainers of the package. For example add to the message: <command>cc @jagajaga @domenkozar</command>.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</section>
<section>
<itemizedlist>
<listitem>
<para>
Push your changes to your fork of nixpkgs.
</para>
</listitem>
<listitem>
<para>
Create pull request:
<itemizedlist>
<listitem>
<para>
Write the title in format <command>(pkg-name | nixos/&lt;module>):
improvement</command>.
<itemizedlist>
<listitem>
<para>
If you update the pkg, write versions <command>from -> to</command>.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
Note in a comment whether you have tested your patch. Do not rely too much
on <command>TravisCI</command>.
</para>
</listitem>
<listitem>
<para>
If you make an improvement, write about your motivation.
</para>
</listitem>
<listitem>
<para>
Notify maintainers of the package. For example add to the message:
<command>cc @jagajaga @domenkozar</command>.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="submitting-changes-pull-request-template">
<title>Pull Request Template</title>
<para>
The pull request template helps determine what steps have been made for a
contribution so far, and will help guide maintainers on the status of a
change. The motivation section of the PR should include any extra details
the title does not address and link any existing issues related to the pull
request.
The pull request template helps determine what steps have been made for a
contribution so far, and will help guide maintainers on the status of a
change. The motivation section of the PR should include any extra details
the title does not address and link any existing issues related to the pull
request.
</para>
<para>When a PR is created, it will be pre-populated with some checkboxes detailed below:
<para>
When a PR is created, it will be pre-populated with some checkboxes detailed
below:
</para>
<section>
<title>Tested using sandboxing</title>
<para>
When sandbox builds are enabled, Nix will setup an isolated environment
for each build process. It is used to remove further hidden dependencies
set by the build environment to improve reproducibility. This includes
access to the network during the build outside of
<function>fetch*</function> functions and files outside the Nix store.
Depending on the operating system access to other resources are blocked
as well (ex. inter process communication is isolated on Linux); see <link
<section xml:id="submitting-changes-tested-with-sandbox">
<title>Tested using sandboxing</title>
<para>
When sandbox builds are enabled, Nix will setup an isolated environment for
each build process. It is used to remove further hidden dependencies set by
the build environment to improve reproducibility. This includes access to
the network during the build outside of <function>fetch*</function>
functions and files outside the Nix store. Depending on the operating
system access to other resources are blocked as well (ex. inter process
communication is isolated on Linux); see
<link
xlink:href="https://nixos.org/nix/manual/#description-45">build-use-sandbox</link>
in Nix manual for details.
</para>
<para>
Sandboxing is not enabled by default in Nix due to a small performance
hit on each build. In pull requests for <link
xlink:href="https://github.com/NixOS/nixpkgs/">nixpkgs</link> people
are asked to test builds with sandboxing enabled (see <literal>Tested
using sandboxing</literal> in the pull request template) because
in<link
in Nix manual for details.
</para>
<para>
Sandboxing is not enabled by default in Nix due to a small performance hit
on each build. In pull requests for
<link
xlink:href="https://github.com/NixOS/nixpkgs/">nixpkgs</link>
people are asked to test builds with sandboxing enabled (see
<literal>Tested using sandboxing</literal> in the pull request template)
because in
<link
xlink:href="https://nixos.org/hydra/">https://nixos.org/hydra/</link>
sandboxing is also used.
</para>
<para>
Depending if you use NixOS or other platforms you can use one of the
following methods to enable sandboxing <emphasis role="bold">before</emphasis> building the package:
<itemizedlist>
<listitem>
<para>
<emphasis role="bold">Globally enable sandboxing on NixOS</emphasis>:
add the following to
<filename>configuration.nix</filename>
<screen>nix.useSandbox = true;</screen>
</para>
</listitem>
<listitem>
<para>
<emphasis role="bold">Globally enable sandboxing on non-NixOS platforms</emphasis>:
add the following to: <filename>/etc/nix/nix.conf</filename>
<screen>build-use-sandbox = true</screen>
</para>
</listitem>
</itemizedlist>
</para>
sandboxing is also used.
</para>
<para>
Depending on whether you use NixOS or another platform, you can use one of
the following methods to enable sandboxing
<emphasis role="bold">before</emphasis> building the package:
<itemizedlist>
<listitem>
<para>
<emphasis role="bold">Globally enable sandboxing on NixOS</emphasis>:
add the following to <filename>configuration.nix</filename>
<screen>nix.useSandbox = true;</screen>
</para>
</listitem>
<listitem>
<para>
<emphasis role="bold">Globally enable sandboxing on non-NixOS
platforms</emphasis>: add the following to:
<filename>/etc/nix/nix.conf</filename>
<screen>build-use-sandbox = true</screen>
</para>
</listitem>
</itemizedlist>
</para>
</section>
<section>
<title>Built on platform(s)</title>
<para>
Many Nix packages are designed to run on multiple
platforms. As such, it's important to let the maintainer know which
platforms your changes have been tested on. It's not always practical to
test a change on all platforms, and is not required for a pull request to
be merged. Only check the systems you tested the build on in this
section.
</para>
<section xml:id="submitting-changes-platform-diversity">
<title>Built on platform(s)</title>
<para>
Many Nix packages are designed to run on multiple platforms. As such, it's
important to let the maintainer know which platforms your changes have been
tested on. It's not always practical to test a change on all platforms, and
is not required for a pull request to be merged. Only check the systems you
tested the build on in this section.
</para>
</section>
<section>
<title>Tested via one or more NixOS test(s) if existing and applicable for the change (look inside nixos/tests)</title>
<para>
Packages with automated tests are much more likely to be merged in a
timely fashion because it doesn't require as much manual testing by the
maintainer to verify the functionality of the package. If there are
existing tests for the package, they should be run to verify your changes
do not break the tests. Tests only apply to packages with NixOS modules
defined and can only be run on Linux. For more details on writing and
running tests, see the <link
<section xml:id="submitting-changes-nixos-tests">
<title>Tested via one or more NixOS test(s) if existing and applicable for the change (look inside nixos/tests)</title>
<para>
Packages with automated tests are much more likely to be merged in a timely
fashion because it doesn't require as much manual testing by the maintainer
to verify the functionality of the package. If there are existing tests for
the package, they should be run to verify your changes do not break the
tests. Tests only apply to packages with NixOS modules defined and can only
be run on Linux. For more details on writing and running tests, see the
<link
xlink:href="https://nixos.org/nixos/manual/index.html#sec-nixos-tests">section
in the NixOS manual</link>.
</para>
in the NixOS manual</link>.
</para>
</section>
<section>
<title>Tested compilation of all pkgs that depend on this change using <command>nox-review</command></title>
<para>
If you are updating a package's version, you can use nox to make sure all
packages that depend on the updated package still compile correctly. This
can be done using the nox utility. The <command>nox-review</command>
utility can look for and build all dependencies either based on
uncommited changes with the <literal>wip</literal> option or specifying a
github pull request number.
</para>
<para>
review uncommitted changes:
<screen>nix-shell -p nox --run nox-review wip</screen>
</para>
<para>
review changes from pull request number 12345:
<screen>nix-shell -p nox --run nox-review pr 12345</screen>
</para>
<section xml:id="submitting-changes-tested-compilation">
<title>Tested compilation of all pkgs that depend on this change using <command>nox-review</command></title>
<para>
If you are updating a package's version, you can use nox to make sure all
packages that depend on the updated package still compile correctly. The
<command>nox-review</command> utility can look for and build all
dependencies based either on uncommitted changes with the
<literal>wip</literal> option or on a specified GitHub pull request number.
</para>
<para>
review uncommitted changes:
<screen>nix-shell -p nox --run "nox-review wip"</screen>
</para>
<para>
review changes from pull request number 12345:
<screen>nix-shell -p nox --run "nox-review pr 12345"</screen>
</para>
</section>
<section>
<title>Tested execution of all binary files (usually in <filename>./result/bin/</filename>)</title>
<para>
It's important to test any executables generated by a build when you
change or create a package in nixpkgs. This can be done by looking in
<filename>./result/bin</filename> and running any files in there, or at a
minimum, the main executable for the package. For example, if you make a change
to <package>texlive</package>, you probably would only check the binaries
associated with the change you made rather than testing all of them.
</para>
<section xml:id="submitting-changes-tested-execution">
<title>Tested execution of all binary files (usually in <filename>./result/bin/</filename>)</title>
<para>
It's important to test any executables generated by a build when you change
or create a package in nixpkgs. This can be done by looking in
<filename>./result/bin</filename> and running any files in there, or at a
minimum, the main executable for the package. For example, if you make a
change to <package>texlive</package>, you probably would only check the
binaries associated with the change you made rather than testing all of
them.
</para>
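<para>
For instance (the binary name is hypothetical), a quick check could be:
<screen>
$ ls ./result/bin/
$ ./result/bin/foo --help</screen>
</para>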
</section>
<section>
<title>Meets nixpkgs contribution standards</title>
<para>
The last checkbox is fits <link
<section xml:id="submitting-changes-contribution-standards">
<title>Meets Nixpkgs contribution standards</title>
<para>
The last checkbox is about whether the change fits
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md">CONTRIBUTING.md</link>.
The contributing document has detailed information on standards the Nix
community has for commit messages, reviews, licensing of contributions
you make to the project, etc... Everyone should read and understand the
standards the community has for contributing before submitting a pull
request.
</para>
The contributing document has detailed information on standards the Nix
community has for commit messages, reviews, licensing of contributions you
make to the project, etc... Everyone should read and understand the
standards the community has for contributing before submitting a pull
request.
</para>
</section>
</section>
<section>
<title>Hotfixing pull requests</title>
<itemizedlist>
<listitem>
<para>Make the appropriate changes in you branch.</para>
</listitem>
<listitem>
<para>Don't create additional commits, do
<itemizedlist>
<listitem>
<para><command>git rebase -i</command></para>
</listitem>
<listitem>
<para>
<command>git push --force</command> to your branch.</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</section>
<section>
<title>Commit policy</title>
<itemizedlist>
<listitem>
<para>Commits must be sufficiently tested before being merged, both for the master and staging branches.</para>
</listitem>
<listitem>
<para>Hydra builds for master and staging should not be used as testing platform, it's a build farm for changes that have been already tested.</para>
</listitem>
<listitem>
<para>When changing the bootloader installation process, extra care must be taken. Grub installations cannot be rolled back, hence changes may break people's installations forever. For any non-trivial change to the bootloader please file a PR asking for review, especially from @edolstra.</para>
</listitem>
</itemizedlist>
<section>
<title>Master branch</title>
</section>
<section xml:id="submitting-changes-hotfixing-pull-requests">
<title>Hotfixing pull requests</title>
<itemizedlist>
<listitem>
<para>
It should only see non-breaking commits that do not cause mass rebuilds.
</para>
</listitem>
<listitem>
<para>
Make the appropriate changes in your branch.
</para>
</listitem>
<listitem>
<para>
Don't create additional commits; instead, do
<itemizedlist>
<listitem>
<para>
<command>git rebase -i</command>
</para>
</listitem>
<listitem>
<para>
<command>git push --force</command> to your branch (see the sketch after
this list).
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
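<para>
A rough sketch of the usual sequence (the branch name is hypothetical):
<screen>
$ git rebase -i master
$ git push --force origin fix/pkg-name-update</screen>
</para>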
</section>
<section>
<title>Staging branch</title>
</section>
<section xml:id="submitting-changes-commit-policy">
<title>Commit policy</title>
<itemizedlist>
<listitem>
<para>
It's only for non-breaking mass-rebuild commits. That means it's not to
be used for testing, and changes must have been well tested already.
<link xlink:href="http://comments.gmane.org/gmane.linux.distributions.nixos/13447">Read policy here</link>.
</para>
</listitem>
<listitem>
<para>
If the branch is already in a broken state, please refrain from adding
extra new breakages. Stabilize it for a few days, merge into master,
then resume development on staging.
<link xlink:href="http://hydra.nixos.org/jobset/nixpkgs/staging#tabs-evaluations">Keep an eye on the staging evaluations here</link>.
If any fixes for staging happen to be already in master, then master can
be merged into staging.
</para>
</listitem>
<listitem>
<para>
Commits must be sufficiently tested before being merged, both for the
master and staging branches.
</para>
</listitem>
<listitem>
<para>
Hydra builds for master and staging should not be used as a testing
platform; it's a build farm for changes that have already been tested.
</para>
</listitem>
<listitem>
<para>
When changing the bootloader installation process, extra care must be
taken. Grub installations cannot be rolled back, hence changes may break
people's installations forever. For any non-trivial change to the
bootloader please file a PR asking for review, especially from @edolstra.
</para>
</listitem>
</itemizedlist>
</section>
<section>
<title>Stable release branches</title>
<section xml:id="submitting-changes-master-branch">
<title>Master branch</title>
<itemizedlist>
<itemizedlist>
<listitem>
<para>
If you're cherry-picking a commit to a stable release branch, always use
<command>git cherry-pick -xe</command> and ensure the message contains a
clear description about why this needs to be included in the stable
branch.
</para>
<para>An example of a cherry-picked commit would look like this:</para>
<screen>
<para>
It should only see non-breaking commits that do not cause mass rebuilds.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="submitting-changes-staging-branch">
<title>Staging branch</title>
<itemizedlist>
<listitem>
<para>
It's only for non-breaking mass-rebuild commits. That means it's not to
be used for testing, and changes must have been well tested already.
<link xlink:href="https://web.archive.org/web/20160528180406/http://comments.gmane.org/gmane.linux.distributions.nixos/13447">Read
policy here</link>.
</para>
</listitem>
<listitem>
<para>
If the branch is already in a broken state, please refrain from adding
extra new breakages. Stabilize it for a few days, merge into master, then
resume development on staging.
<link xlink:href="http://hydra.nixos.org/jobset/nixpkgs/staging#tabs-evaluations">Keep
an eye on the staging evaluations here</link>. If any fixes for staging
happen to be already in master, then master can be merged into staging.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="submitting-changes-stable-release-branches">
<title>Stable release branches</title>
<itemizedlist>
<listitem>
<para>
If you're cherry-picking a commit to a stable release branch, always use
<command>git cherry-pick -xe</command> and ensure the message contains a
clear description about why this needs to be included in the stable
branch.
</para>
<para>
An example of a cherry-picked commit would look like this:
</para>
<screen>
nixos: Refactor the world.
The original commit message describing the reason why the world was torn apart.
@ -451,9 +517,7 @@ Reason: I just had a gut feeling that this would also be wanted by people from
the stone age.
</screen>
</listitem>
</itemizedlist>
</section>
</section>
</itemizedlist>
</section>
</section>
</chapter>

44
lib/asserts.nix Normal file
View File

@ -0,0 +1,44 @@
{ lib }:
rec {
/* Print a trace message if pred is false.
Intended to be used to augment asserts with helpful error messages.
Example:
assertMsg false "nope"
=> false
stderr> trace: nope
assert (assertMsg ("foo" == "bar") "foo is not bar, silly"); ""
stderr> trace: foo is not bar, silly
stderr> assert failed at
Type:
assertMsg :: Bool -> String -> Bool
*/
# TODO(Profpatsch): add tests that check stderr
assertMsg = pred: msg:
if pred
then true
else builtins.trace msg false;
/* Specialized `assertMsg` for checking if val is one of the elements
of a list. Useful for checking enums.
Example:
let sslLibrary = "libressl"
in assertOneOf "sslLibrary" sslLibrary [ "openssl" "bearssl" ]
=> false
stderr> trace: sslLibrary must be one of "openssl", "bearssl", but is: "libressl"
Type:
assertOneOf :: String -> ComparableVal -> List ComparableVal -> Bool
*/
assertOneOf = name: val: xs: assertMsg
(lib.elem val xs)
"${name} must be one of ${
lib.generators.toPretty {} xs}, but is: ${
lib.generators.toPretty {} val}";
}
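The two assertion helpers above return true on success, so they compose with the ordinary assert keyword (they are also re-exported on the top-level lib set further below). A minimal usage sketch, assuming a hypothetical sslLibrary argument in a package expression:

{ lib, sslLibrary ? "openssl" }:

# assertOneOf evaluates to true when the value is allowed, and prints a
# descriptive trace before the assert fails otherwise
assert lib.assertOneOf "sslLibrary" sslLibrary [ "openssl" "libressl" ];

"configured against ${sslLibrary}"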

View File

@ -3,9 +3,9 @@
let
inherit (builtins) head tail length;
inherit (lib.trivial) and or;
inherit (lib.trivial) and;
inherit (lib.strings) concatStringsSep;
inherit (lib.lists) fold concatMap concatLists all deepSeqList;
inherit (lib.lists) fold concatMap concatLists;
in
rec {
@ -94,6 +94,15 @@ rec {
attrValues = builtins.attrValues or (attrs: attrVals (attrNames attrs) attrs);
/* Given a set of attribute names, return the set of the corresponding
attributes from the given set.
Example:
getAttrs [ "a" "b" ] { a = 1; b = 2; c = 3; }
=> { a = 1; b = 2; }
*/
getAttrs = names: attrs: genAttrs names (name: attrs.${name});
/* Collect each attribute named `attr' from a list of attribute
sets. Sets that don't contain the named attribute are ignored.
@ -145,7 +154,7 @@ rec {
foldAttrs = op: nul: list_of_attrs:
fold (n: a:
fold (name: o:
o // (listToAttrs [{inherit name; value = op n.${name} (a.${name} or nul); }])
o // { ${name} = op n.${name} (a.${name} or nul); }
) a (attrNames n)
) {} list_of_attrs;
@ -195,8 +204,9 @@ rec {
{ x = "foo"; y = "bar"; }
=> { x = "x-foo"; y = "y-bar"; }
*/
mapAttrs = f: set:
listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set));
mapAttrs = builtins.mapAttrs or
(f: set:
listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set)));
/* Like `mapAttrs', but allows the name of each attribute to be
@ -383,11 +393,12 @@ rec {
recursiveUpdateUntil = pred: lhs: rhs:
let f = attrPath:
zipAttrsWith (n: values:
let here = attrPath ++ [n]; in
if tail values == []
|| pred attrPath (head (tail values)) (head values) then
|| pred here (head (tail values)) (head values) then
head values
else
f (attrPath ++ [n]) values
f here values
);
in f [] [rhs lhs];
@ -433,12 +444,15 @@ rec {
useful for deep-overriding.
Example:
x = { a = { b = 4; c = 3; }; }
overrideExisting x { a = { b = 6; d = 2; }; }
=> { a = { b = 6; d = 2; }; }
overrideExisting {} { a = 1; }
=> {}
overrideExisting { b = 2; } { a = 1; }
=> { b = 2; }
overrideExisting { a = 3; b = 2; } { a = 1; }
=> { a = 1; b = 2; }
*/
overrideExisting = old: new:
old // listToAttrs (map (attr: nameValuePair attr (attrByPath [attr] old.${attr} new)) (attrNames old));
mapAttrs (name: value: new.${name} or value) old;
/* Get a package output.
If no output is found, fall back to `.out` and then to the default.

View File

@ -1,113 +0,0 @@
{lib, pkgs}:
let inherit (lib) nv nvs; in
{
# composableDerivation basically mixes these features:
# - fix function
# - mergeAttrBy
# - provides shortcuts for "options" such as "--enable-foo" and adding
# buildInputs, see php example
#
# It predates styles which are common today, such as
# * the config attr
# * mkDerivation.override feature
# * overrideDerivation (lib/customization.nix)
#
# Some of the more important usage examples (which could be rewritten if it were important):
# * php
# * postgis
# * vim_configurable
#
# A minimal example illustrating most features would look like this:
# let base = composableDerivation { (fixed: let inherit (fixed.fixed) name in {
# src = fetchurl {
# }
# buildInputs = [A];
# preConfigure = "echo ${name}";
# # attention, "name" attr is missing, thus you cannot instantiate "base".
# }
# in {
# # These all add name attribute, thus you can instantiate those:
# v1 = base.merge ({ name = "foo-add-B"; buildInputs = [B]; }); // B gets merged into buildInputs
# v2 = base.merge ({ name = "mix-in-pre-configure-lines"; preConfigure = ""; });
# v3 = base.replace ({ name = "foo-no-A-only-B"; buildInputs = [B]; });
# }
#
# So yes, you can think about it being something like nixos modules, and
# you'd be merging "features" in one at a time using .merge or .replace
# Thanks Shea for telling me that I rethink the documentation ..
#
# issues:
# * it's complicated to understand
# * some "features" such as exact merge behaviour are buried in mergeAttrBy
# and defaultOverridableDelayableArgs assuming the default behaviour does
# the right thing in the common case
# * Eelco once said using such fix style functions are slow to evaluate
# * Too quick & dirty. Hard to understand for others. The benefit was that
# you were able to create a kernel builder like base derivation and replace
# / add patches the way you want without having to declare function arguments
#
# nice features:
# declaring "optional features" is modular. For instance:
# flags.curl = {
# configureFlags = ["--with-curl=${curl.dev}" "--with-curlwrappers"];
# buildInputs = [curl openssl];
# };
# flags.other = { .. }
# (Example taken from PHP)
#
# alternative styles / related features:
# * Eg see function supporting building the kernel
# * versionedDerivation (discussion about this is still going on - or ended)
# * composedArgsAndFun
# * mkDerivation.override
# * overrideDerivation
# * using { .., *Support ? false }: like configurable options.
# To find those examples use grep
#
# To sum up: It exists for historical reasons - and for most commonly used
# tasks the alternatives should be used
#
# If you have questions about this code ping Marc Weber.
composableDerivation = {
mkDerivation ? pkgs.stdenv.mkDerivation,
# list of functions to be applied before defaultOverridableDelayableArgs removes removeAttrs names
# prepareDerivationArgs handles derivation configurations
applyPreTidy ? [ lib.prepareDerivationArgs ],
# consider adding additional elements by derivation.merge { removeAttrs = ["elem"]; };
removeAttrs ? ["cfg" "flags"]
}: (lib.defaultOverridableDelayableArgs ( a: mkDerivation a)
{
inherit applyPreTidy removeAttrs;
}).merge;
# some utility functions
# use this function to generate flag attrs for prepareDerivationArgs
# E nable D isable F eature
edf = {name, feat ? name, enable ? {}, disable ? {} , value ? ""}:
nvs name {
set = {
configureFlags = ["--enable-${feat}${if value == "" then "" else "="}${value}"];
} // enable;
unset = {
configureFlags = ["--disable-${feat}"];
} // disable;
};
# same for --with and --without-
# W ith or W ithout F eature
wwf = {name, feat ? name, enable ? {}, disable ? {}, value ? ""}:
nvs name {
set = enable // {
configureFlags = ["--with-${feat}${if value == "" then "" else "="}${value}"]
++ lib.maybeAttr "configureFlags" [] enable;
};
unset = disable // {
configureFlags = ["--without-${feat}"]
++ lib.maybeAttr "configureFlags" [] disable;
};
};
}

View File

@ -1,9 +1,4 @@
{ lib }:
let
inherit (builtins) attrNames;
in
rec {
@ -155,12 +150,6 @@ rec {
outPath = assert condition; drv.outPath;
};
/* Add attributes to each output of a derivation without changing
the derivation itself. */
addPassthru =
lib.warn "`addPassthru drv passthru` is deprecated, replace with `extendDerivation true passthru drv`"
(drv: passthru: extendDerivation true passthru drv);
/* Strip a derivation of all non-essential attributes, returning
only those needed by hydra-eval-jobs. Also strictly evaluate the
result to ensure that there are no thunks kept alive to prevent
@ -196,7 +185,7 @@ rec {
/* Make a set of packages with a common scope. All packages called
with the provided `callPackage' will be evaluated with the same
arguments. Any package in the set may depend on any other. The
`overrideScope' function allows subsequent modification of the package
`overrideScope'` function allows subsequent modification of the package
set in a consistent way, i.e. all packages in the set will be
called with the overridden packages. The package sets may be
hierarchical: the packages in the set are called with the scope
@ -206,9 +195,10 @@ rec {
let self = f self // {
newScope = scope: newScope (self // scope);
callPackage = self.newScope {};
overrideScope = g:
makeScope newScope
(self_: let super = f self_; in super // g super self_);
overrideScope = g: lib.warn
"`overrideScope` (from `lib.makeScope`) is deprecated. Do `overrideScope' (self: super: { })` instead of `overrideScope (super: self: { })`. All other overrides have the parameters in that order, including other definitions of `overrideScope`. This was the only definition violating the pattern."
(makeScope newScope (lib.fixedPoints.extends (lib.flip g) f));
overrideScope' = g: makeScope newScope (lib.fixedPoints.extends g f);
packages = f;
};
in self;
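A minimal sketch of the new parameter order, assuming a scope built with lib.makeScope and an existing package set providing newScope; the attribute names are purely illustrative:

let
  myScope = lib.makeScope pkgs.newScope (self: {
    version = "1.0";
    banner  = "tool-${self.version}";
  });
in (myScope.overrideScope' (self: super: { version = "2.0"; })).banner
# => "tool-2.0"; the deprecated overrideScope expects the same override
#    written as (super: self: ...) instead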

View File

@ -1,34 +1,98 @@
/* Collection of functions useful for debugging
broken nix expressions.
* `trace`-like functions take two values, print
the first to stderr and return the second.
* `traceVal`-like functions take one argument
which is both printed and returned.
* `traceSeq`-like functions fully evaluate their
traced value before printing (not just to weak
head normal form like trace does by default).
* Functions that end in `-Fn` take an additional
function as their first argument, which is applied
to the traced value before it is printed.
*/
{ lib }:
let
inherit (builtins) trace attrNamesToStr isAttrs isList isInt
isString isBool head substring attrNames;
inherit (lib) all id mapAttrsFlatten elem isFunction;
inherit (builtins) trace isAttrs isList isInt
head substring attrNames;
inherit (lib) id elem isFunction;
in
rec {
inherit (builtins) addErrorContext;
# -- TRACING --
addErrorContextToAttrs = lib.mapAttrs (a: v: lib.addErrorContext "while evaluating ${a}" v);
/* Conditionally trace the supplied message, based on a predicate.
traceIf = p: msg: x: if p then trace msg x else x;
Type: traceIf :: bool -> string -> a -> a
traceVal = x: trace x x;
traceXMLVal = x: trace (builtins.toXML x) x;
traceXMLValMarked = str: x: trace (str + builtins.toXML x) x;
Example:
traceIf true "hello" 3
trace: hello
=> 3
*/
traceIf =
# Predicate to check
pred:
# Message that should be traced
msg:
# Value to return
x: if pred then trace msg x else x;
# strict trace functions (traced structure is fully evaluated and printed)
/* Trace the supplied value after applying a function to it, and
return the original value.
/* `builtins.trace`, but the value is `builtins.deepSeq`ed first. */
traceSeq = x: y: trace (builtins.deepSeq x x) y;
Type: traceValFn :: (a -> b) -> a -> a
/* Like `traceSeq`, but only down to depth n.
* This is very useful because lots of `traceSeq` usages
* lead to an infinite recursion.
Example:
traceValFn (v: "mystring ${v}") "foo"
trace: mystring foo
=> "foo"
*/
traceValFn =
# Function to apply
f:
# Value to trace and return
x: trace (f x) x;
/* Trace the supplied value and return it.
Type: traceVal :: a -> a
Example:
traceVal 42
# trace: 42
=> 42
*/
traceVal = traceValFn id;
/* `builtins.trace`, but the value is `builtins.deepSeq`ed first.
Type: traceSeq :: a -> b -> b
Example:
trace { a.b.c = 3; } null
trace: { a = <CODE>; }
=> null
traceSeq { a.b.c = 3; } null
trace: { a = { b = { c = 3; }; }; }
=> null
*/
traceSeq =
# The value to trace
x:
# The value to return
y: trace (builtins.deepSeq x x) y;
/* Like `traceSeq`, but only evaluate down to depth n.
This is very useful because lots of `traceSeq` usages
lead to an infinite recursion.
Example:
traceSeqN 2 { a.b.c = 3; } null
trace: { a = { b = {}; }; }
=> null
*/
traceSeqN = depth: x: y: with lib;
let snip = v: if isList v then noQuotes "[]" v
@ -43,50 +107,49 @@ rec {
in trace (generators.toPretty { allowPrettyValues = true; }
(modify depth snip x)) y;
/* `traceSeq`, but the same value is traced and returned */
traceValSeq = v: traceVal (builtins.deepSeq v v);
/* `traceValSeq` but with fixed depth */
traceValSeqN = depth: v: traceSeqN depth v v;
# this can help debug your code as well - designed to not produce thousands of lines
traceShowVal = x: trace (showVal x) x;
traceShowValMarked = str: x: trace (str + showVal x) x;
attrNamesToStr = a: lib.concatStringsSep "; " (map (x: "${x}=") (attrNames a));
showVal = x:
if isAttrs x then
if x ? outPath then "x is a derivation, name ${if x ? name then x.name else "<no name>"}, { ${attrNamesToStr x} }"
else "x is attr set { ${attrNamesToStr x} }"
else if isFunction x then "x is a function"
else if x == [] then "x is an empty list"
else if isList x then "x is a list, first element is: ${showVal (head x)}"
else if x == true then "x is boolean true"
else if x == false then "x is boolean false"
else if x == null then "x is null"
else if isInt x then "x is an integer `${toString x}'"
else if isString x then "x is a string `${substring 0 50 x}...'"
else "x is probably a path `${substring 0 50 (toString x)}...'";
# trace the arguments passed to function and its result
# maybe rewrite these functions in a traceCallXml like style. Then one function is enough
traceCall = n: f: a: let t = n2: x: traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a));
traceCall2 = n: f: a: b: let t = n2: x: traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b));
traceCall3 = n: f: a: b: c: let t = n2: x: traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b) (t "arg 3" c));
# FIXME: rename this?
traceValIfNot = c: x:
if c x then true else trace (showVal x) false;
/* Evaluate a set of tests. A test is an attribute set {expr,
expected}, denoting an expression and its expected result. The
result is a list of failed tests, each represented as {name,
expected, actual}, denoting the attribute name of the failing
test and its expected and actual results. Used for regression
testing of the functions in lib; see tests.nix for an example.
Only tests having names starting with "test" are run.
Add attr { tests = ["testName"]; } to run these test only
/* A combination of `traceVal` and `traceSeq` that applies a
provided function to the value to be traced after `deepSeq`ing
it.
*/
runTests = tests: lib.concatLists (lib.attrValues (lib.mapAttrs (name: test:
traceValSeqFn =
# Function to apply
f:
# Value to trace
v: traceValFn f (builtins.deepSeq v v);
/* A combination of `traceVal` and `traceSeq`. */
traceValSeq = traceValSeqFn id;
/* A combination of `traceVal` and `traceSeqN` that applies a
provided function to the value to be traced. */
traceValSeqNFn =
# Function to apply
f:
depth:
# Value to trace
v: traceSeqN depth (f v) v;
/* A combination of `traceVal` and `traceSeqN`. */
traceValSeqN = traceValSeqNFn id;
# -- TESTING --
/* Evaluate a set of tests. A test is an attribute set `{expr,
expected}`, denoting an expression and its expected result. The
result is a list of failed tests, each represented as `{name,
expected, actual}`, denoting the attribute name of the failing
test and its expected and actual results.
Used for regression testing of the functions in lib; see
tests.nix for an example. Only tests having names starting with
"test" are run.
Add attr { tests = ["testName"]; } to run these tests only.
*/
runTests =
# Tests to run
tests: lib.concatLists (lib.attrValues (lib.mapAttrs (name: test:
let testsToRun = if tests ? tests then tests.tests else [];
in if (substring 0 4 name == "test" || elem name testsToRun)
&& ((testsToRun == []) || elem name tests.tests)
@ -95,13 +158,75 @@ rec {
then [ { inherit name; expected = test.expected; result = test.expr; } ]
else [] ) tests));
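# A minimal usage sketch (the test names are illustrative); only the
# failing test shows up in the result list:
#
#   runTests {
#     testConcat = { expr = lib.concatStringsSep "-" [ "a" "b" ]; expected = "a-b"; };
#     testBroken = { expr = 1 + 1; expected = 3; };
#   }
#   => [ { name = "testBroken"; expected = 3; result = 2; } ]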
# create a test assuming that list elements are true
# usage: { testX = allTrue [ true ]; }
/* Create a test assuming that list elements are `true`.
Example:
{ testX = allTrue [ true ]; }
*/
testAllTrue = expr: { inherit expr; expected = map (x: true) expr; };
strict = v:
trace "Warning: strict is deprecated and will be removed in the next release"
(builtins.seq v v);
# -- DEPRECATED --
traceShowVal = x: trace (showVal x) x;
traceShowValMarked = str: x: trace (str + showVal x) x;
attrNamesToStr = a:
trace ( "Warning: `attrNamesToStr` is deprecated "
+ "and will be removed in the next release. "
+ "Please use more specific concatenation "
+ "for your uses (`lib.concat(Map)StringsSep`)." )
(lib.concatStringsSep "; " (map (x: "${x}=") (attrNames a)));
showVal = with lib;
trace ( "Warning: `showVal` is deprecated "
+ "and will be removed in the next release, "
+ "please use `traceSeqN`" )
(let
modify = v:
let pr = f: { __pretty = f; val = v; };
in if isDerivation v then pr
(drv: "<δ:${drv.name}:${concatStringsSep ","
(attrNames drv)}>")
else if [] == v then pr (const "[]")
else if isList v then pr (l: "[ ${go (head l)}, ]")
else if isAttrs v then pr
(a: "{ ${ concatStringsSep ", " (attrNames a)} }")
else v;
go = x: generators.toPretty
{ allowPrettyValues = true; }
(modify x);
in go);
traceXMLVal = x:
trace ( "Warning: `traceXMLVal` is deprecated "
+ "and will be removed in the next release. "
+ "Please use `traceValFn builtins.toXML`." )
(trace (builtins.toXML x) x);
traceXMLValMarked = str: x:
trace ( "Warning: `traceXMLValMarked` is deprecated "
+ "and will be removed in the next release. "
+ "Please use `traceValFn (x: str + builtins.toXML x)`." )
(trace (str + builtins.toXML x) x);
# trace the arguments passed to function and its result
# maybe rewrite these functions in a traceCallXml like style. Then one function is enough
traceCall = n: f: a: let t = n2: x: traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a));
traceCall2 = n: f: a: b: let t = n2: x: traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b));
traceCall3 = n: f: a: b: c: let t = n2: x: traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b) (t "arg 3" c));
traceValIfNot = c: x:
trace ( "Warning: `traceValIfNot` is deprecated "
+ "and will be removed in the next release. "
+ "Please use `if/then/else` and `traceValSeq 1`.")
(if c x then true else traceSeq (showVal x) false);
addErrorContextToAttrs = attrs:
trace ( "Warning: `addErrorContextToAttrs` is deprecated "
+ "and will be removed in the next release. "
+ "Please use `builtins.addErrorContext` directly." )
(lib.mapAttrs (a: v: lib.addErrorContext "while evaluating ${a}" v) attrs);
# example: (traceCallXml "myfun" id 3) will output something like
# calling myfun arg 1: 3 result: 3
@ -109,17 +234,20 @@ rec {
# note: if result doesn't evaluate you'll get no trace at all (FIXME)
# args should be printed in any case
traceCallXml = a:
if !isInt a then
trace ( "Warning: `traceCallXml` is deprecated "
+ "and will be removed in the next release. "
+ "Please complain if you use the function regularly." )
(if !isInt a then
traceCallXml 1 "calling ${a}\n"
else
let nr = a;
in (str: expr:
if isFunction expr then
(arg:
traceCallXml (builtins.add 1 nr) "${str}\n arg ${builtins.toString nr} is \n ${builtins.toXML (strict arg)}" (expr arg)
traceCallXml (builtins.add 1 nr) "${str}\n arg ${builtins.toString nr} is \n ${builtins.toXML (builtins.seq arg arg)}" (expr arg)
)
else
let r = strict expr;
let r = builtins.seq expr expr;
in trace "${str}\n result:\n${builtins.toXML r}" r
);
));
}

View File

@ -5,9 +5,11 @@
*/
let
callLibs = file: import file { inherit lib; };
inherit (import ./fixed-points.nix {}) makeExtensible;
lib = rec {
lib = makeExtensible (self: let
callLibs = file: import file { lib = self; };
in with self; {
# often used, or depending on very little
trivial = callLibs ./trivial.nix;
@ -21,10 +23,10 @@ let
# packaging
customisation = callLibs ./customisation.nix;
maintainers = callLibs ./maintainers.nix;
maintainers = import ../maintainers/maintainer-list.nix;
meta = callLibs ./meta.nix;
sources = callLibs ./sources.nix;
versions = callLibs ./versions.nix;
# module system
modules = callLibs ./modules.nix;
@ -36,10 +38,11 @@ let
systems = callLibs ./systems;
# misc
asserts = callLibs ./asserts.nix;
debug = callLibs ./debug.nix;
generators = callLibs ./generators.nix;
misc = callLibs ./deprecated.nix;
# domain-specific
fetchers = callLibs ./fetchers.nix;
@ -47,22 +50,21 @@ let
filesystem = callLibs ./filesystem.nix;
# back-compat aliases
platforms = systems.doubles;
platforms = systems.forMeta;
inherit (builtins) add addErrorContext attrNames
concatLists deepSeq elem elemAt filter genericClosure genList
getAttr hasAttr head isAttrs isBool isInt isList
isString length lessThan listToAttrs pathExists readFile
replaceStrings seq stringLength sub substring tail;
inherit (trivial) id const concat or and boolToString mergeAttrs
flip mapNullable inNixShell min max importJSON warn info
nixpkgsVersion mod compare splitByAndCompare
functionArgs setFunctionArgs isFunction;
inherit (fixedPoints) fix fix' extends composeExtensions
inherit (builtins) add addErrorContext attrNames concatLists
deepSeq elem elemAt filter genericClosure genList getAttr
hasAttr head isAttrs isBool isInt isList isString length
lessThan listToAttrs pathExists readFile replaceStrings seq
stringLength sub substring tail;
inherit (trivial) id const concat or and bitAnd bitOr bitXor bitNot
boolToString mergeAttrs flip mapNullable inNixShell min max
importJSON warn info nixpkgsVersion version mod compare
splitByAndCompare functionArgs setFunctionArgs isFunction;
inherit (fixedPoints) fix fix' converge extends composeExtensions
makeExtensible makeExtensibleWithCustomName;
inherit (attrsets) attrByPath hasAttrByPath setAttrByPath
getAttrFromPath attrVals attrValues catAttrs filterAttrs
getAttrFromPath attrVals attrValues getAttrs catAttrs filterAttrs
filterAttrsRecursive foldAttrs collect nameValuePair mapAttrs
mapAttrs' mapAttrsToList mapAttrsRecursive mapAttrsRecursiveCond
genAttrs isDerivation toDerivation optionalAttrs
@ -72,30 +74,32 @@ let
inherit (lists) singleton foldr fold foldl foldl' imap0 imap1
concatMap flatten remove findSingle findFirst any all count
optional optionals toList range partition zipListsWith zipLists
reverseList listDfs toposort sort compareLists take drop sublist
last init crossLists unique intersectLists subtractLists
mutuallyExclusive;
reverseList listDfs toposort sort naturalSort compareLists take
drop sublist last init crossLists unique intersectLists
subtractLists mutuallyExclusive groupBy groupBy';
inherit (strings) concatStrings concatMapStrings concatImapStrings
intersperse concatStringsSep concatMapStringsSep
concatImapStringsSep makeSearchPath makeSearchPathOutput
makeLibraryPath makeBinPath makePerlPath optionalString
makeLibraryPath makeBinPath optionalString
hasPrefix hasSuffix stringToCharacters stringAsChars escape
escapeShellArg escapeShellArgs replaceChars lowerChars upperChars
toLower toUpper addContextFrom splitString removePrefix
removeSuffix versionOlder versionAtLeast getVersion nameFromURL
enableFeature fixedWidthString fixedWidthNumber isStorePath
escapeShellArg escapeShellArgs replaceChars lowerChars
upperChars toLower toUpper addContextFrom splitString
removePrefix removeSuffix versionOlder versionAtLeast getVersion
nameFromURL enableFeature enableFeatureAs withFeature
withFeatureAs fixedWidthString fixedWidthNumber isStorePath
toInt readPathsFromFile fileContents;
inherit (stringsWithDeps) textClosureList textClosureMap
noDepEntry fullDepEntry packEntry stringAfter;
inherit (customisation) overrideDerivation makeOverridable
callPackageWith callPackagesWith extendDerivation addPassthru
hydraJob makeScope;
callPackageWith callPackagesWith extendDerivation hydraJob
makeScope;
inherit (meta) addMetaAttrs dontDistribute setName updateName
appendToName mapDerivationAttrset lowPrio lowPrioSet hiPrio
appendToName mapDerivationAttrset setPrio lowPrio lowPrioSet hiPrio
hiPrioSet;
inherit (sources) pathType pathIsDirectory cleanSourceFilter
cleanSource sourceByRegex sourceFilesBySuffices
commitIdFromGitRepo cleanSourceWith pathHasContext canCleanSource;
commitIdFromGitRepo cleanSourceWith pathHasContext
canCleanSource;
inherit (modules) evalModules closeModules unifyModuleSyntax
applyIfFunction unpackSubmodule packSubmodule mergeModules
mergeModules' mergeOptionDecls evalOptionValue mergeDefinitions
@ -105,7 +109,7 @@ let
mkFixStrictness mkOrder mkBefore mkAfter mkAliasDefinitions
mkAliasAndWrapDefinitions fixMergeModules mkRemovedOptionModule
mkRenamedOptionModule mkMergedOptionModule mkChangedOptionModule
mkAliasOptionModule doRename filterModules;
mkAliasOptionModule mkAliasOptionModuleWithPriority doRename filterModules;
inherit (options) isOption mkEnableOption mkSinkUndeclaredOptions
mergeDefaultOption mergeOneOption mergeEqualOption getValues
getFiles optionAttrSetToDocList optionAttrSetToDocList'
@ -113,13 +117,14 @@ let
unknownModule mkOption;
inherit (types) isType setType defaultTypeMerge defaultFunctor
isOptionType mkOptionType;
inherit (debug) addErrorContextToAttrs traceIf traceVal
inherit (asserts)
assertMsg assertOneOf;
inherit (debug) addErrorContextToAttrs traceIf traceVal traceValFn
traceXMLVal traceXMLValMarked traceSeq traceSeqN traceValSeq
traceValSeqN traceShowVal traceShowValMarked
showVal traceCall traceCall2 traceCall3 traceValIfNot runTests
testAllTrue strict traceCallXml attrNamesToStr;
traceValSeqFn traceValSeqN traceValSeqNFn traceShowVal
traceShowValMarked showVal traceCall traceCall2 traceCall3
traceValIfNot runTests testAllTrue traceCallXml attrNamesToStr;
inherit (misc) maybeEnv defaultMergeArg defaultMerge foldArgs
defaultOverridableDelayableArgs composedArgsAndFun
maybeAttrNullable maybeAttr ifEnable checkFlag getValue
checkReqs uniqList uniqListExt condConcat lazyGenericClosure
innerModifySumArgs modifySumArgs innerClosePropagation
@ -127,6 +132,7 @@ let
mergeAttrsWithFunc mergeAttrsConcatenateValues
mergeAttrsNoOverride mergeAttrByFunc mergeAttrsByFuncDefaults
mergeAttrsByFuncDefaultsClean mergeAttrBy
prepareDerivationArgs nixType imap overridableDelayableArgs;
};
fakeSha256 fakeSha512
nixType imap;
});
in lib
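Since lib is now built with makeExtensible, it also exposes an extend attribute. A minimal sketch of extending the library with a custom helper (the greet function is hypothetical):

let
  myLib = lib.extend (self: super: {
    greet = name: "hello ${name}";
  });
in myLib.greet "nixpkgs"
# => "hello nixpkgs"; all existing lib functions remain available on myLib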

View File

@ -35,74 +35,6 @@ rec {
withStdOverrides;
# predecessors: proposed replacement for applyAndFun (which has a bug because it merges twice)
# the naming "overridableDelayableArgs" tries to express that you can
# - override attr values which have been supplied earlier
# - use attr values before they have been supplied by accessing the fix point
# name "fixed"
# f: the (delayed overridden) arguments are applied to this
#
# initial: initial attrs arguments and settings. see defaultOverridableDelayableArgs
#
# returns: f applied to the arguments // special attributes attrs
# a) merge: merge applied args with new args. Whether an argument is overridden depends on the merge settings
# b) replace: this lets you replace and remove names no matter which merge function has been set
#
# examples: see test cases "res" below;
overridableDelayableArgs =
f: # the function applied to the arguments
initial: # you pass attrs, the functions below are passing a function taking the fix argument
let
takeFixed = if lib.isFunction initial then initial else (fixed : initial); # transform initial to an expression always taking the fixed argument
tidy = args:
let # apply all functions given in "applyPreTidy" in sequence
applyPreTidyFun = fold ( n: a: x: n ( a x ) ) lib.id (maybeAttr "applyPreTidy" [] args);
in removeAttrs (applyPreTidyFun args) ( ["applyPreTidy"] ++ (maybeAttr "removeAttrs" [] args) ); # tidy up args before applying them
fun = n: x:
let newArgs = fixed:
let args = takeFixed fixed;
mergeFun = args.${n};
in if isAttrs x then (mergeFun args x)
else assert lib.isFunction x;
mergeFun args (x ( args // { inherit fixed; }));
in overridableDelayableArgs f newArgs;
in
(f (tidy (lib.fix takeFixed))) // {
merge = fun "mergeFun";
replace = fun "keepFun";
};
defaultOverridableDelayableArgs = f:
let defaults = {
mergeFun = mergeAttrByFunc; # default merge function. The merge strategy (concatenate lists, strings) is given by mergeAttrBy
keepFun = a: b: { inherit (a) removeAttrs mergeFun keepFun mergeAttrBy; } // b; # even when using replace preserve these values
applyPreTidy = []; # list of functions applied to args before args are tidied up (usage case : prepareDerivationArgs)
mergeAttrBy = mergeAttrBy // {
applyPreTidy = a: b: a ++ b;
removeAttrs = a: b: a ++ b;
};
removeAttrs = ["mergeFun" "keepFun" "mergeAttrBy" "removeAttrs" "fixed" ]; # before applying the arguments to the function make sure these names are gone
};
in (overridableDelayableArgs f defaults).merge;
# rec { # an example of how composedArgsAndFun can be used
# a = composedArgsAndFun (x: x) { a = ["2"]; meta = { d = "bar";}; };
# # meta.d will be lost ! It's your task to preserve it (eg using a merge function)
# b = a.passthru.function { a = [ "3" ]; meta = { d2 = "bar2";}; };
# # instead of passing/ overriding values you can use a merge function:
# c = b.passthru.function ( x: { a = x.a ++ ["4"]; }); # consider using (maybeAttr "a" [] x)
# }
# result:
# {
# a = { a = ["2"]; meta = { d = "bar"; }; passthru = { function = .. }; };
# b = { a = ["3"]; meta = { d2 = "bar2"; }; passthru = { function = .. }; };
# c = { a = ["3" "4"]; meta = { d2 = "bar2"; }; passthru = { function = .. }; };
# # c2 is equal to c
# }
composedArgsAndFun = f: foldArgs defaultMerge f {};
# shortcut for attrByPath ["name"] default attrs
maybeAttrNullable = maybeAttr;
@ -285,7 +217,7 @@ rec {
# };
# will result in
# { mergeAttrsBy = [...]; buildInputs = [ a b c d ]; }
# is used by prepareDerivationArgs, defaultOverridableDelayableArgs and can be used when composing using
# is used by defaultOverridableDelayableArgs and can be used when composing using
# foldArgs, composedArgsAndFun or applyAndFun. Example: composableDerivation in all-packages.nix
mergeAttrByFunc = x: y:
let
@ -318,58 +250,6 @@ rec {
// listToAttrs (map (n: nameValuePair n (a: b: "${a}\n${b}") ) [ "preConfigure" "postInstall" ])
;
# prepareDerivationArgs tries to make writing configurable derivations easier
# example:
# prepareDerivationArgs {
# mergeAttrBy = {
# myScript = x: y: x ++ "\n" ++ y;
# };
# cfg = {
# readlineSupport = true;
# };
# flags = {
# readline = {
# set = {
# configureFlags = [ "--with-compiler=${compiler}" ];
# buildInputs = [ compiler ];
# pass = { inherit compiler; READLINE=1; };
# assertion = compiler.dllSupport;
# myScript = "foo";
# };
# unset = { configureFlags = ["--without-compiler"]; };
# };
# };
# src = ...
# buildPhase = '' ... '';
# name = ...
# myScript = "bar";
# };
# if you don't need unset you can omit the surrounding set = { .. } attr
# all attrs except flags cfg and mergeAttrBy will be merged with the
# additional data from flags depending on config settings
# It's used in composableDerivation in all-packages.nix. It's also used
# heavily in the new python and libs implementation
#
# should we check for misspelled cfg options?
# TODO use args.mergeFun here as well?
prepareDerivationArgs = args:
let args2 = { cfg = {}; flags = {}; } // args;
flagName = name: "${name}Support";
cfgWithDefaults = (listToAttrs (map (n: nameValuePair (flagName n) false) (attrNames args2.flags)))
// args2.cfg;
opts = attrValues (mapAttrs (a: v:
let v2 = if v ? set || v ? unset then v else { set = v; };
n = if cfgWithDefaults.${flagName a} then "set" else "unset";
attr = maybeAttr n {} v2; in
if (maybeAttr "assertion" true attr)
then attr
else throw "assertion of flag ${a} of derivation ${args.name} failed"
) args2.flags );
in removeAttrs
(mergeAttrsByFuncDefaults ([args] ++ opts ++ [{ passthru = cfgWithDefaults; }]))
["flags" "cfg" "mergeAttrBy" ];
nixType = x:
if isAttrs x then
if x ? outPath then "derivation"
@ -390,4 +270,8 @@ rec {
starting at zero.
*/
imap = imap1;
# Fake hashes. Can be used as hash placeholders when computing the hash ahead of time isn't trivial
fakeSha256 = "0000000000000000000000000000000000000000000000000000000000000000";
fakeSha512 = "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000";
}
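A minimal sketch of how the fake hashes might be used inside a package expression (the URL is hypothetical): build once with the placeholder, let the fetcher fail with a hash mismatch, then paste the reported hash into the expression:

src = fetchurl {
  url = "https://example.org/tool-1.0.tar.gz";  # hypothetical source
  # placeholder; the failed fetch reports the real sha256 to substitute here
  sha256 = lib.fakeSha256;
};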

View File

@ -24,6 +24,16 @@ rec {
# for a concrete example.
fix' = f: let x = f x // { __unfix__ = f; }; in x;
# Return the fixpoint that `f` converges to when called recursively, starting
# with the input `x`.
#
# nix-repl> converge (x: x / 2) 16
# 0
converge = f: x:
if (f x) == x
then x
else converge f (f x);
# Modify the contents of an explicitly recursive attribute set in a way that
# honors `self`-references. This is accomplished with a function
#
@ -41,6 +51,18 @@ rec {
# think of it as an infix operator `g extends f` that mimics the syntax from
# Java. It may seem counter-intuitive to have the "base class" as the second
# argument, but it's nice this way if several uses of `extends` are cascaded.
#
# To get a better understanding of how `extends` turns a function with a fix
# point (the package set we start with) into a new function with a different fix
# point (the desired package set), let's just see how `extends g f`
# unfolds with `g` and `f` defined above:
#
# extends g f = self: let super = f self; in super // g self super;
# = self: let super = { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; }; in super // g self super
# = self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; } // g self { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; }
# = self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; } // { foo = "foo" + " + "; }
# = self: { foo = "foo + "; bar = "bar"; foobar = self.foo + self.bar; }
#
extends = f: rattrs: self: let super = rattrs self; in super // f self super;
# Compose two extending functions of the type expected by 'extends'

View File

@ -4,6 +4,12 @@
* They all follow a similar interface:
* generator { config-attrs } data
*
* `config-attrs` are holes in the generators
* with sensible default implementations that
* can be overwritten. The default implementations
* are mostly generators themselves, called with
* their respective default values; they can be reused.
*
* Tests can be found in ./tests.nix
* Documentation in the manual, #sec-generators
*/
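# For instance, a sketch of filling such a hole: override the `mkKeyValue`
# argument of `toINI` by reusing `mkKeyValueDefault` with a custom (purely
# illustrative) value renderer that quotes every value:
#
#   lib.generators.toINI {
#     mkKeyValue = lib.generators.mkKeyValueDefault {
#       mkValueString = v: ''"${toString v}"'';
#     } "=";
#   } { main = { answer = 42; greeting = "hello"; }; }
#   => ''
#     [main]
#     answer="42"
#     greeting="hello"
#   ''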
@ -13,13 +19,37 @@ let
libStr = lib.strings;
libAttr = lib.attrsets;
flipMapAttrs = flip libAttr.mapAttrs;
inherit (lib) isFunction;
in
rec {
## -- HELPER FUNCTIONS & DEFAULTS --
/* Convert a value to a sensible default string representation.
* The builtin `toString` function has some strange defaults,
* suitable for bash scripts but not much else.
*/
mkValueStringDefault = {}: v: with builtins;
let err = t: v: abort
("generators.mkValueStringDefault: " +
"${t} not supported: ${toPretty {} v}");
in if isInt v then toString v
# we default to not quoting strings
else if isString v then v
# toString renders true as "1", which is not a good default
else if true == v then "true"
# and it renders false as "", which is even less of a good default
else if false == v then "false"
else if null == v then "null"
# if you have lists you probably want to replace this
else if isList v then err "lists" v
# same as for lists, might want to replace
else if isAttrs v then err "attrsets" v
else if isFunction v then err "functions" v
else err "this value is" (toString v);
/* Generate a line of key k and value v, separated by
* character sep. If sep appears in k, it is escaped.
* Helper for syntaxes with different separators.
@ -30,11 +60,14 @@ rec {
* > "f\:oo:bar"
*/
mkKeyValueDefault = {
mkValueString ? toString
mkValueString ? mkValueStringDefault {}
}: sep: k: v:
"${libStr.escape [sep] k}${sep}${mkValueString v}";
## -- FILE FORMAT GENERATORS --
/* Generate a key-value-style config file from an attrset.
*
* mkKeyValue is the same as in toINI.
@ -98,6 +131,7 @@ rec {
*/
toYAML = {}@args: toJSON args;
/* Pretty print a value, akin to `builtins.trace`.
* Should probably be a builtin as well.
*/
@ -107,17 +141,14 @@ rec {
(This means fn is type Val -> String.) */
allowPrettyValues ? false
}@args: v: with builtins;
if isInt v then toString v
else if isBool v then (if v == true then "true" else "false")
else if isString v then "\"" + v + "\""
else if null == v then "null"
else if isFunction v then
let fna = lib.functionArgs v;
showFnas = concatStringsSep "," (libAttr.mapAttrsToList
(name: hasDefVal: if hasDefVal then "(${name})" else name)
fna);
in if fna == {} then "<λ>"
else "<λ:{${showFnas}}>"
let isPath = v: typeOf v == "path";
in if isInt v then toString v
else if isFloat v then "~${toString v}"
else if isString v then ''"${libStr.escape [''"''] v}"''
else if true == v then "true"
else if false == v then "false"
else if null == v then "null"
else if isPath v then toString v
else if isList v then "[ "
+ libStr.concatMapStringsSep " " (toPretty args) v
+ " ]"
@ -126,12 +157,71 @@ rec {
if attrNames v == [ "__pretty" "val" ] && allowPrettyValues
then v.__pretty v.val
# TODO: there is probably a better representation?
else if v ? type && v.type == "derivation" then "<δ>"
else if v ? type && v.type == "derivation" then
"<δ:${v.name}>"
# "<δ:${concatStringsSep "," (builtins.attrNames v)}>"
else "{ "
+ libStr.concatStringsSep " " (libAttr.mapAttrsToList
(name: value:
"${toPretty args name} = ${toPretty args value};") v)
+ " }"
else abort "toPretty: should never happen (v = ${v})";
else if isFunction v then
let fna = lib.functionArgs v;
showFnas = concatStringsSep "," (libAttr.mapAttrsToList
(name: hasDefVal: if hasDefVal then "(${name})" else name)
fna);
in if fna == {} then "<λ>"
else "<λ:{${showFnas}}>"
else abort "generators.toPretty: should never happen (v = ${v})";
# PLIST handling
toPlist = {}: v: let
isFloat = builtins.isFloat or (x: false);
expr = ind: x: with builtins;
if isNull x then "" else
if isBool x then bool ind x else
if isInt x then int ind x else
if isString x then str ind x else
if isList x then list ind x else
if isAttrs x then attrs ind x else
if isFloat x then float ind x else
abort "generators.toPlist: should never happen (v = ${v})";
literal = ind: x: ind + x;
bool = ind: x: literal ind (if x then "<true/>" else "<false/>");
int = ind: x: literal ind "<integer>${toString x}</integer>";
str = ind: x: literal ind "<string>${x}</string>";
key = ind: x: literal ind "<key>${x}</key>";
float = ind: x: literal ind "<real>${toString x}</real>";
indent = ind: expr "\t${ind}";
item = ind: libStr.concatMapStringsSep "\n" (indent ind);
list = ind: x: libStr.concatStringsSep "\n" [
(literal ind "<array>")
(item ind x)
(literal ind "</array>")
];
attrs = ind: x: libStr.concatStringsSep "\n" [
(literal ind "<dict>")
(attr ind x)
(literal ind "</dict>")
];
attr = let attrFilter = name: value: name != "_module" && value != null;
in ind: x: libStr.concatStringsSep "\n" (lib.flatten (lib.mapAttrsToList
(name: value: lib.optional (attrFilter name value) [
(key "\t${ind}" name)
(expr "\t${ind}" value)
]) x));
in ''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
${expr "" v}
</plist>'';
}
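A sketch of what the new toPlist generator produces for a small attrset (the keys are illustrative); attributes are rendered alphabetically into a <dict>:

lib.generators.toPlist {} { Label = "org.example.agent"; RunAtLoad = true; }
# produces, roughly (the real output indents with tabs):
#   <?xml version="1.0" encoding="UTF-8"?>
#   <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
#   <plist version="1.0">
#   <dict>
#     <key>Label</key>
#     <string>org.example.agent</string>
#     <key>RunAtLoad</key>
#     <true/>
#   </dict>
#   </plist>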

57
lib/kernel.nix Normal file
View File

@ -0,0 +1,57 @@
{ lib
# we pass the kernel version here to keep a nice syntax `whenOlder "4.13"`
# kernelVersion, e.g., config.boot.kernelPackages.version
, version
, mkValuePreprocess ? null
}:
with lib;
rec {
# Common patterns
when = cond: opt: if cond then opt else null;
whenAtLeast = ver: when (versionAtLeast version ver);
whenOlder = ver: when (versionOlder version ver);
whenBetween = verLow: verHigh: when (versionAtLeast version verLow && versionOlder version verHigh);
# Keeping these around in case we decide to change this horrible implementation :)
option = x: if x == null then null else "?${x}";
yes = "y";
no = "n";
module = "m";
mkValue = val:
let
isNumber = c: elem c ["0" "1" "2" "3" "4" "5" "6" "7" "8" "9"];
in
if val == "" then "\"\""
else if val == yes || val == module || val == no then val
else if all isNumber (stringToCharacters val) then val
else if substring 0 2 val == "0x" then val
else val; # FIXME: fix quoting one day
# generate nix intermediate kernel config file of the form
#
# VIRTIO_MMIO m
# VIRTIO_BLK y
# VIRTIO_CONSOLE n
# NET_9P_VIRTIO? y
#
# Use mkValuePreprocess to preprocess option values, aka mark 'modules' as
# 'yes' or vice-versa
# Borrowed from copumpkin https://github.com/NixOS/nixpkgs/pull/12158
# returns a string, expr should be an attribute set
generateNixKConf = exprs: mkValuePreprocess:
let
mkConfigLine = key: rawval:
let
val = if builtins.isFunction mkValuePreprocess then mkValuePreprocess rawval else rawval;
in
if val == null
then ""
else if hasPrefix "?" val
then "${key}? ${mkValue (removePrefix "?" val)}\n"
else "${key} ${mkValue val}\n";
mkConf = cfg: concatStrings (mapAttrsToList mkConfigLine cfg);
in mkConf exprs;
}
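A minimal usage sketch for these kernel helpers, assuming the file is imported with a concrete kernel version; the option names below are only illustrative:

let
  kernelLib = import ./kernel.nix { inherit lib; version = "4.19"; };
in kernelLib.generateNixKConf {
  NET_9P_VIRTIO = kernelLib.option kernelLib.yes;              # rendered with a trailing '?'
  REFCOUNT_FULL = kernelLib.whenAtLeast "4.13" kernelLib.yes;  # null (dropped) on older kernels
  VIRTIO_BLK    = kernelLib.yes;
  VIRTIO_MMIO   = kernelLib.module;
} null
# => ''
#   NET_9P_VIRTIO? y
#   REFCOUNT_FULL y
#   VIRTIO_BLK y
#   VIRTIO_MMIO m
# ''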

View File

@ -13,6 +13,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
* add it to this list. The URL mentioned above is a good source for inspiration.
*/
abstyles = spdx {
spdxId = "Abstyles";
fullName = "Abstyles License";
};
afl21 = spdx {
spdxId = "AFL-2.1";
fullName = "Academic Free License v2.1";
@ -24,13 +29,13 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
};
agpl3 = spdx {
spdxId = "AGPL-3.0";
fullName = "GNU Affero General Public License v3.0";
spdxId = "AGPL-3.0-only";
fullName = "GNU Affero General Public License v3.0 only";
};
agpl3Plus = {
agpl3Plus = spdx {
spdxId = "AGPL-3.0-or-later";
fullName = "GNU Affero General Public License v3.0 or later";
inherit (agpl3) url;
};
amazonsl = {
@ -42,6 +47,7 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
amd = {
fullName = "AMD License Agreement";
url = http://developer.amd.com/amd-license-agreement/;
free = false;
};
apsl20 = spdx {
@ -99,6 +105,12 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = ''BSD 4-clause "Original" or "Old" License'';
};
bsl11 = {
fullName = "Business Source License 1.1";
url = https://mariadb.com/bsl11;
free = false;
};
clArtistic = spdx {
spdxId = "ClArtistic";
fullName = "Clarified Artistic License";
@ -112,26 +124,37 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
cc-by-nc-sa-20 = spdx {
spdxId = "CC-BY-NC-SA-2.0";
fullName = "Creative Commons Attribution Non Commercial Share Alike 2.0";
free = false;
};
cc-by-nc-sa-25 = spdx {
spdxId = "CC-BY-NC-SA-2.5";
fullName = "Creative Commons Attribution Non Commercial Share Alike 2.5";
free = false;
};
cc-by-nc-sa-30 = spdx {
spdxId = "CC-BY-NC-SA-3.0";
fullName = "Creative Commons Attribution Non Commercial Share Alike 3.0";
free = false;
};
cc-by-nc-sa-40 = spdx {
spdxId = "CC-BY-NC-SA-4.0";
fullName = "Creative Commons Attribution Non Commercial Share Alike 4.0";
free = false;
};
cc-by-nc-40 = spdx {
spdxId = "CC-BY-NC-4.0";
fullName = "Creative Commons Attribution Non Commercial 4.0 International";
free = false;
};
cc-by-nd-30 = spdx {
spdxId = "CC-BY-ND-3.0";
fullName = "Creative Commons Attribution-No Derivative Works v3.00";
free = false;
};
cc-by-sa-25 = spdx {
@ -179,11 +202,21 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "CeCILL-C Free Software License Agreement";
};
cpal10 = spdx {
spdxId = "CPAL-1.0";
fullName = "Common Public Attribution License 1.0";
};
cpl10 = spdx {
spdxId = "CPL-1.0";
fullName = "Common Public License 1.0";
};
curl = {
fullName = "MIT/X11 derivate";
url = "https://curl.haxx.se/docs/copyright.html";
};
doc = spdx {
spdxId = "DOC";
fullName = "DOC License";
@ -205,6 +238,12 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Eiffel Forum License v2.0";
};
elastic = {
fullName = "ELASTIC LICENSE";
url = https://github.com/elastic/elasticsearch/blob/master/licenses/ELASTIC-LICENSE.txt;
free = false;
};
epl10 = spdx {
spdxId = "EPL-1.0";
fullName = "Eclipse Public License 1.0";
@ -227,13 +266,23 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
};
fdl12 = spdx {
spdxId = "GFDL-1.2";
fullName = "GNU Free Documentation License v1.2";
spdxId = "GFDL-1.2-only";
fullName = "GNU Free Documentation License v1.2 only";
};
fdl12Plus = spdx {
spdxId = "GFDL-1.2-or-later";
fullName = "GNU Free Documentation License v1.2 or later";
};
fdl13 = spdx {
spdxId = "GFDL-1.3";
fullName = "GNU Free Documentation License v1.3";
spdxId = "GFDL-1.3-only";
fullName = "GNU Free Documentation License v1.3 only";
};
fdl13Plus = spdx {
spdxId = "GFDL-1.3-or-later";
fullName = "GNU Free Documentation License v1.3 or later";
};
ffsl = {
@ -258,20 +307,25 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
};
gpl1 = spdx {
spdxId = "GPL-1.0";
spdxId = "GPL-1.0-only";
fullName = "GNU General Public License v1.0 only";
};
gpl1Plus = spdx {
spdxId = "GPL-1.0+";
spdxId = "GPL-1.0-or-later";
fullName = "GNU General Public License v1.0 or later";
};
gpl2 = spdx {
spdxId = "GPL-2.0";
spdxId = "GPL-2.0-only";
fullName = "GNU General Public License v2.0 only";
};
gpl2Classpath = spdx {
spdxId = "GPL-2.0-with-classpath-exception";
fullName = "GNU General Public License v2.0 only (with Classpath exception)";
};
gpl2ClasspathPlus = {
fullName = "GNU General Public License v2.0 or later (with Classpath exception)";
url = https://fedoraproject.org/wiki/Licensing/GPL_Classpath_Exception;
@ -279,21 +333,21 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
gpl2Oss = {
fullName = "GNU General Public License version 2 only (with OSI approved licenses linking exception)";
url = http://www.mysql.com/about/legal/licensing/foss-exception;
url = https://www.mysql.com/about/legal/licensing/foss-exception;
};
gpl2Plus = spdx {
spdxId = "GPL-2.0+";
spdxId = "GPL-2.0-or-later";
fullName = "GNU General Public License v2.0 or later";
};
gpl3 = spdx {
spdxId = "GPL-3.0";
spdxId = "GPL-3.0-only";
fullName = "GNU General Public License v3.0 only";
};
gpl3Plus = spdx {
spdxId = "GPL-3.0+";
spdxId = "GPL-3.0-or-later";
fullName = "GNU General Public License v3.0 or later";
};
@ -318,6 +372,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Independent JPEG Group License";
};
imagemagick = spdx {
fullName = "ImageMagick License";
spdxId = "imagemagick";
};
inria-compcert = {
fullName = "INRIA Non-Commercial License Agreement for the CompCert verified compiler";
url = "http://compcert.inria.fr/doc/LICENSE";
@ -345,33 +404,45 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "ISC License";
};
# Proprietary binaries; free to redistribute without modification.
issl = {
fullName = "Intel Simplified Software License";
url = https://software.intel.com/en-us/license/intel-simplified-software-license;
free = false;
};
jasper = spdx {
spdxId = "JasPer-2.0";
fullName = "JasPer License";
};
lgpl2 = spdx {
spdxId = "LGPL-2.0";
spdxId = "LGPL-2.0-only";
fullName = "GNU Library General Public License v2 only";
};
lgpl2Plus = spdx {
spdxId = "LGPL-2.0+";
spdxId = "LGPL-2.0-or-later";
fullName = "GNU Library General Public License v2 or later";
};
lgpl21 = spdx {
spdxId = "LGPL-2.1";
spdxId = "LGPL-2.1-only";
fullName = "GNU Library General Public License v2.1 only";
};
lgpl21Plus = spdx {
spdxId = "LGPL-2.1+";
spdxId = "LGPL-2.1-or-later";
fullName = "GNU Library General Public License v2.1 or later";
};
lgpl3 = spdx {
spdxId = "LGPL-3.0";
spdxId = "LGPL-3.0-only";
fullName = "GNU Lesser General Public License v3.0 only";
};
lgpl3Plus = spdx {
spdxId = "LGPL-3.0+";
spdxId = "LGPL-3.0-or-later";
fullName = "GNU Lesser General Public License v3.0 or later";
};
@ -440,6 +511,13 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
msrla = {
fullName = "Microsoft Research License Agreement";
url = "http://research.microsoft.com/en-us/projects/pex/msr-la.txt";
free = false;
};
nasa13 = spdx {
spdxId = "NASA-1.3";
fullName = "NASA Open Source Agreement 1.3";
free = false;
};
ncsa = spdx {
@ -457,6 +535,12 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Non-Profit Open Software License 3.0";
};
ocamlpro_nc = {
fullName = "OCamlPro Non Commercial license version 1";
url = "https://alt-ergo.ocamlpro.com/http/alt-ergo-2.2.0/OCamlPro-Non-Commercial-License.pdf";
free = false;
};
ofl = spdx {
spdxId = "OFL-1.1";
fullName = "SIL Open Font License 1.1";
@ -508,6 +592,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Public Domain";
};
purdueBsd = {
fullName = " Purdue BSD-Style License"; # also know as lsof license
url = https://enterprise.dejacode.com/licenses/public/purdue-bsd;
};
qpl = spdx {
spdxId = "QPL-1.0";
fullName = "Q Public License 1.0";
@ -523,6 +612,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Ruby License";
};
sendmail = spdx {
spdxId = "Sendmail";
fullName = "Sendmail License";
};
sgi-b-20 = spdx {
spdxId = "SGI-B-2.0";
fullName = "SGI Free Software License B v2.0";
@ -580,6 +674,12 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Vim License";
};
virtualbox-puel = {
fullName = "Oracle VM VirtualBox Extension Pack Personal Use and Evaluation License (PUEL)";
url = "https://www.virtualbox.org/wiki/VirtualBox_PUEL";
free = false;
};
vsl10 = spdx {
spdxId = "VSL-1.0";
fullName = "Vovida Software License v1.0";
@ -606,10 +706,15 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
};
wxWindows = spdx {
spdxId = "WXwindows";
spdxId = "wxWindows";
fullName = "wxWindows Library Licence, Version 3.1";
};
xfig = {
fullName = "xfig";
url = "http://mcj.sourceforge.net/authors.html#xfig";
};
zlib = spdx {
spdxId = "Zlib";
fullName = "zlib License";

View File

@ -1,26 +1,31 @@
# General list operations.
{ lib }:
with lib.trivial;
let
inherit (lib.strings) toInt;
in
rec {
inherit (builtins) head tail length isList elemAt concatLists filter elem genList;
/* Create a list consisting of a single element. `singleton x' is
sometimes more convenient with respect to indentation than `[x]'
/* Create a list consisting of a single element. `singleton x` is
sometimes more convenient with respect to indentation than `[x]`
when x spans multiple lines.
Type: singleton :: a -> [a]
Example:
singleton "foo"
=> [ "foo" ]
*/
singleton = x: [x];
/* right fold a binary function `op' between successive elements of
`list' with `nul' as the starting value, i.e.,
`foldr op nul [x_1 x_2 ... x_n] == op x_1 (op x_2 ... (op x_n nul))'.
Type:
foldr :: (a -> b -> b) -> b -> [a] -> b
/* right fold a binary function `op` between successive elements of
`list` with `nul` as the starting value, i.e.,
`foldr op nul [x_1 x_2 ... x_n] == op x_1 (op x_2 ... (op x_n nul))`.
Type: foldr :: (a -> b -> b) -> b -> [a] -> b
Example:
concat = foldr (a: b: a + b) "z"
@ -40,16 +45,15 @@ rec {
else op (elemAt list n) (fold' (n + 1));
in fold' 0;
/* `fold' is an alias of `foldr' for historic reasons */
/* `fold` is an alias of `foldr` for historic reasons */
# FIXME(Profpatsch): deprecate?
fold = foldr;
/* left fold, like `foldr', but from the left:
/* left fold, like `foldr`, but from the left:
`foldl op nul [x_1 x_2 ... x_n] == op (... (op (op nul x_1) x_2) ... x_n)`.
Type:
foldl :: (b -> a -> b) -> b -> [a] -> b
Type: foldl :: (b -> a -> b) -> b -> [a] -> b
Example:
lconcat = foldl (a: b: a + b) "z"
@ -62,23 +66,26 @@ rec {
*/
foldl = op: nul: list:
let
len = length list;
foldl' = n:
if n == -1
then nul
else op (foldl' (n - 1)) (elemAt list n);
in foldl' (length list - 1);
/* Strict version of `foldl'.
/* Strict version of `foldl`.
The difference is that evaluation is forced upon access. Usually used
with small whole results (in contrast with lazily-generated lists or large
lists where only a part is consumed).
Type: foldl' :: (b -> a -> b) -> b -> [a] -> b
*/
foldl' = builtins.foldl' or foldl;
/* Map with index starting from 0
Type: imap0 :: (int -> a -> b) -> [a] -> [b]
Example:
imap0 (i: v: "${v}-${toString i}") ["a" "b"]
=> [ "a-0" "b-1" ]
@ -87,6 +94,8 @@ rec {
/* Map with index starting from 1
Type: imap1 :: (int -> a -> b) -> [a] -> [b]
Example:
imap1 (i: v: "${v}-${toString i}") ["a" "b"]
=> [ "a-1" "b-2" ]
@ -95,11 +104,13 @@ rec {
/* Map and concatenate the result.
Type: concatMap :: (a -> [b]) -> [a] -> [b]
Example:
concatMap (x: [x] ++ ["z"]) ["a" "b"]
=> [ "a" "z" "b" "z" ]
*/
concatMap = f: list: concatLists (map f list);
concatMap = builtins.concatMap or (f: list: concatLists (map f list));
/* Flatten the argument into a single list; that is, nested lists are
spliced into the top-level lists.
@ -117,15 +128,21 @@ rec {
/* Remove elements equal to 'e' from a list. Useful for buildInputs.
Type: remove :: a -> [a] -> [a]
Example:
remove 3 [ 1 3 4 3 ]
=> [ 1 4 ]
*/
remove = e: filter (x: x != e);
remove =
# Element to remove from the list
e: filter (x: x != e);
/* Find the sole element in the list matching the specified
predicate, returns `default' if no such element exists, or
`multiple' if there are multiple matching elements.
predicate, returns `default` if no such element exists, or
`multiple` if there are multiple matching elements.
Type: findSingle :: (a -> bool) -> a -> a -> [a] -> a
Example:
findSingle (x: x == 3) "none" "multiple" [ 1 3 3 ]
@ -135,14 +152,24 @@ rec {
findSingle (x: x == 3) "none" "multiple" [ 1 9 ]
=> "none"
*/
findSingle = pred: default: multiple: list:
findSingle =
# Predicate
pred:
# Default value to return if element was not found.
default:
# Default value to return if more than one element was found
multiple:
# Input list
list:
let found = filter pred list; len = length found;
in if len == 0 then default
else if len != 1 then multiple
else head found;
/* Find the first element in the list matching the specified
predicate or returns `default' if no such element exists.
predicate or return `default` if no such element exists.
Type: findFirst :: (a -> bool) -> a -> [a] -> a
Example:
findFirst (x: x > 3) 7 [ 1 6 4 ]
@ -150,12 +177,20 @@ rec {
findFirst (x: x > 9) 7 [ 1 6 4 ]
=> 7
*/
findFirst = pred: default: list:
findFirst =
# Predicate
pred:
# Default value to return
default:
# Input list
list:
let found = filter pred list;
in if found == [] then default else head found;
/* Return true iff function `pred' returns true for at least element
of `list'.
/* Return true if function `pred` returns true for at least one
element of `list`.
Type: any :: (a -> bool) -> [a] -> bool
Example:
any isString [ 1 "a" { } ]
@@ -165,8 +200,10 @@ rec {
*/
any = builtins.any or (pred: foldr (x: y: if pred x then true else y) false);
/* Return true iff function `pred' returns true for all elements of
`list'.
/* Return true if function `pred` returns true for all elements of
`list`.
Type: all :: (a -> bool) -> [a] -> bool
Example:
all (x: x < 3) [ 1 2 ]
@@ -176,19 +213,25 @@ rec {
*/
all = builtins.all or (pred: foldr (x: y: if pred x then y else false) true);
/* Count how many times function `pred' returns true for the elements
of `list'.
/* Count how many elements of `list` match the supplied predicate
function.
Type: count :: (a -> bool) -> [a] -> int
Example:
count (x: x == 3) [ 3 2 3 4 6 ]
=> 2
*/
count = pred: foldl' (c: x: if pred x then c + 1 else c) 0;
count =
# Predicate
pred: foldl' (c: x: if pred x then c + 1 else c) 0;
/* Return a singleton list or an empty list, depending on a boolean
value. Useful when building lists with optional elements
(e.g. `++ optional (system == "i686-linux") flashplayer').
Type: optional :: bool -> a -> [a]
Example:
optional true "foo"
=> [ "foo" ]
@@ -199,13 +242,19 @@ rec {
/* Return a list or an empty list, depending on a boolean value.
Type: optionals :: bool -> [a] -> [a]
Example:
optionals true [ 2 3 ]
=> [ 2 3 ]
optionals false [ 2 3 ]
=> [ ]
*/
optionals = cond: elems: if cond then elems else [];
optionals =
# Condition
cond:
# List to return if condition is true
elems: if cond then elems else [];
/* If argument is a list, return it; else, wrap it in a singleton
@@ -222,20 +271,28 @@ rec {
/* Return a list of integers from `first' up to and including `last'.
Type: range :: int -> int -> [int]
Example:
range 2 4
=> [ 2 3 4 ]
range 3 2
=> [ ]
*/
range = first: last:
range =
# First integer in the range
first:
# Last integer in the range
last:
if first > last then
[]
else
genList (n: first + n) (last - first + 1);
/* Splits the elements of a list in two lists, `right' and
`wrong', depending on the evaluation of a predicate.
/* Splits the elements of a list into two lists, `right` and
`wrong`, depending on the evaluation of a predicate.
Type: (a -> bool) -> [a] -> { right :: [a], wrong :: [a] }
Example:
partition (x: x > 2) [ 5 1 2 3 4 ]
@@ -248,21 +305,63 @@ rec {
else { right = t.right; wrong = [h] ++ t.wrong; }
) { right = []; wrong = []; });
/* Splits the elements of a list into many lists, using the return value of a predicate.
The predicate should return a string, which becomes a key of the attrset that `groupBy` returns.
`groupBy'` additionally allows customising the combining function and the initial value.
Example:
groupBy (x: boolToString (x > 2)) [ 5 1 2 3 4 ]
=> { true = [ 5 3 4 ]; false = [ 1 2 ]; }
groupBy (x: x.name) [ {name = "icewm"; script = "icewm &";}
{name = "xfce"; script = "xfce4-session &";}
{name = "icewm"; script = "icewmbg &";}
{name = "mate"; script = "gnome-session &";}
]
=> { icewm = [ { name = "icewm"; script = "icewm &"; }
{ name = "icewm"; script = "icewmbg &"; } ];
mate = [ { name = "mate"; script = "gnome-session &"; } ];
xfce = [ { name = "xfce"; script = "xfce4-session &"; } ];
}
groupBy' builtins.add 0 (x: boolToString (x > 2)) [ 5 1 2 3 4 ]
=> { true = 12; false = 3; }
*/
groupBy' = op: nul: pred: lst:
foldl' (r: e:
let
key = pred e;
in
r // { ${key} = op (r.${key} or nul) e; }
) {} lst;
groupBy = groupBy' (sum: e: sum ++ [e]) [];
/* Merges two lists of the same size together. If the sizes aren't the same,
the merging stops at the shortest. How both lists are merged is defined
by the first argument.
Type: zipListsWith :: (a -> b -> c) -> [a] -> [b] -> [c]
Example:
zipListsWith (a: b: a + b) ["h" "l"] ["e" "o"]
=> ["he" "lo"]
*/
zipListsWith = f: fst: snd:
zipListsWith =
# Function to zip elements of both lists
f:
# First list
fst:
# Second list
snd:
genList
(n: f (elemAt fst n) (elemAt snd n)) (min (length fst) (length snd));
/* Merges two lists of the same size together. If the sizes aren't the same,
the merging stops at the shortest.
Type: zipLists :: [a] -> [b] -> [{ fst :: a, snd :: b}]
Example:
zipLists [ 1 2 ] [ "a" "b" ]
=> [ { fst = 1; snd = "a"; } { fst = 2; snd = "b"; } ]
@@ -271,6 +370,8 @@ rec {
/* Reverse the order of the elements of a list.
Type: reverseList :: [a] -> [a]
Example:
reverseList [ "b" "o" "j" ]
@@ -284,8 +385,7 @@ rec {
`before a b == true` means that `b` depends on `a` (there's an
edge from `b` to `a`).
Examples:
Example:
listDfs true hasPrefix [ "/home/user" "other" "/" "/home" ]
== { minimal = "/"; # minimal element
visited = [ "/home/user" ]; # seen elements (in reverse order)
@@ -299,7 +399,6 @@ rec {
rest = [ "/home" "other" ]; # everything else
*/
listDfs = stopOnCycles: before: list:
let
dfs' = us: visited: rest:
@@ -324,7 +423,7 @@ rec {
`before a b == true` means that `b` should be after `a`
in the result.
Examples:
Example:
toposort hasPrefix [ "/home/user" "other" "/" "/home" ]
== { result = [ "/" "/home" "/home/user" "other" ]; }
@@ -339,7 +438,6 @@ rec {
toposort (a: b: a < b) [ 3 2 1 ] == { result = [ 1 2 3 ]; }
*/
toposort = before: list:
let
dfsthis = listDfs true before list;
@@ -409,28 +507,59 @@ rec {
then compareLists cmp (tail a) (tail b)
else rel;
/* Sort list using "Natural sorting".
Numeric portions of strings are sorted in numeric order.
Example:
naturalSort ["disk11" "disk8" "disk100" "disk9"]
=> ["disk8" "disk9" "disk11" "disk100"]
naturalSort ["10.46.133.149" "10.5.16.62" "10.54.16.25"]
=> ["10.5.16.62" "10.46.133.149" "10.54.16.25"]
naturalSort ["v0.2" "v0.15" "v0.0.9"]
=> [ "v0.0.9" "v0.2" "v0.15" ]
*/
naturalSort = lst:
let
vectorise = s: map (x: if isList x then toInt (head x) else x) (builtins.split "(0|[1-9][0-9]*)" s);
prepared = map (x: [ (vectorise x) x ]) lst; # remember vectorised version for O(n) regex splits
less = a: b: (compareLists compare (head a) (head b)) < 0;
in
map (x: elemAt x 1) (sort less prepared);
/* Return the first (at most) N elements of a list.
Type: take :: int -> [a] -> [a]
Example:
take 2 [ "a" "b" "c" "d" ]
=> [ "a" "b" ]
take 2 [ ]
=> [ ]
*/
take = count: sublist 0 count;
take =
# Number of elements to take
count: sublist 0 count;
/* Remove the first (at most) N elements of a list.
Type: drop :: int -> [a] -> [a]
Example:
drop 2 [ "a" "b" "c" "d" ]
=> [ "c" "d" ]
drop 2 [ ]
=> [ ]
*/
drop = count: list: sublist count (length list) list;
drop =
# Number of elements to drop
count:
# Input list
list: sublist count (length list) list;
/* Return a list consisting of at most count elements of list,
starting at index start.
/* Return a list consisting of at most `count` elements of `list`,
starting at index `start`.
Type: sublist :: int -> int -> [a] -> [a]
Example:
sublist 1 3 [ "a" "b" "c" "d" "e" ]
@@ -438,7 +567,13 @@ rec {
sublist 1 3 [ ]
=> [ ]
*/
sublist = start: count: list:
sublist =
# Index at which to start the sublist
start:
# Number of elements to take
count:
# Input list
list:
let len = length list; in
genList
(n: elemAt list (n + start))
@@ -448,23 +583,34 @@ rec {
/* Return the last element of a list.
This function throws an error if the list is empty.
Type: last :: [a] -> a
Example:
last [ 1 2 3 ]
=> 3
*/
last = list:
assert list != []; elemAt list (length list - 1);
assert lib.assertMsg (list != []) "lists.last: list must not be empty!";
elemAt list (length list - 1);
/* Return all elements but the last
/* Return all elements but the last.
This function throws an error if the list is empty.
Type: init :: [a] -> [a]
Example:
init [ 1 2 3 ]
=> [ 1 2 ]
*/
init = list: assert list != []; take (length list - 1) list;
init = list:
assert lib.assertMsg (list != []) "lists.init: list must not be empty!";
take (length list - 1) list;
/* return the image of the cross product of some lists by a function
/* Return the image of the cross product of some lists by a function.
Example:
crossLists (x:y: "${toString x}${toString y}") [[1 2] [3 4]]
@@ -475,8 +621,9 @@ rec {
/* Remove duplicate elements from the list. O(n^2) complexity.
Example:
Type: unique :: [a] -> [a]
Example:
unique [ 3 2 3 4 ]
=> [ 3 2 4 ]
*/

View File

@@ -1,801 +0,0 @@
{ ...}:
/* List of NixOS maintainers. The format is:
handle = "Real Name <address@example.org>";
where <handle> is preferred to be your GitHub username (so it's easy
to ping a package @<handle>), and <Real Name> is your real name, not
a pseudonym. Please keep the list alphabetically sorted. */
{
a1russell = "Adam Russell <adamlr6+pub@gmail.com>";
aaronschif = "Aaron Schif <aaronschif@gmail.com>";
abaldeau = "Andreas Baldeau <andreas@baldeau.net>";
abbradar = "Nikolay Amiantov <ab@fmap.me>";
abigailbuccaneer = "Abigail Bunyan <abigailbuccaneer@gmail.com>";
aboseley = "Adam Boseley <adam.boseley@gmail.com>";
abuibrahim = "Ruslan Babayev <ruslan@babayev.com>";
acowley = "Anthony Cowley <acowley@gmail.com>";
adelbertc = "Adelbert Chang <adelbertc@gmail.com>";
adev = "Adrien Devresse <adev@adev.name>";
adisbladis = "Adam Hose <adis@blad.is>";
Adjective-Object = "Maxwell Huang-Hobbs <mhuan13@gmail.com>";
adnelson = "Allen Nelson <ithinkican@gmail.com>";
adolfogc = "Adolfo E. García Castro <adolfo.garcia.cr@gmail.com>";
aespinosa = "Allan Espinosa <allan.espinosa@outlook.com>";
aflatter = "Alexander Flatter <flatter@fastmail.fm>";
afldcr = "James Alexander Feldman-Crough <alex@fldcr.com>";
aforemny = "Alexander Foremny <alexanderforemny@googlemail.com>";
afranchuk = "Alex Franchuk <alex.franchuk@gmail.com>";
aherrmann = "Andreas Herrmann <andreash87@gmx.ch>";
ahmedtd = "Taahir Ahmed <ahmed.taahir@gmail.com>";
aij = "Ivan Jager <aij+git@mrph.org>";
ajgrf = "Alex Griffin <a@ajgrf.com>";
ak = "Alexander Kjeldaas <ak@formalprivacy.com>";
akaWolf = "Artjom Vejsel <akawolf0@gmail.com>";
akc = "Anders Claesson <akc@akc.is>";
alexvorobiev = "Alex Vorobiev <alexander.vorobiev@gmail.com";
algorith = "Dries Van Daele <dries_van_daele@telenet.be>";
alibabzo = "Alistair Bill <alistair.bill@gmail.com>";
all = "Nix Committers <nix-commits@lists.science.uu.nl>";
alunduil = "Alex Brandt <alunduil@alunduil.com>";
ambrop72 = "Ambroz Bizjak <ambrop7@gmail.com>";
amiddelk = "Arie Middelkoop <amiddelk@gmail.com>";
amiloradovsky = "Andrew Miloradovsky <miloradovsky@gmail.com>";
amorsillo = "Andrew Morsillo <andrew.morsillo@gmail.com>";
AndersonTorres = "Anderson Torres <torres.anderson.85@gmail.com>";
anderspapitto = "Anders Papitto <anderspapitto@gmail.com>";
andir = "Andreas Rammhold <andreas@rammhold.de>";
andres = "Andres Loeh <ksnixos@andres-loeh.de>";
andrestylianos = "Andre S. Ramos <andre.stylianos@gmail.com>";
andrew-d = "Andrew Dunham <andrew@du.nham.ca>";
andrewrk = "Andrew Kelley <superjoe30@gmail.com>";
andsild = "Anders Sildnes <andsild@gmail.com>";
aneeshusa = "Aneesh Agrawal <aneeshusa@gmail.com>";
ankhers = "Justin Wood <justin.k.wood@gmail.com>";
antono = "Antono Vasiljev <self@antono.info>";
antonxy = "Anton Schirg <anton.schirg@posteo.de>";
apeschar = "Albert Peschar <albert@peschar.net>";
apeyroux = "Alexandre Peyroux <alex@px.io>";
arcadio = "Arcadio Rubio García <arc@well.ox.ac.uk>";
ardumont = "Antoine R. Dumont <eniotna.t@gmail.com>";
aristid = "Aristid Breitkreuz <aristidb@gmail.com>";
arobyn = "Alexei Robyn <shados@shados.net>";
artuuge = "Artur E. Ruuge <artuuge@gmail.com>";
ashalkhakov = "Artyom Shalkhakov <artyom.shalkhakov@gmail.com>";
ashgillman = "Ashley Gillman <gillmanash@gmail.com>";
aske = "Kirill Boltaev <aske@fmap.me>";
asppsa = "Alastair Pharo <asppsa@gmail.com>";
astsmtl = "Alexander Tsamutali <astsmtl@yandex.ru>";
asymmetric = "Lorenzo Manacorda <lorenzo@mailbox.org>";
aszlig = "aszlig <aszlig@nix.build>";
auntie = "Jonathan Glines <auntieNeo@gmail.com>";
avnik = "Alexander V. Nikolaev <avn@avnik.info>";
aycanirican = "Aycan iRiCAN <iricanaycan@gmail.com>";
bachp = "Pascal Bach <pascal.bach@nextrem.ch>";
backuitist = "Bruno Bieth";
badi = "Badi' Abdul-Wahid <abdulwahidc@gmail.com>";
balajisivaraman = "Balaji Sivaraman <sivaraman.balaji@gmail.com>";
barrucadu = "Michael Walker <mike@barrucadu.co.uk>";
basvandijk = "Bas van Dijk <v.dijk.bas@gmail.com>";
Baughn = "Svein Ove Aas <sveina@gmail.com>";
bcarrell = "Brandon Carrell <brandoncarrell@gmail.com>";
bcdarwin = "Ben Darwin <bcdarwin@gmail.com>";
bdimcheff = "Brandon Dimcheff <brandon@dimcheff.com>";
bendlas = "Herwig Hochleitner <herwig@bendlas.net>";
benley = "Benjamin Staffin <benley@gmail.com>";
bennofs = "Benno Fünfstück <benno.fuenfstueck@gmail.com>";
benwbooth = "Ben Booth <benwbooth@gmail.com>";
berce = "Bert Moens <bert.moens@gmail.com>";
berdario = "Dario Bertini <berdario@gmail.com>";
bergey = "Daniel Bergey <bergey@teallabs.org>";
bhipple = "Benjamin Hipple <bhipple@protonmail.com>";
binarin = "Alexey Lebedeff <binarin@binarin.ru>";
bjg = "Brian Gough <bjg@gnu.org>";
bjornfor = "Bjørn Forsman <bjorn.forsman@gmail.com>";
bluescreen303 = "Mathijs Kwik <mathijs@bluescreen303.nl>";
bobakker = "Bo Bakker <bobakk3r@gmail.com>";
bobvanderlinden = "Bob van der Linden <bobvanderlinden@gmail.com>";
bodil = "Bodil Stokke <nix@bodil.org>";
boothead = "Ben Ford <ben@perurbis.com>";
bosu = "Boris Sukholitko <boriss@gmail.com>";
bradediger = "Brad Ediger <brad@bradediger.com>";
bramd = "Bram Duvigneau <bram@bramd.nl>";
bstrik = "Berno Strik <dutchman55@gmx.com>";
bugworm = "Roman Gerasimenko <bugworm@zoho.com>";
bzizou = "Bruno Bzeznik <Bruno@bzizou.net>";
c0bw3b = "Renaud <c0bw3b@gmail.com>";
c0dehero = "CodeHero <codehero@nerdpol.ch>";
calbrecht = "Christian Albrecht <christian.albrecht@mayflower.de>";
calrama = "Moritz Maxeiner <moritz@ucworks.org>";
calvertvl = "Victor Calvert <calvertvl@gmail.com>";
campadrenalin = "Philip Horger <campadrenalin@gmail.com>";
canndrew = "Andrew Cann <shum@canndrew.org>";
carlsverre = "Carl Sverre <accounts@carlsverre.com>";
casey = "Casey Rodarmor <casey@rodarmor.net>";
catern = "Spencer Baugh <sbaugh@catern.com>";
caugner = "Claas Augner <nixos@caugner.de>";
cdepillabout = "Dennis Gosnell <cdep.illabout@gmail.com>";
cfouche = "Chaddaï Fouché <chaddai.fouche@gmail.com>";
changlinli = "Changlin Li <mail@changlinli.com>";
chaoflow = "Florian Friesdorf <flo@chaoflow.net>";
chattered = "Phil Scott <me@philscotted.com>";
ChengCat = "Yucheng Zhang <yu@cheng.cat>";
chiiruno = "Okina Matara <okinan@protonmail.com>";
choochootrain = "Hurshal Patel <hurshal@imap.cc>";
chpatrick = "Patrick Chilton <chpatrick@gmail.com>";
chreekat = "Bryan Richter <b@chreekat.net>";
chris-martin = "Chris Martin <ch.martin@gmail.com>";
chrisjefferson = "Christopher Jefferson <chris@bubblescope.net>";
chrisrosset = "Christopher Rosset <chris@rosset.org.uk>";
christopherpoole = "Christopher Mark Poole <mail@christopherpoole.net>";
ciil = "Simon Lackerbauer <simon@lackerbauer.com>";
ck3d = "Christian Kögler <ck3d@gmx.de>";
ckampka = "Christian Kampka <christian@kampka.net>";
ckauhaus = "Christian Kauhaus <kc@flyingcircus.io>";
cko = "Christine Koppelt <christine.koppelt@gmail.com>";
cleverca22 = "Michael Bishop <cleverca22@gmail.com>";
cmcdragonkai = "Roger Qiu <roger.qiu@matrix.ai>";
cmfwyp = "cmfwyp <cmfwyp@riseup.net>";
cobbal = "Andrew Cobb <andrew.cobb@gmail.com>";
coconnor = "Corey O'Connor <coreyoconnor@gmail.com>";
codsl = "codsl <codsl@riseup.net>";
codyopel = "Cody Opel <codyopel@gmail.com>";
colemickens = "Cole Mickens <cole.mickens@gmail.com>";
colescott = "Cole Scott <colescottsf@gmail.com>";
copumpkin = "Dan Peebles <pumpkingod@gmail.com>";
corngood = "David McFarland <corngood@gmail.com>";
coroa = "Jonas Hörsch <jonas@chaoflow.net>";
couchemar = "Andrey Pavlov <couchemar@yandex.ru>";
cpages = "Carles Pagès <page@ruiec.cat>";
cransom = "Casey Ransom <cransom@hubns.net>";
cryptix = "Henry Bubert <cryptix@riseup.net>";
CrystalGamma = "Jona Stubbe <nixos@crystalgamma.de>";
cstrahan = "Charles Strahan <charles@cstrahan.com>";
csingley = "Christopher Singley <csingley@gmail.com>";
cwoac = "Oliver Matthews <oliver@codersoffortune.net>";
DamienCassou = "Damien Cassou <damien@cassou.me>";
danbst = "Danylo Hlynskyi <abcz2.uprola@gmail.com>";
dancek = "Hannu Hartikainen <hannu.hartikainen@gmail.com>";
danharaj = "Dan Haraj <dan@obsidian.systems>";
danielfullmer = "Daniel Fullmer <danielrf12@gmail.com>";
dasuxullebt = "Christoph-Simon Senjak <christoph.senjak@googlemail.com>";
david50407 = "David Kuo <me@davy.tw>";
davidak = "David Kleuker <post@davidak.de>";
davidrusu = "David Rusu <davidrusu.me@gmail.com>";
davorb = "Davor Babic <davor@davor.se>";
dbohdan = "Danyil Bohdan <danyil.bohdan@gmail.com>";
dbrock = "Daniel Brockman <daniel@brockman.se>";
deepfire = "Kosyrev Serge <_deepfire@feelingofgreen.ru>";
demin-dmitriy = "Dmitriy Demin <demindf@gmail.com>";
derchris = "Christian Gerbrandt <derchris@me.com>";
DerGuteMoritz = "Moritz Heidkamp <moritz@twoticketsplease.de>";
dermetfan = "Robin Stumm <serverkorken@gmail.com>";
DerTim1 = "Tim Digel <tim.digel@active-group.de>";
desiderius = "Didier J. Devroye <didier@devroye.name>";
devhell = "devhell <\"^\"@regexmail.net>";
dezgeg = "Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>";
dfordivam = "Divam <dfordivam+nixpkgs@gmail.com>";
dfoxfranke = "Daniel Fox Franke <dfoxfranke@gmail.com>";
dgonyeo = "Derek Gonyeo <derek@gonyeo.com>";
dipinhora = "Dipin Hora <dipinhora+github@gmail.com>";
disassembler = "Samuel Leathers <disasm@gmail.com>";
dizfer = "David Izquierdo <david@izquierdofernandez.com>";
dmalikov = "Dmitry Malikov <malikov.d.y@gmail.com>";
DmitryTsygankov = "Dmitry Tsygankov <dmitry.tsygankov@gmail.com>";
dmjio = "David Johnson <djohnson.m@gmail.com>";
dochang = "Desmond O. Chang <dochang@gmail.com>";
domenkozar = "Domen Kozar <domen@dev.si>";
dotlambda = "Robert Schütz <rschuetz17@gmail.com>";
doublec = "Chris Double <chris.double@double.co.nz>";
dpaetzel = "David Pätzel <david.a.paetzel@gmail.com>";
dpflug = "David Pflug <david@pflug.email>";
drets = "Dmytro Rets <dmitryrets@gmail.com>";
drewkett = "Andrew Burkett <burkett.andrew@gmail.com>";
dsferruzza = "David Sferruzza <david.sferruzza@gmail.com>";
dtzWill = "Will Dietz <nix@wdtz.org>";
dupgit = "Olivier Delhomme <olivier.delhomme@free.fr>";
dywedir = "Vladyslav M. <dywedir@protonmail.ch>";
dzabraev = "Maksim Dzabraev <dzabraew@gmail.com>";
e-user = "Alexander Kahl <nixos@sodosopa.io>";
earldouglas = "James Earl Douglas <james@earldouglas.com>";
ebzzry = "Rommel Martinez <ebzzry@ebzzry.io>";
edanaher = "Evan Danaher <nixos@edanaher.net>";
edef = "edef <edef@edef.eu>";
ederoyd46 = "Matthew Brown <matt@ederoyd.co.uk>";
eduarrrd = "Eduard Bachmakov <e.bachmakov@gmail.com>";
edwtjo = "Edward Tjörnhammar <ed@cflags.cc>";
eelco = "Eelco Dolstra <eelco.dolstra@logicblox.com>";
ehegnes = "Eric Hegnes <eric.hegnes@gmail.com>";
ehmry = "Emery Hemingway <emery@vfemail.net>";
eikek = "Eike Kettner <eike.kettner@posteo.de>";
ekleog = "Leo Gaspard <leo@gaspard.io>";
elasticdog = "Aaron Bull Schaefer <aaron@elasticdog.com>";
eleanor = "Dejan Lukan <dejan@proteansec.com>";
elijahcaine = "Elijah Caine <elijahcainemv@gmail.com>";
elitak = "Eric Litak <elitak@gmail.com>";
ellis = "Ellis Whitehead <nixos@ellisw.net>";
enzime = "Michael Hoang <enzime@users.noreply.github.com>";
eperuffo = "Emanuele Peruffo <info@emanueleperuffo.com>";
epitrochoid = "Mabry Cervin <mpcervin@uncg.edu>";
eqyiel = "Ruben Maher <r@rkm.id.au>";
ericbmerritt = "Eric Merritt <eric@afiniate.com>";
ericsagnes = "Eric Sagnes <eric.sagnes@gmail.com>";
ericson2314 = "John Ericson <John.Ericson@Obsidian.Systems>";
erictapen = "Justin Humm <justin.humm@posteo.de>";
erikryb = "Erik Rybakken <erik.rybakken@math.ntnu.no>";
ertes = "Ertugrul Söylemez <esz@posteo.de>";
ethercrow = "Dmitry Ivanov <ethercrow@gmail.com>";
etu = "Elis Hirwing <elis@hirwing.se>";
exfalso = "Andras Slemmer <0slemi0@gmail.com>";
exi = "Reno Reckling <nixos@reckling.org>";
exlevan = "Alexey Levan <exlevan@gmail.com>";
expipiplus1 = "Joe Hermaszewski <nix@monoid.al>";
fadenb = "Tristan Helmich <tristan.helmich+nixos@gmail.com>";
falsifian = "James Cook <james.cook@utoronto.ca>";
fare = "Francois-Rene Rideau <fahree@gmail.com>";
f-breidenstein = "Felix Breidenstein <mail@felixbreidenstein.de>";
fgaz = "Francesco Gazzetta <francygazz@gmail.com>";
FireyFly = "Jonas Höglund <nix@firefly.nu>";
flokli = "Florian Klink <flokli@flokli.de>";
florianjacob = "Florian Jacob <projects+nixos@florianjacob.de>";
flosse = "Markus Kohlhase <mail@markus-kohlhase.de>";
fluffynukeit = "Daniel Austin <dan@fluffynukeit.com>";
fmthoma = "Franz Thoma <f.m.thoma@googlemail.com>";
forkk = "Andrew Okin <forkk@forkk.net>";
fornever = "Friedrich von Never <friedrich@fornever.me>";
fpletz = "Franz Pletz <fpletz@fnordicwalking.de>";
fps = "Florian Paul Schmidt <mista.tapas@gmx.net>";
fridh = "Frederik Rietdijk <fridh@fridh.nl>";
frlan = "Frank Lanitz <frank@frank.uvena.de>";
fro_ozen = "fro_ozen <fro_ozen@gmx.de>";
ftrvxmtrx = "Siarhei Zirukin <ftrvxmtrx@gmail.com>";
funfunctor = "Edward O'Callaghan <eocallaghan@alterapraxis.com>";
fuuzetsu = "Mateusz Kowalczyk <fuuzetsu@fuuzetsu.co.uk>";
fuzzy-id = "Thomas Bach <hacking+nixos@babibo.de>";
fxfactorial = "Edgar Aroutiounian <edgar.factorial@gmail.com>";
gabesoft = "Gabriel Adomnicai <gabesoft@gmail.com>";
gal_bolle = "Florent Becker <florent.becker@ens-lyon.org>";
garbas = "Rok Garbas <rok@garbas.si>";
garrison = "Jim Garrison <jim@garrison.cc>";
gavin = "Gavin Rogers <gavin@praxeology.co.uk>";
gebner = "Gabriel Ebner <gebner@gebner.org>";
geistesk = "Alvar Penning <post@0x21.biz>";
genesis = "Ronan Bignaux <ronan@aimao.org>";
georgewhewell = "George Whewell <georgerw@gmail.com>";
gilligan = "Tobias Pflug <tobias.pflug@gmail.com>";
giogadi = "Luis G. Torres <lgtorres42@gmail.com>";
gleber = "Gleb Peregud <gleber.p@gmail.com>";
glenns = "Glenn Searby <glenn.searby@gmail.com>";
globin = "Robin Gloster <mail@glob.in>";
gnidorah = "Alex Ivanov <yourbestfriend@opmbx.org>";
goibhniu = "Cillian de Róiste <cillian.deroiste@gmail.com>";
Gonzih = "Max Gonzih <gonzih@gmail.com>";
goodrone = "Andrew Trachenko <goodrone@gmail.com>";
gpyh = "Yacine Hmito <yacine.hmito@gmail.com>";
grahamc = "Graham Christensen <graham@grahamc.com>";
grburst = "Julius Elias <grburst@openmailbox.org>";
gridaphobe = "Eric Seidel <eric@seidel.io>";
guibert = "David Guibert <david.guibert@gmail.com>";
guibou = "Guillaume Bouchard <guillaum.bouchard@gmail.com>";
guillaumekoenig = "Guillaume Koenig <guillaume.edward.koenig@gmail.com>";
guyonvarch = "Joris Guyonvarch <joris@guyonvarch.me>";
hakuch = "Jesse Haber-Kucharsky <hakuch@gmail.com>";
hamhut1066 = "Hamish Hutchings <github@hamhut1066.com>";
havvy = "Ryan Scheel <ryan.havvy@gmail.com>";
hbunke = "Hendrik Bunke <bunke.hendrik@gmail.com>";
hce = "Hans-Christian Esperer <hc@hcesperer.org>";
hectorj = "Hector Jusforgues <hector.jusforgues+nixos@gmail.com>";
hedning = "Tor Hedin Brønner <torhedinbronner@gmail.com>";
heel = "Sergii Paryzhskyi <parizhskiy@gmail.com>";
henrytill = "Henry Till <henrytill@gmail.com>";
hhm = "hhm <heehooman+nixpkgs@gmail.com>";
hinton = "Tom Hinton <t@larkery.com>";
hodapp = "Chris Hodapp <hodapp87@gmail.com>";
hrdinka = "Christoph Hrdinka <c.nix@hrdinka.at>";
htr = "Hugo Tavares Reis <hugo@linux.com>";
hyphon81 = "Masato Yonekawa <zero812n@gmail.com>";
iand675 = "Ian Duncan <ian@iankduncan.com>";
ianwookim = "Ian-Woo Kim <ianwookim@gmail.com>";
iblech = "Ingo Blechschmidt <iblech@speicherleck.de>";
igsha = "Igor Sharonov <igor.sharonov@gmail.com>";
ikervagyok = "Balázs Lengyel <ikervagyok@gmail.com>";
ilya-kolpakov = "Ilya Kolpakov <ilya.kolpakov@gmail.com>";
infinisil = "Silvan Mosberger <infinisil@icloud.com>";
ironpinguin = "Michele Catalano <michele@catalano.de>";
ivan-tkatchev = "Ivan Tkatchev <tkatchev@gmail.com>";
ixmatus = "Parnell Springmeyer <parnell@digitalmentat.com>";
izorkin = "Yurii Izorkin <Izorkin@gmail.com>";
ixxie = "Matan Bendix Shenhav <matan@fluxcraft.net>";
j-keck = "Jürgen Keck <jhyphenkeck@gmail.com>";
jagajaga = "Arseniy Seroka <ars.seroka@gmail.com>";
jammerful = "jammerful <jammerful@gmail.com>";
jansol = "Jan Solanti <jan.solanti@paivola.fi>";
javaguirre = "Javier Aguirre <contacto@javaguirre.net>";
jb55 = "William Casarin <jb55@jb55.com>";
jbedo = "Justin Bedő <cu@cua0.org>";
jcumming = "Jack Cummings <jack@mudshark.org>";
jdagilliland = "Jason Gilliland <jdagilliland@gmail.com>";
jefdaj = "Jeffrey David Johnson <jefdaj@gmail.com>";
jensbin = "Jens Binkert <jensbin@protonmail.com>";
jerith666 = "Matt McHenry <github@matt.mchenryfamily.org>";
jfb = "James Felix Black <james@yamtime.com>";
jfrankenau = "Johannes Frankenau <johannes@frankenau.net>";
jgeerds = "Jascha Geerds <jascha@jgeerds.name>";
jgertm = "Tim Jaeger <jger.tm@gmail.com>";
jgillich = "Jakob Gillich <jakob@gillich.me>";
jhhuh = "Ji-Haeng Huh <jhhuh.note@gmail.com>";
jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>";
jlesquembre = "José Luis Lafuente <jl@lafuente.me>";
jluttine = "Jaakko Luttinen <jaakko.luttinen@iki.fi>";
Jo = "Joachim Ernst <0x4A6F@shackspace.de>";
joachifm = "Joachim Fasting <joachifm@fastmail.fm>";
joamaki = "Jussi Maki <joamaki@gmail.com>";
joelmo = "Joel Moberg <joel.moberg@gmail.com>";
joelteon = "Joel Taylor <me@joelt.io>";
johbo = "Johannes Bornhold <johannes@bornhold.name>";
johnazoidberg = "Daniel Schäfer <git@danielschaefer.me>";
johnmh = "John M. Harris, Jr. <johnmh@openblox.org>";
johnramsden = "John Ramsden <johnramsden@riseup.net>";
joko = "Ioannis Koutras <ioannis.koutras@gmail.com>";
jonafato = "Jon Banafato <jon@jonafato.com>";
joncojonathan = "Jonathan Haddock <joncojonathan@gmail.com>";
jpdoyle = "Joe Doyle <joethedoyle@gmail.com>";
jpierre03 = "Jean-Pierre PRUNARET <nix@prunetwork.fr>";
jpotier = "Martin Potier <jpo.contributes.to.nixos@marvid.fr>";
jraygauthier = "Raymond Gauthier <jraygauthier@gmail.com>";
jtojnar = "Jan Tojnar <jtojnar@gmail.com>";
juliendehos = "Julien Dehos <dehos@lisic.univ-littoral.fr>";
jwiegley = "John Wiegley <johnw@newartisans.com>";
jwilberding = "Jordan Wilberding <jwilberding@afiniate.com>";
jyp = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jzellner = "Jeff Zellner <jeffz@eml.cc>";
kaiha = "Kai Harries <kai.harries@gmail.com>";
kamilchm = "Kamil Chmielewski <kamil.chm@gmail.com>";
kampfschlaefer = "Arnold Krille <arnold@arnoldarts.de>";
karolchmist = "karolchmist <info+nix@chmist.com>";
kentjames = "James Kent <jameschristopherkent@gmail.com";
kevincox = "Kevin Cox <kevincox@kevincox.ca>";
khumba = "Bryan Gardiner <bog@khumba.net>";
KibaFox = "Kiba Fox <kiba.fox@foxypossibilities.com>";
kierdavis = "Kier Davis <kierdavis@gmail.com>";
kiloreux = "Kiloreux Emperex <kiloreux@gmail.com>";
kini = "Keshav Kini <keshav.kini@gmail.com>";
kkallio = "Karn Kallio <tierpluspluslists@gmail.com>";
knedlsepp = "Josef Kemetmüller <josef.kemetmueller@gmail.com>";
konimex = "Muhammad Herdiansyah <herdiansyah@netc.eu>";
koral = "Koral <koral@mailoo.org>";
kovirobi = "Kovacsics Robert <kovirobi@gmail.com>";
kquick = "Kevin Quick <quick@sparq.org>";
kragniz = "Louis Taylor <louis@kragniz.eu>";
kristoff3r = "Kristoffer Søholm <k.soeholm@gmail.com>";
ktosiek = "Tomasz Kontusz <tomasz.kontusz@gmail.com>";
kuznero = "Roman Kuznetsov <roman@kuznero.com>";
lasandell = "Luke Sandell <lasandell@gmail.com>";
lassulus = "Lassulus <lassulus@gmail.com>";
layus = "Guillaume Maudoux <layus.on@gmail.com>";
lblasc = "Luka Blaskovic <lblasc@znode.net>";
ldesgoui = "Lucas Desgouilles <ldesgoui@gmail.com>";
league = "Christopher League <league@contrapunctus.net>";
lebastr = "Alexander Lebedev <lebastr@gmail.com>";
ledif = "Adam Fidel <refuse@gmail.com>";
leemachin = "Lee Machin <me@mrl.ee>";
leenaars = "Michiel Leenaars <ml.software@leenaa.rs>";
leonardoce = "Leonardo Cecchi <leonardo.cecchi@gmail.com>";
lethalman = "Luca Bruno <lucabru@src.gnome.org>";
lewo = "Antoine Eiche <lewo@abesis.fr>";
lheckemann = "Linus Heckemann <git@sphalerite.org>";
lhvwb = "Nathaniel Baxter <nathaniel.baxter@gmail.com>";
lihop = "Leroy Hopson <nixos@leroy.geek.nz>";
limeytexan = "Michael Brantley <limeytexan@gmail.com>";
linquize = "Linquize <linquize@yahoo.com.hk>";
linus = "Linus Arver <linusarver@gmail.com>";
lluchs = "Lukas Werling <lukas.werling@gmail.com>";
lnl7 = "Daiderd Jordan <daiderd@gmail.com>";
lo1tuma = "Mathias Schreck <schreck.mathias@gmail.com>";
loskutov = "Ignat Loskutov <ignat.loskutov@gmail.com>";
lovek323 = "Jason O'Conal <jason@oconal.id.au>";
lowfatcomputing = "Andreas Wagner <andreas.wagner@lowfatcomputing.org>";
lsix = "Lancelot SIX <lsix@lancelotsix.com>";
lschuermann = "Leon Schuermann <leon.git@is.currently.online>";
ltavard = "Laure Tavard <laure.tavard@univ-grenoble-alpes.fr>";
lucas8 = "Luc Chabassier <luc.linux@mailoo.org>";
ludo = "Ludovic Courtès <ludo@gnu.org>";
lufia = "Kyohei Kadota <lufia@lufia.org>";
luispedro = "Luis Pedro Coelho <luis@luispedro.org>";
lukego = "Luke Gorrie <luke@snabb.co>";
luz = "Luz <luz666@daum.net>";
lw = "Sergey Sofeychuk <lw@fmap.me>";
lyt = "Tim Liou <wheatdoge@gmail.com>";
m3tti = "Mathaeus Sander <mathaeus.peter.sander@gmail.com>";
ma27 = "Maximilian Bosch <maximilian@mbosch.me>";
madjar = "Georges Dubus <georges.dubus@compiletoi.net>";
magnetophon = "Bart Brouns <bart@magnetophon.nl>";
mahe = "Matthias Herrmann <matthias.mh.herrmann@gmail.com>";
makefu = "Felix Richter <makefu@syntax-fehler.de>";
malyn = "Michael Alyn Miller <malyn@strangeGizmo.com>";
manveru = "Michael Fellinger <m.fellinger@gmail.com>";
marcweber = "Marc Weber <marco-oweber@gmx.de>";
markus1189 = "Markus Hauck <markus1189@gmail.com>";
markuskowa = "Markus Kowalewski <markus.kowalewski@gmail.com>";
markWot = "Markus Wotringer <markus@wotringer.de>";
martijnvermaat = "Martijn Vermaat <martijn@vermaat.name>";
martingms = "Martin Gammelsæter <martin@mg.am>";
matejc = "Matej Cotman <cotman.matej@gmail.com>";
mathnerd314 = "Mathnerd314 <mathnerd314.gph+hs@gmail.com>";
matthewbauer = "Matthew Bauer <mjbauer95@gmail.com>";
matthiasbeyer = "Matthias Beyer <mail@beyermatthias.de>";
maurer = "Matthew Maurer <matthew.r.maurer+nix@gmail.com>";
mbakke = "Marius Bakke <mbakke@fastmail.com>";
mbbx6spp = "Susan Potter <me@susanpotter.net>";
mbe = "Brandon Edens <brandonedens@gmail.com>";
mbode = "Maximilian Bode <maxbode@gmail.com>";
mboes = "Mathieu Boespflug <mboes@tweag.net>";
mbrgm = "Marius Bergmann <marius@yeai.de>";
mcmtroffaes = "Matthias C. M. Troffaes <matthias.troffaes@gmail.com>";
mdaiter = "Matthew S. Daiter <mdaiter8121@gmail.com>";
meditans = "Carlo Nucera <meditans@gmail.com>";
mehandes = "Matt Deming <niewskici@gmail.com>";
meisternu = "Matt Miemiec <meister@krutt.org>";
metabar = "Celine Mercier <softs@metabarcoding.org>";
mgdelacroix = "Miguel de la Cruz <mgdelacroix@gmail.com>";
mgttlinger = "Merlin Göttlinger <megoettlinger@gmail.com";
mguentner = "Maximilian Güntner <code@klandest.in>";
mic92 = "Jörg Thalheim <joerg@thalheim.io>";
michaelpj = "Michael Peyton Jones <michaelpj@gmail.com>";
michalrus = "Michal Rus <m@michalrus.com>";
michelk = "Michel Kuhlmann <michel@kuhlmanns.info>";
mickours = "Michael Mercier <mickours@gmail.com<";
midchildan = "midchildan <midchildan+nix@gmail.com>";
mikefaille = "Michaël Faille <michael@faille.io>";
mikoim = "Eshin Kunishima <ek@esh.ink>";
miltador = "Vasiliy Solovey <miltador@yandex.ua>";
mimadrid = "Miguel Madrid <mimadrid@ucm.es>";
mirdhyn = "Merlin Gaillard <mirdhyn@gmail.com>";
mirrexagon = "Andrew Abbott <mirrexagon@mirrexagon.com>";
mjanczyk = "Marcin Janczyk <m@dragonvr.pl>";
mjp = "Mike Playle <mike@mythik.co.uk>"; # github = "MikePlayle";
mkg = "Mark K Gardner <mkg@vt.edu>";
mlieberman85 = "Michael Lieberman <mlieberman85@gmail.com>";
mmahut = "Marek Mahut <marek.mahut@gmail.com>";
moaxcp = "John Mercier <moaxcp@gmail.com>";
modulistic = "Pablo Costa <modulistic@gmail.com>";
mog = "Matthew O'Gorman <mog-lists@rldn.net>";
montag451 = "montag451 <montag451@laposte.net>";
moosingin3space = "Nathan Moos <moosingin3space@gmail.com>";
moredread = "André-Patrick Bubel <code@apb.name>";
moretea = "Maarten Hoogendoorn <maarten@moretea.nl>";
mornfall = "Petr Ročkai <me@mornfall.net>";
MostAwesomeDude = "Corbin Simpson <cds@corbinsimpson.com>";
mounium = "Katona László <muoniurn@gmail.com>";
MP2E = "Cray Elliott <MP2E@archlinux.us>";
mpcsh = "Mark Cohen <m@mpc.sh>";
mpickering = "Matthew Pickering <matthewtpickering@gmail.com>";
mpscholten = "Marc Scholten <marc@mpscholten.de>";
mpsyco = "Francis St-Amour <fr.st-amour@gmail.com>";
mrVanDalo = "Ingolf Wanger <contact@ingolf-wagner.de>";
msackman = "Matthew Sackman <matthew@wellquite.org>";
mschristiansen = "Mikkel Christiansen <mikkel@rheosystems.com>";
mstarzyk = "Maciek Starzyk <mstarzyk@gmail.com>";
msteen = "Matthijs Steen <emailmatthijs@gmail.com>";
mt-caret = "Masayuki Takeda <mtakeda.enigsol@gmail.com>";
mtreskin = "Max Treskin <zerthurd@gmail.com>";
mudri = "James Wood <lamudri@gmail.com>";
muflax = "Stefan Dorn <mail@muflax.com>";
myrl = "Myrl Hex <myrl.0xf@gmail.com>";
nadrieril = "Nadrieril Feneanar <nadrieril@gmail.com>";
namore = "Roman Naumann <namor@hemio.de>";
nand0p = "Fernando Jose Pando <nando@hex7.com>";
Nate-Devv = "Nathan Moore <natedevv@gmail.com>";
nathan-gs = "Nathan Bijnens <nathan@nathan.gs>";
nckx = "Tobias Geerinckx-Rice <github@tobias.gr>";
ndowens = "Nathan Owens <ndowens04@gmail.com>";
neeasade = "Nathan Isom <nathanisom27@gmail.com>";
nequissimus = "Tim Steinbach <tim@nequissimus.com>";
nfjinjing = "Jinjing Wang <nfjinjing@gmail.com>";
nh2 = "Niklas Hambüchen <mail@nh2.me>";
nhooyr = "Anmol Sethi <anmol@aubble.com>";
nickhu = "Nick Hu <me@nickhu.co.uk>";
nicknovitski = "Nick Novitski <nixpkgs@nicknovitski.com>";
nico202 = "Nicolò Balzarotti <anothersms@gmail.com>";
NikolaMandic = "Ratko Mladic <nikola@mandic.email>";
ninjatrappeur = "Félix Baylac-Jacqué <felix@alternativebit.fr>";
nipav = "Niko Pavlinek <niko.pavlinek@gmail.com>";
nixy = "Andrew R. M. <nixy@nixy.moe>";
nmattia = "Nicolas Mattia <nicolas@nmattia.com>";
nocoolnametom = "Tom Doggett <nocoolnametom@gmail.com>";
notthemessiah = "Brian Cohen <brian.cohen.88@gmail.com>";
np = "Nicolas Pouillard <np.nix@nicolaspouillard.fr>";
nslqqq = "Nikita Mikhailov <nslqqq@gmail.com>";
nthorne = "Niklas Thörne <notrupertthorne@gmail.com>";
nyarly = "Judson Lester <nyarly@gmail.com>";
obadz = "obadz <obadz-nixos@obadz.com>";
ocharles = "Oliver Charles <ollie@ocharles.org.uk>";
odi = "Oliver Dunkl <oliver.dunkl@gmail.com>";
offline = "Jaka Hudoklin <jakahudoklin@gmail.com>";
oida = "oida <oida@posteo.de>";
okasu = "Okasu <oka.sux@gmail.com>";
olcai = "Erik Timan <dev@timan.info>";
olejorgenb = "Ole Jørgen Brønner <olejorgenb@yahoo.no>";
olynch = "Owen Lynch <owen@olynch.me>";
orbekk = "KJ Ørbekk <kjetil.orbekk@gmail.com>";
orbitz = "Malcolm Matalka <mmatalka@gmail.com>";
orivej = "Orivej Desh <orivej@gmx.fr>";
osener = "Ozan Sener <ozan@ozansener.com>";
otwieracz = "Slawomir Gonet <slawek@otwiera.cz>";
oxij = "Jan Malakhovski <oxij@oxij.org>";
paholg = "Paho Lurie-Gregg <paho@paholg.com>";
pakhfn = "Fedor Pakhomov <pakhfn@gmail.com>";
panaeon = "Vitalii Voloshyn <vitalii.voloshyn@gmail.com";
paperdigits = "Mica Semrick <mica@silentumbrella.com>";
paraseba = "Sebastian Galkin <paraseba@gmail.com>";
pashev = "Igor Pashev <pashev.igor@gmail.com>";
patternspandemic = "Brad Christensen <patternspandemic@live.com>";
pawelpacana = "Paweł Pacana <pawel.pacana@gmail.com>";
pbogdan = "Piotr Bogdan <ppbogdan@gmail.com>";
pcarrier = "Pierre Carrier <pc@rrier.ca>";
periklis = "theopompos@gmail.com";
pesterhazy = "Paulus Esterhazy <pesterhazy@gmail.com>";
peterhoeg = "Peter Hoeg <peter@hoeg.com>";
peterromfeldhk = "Peter Romfeld <peter.romfeld.hk@gmail.com>";
peti = "Peter Simons <simons@cryp.to>";
philandstuff = "Philip Potter <philip.g.potter@gmail.com>";
phile314 = "Philipp Hausmann <nix@314.ch>";
Phlogistique = "Noé Rubinstein <noe.rubinstein@gmail.com>";
phreedom = "Evgeny Egorochkin <phreedom@yandex.ru>";
phunehehe = "Hoang Xuan Phu <phunehehe@gmail.com>";
pierrechevalier83 = "Pierre Chevalier <pierrechevalier83@gmail.com>";
pierrer = "Pierre Radermecker <pierrer@pi3r.be>";
pierron = "Nicolas B. Pierron <nixos@nbp.name>";
piotr = "Piotr Pietraszkiewicz <ppietrasa@gmail.com>";
pjbarnoy = "Perry Barnoy <pjbarnoy@gmail.com>";
pjones = "Peter Jones <pjones@devalot.com>";
pkmx = "Chih-Mao Chen <pkmx.tw@gmail.com>";
plcplc = "Philip Lykke Carlsen <plcplc@gmail.com>";
plumps = "Maksim Bronsky <maks.bronsky@web.de";
pmahoney = "Patrick Mahoney <pat@polycrystal.org>";
pmeunier = "Pierre-Étienne Meunier <pierre-etienne.meunier@inria.fr>";
pmiddend = "Philipp Middendorf <pmidden@secure.mailbox.org>";
pneumaticat = "Kevin Liu <kevin@potatofrom.space>";
polyrod = "Maurizio Di Pietro <dc1mdp@gmail.com>";
pradeepchhetri = "Pradeep Chhetri <pradeep.chhetri89@gmail.com>";
prikhi = "Pavan Rikhi <pavan.rikhi@gmail.com>";
primeos = "Michael Weiss <dev.primeos@gmail.com>";
Profpatsch = "Profpatsch <mail@profpatsch.de>";
proglodyte = "Proglodyte <proglodyte23@gmail.com>";
pshendry = "Paul Hendry <paul@pshendry.com>";
psibi = "Sibi <sibi@psibi.in>";
pstn = "Philipp Steinpaß <philipp@xndr.de>";
pSub = "Pascal Wittmann <mail@pascal-wittmann.de>";
puffnfresh = "Brian McKenna <brian@brianmckenna.org>";
pxc = "Patrick Callahan <patrick.callahan@latitudeengineering.com>";
qknight = "Joachim Schiele <js@lastlog.de>";
ragge = "Ragnar Dahlen <r.dahlen@gmail.com>";
ralith = "Benjamin Saunders <ben.e.saunders@gmail.com>";
ramkromberg = "Ram Kromberg <ramkromberg@mail.com>";
rardiol = "Ricardo Ardissone <ricardo.ardissone@gmail.com>";
rasendubi = "Alexey Shmalko <rasen.dubi@gmail.com>";
raskin = "Michael Raskin <7c6f434c@mail.ru>";
ravloony = "Tom Macdonald <ravloony@gmail.com>";
razvan = "Răzvan Flavius Panda <razvan.panda@gmail.com>";
rbasso = "Rafael Basso <rbasso@sharpgeeks.net>";
redbaron = "Maxim Ivanov <ivanov.maxim@gmail.com>";
redvers = "Redvers Davies <red@infect.me>";
refnil = "Martin Lavoie <broemartino@gmail.com>";
regnat = "Théophane Hufschmitt <regnat@regnat.ovh>";
relrod = "Ricky Elrod <ricky@elrod.me>";
renzo = "Renzo Carbonara <renzocarbonara@gmail.com>";
retrry = "Tadas Barzdžius <retrry@gmail.com>";
rht = "rht <rhtbot@protonmail.com>";
richardipsum = "Richard Ipsum <richardipsum@fastmail.co.uk>";
rick68 = "Wei-Ming Yang <rick68@gmail.com>";
rickynils = "Rickard Nilsson <rickynils@gmail.com>";
ris = "Robert Scott <code@humanleg.org.uk>";
rlupton20 = "Richard Lupton <richard.lupton@gmail.com>";
rnhmjoj = "Michele Guerini Rocco <micheleguerinirocco@me.com>";
rob = "Rob Vermaas <rob.vermaas@gmail.com>";
robberer = "Longrin Wischnewski <robberer@freakmail.de>";
robbinch = "Robbin C. <robbinch33@gmail.com>";
roberth = "Robert Hensing <nixpkgs@roberthensing.nl>";
robertodr = "Roberto Di Remigio <roberto.diremigio@gmail.com>";
robgssp = "Rob Glossop <robgssp@gmail.com>";
roblabla = "Robin Lambertz <robinlambertz+dev@gmail.com>";
roconnor = "Russell O'Connor <roconnor@theorem.ca>";
romildo = "José Romildo Malaquias <malaquias@gmail.com>";
rongcuid = "Rongcui Dong <rongcuid@outlook.com>";
rszibele = "Richard Szibele <richard@szibele.com>";
rtreffer = "Rene Treffer <treffer+nixos@measite.de>";
rushmorem = "Rushmore Mushambi <rushmore@webenchanter.com>";
rvl = "Rodney Lorrimar <dev+nix@rodney.id.au>";
rvlander = "Gaëtan André <rvlander@gaetanandre.eu>";
rvolosatovs = "Roman Volosatovs <rvolosatovs@riseup.net>";
ryanartecona = "Ryan Artecona <ryanartecona@gmail.com>";
ryansydnor = "Ryan Sydnor <ryan.t.sydnor@gmail.com>";
ryantm = "Ryan Mulligan <ryan@ryantm.com>";
ryantrinkle = "Ryan Trinkle <ryan.trinkle@gmail.com>";
rybern = "Ryan Bernstein <ryan.bernstein@columbia.edu>";
rycee = "Robert Helgesson <robert@rycee.net>";
ryneeverett = "Ryne Everett <ryneeverett@gmail.com>";
rzetterberg = "Richard Zetterberg <richard.zetterberg@gmail.com>";
s1lvester = "Markus Silvester <s1lvester@bockhacker.me>";
samdroid-apps = "Sam Parkinson <sam@sam.today>";
samueldr = "Samuel Dionne-Riel <samuel@dionne-riel.com>";
samuelrivas = "Samuel Rivas <samuelrivas@gmail.com>";
sander = "Sander van der Burg <s.vanderburg@tudelft.nl>";
sargon = "Daniel Ehlers <danielehlers@mindeye.net>";
sauyon = "Sauyon Lee <s@uyon.co>";
schmitthenner = "Fabian Schmitthenner <development@schmitthenner.eu>";
schneefux = "schneefux <schneefux+nixos_pkg@schneefux.xyz>";
schristo = "Scott Christopher <schristopher@konputa.com>";
scode = "Peter Schuller <peter.schuller@infidyne.com>";
scolobb = "Sergiu Ivanov <sivanov@colimite.fr>";
sdll = "Sasha Illarionov <sasha.delly@gmail.com>";
SeanZicari = "Sean Zicari <sean.zicari@gmail.com>";
sellout = "Greg Pfeil <greg@technomadic.org>";
sepi = "Raffael Mancini <raffael@mancini.lu>";
seppeljordan = "Sebastian Jordan <sebastian.jordan.mail@googlemail.com>";
sfrijters = "Stefan Frijters <sfrijters@gmail.com>";
shanemikel = "Shane Pearlman <shanemikel1@gmail.com>";
shawndellysse = "Shawn Dellysse <sdellysse@gmail.com>";
sheenobu = "Sheena Artrip <sheena.artrip@gmail.com>";
sheganinans = "Aistis Raulinaitis <sheganinans@gmail.com>";
shell = "Shell Turner <cam.turn@gmail.com>";
shlevy = "Shea Levy <shea@shealevy.com>";
siddharthist = "Langston Barrett <langston.barrett@gmail.com>";
sifmelcara = "Ming Chuan <ming@culpring.com>";
sigma = "Yann Hodique <yann.hodique@gmail.com>";
simonvandel = "Simon Vandel Sillesen <simon.vandel@gmail.com>";
sivteck = "Sivaram Balakrishnan <sivaram1992@gmail.com>";
sjagoe = "Simon Jagoe <simon@simonjagoe.com>";
sjmackenzie = "Stewart Mackenzie <setori88@gmail.com>";
sjourdois = "Stéphane kwisatz Jourdois <sjourdois@gmail.com>";
skeidel = "Sven Keidel <svenkeidel@gmail.com>";
skrzyp = "Jakub Skrzypnik <jot.skrzyp@gmail.com>";
sleexyz = "Sean Lee <freshdried@gmail.com>";
smironov = "Sergey Mironov <grrwlf@gmail.com>";
snyh = "Xia Bin <snyh@snyh.org>";
solson = "Scott Olson <scott@solson.me>";
sorpaas = "Wei Tang <hi@that.world>";
sorki = "Richard Marko <srk@48.io>";
spacefrogg = "Michael Raitza <spacefrogg-nixos@meterriblecrew.net>";
spencerjanssen = "Spencer Janssen <spencerjanssen@gmail.com>";
spinus = "Tomasz Czyż <tomasz.czyz@gmail.com>";
sprock = "Roger Mason <rmason@mun.ca>";
spwhitt = "Spencer Whitt <sw@swhitt.me>";
srhb = "Sarah Brofeldt <sbrofeldt@gmail.com>";
SShrike = "Severen Redwood <severen@shrike.me>";
stephenmw = "Stephen Weinberg <stephen@q5comm.com>";
sternenseemann = "Lukas Epple <post@lukasepple.de>";
stesie = "Stefan Siegl <stesie@brokenpipe.de>";
steveej = "Stefan Junker <mail@stefanjunker.de>";
StijnDW = "Stijn DW <stekke@airmail.cc>";
StillerHarpo = "Florian Engel <florianengel39@gmail.com>";
stumoss = "Stuart Moss <samoss@gmail.com>";
SuprDewd = "Bjarki Ágúst Guðmundsson <suprdewd@gmail.com>";
suvash = "Suvash Thapaliya <suvash+nixpkgs@gmail.com>";
svsdep = "Vasyl Solovei <svsdep@gmail.com>";
swarren83 = "Shawn Warren <shawn.w.warren@gmail.com>";
swflint = "Samuel W. Flint <swflint@flintfam.org>";
swistak35 = "Rafał Łasocha <me@swistak35.com>";
symphorien = "Guillaume Girol <symphorien_nixpkgs@xlumurb.eu>";
szczyp = "Szczyp <qb@szczyp.com>";
sztupi = "Attila Sztupak <attila.sztupak@gmail.com>";
tadfisher = "Tad Fisher <tadfisher@gmail.com>";
taeer = "Taeer Bar-Yam <taeer@necsi.edu>";
tailhook = "Paul Colomiets <paul@colomiets.name>";
taketwo = "Sergey Alexandrov <alexandrov88@gmail.com>";
takikawa = "Asumu Takikawa <asumu@igalia.com>";
taktoa = "Remy Goldschmidt <taktoa@gmail.com>";
taku0 = "Takuo Yonezawa <mxxouy6x3m_github@tatapa.org>";
tari = "Peter Marheine <peter@taricorp.net>";
tavyc = "Octavian Cerna <octavian.cerna@gmail.com>";
TealG = "Teal Gaure <~@Teal.Gr>";
teh = "Tom Hunger <tehunger@gmail.com>";
telotortium = "Robert Irelan <rirelan@gmail.com>";
teozkr = "Teo Klestrup Röijezon <teo@nullable.se>";
teto = "Matthieu Coudron <mcoudron@hotmail.com>";
tex = "Milan Svoboda <milan.svoboda@centrum.cz>";
thall = "Niclas Thall <niclas.thall@gmail.com>";
thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
thanegill = "Thane Gill <me@thanegill.com>";
the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>";
theuni = "Christian Theune <ct@flyingcircus.io>";
ThomasMader = "Thomas Mader <thomas.mader@gmail.com>";
thoughtpolice = "Austin Seipp <aseipp@pobox.com>";
thpham = "Thomas Pham <thomas.pham@ithings.ch>";
tilpner = "Till Höppner <till@hoeppner.ws>";
timbertson = "Tim Cuthbertson <tim@gfxmonk.net>";
timokau = "Timo Kaufmann <timokau@zoho.com>";
tiramiseb = "Sébastien Maccagnoni <sebastien@maccagnoni.eu>";
titanous = "Jonathan Rudenberg <jonathan@titanous.com>";
tnias = "Philipp Bartsch <phil@grmr.de>";
tohl = "Tomas Hlavaty <tom@logand.com>";
tokudan = "Daniel Frank <git@danielfrank.net>";
tomberek = "Thomas Bereknyei <tomberek@gmail.com>";
tomsmeets = "Tom Smeets <tom@tsmeets.nl>";
travisbhartwell = "Travis B. Hartwell <nafai@travishartwell.net>";
treemo = "Matthieu Chevrier <matthieu.chevrier@treemo.fr>";
trevorj = "Trevor Joynson <nix@trevor.joynson.io>";
trino = "Hubert Mühlhans <muehlhans.hubert@ekodia.de>";
troydm = "Dmitry Geurkov <d.geurkov@gmail.com>";
tstrobel = "Thomas Strobel <4ZKTUB6TEP74PYJOPWIR013S2AV29YUBW5F9ZH2F4D5UMJUJ6S@hash.domains>";
ttuegel = "Thomas Tuegel <ttuegel@mailbox.org>";
tv = "Tomislav Viljetić <tv@shackspace.de>";
tvestelind = "Tomas Vestelind <tomas.vestelind@fripost.org>";
tvorog = "Marsel Zaripov <marszaripov@gmail.com>";
tweber = "Thorsten Weber <tw+nixpkgs@360vier.de>";
twey = "James Twey Kay <twey@twey.co.uk>";
unode = "Renato Alves <alves.rjc@gmail.com>";
uralbash = "Svintsov Dmitry <root@uralbash.ru>";
utdemir = "Utku Demir <me@utdemir.com>";
#urkud = "Yury G. Kudryashov <urkud+nix@ya.ru>"; inactive since 2012
uwap = "uwap <me@uwap.name>";
va1entin = "Valentin Heidelberger <github@valentinsblog.com>";
vaibhavsagar = "Vaibhav Sagar <vaibhavsagar@gmail.com>";
valeriangalliat = "Valérian Galliat <val@codejam.info>";
vandenoever = "Jos van den Oever <jos@vandenoever.info>";
vanschelven = "Klaas van Schelven <klaas@vanschelven.com>";
vanzef = "Ivan Solyankin <vanzef@gmail.com>";
varunpatro = "Varun Patro <varun.kumar.patro@gmail.com>";
vbgl = "Vincent Laporte <Vincent.Laporte@gmail.com>";
vbmithr = "Vincent Bernardoff <vb@luminar.eu.org>";
vcunat = "Vladimír Čunát <vcunat@gmail.com>";
vdemeester = "Vincent Demeester <vincent@sbr.pm>";
velovix = "Tyler Compton <xaviosx@gmail.com>";
veprbl = "Dmitry Kalinkin <veprbl@gmail.com>";
vidbina = "David Asabina <vid@bina.me>";
vifino = "Adrian Pistol <vifino@tty.sh>";
vinymeuh = "VinyMeuh <vinymeuh@gmail.com>";
viric = "Lluís Batlle i Rossell <viric@viric.name>";
vizanto = "Danny Wilson <danny@prime.vc>";
vklquevs = "vklquevs <vklquevs@gmail.com>";
vlstill = "Vladimír Štill <xstill@fi.muni.cz>";
vmandela = "Venkateswara Rao Mandela <venkat.mandela@gmail.com>";
vmchale = "Vanessa McHale <tmchale@wisc.edu>";
volhovm = "Mikhail Volkhov <volhovm.cs@gmail.com>";
volth = "Jaroslavas Pocepko <jaroslavas@volth.com>";
vozz = "Oliver Hunt <oliver.huntuk@gmail.com>";
vrthra = "Rahul Gopinath <rahul@gopinath.org>";
vyp = "vyp <elisp.vim@gmail.com>";
wedens = "wedens <kirill.wedens@gmail.com>";
willibutz = "Willi Butz <willibutz@posteo.de>";
willtim = "Tim Philip Williams <tim.williams.public@gmail.com>";
winden = "Antonio Vargas Gonzalez <windenntw@gmail.com>";
wizeman = "Ricardo M. Correia <rcorreia@wizy.org>";
wjlroe = "William Roe <willroe@gmail.com>";
wkennington = "William A. Kennington III <william@wkennington.com>";
wmertens = "Wout Mertens <Wout.Mertens@gmail.com>";
woffs = "Frank Doepper <github@woffs.de>";
womfoo = "Kranium Gikos Mendoza <kranium@gikos.net>";
wscott = "Wayne Scott <wsc9tt@gmail.com>";
wyvie = "Elijah Rum <elijahrum@gmail.com>";
xaverdh = "Dominik Xaver Hörl <hoe.dom@gmx.de>";
xeji = "xeji <xeji@cat3.de>";
xnwdd = "Guillermo NWDD <nwdd+nixos@no.team>";
xurei = "Olivier Bourdoux <olivier.bourdoux@gmail.com>";
xvapx = "Marti Serra <marti.serra.coscollano@gmail.com>";
xwvvvvwx = "David Terry <davidterry@posteo.de>";
xzfc = "Albert Safin <xzfcpw@gmail.com>";
y0no = "Yoann Ono <y0no@y0no.fr>";
yarr = "Dmitry V. <savraz@gmail.com>";
yegortimoshenko = "Yegor Timoshenko <yegortimoshenko@gmail.com>";
yesbox = "Jesper Geertsen Jonsson <jesper.geertsen.jonsson@gmail.com>";
ylwghst = "Burim Augustin Berisa <ylwghst@onionmail.info>";
yochai = "Yochai <yochai@titat.info>";
yorickvp = "Yorick van Pelt <yorickvanpelt@gmail.com>";
yrashk = "Yurii Rashkovskii <yrashk@gmail.com>";
yuriaisaka = "Yuri Aisaka <yuri.aisaka+nix@gmail.com>";
yurrriq = "Eric Bailey <eric@ericb.me>";
z77z = "Marco Maggesi <maggesi@math.unifi.it>";
zagy = "Christian Zagrodnick <cz@flyingcircus.io>";
zalakain = "Unai Zalakain <contact@unaizalakain.info>";
zarelit = "David Costa <david@zarel.net>";
zauberpony = "Elmar Athmer <elmar@athmer.org>";
zef = "Zef Hemel <zef@zef.me>";
zimbatm = "zimbatm <zimbatm@zimbatm.com>";
Zimmi48 = "Théo Zimmermann <theo.zimmermann@univ-paris-diderot.fr>";
zohl = "Al Zohali <zohl@fmap.me>";
zoomulator = "Kim Simmons <zoomulator@gmail.com>";
zraexy = "David Mell <zraexy@gmail.com>";
zx2c4 = "Jason A. Donenfeld <Jason@zx2c4.com>";
zzamboni = "Diego Zamboni <diego@zzamboni.org>";
}

View File

@@ -41,16 +41,18 @@ rec {
let x = builtins.parseDrvName name; in "${x.name}-${suffix}-${x.version}");
/* Apply a function to each derivation and only to derivations in an attrset
/* Apply a function to each derivation in an attrset, leaving non-derivation values unchanged.
*/
mapDerivationAttrset = f: set: lib.mapAttrs (name: pkg: if lib.isDerivation pkg then (f pkg) else pkg) set;
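/* A rough usage sketch (the attribute names and `someDrv` are hypothetical):
     mapDerivationAttrset lowPrio { pkg = someDrv; enableFoo = true; }
   applies `lowPrio` to `pkg` only; the non-derivation `enableFoo` is passed
   through unchanged.
*/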
/* Set the nix-env priority of the package.
*/
setPrio = priority: addMetaAttrs { inherit priority; };
/* Decrease the nix-env priority of the package, i.e., other
versions/variants of the package will be preferred.
*/
lowPrio = drv: addMetaAttrs { priority = 10; } drv;
lowPrio = setPrio 10;
/* Apply lowPrio to an attrset with derivations
*/
@@ -60,11 +62,29 @@ rec {
/* Increase the nix-env priority of the package, i.e., this
version/variant of the package will be preferred.
*/
hiPrio = drv: addMetaAttrs { priority = -10; } drv;
hiPrio = setPrio (-10);
/* Apply hiPrio to an attrset with derivations
*/
hiPrioSet = set: mapDerivationAttrset hiPrio set;
/* Check to see if a platform is matched by the given `meta.platforms`
element.
A `meta.platforms` pattern is either
1. (legacy) a system string.
2. (modern) a pattern for the platform `parsed` field.
We can inject these into a pattern for the whole of a structured platform,
and then match that.
*/
platformMatch = platform: elem: let
pattern =
if builtins.isString elem
then { system = elem; }
else { parsed = elem; };
in lib.matchAttrs pattern platform;
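/* Rough illustration (the platform value is an assumption, e.g. something
   like the result of `lib.systems.elaborate "x86_64-linux"`):
     platformMatch platform "x86_64-linux"              # legacy system string
     platformMatch platform { kernel.name = "linux"; }  # pattern on `parsed`
   both evaluate to true for such a platform.
*/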
}

View File

@@ -1,2 +1,2 @@
# Expose the minimum required version for evaluating Nixpkgs
"1.11"
"2.0"

View File

@@ -59,7 +59,7 @@ rec {
};
};
closed = closeModules (modules ++ [ internalModule ]) ({ inherit config options; lib = import ./.; } // specialArgs);
closed = closeModules (modules ++ [ internalModule ]) ({ inherit config options lib; } // specialArgs);
options = mergeModules prefix (reverseList (filterModules (specialArgs.modulesPath or "") closed));
@@ -159,7 +159,7 @@ rec {
context = name: ''while evaluating the module argument `${name}' in "${key}":'';
extraArgs = builtins.listToAttrs (map (name: {
inherit name;
value = addErrorContext (context name)
value = builtins.addErrorContext (context name)
(args.${name} or config._module.args.${name});
}) requiredArgs);
@@ -192,29 +192,53 @@ rec {
(concatMap (m: map (config: { inherit (m) file; inherit config; }) (pushDownProperties m.config)) modules);
mergeModules' = prefix: options: configs:
listToAttrs (map (name: {
let
/* byName is like foldAttrs, but will look for attributes to merge in the
specified attribute name.
byName "foo" (module: value: ["module.hidden=${module.hidden},value=${value}"])
[
{
hidden="baz";
foo={qux="bar"; gla="flop";};
}
{
hidden="fli";
foo={qux="gne"; gli="flip";};
}
]
===>
{
gla = [ "module.hidden=baz,value=flop" ];
gli = [ "module.hidden=fli,value=flip" ];
qux = [ "module.hidden=baz,value=bar" "module.hidden=fli,value=gne" ];
}
*/
byName = attr: f: modules: foldl' (acc: module:
foldl' (inner: name:
inner // { ${name} = (acc.${name} or []) ++ (f module module.${attr}.${name}); }
) acc (attrNames module.${attr})
) {} modules;
# an attrset 'name' => list of submodules that declare name.
declsByName = byName "options"
(module: option: [{ inherit (module) file; options = option; }])
options;
# an attrset 'name' => list of submodules that define name.
defnsByName = byName "config" (module: value:
map (config: { inherit (module) file; inherit config; }) (pushDownProperties value)
) configs;
# extract the definitions for each loc
defnsByName' = byName "config"
(module: value: [{ inherit (module) file; inherit value; }])
configs;
in
(flip mapAttrs declsByName (name: decls:
# We're descending into attribute name.
inherit name;
value =
let
loc = prefix ++ [name];
# Get all submodules that declare name.
decls = concatMap (m:
if m.options ? ${name}
then [ { inherit (m) file; options = m.options.${name}; } ]
else []
) options;
# Get all submodules that define name.
defns = concatMap (m:
if m.config ? ${name}
then map (config: { inherit (m) file; inherit config; })
(pushDownProperties m.config.${name})
else []
) configs;
defns = defnsByName.${name} or [];
defns' = defnsByName'.${name} or [];
nrOptions = count (m: isOption m.options) decls;
# Extract the definitions for this loc
defns' = map (m: { inherit (m) file; value = m.config.${name}; })
(filter (m: m.config ? ${name}) configs);
in
if nrOptions == length decls then
let opt = fixupOptionType loc (mergeOptionDecls loc decls);
@@ -226,8 +250,8 @@ rec {
in
throw "The option `${showOption loc}' in `${firstOption.file}' is a prefix of options in `${firstNonOption.file}'."
else
mergeModules' loc decls defns;
}) (concatMap (m: attrNames m.options) options))
mergeModules' loc decls defns
))
// { _definedNames = map (m: { inherit (m) file; names = attrNames m.config; }) configs; };
/* Merge multiple option declarations into a single declaration. In
@@ -309,7 +333,8 @@ rec {
res.mergedValue;
in opt //
{ value = addErrorContext "while evaluating the option `${showOption loc}':" value;
{ value = builtins.addErrorContext "while evaluating the option `${showOption loc}':" value;
inherit (res.defsFinal') highestPrio;
definitions = map (def: def.value) res.defsFinal;
files = map (def: def.file) res.defsFinal;
inherit (res) isDefined;
@@ -317,7 +342,7 @@ rec {
# Merge definitions of a value of a given type.
mergeDefinitions = loc: type: defs: rec {
defsFinal =
defsFinal' =
let
# Process mkMerge and mkIf properties.
defs' = concatMap (m:
@@ -325,15 +350,20 @@ rec {
) defs;
# Process mkOverride properties.
defs'' = filterOverrides defs';
defs'' = filterOverrides' defs';
# Sort mkOrder properties.
defs''' =
# Avoid sorting if we don't have to.
if any (def: def.value._type or "" == "order") defs''
then sortProperties defs''
else defs'';
in defs''';
if any (def: def.value._type or "" == "order") defs''.values
then sortProperties defs''.values
else defs''.values;
in {
values = defs''';
inherit (defs'') highestPrio;
};
defsFinal = defsFinal'.values;
# Type-check the remaining definitions, and merge them.
mergedValue = foldl' (res: def:
@@ -416,13 +446,17 @@ rec {
Note that "z" has the default priority 100.
*/
filterOverrides = defs:
filterOverrides = defs: (filterOverrides' defs).values;
filterOverrides' = defs:
let
defaultPrio = 100;
getPrio = def: if def.value._type or "" == "override" then def.value.priority else defaultPrio;
getPrio = def: if def.value._type or "" == "override" then def.value.priority else defaultPriority;
highestPrio = foldl' (prio: def: min (getPrio def) prio) 9999 defs;
strip = def: if def.value._type or "" == "override" then def // { value = def.value.content; } else def;
in concatMap (def: if getPrio def == highestPrio then [(strip def)] else []) defs;
in {
values = concatMap (def: if getPrio def == highestPrio then [(strip def)] else []) defs;
inherit highestPrio;
};
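/* Illustrative sketch with made-up definitions:
     filterOverrides' [ { value = mkOverride 10 "a"; } { value = "z"; } ]
     => { values = [ { value = "a"; } ]; highestPrio = 10; }
   Only the highest-priority definitions survive, with the override wrapper
   stripped, and the winning priority is reported alongside them.
*/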
/* Sort a list of properties. The sort priority of a property is
1000 by default, but can be overridden by wrapping the property
@@ -482,7 +516,7 @@ rec {
inherit priority content;
};
mkOptionDefault = mkOverride 1001; # priority of option defaults
mkOptionDefault = mkOverride 1500; # priority of option defaults
mkDefault = mkOverride 1000; # used in config sections of non-user modules to set a default
mkForce = mkOverride 50;
mkVMOverride = mkOverride 10; # used by nixos-rebuild build-vm
@@ -499,6 +533,8 @@ rec {
mkBefore = mkOrder 500;
mkAfter = mkOrder 1500;
# The default priority for things that don't have a priority specified.
defaultPriority = 100;
# Convenient property used to transfer all definitions and their
# properties from one option to another. This property is useful for
@@ -521,10 +557,20 @@ rec {
#
mkAliasDefinitions = mkAliasAndWrapDefinitions id;
mkAliasAndWrapDefinitions = wrap: option:
mkMerge
(optional (isOption option && option.isDefined)
(wrap (mkMerge option.definitions)));
mkAliasIfDef option (wrap (mkMerge option.definitions));
# Similar to mkAliasAndWrapDefinitions but copies over the priority from the
# option as well.
#
# If a priority is not set, it assumes a priority of defaultPriority.
mkAliasAndWrapDefsWithPriority = wrap: option:
let
prio = option.highestPrio or defaultPriority;
defsWithPrio = map (mkOverride prio) option.definitions;
in mkAliasIfDef option (wrap (mkMerge defsWithPrio));
mkAliasIfDef = option:
mkIf (isOption option && option.isDefined);
/* Compatibility. */
fixMergeModules = modules: args: evalModules { inherit modules args; check = false; };
@ -657,22 +703,37 @@ rec {
use = id;
};
doRename = { from, to, visible, warn, use }:
/* Like mkAliasOptionModule, but copy over the priority of the option as well. */
mkAliasOptionModuleWithPriority = from: to: doRename {
inherit from to;
visible = true;
warn = false;
use = id;
withPriority = true;
};
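# Usage sketch (option paths are hypothetical): rename services.foo.enable to
# services.bar.enable while carrying over any mkForce/mkDefault priority set
# on the old name:
#   imports = [
#     (mkAliasOptionModuleWithPriority
#       [ "services" "foo" "enable" ] [ "services" "bar" "enable" ])
#   ];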
doRename = { from, to, visible, warn, use, withPriority ? false }:
{ config, options, ... }:
let
fromOpt = getAttrFromPath from options;
toOf = attrByPath to
(abort "Renaming error: option `${showOption to}' does not exists.");
(abort "Renaming error: option `${showOption to}' does not exist.");
in
{ config, options, ... }:
{ options = setAttrByPath from (mkOption {
description = "Alias of <option>${showOption to}</option>.";
apply = x: use (toOf config);
});
config = {
warnings =
let opt = getAttrFromPath from options; in
optional (warn && opt.isDefined)
"The option `${showOption from}' defined in ${showFiles opt.files} has been renamed to `${showOption to}'.";
} // setAttrByPath to (mkAliasDefinitions (getAttrFromPath from options));
};
{
options = setAttrByPath from (mkOption {
inherit visible;
description = "Alias of <option>${showOption to}</option>.";
apply = x: use (toOf config);
});
config = mkMerge [
{
warnings = optional (warn && fromOpt.isDefined)
"The option `${showOption from}' defined in ${showFiles fromOpt.files} has been renamed to `${showOption to}'.";
}
(if withPriority
then mkAliasAndWrapDefsWithPriority (setAttrByPath to) fromOpt
else mkAliasAndWrapDefinitions (setAttrByPath to) fromOpt)
];
};
}

lib/options.nix
@ -8,33 +8,72 @@ with lib.strings;
rec {
/* Returns true when the given argument is an option
Type: isOption :: a -> bool
Example:
isOption 1 // => false
isOption (mkOption {}) // => true
*/
isOption = lib.isType "option";
/* Creates an Option attribute set. mkOption accepts an attribute set with the following keys:
All keys default to `null` when not given.
Example:
mkOption { } // => { _type = "option"; }
mkOption { defaultText = "foo"; } // => { _type = "option"; defaultText = "foo"; }
*/
mkOption =
{ default ? null # Default value used when no definition is given in the configuration.
, defaultText ? null # Textual representation of the default, for in the manual.
, example ? null # Example value used in the manual.
, description ? null # String describing the option.
, relatedPackages ? null # Related packages used in the manual (see `genRelatedPackages` in ../nixos/doc/manual/default.nix).
, type ? null # Option type, providing type-checking and value merging.
, apply ? null # Function that converts the option value to something else.
, internal ? null # Whether the option is for NixOS developers only.
, visible ? null # Whether the option shows up in the manual.
, readOnly ? null # Whether the option can be set only once
, options ? null # Obsolete, used by types.optionSet.
{
# Default value used when no definition is given in the configuration.
default ? null,
# Textual representation of the default, for the manual.
defaultText ? null,
# Example value used in the manual.
example ? null,
# String describing the option.
description ? null,
# Related packages used in the manual (see `genRelatedPackages` in ../nixos/doc/manual/default.nix).
relatedPackages ? null,
# Option type, providing type-checking and value merging.
type ? null,
# Function that converts the option value to something else.
apply ? null,
# Whether the option is for NixOS developers only.
internal ? null,
# Whether the option shows up in the manual.
visible ? null,
# Whether the option can be set only once
readOnly ? null,
# Obsolete, used by types.optionSet.
options ? null
} @ attrs:
attrs // { _type = "option"; };
mkEnableOption = name: mkOption {
/* Creates an Option attribute set for a boolean value option, i.e. an
option that can be toggled on or off:
Example:
mkEnableOption "foo"
=> { _type = "option"; default = false; description = "Whether to enable foo."; example = true; type = { ... }; }
*/
mkEnableOption =
# Name for the created option
name: mkOption {
default = false;
example = true;
description = "Whether to enable ${name}.";
type = lib.types.bool;
};
# This option accept anything, but it does not produce any result. This
# is useful for sharing a module across different module sets without
# having to implement similar features as long as the value of the options
# are not expected.
/* This option accepts anything, but it does not produce any result.
This is useful for sharing a module across different module sets
without having to implement similar features as long as the
values of the options are not accessed. */
mkSinkUndeclaredOptions = attrs: mkOption ({
internal = true;
visible = false;
@ -74,7 +113,24 @@ rec {
else
val) (head defs).value defs;
/* Extracts values of all "value" keys of the given list.
Type: getValues :: [ { value :: a } ] -> [a]
Example:
getValues [ { value = 1; } { value = 2; } ] // => [ 1 2 ]
getValues [ ] // => [ ]
*/
getValues = map (x: x.value);
/* Extracts values of all "file" keys of the given list
Type: getFiles :: [ { file :: a } ] -> [a]
Example:
getFiles [ { file = "file1"; } { file = "file2"; } ] // => [ "file1" "file2" ]
getFiles [ ] // => [ ]
*/
getFiles = map (x: x.file);
# Generate documentation template from the list of option declarations like
@ -107,10 +163,13 @@ rec {
/* This function recursively removes all derivation attributes from
`x' except for the `name' attribute. This is to make the
generation of `options.xml' much more efficient: the XML
representation of derivations is very large (on the order of
megabytes) and is not actually used by the manual generator. */
`x` except for the `name` attribute.
This is to make the generation of `options.xml` much more
efficient: the XML representation of derivations is very large
(on the order of megabytes) and is not actually used by the
manual generator.
*/
scrubOptionValue = x:
if isDerivation x then
{ type = "derivation"; drvPath = x.name; outPath = x.name; name = x.name; }
@ -119,15 +178,29 @@ rec {
else x;
/* For use in the example option attribute. It causes the given
text to be included verbatim in documentation. This is necessary
for example values that are not simple values, e.g.,
functions. */
/* For use in the `example` option attribute. It causes the given
text to be included verbatim in documentation. This is necessary
for example values that are not simple values, e.g., functions.
*/
literalExample = text: { _type = "literalExample"; inherit text; };
# Helper functions.
/* Helper functions. */
showOption = concatStringsSep ".";
/* Convert an option, described as a list of the option parts, into a
safe, human-readable version.
Example:
(showOption ["foo" "bar" "baz"]) == "foo.bar.baz"
(showOption ["foo" "bar.baz" "tux"]) == "foo.\"bar.baz\".tux"
*/
showOption = parts: let
escapeOptionPart = part:
let
escaped = lib.strings.escapeNixString part;
in if escaped == "\"${part}\""
then part
else escaped;
in (concatStringsSep ".") (map escapeOptionPart parts);
showFiles = files: concatStringsSep " and " (map (f: "`${f}'") files);
unknownModule = "<unknown-file>";

lib/sources.nix
@ -26,6 +26,10 @@ rec {
(type == "symlink" && lib.hasPrefix "result" baseName)
);
# Filters a source tree removing version control files and directories using cleanSourceWith
#
# Example:
# cleanSource ./.
cleanSource = src: cleanSourceWith { filter = cleanSourceFilter; inherit src; };
# Like `builtins.filterSource`, except it will compose with itself,
@ -69,7 +73,7 @@ rec {
# Get the commit id of a git repo
# Example: commitIdFromGitRepo <nixpkgs/.git>
commitIdFromGitRepo =
let readCommitFromFile = path: file:
let readCommitFromFile = file: path:
with builtins;
let fileName = toString path + "/" + file;
packedRefsName = toString path + "/packed-refs";
@ -81,7 +85,7 @@ rec {
matchRef = match "^ref: (.*)$" fileContent;
in if isNull matchRef
then fileContent
else readCommitFromFile path (lib.head matchRef)
else readCommitFromFile (lib.head matchRef) path
# Sometimes, the file isn't there at all and has been packed away in the
# packed-refs file, so we have to grep through it:
else if lib.pathExists packedRefsName
@ -92,7 +96,7 @@ rec {
then throw ("Could not find " + file + " in " + packedRefsName)
else lib.head matchRef
else throw ("Not a .git directory: " + path);
in lib.flip readCommitFromFile "HEAD";
in readCommitFromFile "HEAD";
pathHasContext = builtins.hasContext or (lib.hasPrefix builtins.storeDir);

lib/strings.nix
@ -12,6 +12,8 @@ rec {
/* Concatenate a list of strings.
Type: concatStrings :: [string] -> string
Example:
concatStrings ["foo" "bar"]
=> "foobar"
@ -20,15 +22,19 @@ rec {
/* Map a function over a list and concatenate the resulting strings.
Type: concatMapStrings :: (a -> string) -> [a] -> string
Example:
concatMapStrings (x: "a" + x) ["foo" "bar"]
=> "afooabar"
*/
concatMapStrings = f: list: concatStrings (map f list);
/* Like `concatMapStrings' except that the f functions also gets the
/* Like `concatMapStrings` except that the function f also gets the
position as a parameter.
Type: concatImapStrings :: (int -> a -> string) -> [a] -> string
Example:
concatImapStrings (pos: x: "${toString pos}-${x}") ["foo" "bar"]
=> "1-foo2-bar"
@ -37,17 +43,25 @@ rec {
/* Place an element between each element of a list
Type: intersperse :: a -> [a] -> [a]
Example:
intersperse "/" ["usr" "local" "bin"]
=> ["usr" "/" "local" "/" "bin"].
*/
intersperse = separator: list:
intersperse =
# Separator to add between elements
separator:
# Input list
list:
if list == [] || length list == 1
then list
else tail (lib.concatMap (x: [separator x]) list);
/* Concatenate a list of strings with a separator between each element
Type: concatStringsSep :: string -> [string] -> string
Example:
concatStringsSep "/" ["usr" "local" "bin"]
=> "usr/local/bin"
@ -55,43 +69,77 @@ rec {
concatStringsSep = builtins.concatStringsSep or (separator: list:
concatStrings (intersperse separator list));
/* First maps over the list and then concatenates it.
/* Maps a function over a list of strings and then concatenates the
result with the specified separator interspersed between
elements.
Type: concatMapStringsSep :: string -> (string -> string) -> [string] -> string
Example:
concatMapStringsSep "-" (x: toUpper x) ["foo" "bar" "baz"]
=> "FOO-BAR-BAZ"
*/
concatMapStringsSep = sep: f: list: concatStringsSep sep (map f list);
concatMapStringsSep =
# Separator to add between elements
sep:
# Function to map over the list
f:
# List of input strings
list: concatStringsSep sep (map f list);
/* First imaps over the list and then concatenates it.
/* Same as `concatMapStringsSep`, but the mapping function
additionally receives the position of its argument.
Type: concatMapStringsSep :: string -> (int -> string -> string) -> [string] -> string
Example:
concatImapStringsSep "-" (pos: x: toString (x / pos)) [ 6 6 6 ]
=> "6-3-2"
*/
concatImapStringsSep = sep: f: list: concatStringsSep sep (lib.imap1 f list);
concatImapStringsSep =
# Separator to add between elements
sep:
# Function that receives elements and their positions
f:
# List of input strings
list: concatStringsSep sep (lib.imap1 f list);
/* Construct a Unix-style search path consisting of each `subDir"
directory of the given list of packages.
/* Construct a Unix-style, colon-separated search path consisting of
the given `subDir` appended to each of the given paths.
Type: makeSearchPath :: string -> [string] -> string
Example:
makeSearchPath "bin" ["/root" "/usr" "/usr/local"]
=> "/root/bin:/usr/bin:/usr/local/bin"
makeSearchPath "bin" ["/"]
=> "//bin"
makeSearchPath "bin" [""]
=> "/bin"
*/
makeSearchPath = subDir: packages:
concatStringsSep ":" (map (path: path + "/" + subDir) packages);
makeSearchPath =
# Directory name to append
subDir:
# List of base paths
paths:
concatStringsSep ":" (map (path: path + "/" + subDir) (builtins.filter (x: x != null) paths));
/* Construct a Unix-style search path, using given package output.
If no output is found, fallback to `.out` and then to the default.
/* Construct a Unix-style search path by appending the given
`subDir` to the specified `output` of each of the packages. If no
output by the given name is found, fallback to `.out` and then to
the default.
Type: string -> string -> [package] -> string
Example:
makeSearchPathOutput "dev" "bin" [ pkgs.openssl pkgs.zlib ]
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-dev/bin:/nix/store/wwh7mhwh269sfjkm6k5665b5kgp7jrk2-zlib-1.2.8/bin"
*/
makeSearchPathOutput = output: subDir: pkgs: makeSearchPath subDir (map (lib.getOutput output) pkgs);
makeSearchPathOutput =
# Package output to use
output:
# Directory name to append
subDir:
# List of packages
pkgs: makeSearchPath subDir (map (lib.getOutput output) pkgs);
/* Construct a library search path (such as RPATH) containing the
libraries for a set of packages
@ -114,55 +162,80 @@ rec {
*/
makeBinPath = makeSearchPathOutput "bin" "bin";
/* Construct a perl search path (such as $PERL5LIB)
FIXME(zimbatm): this should be moved to perl-specific code
Example:
pkgs = import <nixpkgs> { }
makePerlPath [ pkgs.perlPackages.NetSMTP ]
=> "/nix/store/n0m1fk9c960d8wlrs62sncnadygqqc6y-perl-Net-SMTP-1.25/lib/perl5/site_perl"
*/
makePerlPath = makeSearchPathOutput "lib" "lib/perl5/site_perl";
/* Depending on the boolean `cond', return either the given string
or the empty string. Useful to concatenate against a bigger string.
Type: optionalString :: bool -> string -> string
Example:
optionalString true "some-string"
=> "some-string"
optionalString false "some-string"
=> ""
*/
optionalString = cond: string: if cond then string else "";
optionalString =
# Condition
cond:
# String to return if condition is true
string: if cond then string else "";
/* Determine whether a string has given prefix.
Type: hasPrefix :: string -> string -> bool
Example:
hasPrefix "foo" "foobar"
=> true
hasPrefix "foo" "barfoo"
=> false
*/
hasPrefix = pref: str:
substring 0 (stringLength pref) str == pref;
hasPrefix =
# Prefix to check for
pref:
# Input string
str: substring 0 (stringLength pref) str == pref;
/* Determine whether a string has given suffix.
Type: hasSuffix :: string -> string -> bool
Example:
hasSuffix "foo" "foobar"
=> false
hasSuffix "foo" "barfoo"
=> true
*/
hasSuffix = suffix: content:
hasSuffix =
# Suffix to check for
suffix:
# Input string
content:
let
lenContent = stringLength content;
lenSuffix = stringLength suffix;
in lenContent >= lenSuffix &&
substring (lenContent - lenSuffix) lenContent content == suffix;
/* Determine whether a string contains the given infix
Type: hasInfix :: string -> string -> bool
Example:
hasInfix "bc" "abcd"
=> true
hasInfix "ab" "abcd"
=> true
hasInfix "cd" "abcd"
=> true
hasInfix "foo" "abcd"
=> false
*/
hasInfix = infix: content:
let
drop = x: substring 1 (stringLength x) x;
in hasPrefix infix content
|| content != "" && hasInfix infix (drop content);
/* Convert a string to a list of characters (i.e. singleton strings).
This allows you to, e.g., map a function over each character. However,
note that this will likely be horribly inefficient; Nix is not a
@ -171,6 +244,8 @@ rec {
Also note that Nix treats strings as a list of bytes and thus doesn't
handle unicode.
Type: stringToCharacters :: string -> [string]
Example:
stringToCharacters ""
=> [ ]
@ -185,18 +260,25 @@ rec {
/* Manipulate a string character by character and replace them by
strings before concatenating the results.
Type: stringAsChars :: (string -> string) -> string -> string
Example:
stringAsChars (x: if x == "a" then "i" else x) "nax"
=> "nix"
*/
stringAsChars = f: s:
concatStrings (
stringAsChars =
# Function to map over each individual character
f:
# Input string
s: concatStrings (
map f (stringToCharacters s)
);
/* Escape occurrence of the elements of list in string by
/* Escape each occurrence of the elements of `list` in `string` by
prefixing it with a backslash.
Type: escape :: [string] -> string -> string
Example:
escape ["(" ")"] "(foo)"
=> "\\(foo\\)"
@ -205,6 +287,8 @@ rec {
/* Quote string to be used safely within the Bourne shell.
Type: escapeShellArg :: string -> string
Example:
escapeShellArg "esc'ape\nme"
=> "'esc'\\''ape\nme'"
@ -213,6 +297,8 @@ rec {
/* Quote all arguments to be safely passed to the Bourne shell.
Type: escapeShellArgs :: [string] -> string
Example:
escapeShellArgs ["one" "two three" "four'five"]
=> "'one' 'two three' 'four'\\''five'"
@ -221,13 +307,15 @@ rec {
/* Turn a string into a Nix expression representing that string
Type: string -> string
Example:
escapeNixString "hello\${}\n"
=> "\"hello\\\${}\\n\""
*/
escapeNixString = s: escape ["$"] (builtins.toJSON s);
/* Obsolete - use replaceStrings instead. */
# Obsolete - use replaceStrings instead.
replaceChars = builtins.replaceStrings or (
del: new: s:
let
@ -247,6 +335,8 @@ rec {
/* Converts an ASCII string to lower-case.
Type: toLower :: string -> string
Example:
toLower "HOME"
=> "home"
@ -255,6 +345,8 @@ rec {
/* Converts an ASCII string to upper-case.
Type: toUpper :: string -> string
Example:
toUpper "home"
=> "HOME"
@ -264,7 +356,7 @@ rec {
/* Appends string context from another string. This is an implementation
detail of Nix.
Strings in Nix carry an invisible `context' which is a list of strings
Strings in Nix carry an invisible `context` which is a list of strings
representing store paths. If the string is later used in a derivation
attribute, the derivation will properly populate the inputDrvs and
inputSrcs.
@ -310,8 +402,9 @@ rec {
in
recurse 0 0;
/* Return the suffix of the second argument if the first argument matches
its prefix.
/* Return a string without the specified prefix, if the prefix matches.
Type: string -> string -> string
Example:
removePrefix "foo." "foo.bar.baz"
@ -319,18 +412,23 @@ rec {
removePrefix "xxx" "foo.bar.baz"
=> "foo.bar.baz"
*/
removePrefix = pre: s:
removePrefix =
# Prefix to remove if it matches
prefix:
# Input string
str:
let
preLen = stringLength pre;
sLen = stringLength s;
preLen = stringLength prefix;
sLen = stringLength str;
in
if hasPrefix pre s then
substring preLen (sLen - preLen) s
if hasPrefix prefix str then
substring preLen (sLen - preLen) str
else
s;
str;
/* Return the prefix of the second argument if the first argument matches
its suffix.
/* Return a string without the specified suffix, if the suffix matches.
Type: string -> string -> string
Example:
removeSuffix "front" "homefront"
@ -338,17 +436,21 @@ rec {
removeSuffix "xxx" "homefront"
=> "homefront"
*/
removeSuffix = suf: s:
removeSuffix =
# Suffix to remove if it matches
suffix:
# Input string
str:
let
sufLen = stringLength suf;
sLen = stringLength s;
sufLen = stringLength suffix;
sLen = stringLength str;
in
if sufLen <= sLen && suf == substring (sLen - sufLen) sufLen s then
substring 0 (sLen - sufLen) s
if sufLen <= sLen && suffix == substring (sLen - sufLen) sufLen str then
substring 0 (sLen - sufLen) str
else
s;
str;
/* Return true iff string v1 denotes a version older than v2.
/* Return true if string v1 denotes a version older than v2.
Example:
versionOlder "1.1" "1.2"
@ -358,7 +460,7 @@ rec {
*/
versionOlder = v1: v2: builtins.compareVersions v2 v1 == 1;
/* Return true iff string v1 denotes a version equal to or newer than v2.
/* Return true if string v1 denotes a version equal to or newer than v2.
Example:
versionAtLeast "1.1" "1.0"
@ -401,7 +503,7 @@ rec {
components = splitString "/" url;
filename = lib.last components;
name = builtins.head (splitString sep filename);
in assert name != filename; name;
in assert name != filename; name;
/* Create an --{enable,disable}-<feat> string that can be passed to
standard GNU Autoconf scripts.
@ -414,9 +516,47 @@ rec {
*/
enableFeature = enable: feat: "--${if enable then "enable" else "disable"}-${feat}";
/* Create an --{enable-<feat>=<value>,disable-<feat>} string that can be passed to
standard GNU Autoconf scripts.
Example:
enableFeatureAs true "shared" "foo"
=> "--enable-shared=foo"
enableFeatureAs false "shared" (throw "ignored")
=> "--disable-shared"
*/
enableFeatureAs = enable: feat: value: enableFeature enable feat + optionalString enable "=${value}";
/* Create an --{with,without}-<feat> string that can be passed to
standard GNU Autoconf scripts.
Example:
withFeature true "shared"
=> "--with-shared"
withFeature false "shared"
=> "--without-shared"
*/
withFeature = with_: feat: "--${if with_ then "with" else "without"}-${feat}";
/* Create an --{with-<feat>=<value>,without-<feat>} string that can be passed to
standard GNU Autoconf scripts.
Example:
withFeatureAs true "shared" "foo"
=> "--with-shared=foo"
withFeatureAs false "shared" (throw "ignored")
=> "--without-shared"
*/
withFeatureAs = with_: feat: value: withFeature with_ feat + optionalString with_ "=${value}";
/* Create a fixed width string with additional prefix to match
required width.
This function will fail if the input string is longer than the
requested length.
Type: fixedWidthString :: int -> string -> string
Example:
fixedWidthString 5 "0" (toString 15)
=> "00015"
@ -426,7 +566,10 @@ rec {
strw = lib.stringLength str;
reqWidth = width - (lib.stringLength filler);
in
assert strw <= width;
assert lib.assertMsg (strw <= width)
"fixedWidthString: requested string length (${
toString width}) must not be shorter than actual length (${
toString strw})";
if strw == width then str else filler + fixedWidthString reqWidth filler str;
/* Format a number adding leading zeroes up to fixed width.
@ -437,6 +580,13 @@ rec {
*/
fixedWidthNumber = width: n: fixedWidthString width "0" (toString n);
/* Check whether a value can be coerced to a string */
isCoercibleToString = x:
builtins.elem (builtins.typeOf x) [ "path" "string" "null" "int" "float" "bool" ] ||
(builtins.isList x && lib.all isCoercibleToString x) ||
x ? outPath ||
x ? __toString;
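# Expected results, read off the definition above (a sketch):
#   isCoercibleToString 42           => true   (int)
#   isCoercibleToString [ "a" 1 ]    => true   (list of coercible values)
#   isCoercibleToString { }          => false  (no outPath or __toString)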
/* Check whether a value is a store path.
Example:
@ -450,12 +600,16 @@ rec {
=> false
*/
isStorePath = x:
builtins.isString x
&& builtins.substring 0 1 (toString x) == "/"
&& dirOf (builtins.toPath x) == builtins.storeDir;
if isCoercibleToString x then
let str = toString x; in
builtins.substring 0 1 str == "/"
&& dirOf str == builtins.storeDir
else
false;
/* Convert string to int
Obviously, it is a bit hacky to use fromJSON that way.
/* Parse a string as an int.
Type: string -> int
Example:
toInt "1337"
@ -465,17 +619,18 @@ rec {
toInt "3.14"
=> error: floating point JSON numbers are not supported
*/
# Obviously, it is a bit hacky to use fromJSON this way.
toInt = str:
let may_be_int = builtins.fromJSON str; in
if builtins.isInt may_be_int
then may_be_int
else throw "Could not convert ${str} to int.";
/* Read a list of paths from `file', relative to the `rootPath'. Lines
beginning with `#' are treated as comments and ignored. Whitespace
is significant.
/* Read a list of paths from `file`, relative to the `rootPath`.
Lines beginning with `#` are treated as comments and ignored.
Whitespace is significant.
NOTE: this function is not performant and should be avoided
NOTE: This function is not performant and should be avoided.
Example:
readPathsFromFile /prefix
@ -488,16 +643,17 @@ rec {
*/
readPathsFromFile = rootPath: file:
let
root = toString rootPath;
lines = lib.splitString "\n" (builtins.readFile file);
removeComments = lib.filter (line: line != "" && !(lib.hasPrefix "#" line));
relativePaths = removeComments lines;
absolutePaths = builtins.map (path: builtins.toPath (root + "/" + path)) relativePaths;
absolutePaths = builtins.map (path: rootPath + "/${path}") relativePaths;
in
absolutePaths;
/* Read the contents of a file removing the trailing \n
Type: fileContents :: path -> string
Example:
$ echo "1.0" > ./version

lib/systems/default.nix
@ -3,6 +3,7 @@
rec {
doubles = import ./doubles.nix { inherit lib; };
forMeta = import ./for-meta.nix { inherit lib; };
parse = import ./parse.nix { inherit lib; };
inspect = import ./inspect.nix { inherit lib; };
platforms = import ./platforms.nix { inherit lib; };
@ -23,13 +24,17 @@ rec {
config = parse.tripleFromSystem final.parsed;
# Just a guess, based on `system`
platform = platforms.selectBySystem final.system;
# Derived meta-data
libc =
/**/ if final.isDarwin then "libSystem"
else if final.isMinGW then "msvcrt"
else if final.isMusl then "musl"
else if final.isLinux /* default */ then "glibc"
/**/ if final.isDarwin then "libSystem"
else if final.isMinGW then "msvcrt"
else if final.isMusl then "musl"
else if final.isUClibc then "uclibc"
else if final.isAndroid then "bionic"
else if final.isLinux /* default */ then "glibc"
else if final.isAvr then "avrlibc"
# TODO(@Ericson2314) think more about other operating systems
else "native/impure";
else "native/impure";
extensions = {
sharedLibrary =
/**/ if final.isDarwin then ".dylib"
@ -39,7 +44,77 @@ rec {
/**/ if final.isWindows then ".exe"
else "";
};
# Misc boolean options
useAndroidPrebuilt = false;
useiOSPrebuilt = false;
# Output from uname
uname = {
# uname -s
system = {
"linux" = "Linux";
"windows" = "Windows";
"darwin" = "Darwin";
"netbsd" = "NetBSD";
"freebsd" = "FreeBSD";
"openbsd" = "OpenBSD";
}.${final.parsed.kernel.name} or null;
# uname -p
processor = final.parsed.cpu.name;
# uname -r
release = null;
};
qemuArch =
if final.isArm then "arm"
else if final.isx86_64 then "x86_64"
else if final.isx86 then "i386"
else {
"powerpc" = "ppc";
"powerpc64" = "ppc64";
"powerpc64le" = "ppc64";
"mips64" = "mips";
"mipsel64" = "mipsel";
}.${final.parsed.cpu.name} or final.parsed.cpu.name;
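# Sketch of the mapping above: on x86_64-linux, stdenv.hostPlatform.qemuArch
# is expected to be "x86_64"; an aarch64 platform has no entry in the attrset,
# so it falls through to final.parsed.cpu.name, i.e. "aarch64".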
emulator = pkgs: let
qemu-user = pkgs.qemu.override {
smartcardSupport = false;
spiceSupport = false;
openGLSupport = false;
virglSupport = false;
vncSupport = false;
gtkSupport = false;
sdlSupport = false;
pulseSupport = false;
smbdSupport = false;
seccompSupport = false;
hostCpuTargets = ["${final.qemuArch}-linux-user"];
};
wine-name = "wine${toString final.parsed.cpu.bits}";
wine = (pkgs.winePackagesFor wine-name).minimal;
in
if final.parsed.kernel.name == pkgs.stdenv.hostPlatform.parsed.kernel.name &&
(final.parsed.cpu.name == pkgs.stdenv.hostPlatform.parsed.cpu.name ||
(final.isi686 && pkgs.stdenv.hostPlatform.isx86_64))
then pkgs.runtimeShell
else if final.isWindows
then "${wine}/bin/${wine-name}"
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux
then "${qemu-user}/bin/qemu-${final.qemuArch}"
else throw "Don't know how to run ${final.config} executables.";
} // mapAttrs (n: v: v final.parsed) inspect.predicates
// args;
in final;
in assert final.useAndroidPrebuilt -> final.isAndroid;
assert lib.foldl
(pass: { assertion, message }:
if assertion final
then pass
else throw message)
true
(final.parsed.abi.assertions or []);
final;
}

lib/systems/doubles.nix
@ -9,12 +9,14 @@ let
"aarch64-linux"
"armv5tel-linux" "armv6l-linux" "armv7l-linux"
"mips64el-linux"
"mipsel-linux"
"i686-cygwin" "i686-freebsd" "i686-linux" "i686-netbsd" "i686-openbsd"
"x86_64-cygwin" "x86_64-darwin" "x86_64-freebsd" "x86_64-linux"
"x86_64-netbsd" "x86_64-openbsd" "x86_64-solaris"
"x86_64-windows" "i686-windows"
];
allParsed = map parse.mkSystemFromString all;
@ -24,24 +26,26 @@ let
in rec {
inherit all;
allBut = platforms: lists.filter (x: !(builtins.elem x platforms)) all;
none = [];
arm = filterDoubles predicates.isArm;
arm = filterDoubles predicates.isAarch32;
aarch64 = filterDoubles predicates.isAarch64;
x86 = filterDoubles predicates.isx86;
i686 = filterDoubles predicates.isi686;
mips = filterDoubles predicates.isMips;
x86_64 = filterDoubles predicates.isx86_64;
mips = filterDoubles predicates.isMips;
cygwin = filterDoubles predicates.isCygwin;
darwin = filterDoubles predicates.isDarwin;
freebsd = filterDoubles predicates.isFreeBSD;
# Should be better, but MinGW is unclear, and HURD is bit-rotted.
gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; });
# Should be better, but MinGW is unclear.
gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; });
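# Effect of the extra patterns (a sketch): 32-bit ARM doubles such as
# "armv5tel-linux" and "armv7l-linux" now parse to the gnueabi/gnueabihf ABIs,
# so the added matchAttrs patterns are what keeps them in `gnu`.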
illumos = filterDoubles predicates.isSunOS;
linux = filterDoubles predicates.isLinux;
netbsd = filterDoubles predicates.isNetBSD;
openbsd = filterDoubles predicates.isOpenBSD;
unix = filterDoubles predicates.isUnix;
windows = filterDoubles predicates.isWindows;
mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "aarch64-linux"];
mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "aarch64-linux" "powerpc64le-linux"];
}

lib/systems/examples.nix
@ -2,58 +2,89 @@
# `crossSystem`. They are put here for user convenience, but also used by cross
# tests and linux cross stdenv building, so handle with care!
{ lib }:
let platforms = import ./platforms.nix { inherit lib; }; in
let
platforms = import ./platforms.nix { inherit lib; };
riscv = bits: {
config = "riscv${bits}-unknown-linux-gnu";
platform = platforms.riscv-multiplatform bits;
};
in
rec {
#
# Linux
#
powernv = {
config = "powerpc64le-unknown-linux-gnu";
platform = platforms.powernv;
};
musl-power = {
config = "powerpc64le-unknown-linux-musl";
platform = platforms.powernv;
};
sheevaplug = rec {
config = "armv5tel-unknown-linux-gnueabi";
arch = "armv5tel";
float = "soft";
platform = platforms.sheevaplug;
};
raspberryPi = rec {
config = "armv6l-unknown-linux-gnueabihf";
arch = "armv6l";
float = "hard";
fpu = "vfp";
platform = platforms.raspberrypi;
};
armv7l-hf-multiplatform = rec {
config = "arm-unknown-linux-gnueabihf";
arch = "armv7-a";
float = "hard";
fpu = "vfpv3-d16";
config = "armv7l-unknown-linux-gnueabihf";
platform = platforms.armv7l-hf-multiplatform;
};
aarch64-multiplatform = rec {
config = "aarch64-unknown-linux-gnu";
arch = "aarch64";
platform = platforms.aarch64-multiplatform;
};
armv5te-android-prebuilt = rec {
config = "armv5tel-unknown-linux-androideabi";
sdkVer = "21";
ndkVer = "18b";
platform = platforms.armv5te-android;
useAndroidPrebuilt = true;
};
armv7a-android-prebuilt = rec {
config = "armv7a-unknown-linux-androideabi";
sdkVer = "24";
ndkVer = "18b";
platform = platforms.armv7a-android;
useAndroidPrebuilt = true;
};
aarch64-android-prebuilt = rec {
config = "aarch64-unknown-linux-android";
sdkVer = "24";
ndkVer = "18b";
platform = platforms.aarch64-multiplatform;
useAndroidPrebuilt = true;
};
scaleway-c1 = armv7l-hf-multiplatform // rec {
platform = platforms.scaleway-c1;
inherit (platform.gcc) fpu;
};
pogoplug4 = rec {
arch = "armv5tel";
config = "armv5tel-unknown-linux-gnueabi";
float = "soft";
platform = platforms.pogoplug4;
};
ben-nanonote = rec {
config = "mipsel-unknown-linux-uclibc";
platform = platforms.ben_nanonote;
};
fuloongminipc = rec {
config = "mips64el-unknown-linux-gnu";
arch = "mips";
float = "hard";
config = "mipsel-unknown-linux-gnu";
platform = platforms.fuloong2f_n32;
};
@ -68,29 +99,98 @@ rec {
musl64 = { config = "x86_64-unknown-linux-musl"; };
musl32 = { config = "i686-unknown-linux-musl"; };
riscv = bits: {
config = "riscv${bits}-unknown-linux-gnu";
platform = platforms.riscv-multiplatform bits;
};
riscv64 = riscv "64";
riscv32 = riscv "32";
avr = {
config = "avr";
};
arm-embedded = {
config = "arm-none-eabi";
libc = "newlib";
};
armhf-embedded = {
config = "arm-none-eabihf";
libc = "newlib";
};
aarch64-embedded = {
config = "aarch64-none-elf";
libc = "newlib";
};
aarch64be-embedded = {
config = "aarch64_be-none-elf";
libc = "newlib";
};
ppc-embedded = {
config = "powerpc-none-eabi";
libc = "newlib";
};
ppcle-embedded = {
config = "powerpcle-none-eabi";
libc = "newlib";
};
alpha-embedded = {
config = "alpha-elf";
libc = "newlib";
};
i686-embedded = {
config = "i686-elf";
libc = "newlib";
};
x86_64-embedded = {
config = "x86_64-elf";
libc = "newlib";
};
#
# Darwin
#
iphone64 = {
config = "aarch64-apple-darwin14";
arch = "arm64";
libc = "libSystem";
config = "aarch64-apple-ios";
# config = "aarch64-apple-darwin14";
sdkVer = "10.2";
xcodeVer = "8.2";
xcodePlatform = "iPhoneOS";
useiOSPrebuilt = true;
platform = {};
};
iphone32 = {
config = "arm-apple-darwin10";
arch = "armv7-a";
libc = "libSystem";
config = "armv7a-apple-ios";
# config = "arm-apple-darwin10";
sdkVer = "10.2";
xcodeVer = "8.2";
xcodePlatform = "iPhoneOS";
useiOSPrebuilt = true;
platform = {};
};
iphone64-simulator = {
config = "x86_64-apple-ios";
# config = "x86_64-apple-darwin14";
sdkVer = "10.2";
xcodeVer = "8.2";
xcodePlatform = "iPhoneSimulator";
useiOSPrebuilt = true;
platform = {};
};
iphone32-simulator = {
config = "i686-apple-ios";
# config = "i386-apple-darwin11";
sdkVer = "10.2";
xcodeVer = "8.2";
xcodePlatform = "iPhoneSimulator";
useiOSPrebuilt = true;
platform = {};
};
@ -101,7 +201,6 @@ rec {
# 32 bit mingw-w64
mingw32 = {
config = "i686-pc-mingw32";
arch = "x86"; # Irrelevant
libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain
platform = {};
};
@ -110,7 +209,6 @@ rec {
mingwW64 = {
# That's the triplet they use in the mingw-w64 docs.
config = "x86_64-pc-mingw32";
arch = "x86_64"; # Irrelevant
libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain
platform = {};
};

37
lib/systems/for-meta.nix Normal file
@ -0,0 +1,37 @@
{ lib }:
let
inherit (lib.systems) parse;
inherit (lib.systems.inspect) patterns;
abis = lib.mapAttrs (_: abi: builtins.removeAttrs abi [ "assertions" ]) parse.abis;
in rec {
all = [ {} ]; # `{}` matches anything
none = [];
arm = [ patterns.isAarch32 ];
aarch64 = [ patterns.isAarch64 ];
x86 = [ patterns.isx86 ];
i686 = [ patterns.isi686 ];
x86_64 = [ patterns.isx86_64 ];
mips = [ patterns.isMips ];
riscv = [ patterns.isRiscV ];
cygwin = [ patterns.isCygwin ];
darwin = [ patterns.isDarwin ];
freebsd = [ patterns.isFreeBSD ];
# Should be better, but MinGW is unclear.
gnu = [
{ kernel = parse.kernels.linux; abi = abis.gnu; }
{ kernel = parse.kernels.linux; abi = abis.gnueabi; }
{ kernel = parse.kernels.linux; abi = abis.gnueabihf; }
];
illumos = [ patterns.isSunOS ];
linux = [ patterns.isLinux ];
netbsd = [ patterns.isNetBSD ];
openbsd = [ patterns.isOpenBSD ];
unix = patterns.isUnix; # Actually a list
windows = [ patterns.isWindows ];
inherit (lib.systems.doubles) mesaPlatforms;
}

lib/systems/inspect.nix
@ -3,52 +3,58 @@ with import ./parse.nix { inherit lib; };
with lib.attrsets;
with lib.lists;
let abis_ = abis; in
let abis = lib.mapAttrs (_: abi: builtins.removeAttrs abi [ "assertions" ]) abis_; in
rec {
patterns = rec {
i686 = { cpu = cpuTypes.i686; };
x86_64 = { cpu = cpuTypes.x86_64; };
PowerPC = { cpu = cpuTypes.powerpc; };
x86 = { cpu = { family = "x86"; }; };
Arm = { cpu = { family = "arm"; }; };
Aarch64 = { cpu = { family = "aarch64"; }; };
Mips = { cpu = { family = "mips"; }; };
RiscV = { cpu = { family = "riscv"; }; };
Wasm = { cpu = { family = "wasm"; }; };
isi686 = { cpu = cpuTypes.i686; };
isx86_64 = { cpu = cpuTypes.x86_64; };
isPowerPC = { cpu = cpuTypes.powerpc; };
isPower = { cpu = { family = "power"; }; };
isx86 = { cpu = { family = "x86"; }; };
isAarch32 = { cpu = { family = "arm"; bits = 32; }; };
isAarch64 = { cpu = { family = "arm"; bits = 64; }; };
isMips = { cpu = { family = "mips"; }; };
isRiscV = { cpu = { family = "riscv"; }; };
isSparc = { cpu = { family = "sparc"; }; };
isWasm = { cpu = { family = "wasm"; }; };
isAvr = { cpu = { family = "avr"; }; };
"32bit" = { cpu = { bits = 32; }; };
"64bit" = { cpu = { bits = 64; }; };
BigEndian = { cpu = { significantByte = significantBytes.bigEndian; }; };
LittleEndian = { cpu = { significantByte = significantBytes.littleEndian; }; };
is32bit = { cpu = { bits = 32; }; };
is64bit = { cpu = { bits = 64; }; };
isBigEndian = { cpu = { significantByte = significantBytes.bigEndian; }; };
isLittleEndian = { cpu = { significantByte = significantBytes.littleEndian; }; };
BSD = { kernel = { families = { inherit (kernelFamilies) bsd; }; }; };
Unix = [ BSD Darwin Linux SunOS Hurd Cygwin ];
isBSD = { kernel = { families = { inherit (kernelFamilies) bsd; }; }; };
isDarwin = { kernel = { families = { inherit (kernelFamilies) darwin; }; }; };
isUnix = [ isBSD isDarwin isLinux isSunOS isCygwin ];
Darwin = { kernel = kernels.darwin; };
Linux = { kernel = kernels.linux; };
SunOS = { kernel = kernels.solaris; };
FreeBSD = { kernel = kernels.freebsd; };
Hurd = { kernel = kernels.hurd; };
NetBSD = { kernel = kernels.netbsd; };
OpenBSD = { kernel = kernels.openbsd; };
Windows = { kernel = kernels.windows; };
Cygwin = { kernel = kernels.windows; abi = abis.cygnus; };
MinGW = { kernel = kernels.windows; abi = abis.gnu; };
isMacOS = { kernel = kernels.macos; };
isiOS = { kernel = kernels.ios; };
isLinux = { kernel = kernels.linux; };
isSunOS = { kernel = kernels.solaris; };
isFreeBSD = { kernel = kernels.freebsd; };
isNetBSD = { kernel = kernels.netbsd; };
isOpenBSD = { kernel = kernels.openbsd; };
isWindows = { kernel = kernels.windows; };
isCygwin = { kernel = kernels.windows; abi = abis.cygnus; };
isMinGW = { kernel = kernels.windows; abi = abis.gnu; };
Musl = with abis; map (a: { abi = a; }) [ musl musleabi musleabihf ];
isAndroid = [ { abi = abis.android; } { abi = abis.androideabi; } ];
isMusl = with abis; map (a: { abi = a; }) [ musl musleabi musleabihf ];
isUClibc = with abis; map (a: { abi = a; }) [ uclibc uclibceabi uclibceabihf ];
Kexecable = map (family: { kernel = kernels.linux; cpu.family = family; })
[ "x86" "arm" "aarch64" "mips" ];
Efi = map (family: { cpu.family = family; })
[ "x86" "arm" "aarch64" ];
Seccomputable = map (family: { kernel = kernels.linux; cpu.family = family; })
[ "x86" "arm" "aarch64" "mips" ];
isEfi = map (family: { cpu.family = family; })
[ "x86" "arm" "aarch64" ];
# Deprecated after 18.03
isArm = isAarch32;
};
matchAnyAttrs = patterns:
if builtins.isList patterns then attrs: any (pattern: matchAttrs pattern attrs) patterns
else matchAttrs patterns;
predicates = mapAttrs'
(name: value: nameValuePair ("is" + name) (matchAnyAttrs value))
patterns;
predicates = mapAttrs (_: matchAnyAttrs) patterns;
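# Sketch: each predicate still takes a parsed platform, e.g.
#   lib.systems.inspect.predicates.isLinux
#     (lib.systems.parse.mkSystemFromString "x86_64-unknown-linux-gnu")
#   => true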
}

lib/systems/parse.nix
@ -18,6 +18,7 @@
with lib.lists;
with lib.types;
with lib.attrsets;
with lib.strings;
with (import ./inspect.nix { inherit lib; }).predicates;
let
@ -34,7 +35,7 @@ rec {
################################################################################
types.openSignifiantByte = mkOptionType {
types.openSignificantByte = mkOptionType {
name = "significant-byte";
description = "Endianness";
merge = mergeOneOption;
@ -42,7 +43,7 @@ rec {
types.significantByte = enum (attrValues significantBytes);
significantBytes = setTypes types.openSignifiantByte {
significantBytes = setTypes types.openSignificantByte {
bigEndian = {};
littleEndian = {};
};
@ -68,19 +69,47 @@ rec {
cpuTypes = with significantBytes; setTypes types.openCpuType {
arm = { bits = 32; significantByte = littleEndian; family = "arm"; };
armv5tel = { bits = 32; significantByte = littleEndian; family = "arm"; };
armv6l = { bits = 32; significantByte = littleEndian; family = "arm"; };
armv7a = { bits = 32; significantByte = littleEndian; family = "arm"; };
armv7l = { bits = 32; significantByte = littleEndian; family = "arm"; };
aarch64 = { bits = 64; significantByte = littleEndian; family = "aarch64"; };
armv5tel = { bits = 32; significantByte = littleEndian; family = "arm"; version = "5"; };
armv6m = { bits = 32; significantByte = littleEndian; family = "arm"; version = "6"; };
armv6l = { bits = 32; significantByte = littleEndian; family = "arm"; version = "6"; };
armv7a = { bits = 32; significantByte = littleEndian; family = "arm"; version = "7"; };
armv7r = { bits = 32; significantByte = littleEndian; family = "arm"; version = "7"; };
armv7m = { bits = 32; significantByte = littleEndian; family = "arm"; version = "7"; };
armv7l = { bits = 32; significantByte = littleEndian; family = "arm"; version = "7"; };
armv8a = { bits = 32; significantByte = littleEndian; family = "arm"; version = "8"; };
armv8r = { bits = 32; significantByte = littleEndian; family = "arm"; version = "8"; };
armv8m = { bits = 32; significantByte = littleEndian; family = "arm"; version = "8"; };
aarch64 = { bits = 64; significantByte = littleEndian; family = "arm"; version = "8"; };
aarch64_be = { bits = 64; significantByte = bigEndian; family = "arm"; version = "8"; };
i386 = { bits = 32; significantByte = littleEndian; family = "x86"; };
i486 = { bits = 32; significantByte = littleEndian; family = "x86"; };
i586 = { bits = 32; significantByte = littleEndian; family = "x86"; };
i686 = { bits = 32; significantByte = littleEndian; family = "x86"; };
x86_64 = { bits = 64; significantByte = littleEndian; family = "x86"; };
mips64el = { bits = 32; significantByte = littleEndian; family = "mips"; };
mips = { bits = 32; significantByte = bigEndian; family = "mips"; };
mipsel = { bits = 32; significantByte = littleEndian; family = "mips"; };
mips64 = { bits = 64; significantByte = bigEndian; family = "mips"; };
mips64el = { bits = 64; significantByte = littleEndian; family = "mips"; };
powerpc = { bits = 32; significantByte = bigEndian; family = "power"; };
powerpc64 = { bits = 64; significantByte = bigEndian; family = "power"; };
powerpc64le = { bits = 64; significantByte = littleEndian; family = "power"; };
powerpcle = { bits = 32; significantByte = littleEndian; family = "power"; };
riscv32 = { bits = 32; significantByte = littleEndian; family = "riscv"; };
riscv64 = { bits = 64; significantByte = littleEndian; family = "riscv"; };
sparc = { bits = 32; significantByte = bigEndian; family = "sparc"; };
sparc64 = { bits = 64; significantByte = bigEndian; family = "sparc"; };
wasm32 = { bits = 32; significantByte = littleEndian; family = "wasm"; };
wasm64 = { bits = 64; significantByte = littleEndian; family = "wasm"; };
alpha = { bits = 64; significantByte = littleEndian; family = "alpha"; };
avr = { bits = 8; family = "avr"; };
};
################################################################################
@ -97,6 +126,7 @@ rec {
apple = {};
pc = {};
none = {};
unknown = {};
};
@ -131,6 +161,7 @@ rec {
kernelFamilies = setTypes types.openKernelFamily {
bsd = {};
darwin = {};
};
################################################################################
@ -146,9 +177,11 @@ rec {
types.kernel = enum (attrValues kernels);
kernels = with execFormats; with kernelFamilies; setTypes types.openKernel {
darwin = { execFormat = macho; families = { }; };
# TODO(@Ericson2314): Don't want to mass-rebuild yet, so keep 'darwin' as
# the normalized name for macOS.
macos = { execFormat = macho; families = { inherit darwin; }; name = "darwin"; };
ios = { execFormat = macho; families = { inherit darwin; }; };
freebsd = { execFormat = elf; families = { inherit bsd; }; };
hurd = { execFormat = elf; families = { }; };
linux = { execFormat = elf; families = { }; };
netbsd = { execFormat = elf; families = { inherit bsd; }; };
none = { execFormat = unknown; families = { }; };
@ -156,9 +189,10 @@ rec {
solaris = { execFormat = elf; families = { }; };
windows = { execFormat = pe; families = { }; };
} // { # aliases
# TODO(@Ericson2314): Handle these Darwin version suffixes more generally.
darwin10 = kernels.darwin;
darwin14 = kernels.darwin;
# 'darwin' is the kernel for all of them. We choose macOS by default.
darwin = kernels.macos;
watchos = kernels.ios;
tvos = kernels.ios;
win32 = kernels.windows;
};
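# Sketch of the aliases above: a versioned Darwin triple still resolves to the
# macOS kernel, whose normalized name stays "darwin", e.g.
#   (mkSystemFromString "x86_64-apple-darwin14").kernel.name  => "darwin"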
@ -173,23 +207,55 @@ rec {
types.abi = enum (attrValues abis);
abis = setTypes types.openAbi {
cygnus = {};
gnu = {};
msvc = {};
eabi = {};
androideabi = {};
gnueabi = {};
gnueabihf = {};
musleabi = {};
musleabihf = {};
musl = {};
cygnus = {};
msvc = {};
# Note: eabi is specific to ARM and PowerPC.
# On PowerPC, this corresponds to PPCEABI.
# On ARM, this corresponds to ARMEABI.
eabi = { float = "soft"; };
eabihf = { float = "hard"; };
# Other architectures should use ELF in embedded situations.
elf = {};
androideabi = {};
android = {
assertions = [
{ assertion = platform: !platform.isAarch32;
message = ''
The "android" ABI is not for 32-bit ARM. Use "androideabi" instead.
'';
}
];
};
gnueabi = { float = "soft"; };
gnueabihf = { float = "hard"; };
gnu = {
assertions = [
{ assertion = platform: !platform.isAarch32;
message = ''
The "gnu" ABI is ambiguous on 32-bit ARM. Use "gnueabi" or "gnueabihf" instead.
'';
}
];
};
musleabi = { float = "soft"; };
musleabihf = { float = "hard"; };
musl = {};
uclibceabihf = { float = "hard"; };
uclibceabi = { float = "soft"; };
uclibc = {};
unknown = {};
};
################################################################################
types.system = mkOptionType {
types.parsedPlatform = mkOptionType {
name = "system";
description = "fully parsed representation of llvm- or nix-style platform tuple";
merge = mergeOneOption;
@ -203,15 +269,24 @@ rec {
isSystem = isType "system";
mkSystem = components:
assert types.system.check components;
assert types.parsedPlatform.check components;
setType "system" components;
mkSkeletonFromList = l: {
"1" = if elemAt l 0 == "avr"
then { cpu = elemAt l 0; kernel = "none"; abi = "unknown"; }
else throw "Target specification with 1 components is ambiguous";
"2" = # We only do 2-part hacks for things Nix already supports
if elemAt l 1 == "cygwin"
then { cpu = elemAt l 0; kernel = "windows"; abi = "cygnus"; }
else if elemAt l 1 == "gnu"
then { cpu = elemAt l 0; kernel = "hurd"; abi = "gnu"; }
# MSVC ought to be the default ABI so this case isn't needed. But then it
# becomes difficult to handle the gnu* variants for Aarch32 correctly for
# minGW. So it's easier to make gnu* the default for the MinGW, but
# hack-in MSVC for the non-MinGW case right here.
else if elemAt l 1 == "windows"
then { cpu = elemAt l 0; kernel = "windows"; abi = "msvc"; }
else if (elemAt l 1) == "elf"
then { cpu = elemAt l 0; vendor = "unknown"; kernel = "none"; abi = elemAt l 1; }
else { cpu = elemAt l 0; kernel = elemAt l 1; };
"3" = # Awkwards hacks, beware!
if elemAt l 1 == "apple"
@ -219,7 +294,11 @@ rec {
else if (elemAt l 1 == "linux") || (elemAt l 2 == "gnu")
then { cpu = elemAt l 0; kernel = elemAt l 1; abi = elemAt l 2; }
else if (elemAt l 2 == "mingw32") # autotools breaks on -gnu for window
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "windows"; abi = "gnu"; }
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "windows"; }
else if hasPrefix "netbsd" (elemAt l 2)
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = elemAt l 2; }
else if (elem (elemAt l 2) ["eabi" "eabihf" "elf"])
then { cpu = elemAt l 0; vendor = "unknown"; kernel = elemAt l 1; abi = elemAt l 2; }
else throw "Target specification with 3 components is ambiguous";
"4" = { cpu = elemAt l 0; vendor = elemAt l 1; kernel = elemAt l 2; abi = elemAt l 3; };
}.${toString (length l)}
@ -246,11 +325,17 @@ rec {
else if isDarwin parsed then vendors.apple
else if isWindows parsed then vendors.pc
else vendors.unknown;
kernel = getKernel args.kernel;
kernel = if hasPrefix "darwin" args.kernel then getKernel "darwin"
else if hasPrefix "netbsd" args.kernel then getKernel "netbsd"
else getKernel args.kernel;
abi =
/**/ if args ? abi then getAbi args.abi
else if isLinux parsed then abis.gnu
else if isWindows parsed then abis.gnu
else if isLinux parsed || isWindows parsed then
if isAarch32 parsed then
if lib.versionAtLeast (parsed.cpu.version or "0") "6"
then abis.gnueabihf
else abis.gnueabi
else abis.gnu
else abis.unknown;
};
@ -259,8 +344,8 @@ rec {
mkSystemFromString = s: mkSystemFromSkeleton (mkSkeletonFromList (lib.splitString "-" s));
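# Sketch of the new ABI inference when no ABI is given (see the abi branch in
# mkSystemFromSkeleton above):
#   (mkSystemFromString "armv7l-linux").abi.name    => "gnueabihf"
#   (mkSystemFromString "armv5tel-linux").abi.name  => "gnueabi"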
doubleFromSystem = { cpu, vendor, kernel, abi, ... }:
if abi == abis.cygnus
then "${cpu.name}-cygwin"
/**/ if abi == abis.cygnus then "${cpu.name}-cygwin"
else if kernel.families ? darwin then "${cpu.name}-darwin"
else "${cpu.name}-${kernel.name}";
tripleFromSystem = { cpu, vendor, kernel, abi, ... } @ sys: assert isSystem sys; let

Some files were not shown because too many files have changed in this diff.