Merge remote-tracking branch 'origin/master' into crafty

Samuel Rivas 2016-10-04 20:13:38 +02:00
commit 55286552d3
6919 changed files with 296968 additions and 1011867 deletions

.editorconfig Normal file

@ -0,0 +1,24 @@
# EditorConfig configuration for nixpkgs
# http://EditorConfig.org
# Top-most EditorConfig file
root = true
# Unix-style newlines with a newline ending every file, utf-8 charset
[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8
# see https://nixos.org/nixpkgs/manual/#chap-conventions
# Match nix/ruby files, set indent to spaces with width of two
[*.{nix,rb}]
indent_style = space
indent_size = 2
# Match shell/python/perl scripts, set indent to spaces with width of four
[*.{sh,py,pl}]
indent_style = space
indent_size = 4


@ -28,5 +28,8 @@ under the terms of [COPYING](../COPYING), which is an MIT-like license.
* Not start with the package name
* Not have a dot at the end
See the nixpkgs manual for more details on how to [Submit changes to nixpkgs](http://hydra.nixos.org/job/nixpkgs/trunk/manual/latest/download-by-type/doc/manual#chap-submitting-changes).
See the nixpkgs manual for more details on how to [Submit changes to nixpkgs](https://nixos.org/nixpkgs/manual/#chap-submitting-changes).
## Reviewing contributions
See the nixpkgs manual for more details on how to [Review contributions](http://hydra.nixos.org/job/nixpkgs/trunk/manual/latest/download-by-type/doc/manual#chap-reviewing-contributions).


@ -1,3 +1,6 @@
###### Motivation for this change
###### Things done
- [ ] Tested using sandboxing


@ -2,5 +2,10 @@
"userBlacklist": [
"civodul",
"jhasse"
]
],
"alwaysNotifyForPaths": [
{ "name": "FRidh", "files": ["pkgs/top-level/python-packages.nix", "pkgs/development/interpreters/python/*", "pkgs/development/python-modules/*" ] },
{ "name": "copumpkin", "files": ["pkgs/stdenv/darwin/*", "pkgs/os-specific/darwin/apple-source-releases/*"] }
],
"fileBlacklist": ["pkgs/top-level/all-packages.nix"]
}


@ -1,7 +1,20 @@
language: python
python: "3.4"
sudo: required
dist: trusty
before_install: ./maintainers/scripts/travis-nox-review-pr.sh nix
install: ./maintainers/scripts/travis-nox-review-pr.sh nox
script: ./maintainers/scripts/travis-nox-review-pr.sh build
language: nix
matrix:
include:
- os: linux
sudo: false
script:
- ./maintainers/scripts/travis-nox-review-pr.sh nixpkgs-verify nixpkgs-manual nixpkgs-tarball
- ./maintainers/scripts/travis-nox-review-pr.sh nixos-options nixos-manual
- os: linux
sudo: required
dist: trusty
before_script:
- sudo mount -o remount,exec,size=2G,mode=755 /run/user
script: ./maintainers/scripts/travis-nox-review-pr.sh nox pr
- os: osx
osx_image: xcode7.3
script: ./maintainers/scripts/travis-nox-review-pr.sh nox pr
env:
global:
- GITHUB_TOKEN=5edaaf1017f691ed34e7f80878f8f5fbd071603f


@ -1 +1 @@
16.09
17.03


@ -1,8 +1,7 @@
[<img src="http://nixos.org/logo/nixos-hires.png" width="500px" alt="logo" />](https://nixos.org/nixos)
[![Build Status](https://travis-ci.org/NixOS/nixpkgs.svg?branch=master)](https://travis-ci.org/NixOS/nixpkgs)
[![Issue Stats](http://www.issuestats.com/github/nixos/nixpkgs/badge/pr?style=flat)](http://www.issuestats.com/github/nixos/nixpkgs)
[![Issue Stats](http://www.issuestats.com/github/nixos/nixpkgs/badge/issue?style=flat)](http://www.issuestats.com/github/nixos/nixpkgs)
[![Code Triagers Badge](https://www.codetriage.com/nixos/nixpkgs/badges/users.svg)](https://www.codetriage.com/nixos/nixpkgs)
Nixpkgs is a collection of packages for the [Nix](https://nixos.org/nix/) package
manager. It is periodically built and tested by the [hydra](http://hydra.nixos.org/)
@ -14,12 +13,12 @@ build daemon as so-called channels. To get channel information via git, add
```
For stability and maximum binary package support, it is recommended to maintain
custom changes on top of one of the channels, e.g. `nixos-16.03` for the latest
custom changes on top of one of the channels, e.g. `nixos-16.09` for the latest
release and `nixos-unstable` for the latest successful build of master:
```
% git remote update channels
% git rebase channels/nixos-16.03
% git rebase channels/nixos-16.09
```
For pull-requests, please rebase onto nixpkgs `master`.
@ -31,11 +30,11 @@ For pull-requests, please rebase onto nixpkgs `master`.
* [Documentation (Nix Expression Language chapter)](https://nixos.org/nix/manual/#ch-expression-language)
* [Manual (How to write packages for Nix)](https://nixos.org/nixpkgs/manual/)
* [Manual (NixOS)](https://nixos.org/nixos/manual/)
* [Nix Wiki](https://nixos.org/wiki/)
* [Nix Wiki](https://nixos.org/wiki/) (deprecated, see milestone ["Move the Wiki!"](https://github.com/NixOS/nixpkgs/issues?q=is%3Aopen+is%3Aissue+milestone%3A%22Move+the+wiki%21%22))
* [Continuous package builds for unstable/master](https://hydra.nixos.org/jobset/nixos/trunk-combined)
* [Continuous package builds for 16.03 release](https://hydra.nixos.org/jobset/nixos/release-16.03)
* [Continuous package builds for 16.09 release](https://hydra.nixos.org/jobset/nixos/release-16.09)
* [Tests for unstable/master](https://hydra.nixos.org/job/nixos/trunk-combined/tested#tabs-constituents)
* [Tests for 16.03 release](https://hydra.nixos.org/job/nixos/release-16.03/tested#tabs-constituents)
* [Tests for 16.09 release](https://hydra.nixos.org/job/nixos/release-16.09/tested#tabs-constituents)
Communication:


@ -6,4 +6,4 @@ if ! builtins ? nixVersion || builtins.compareVersions requiredVersion builtins.
else
import ./pkgs/top-level
import ./pkgs/top-level/impure.nix


@ -1,376 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="users-guide-to-the-erlang-infrastructure">
<title>User's Guide to the Beam Infrastructure</title>
<section xml:id="beam-introduction">
<title>Beam Languages (Erlang &amp; Elixir) on Nix</title>
<para>
In this document and related Nix expressions we use the term
<emphasis>Beam</emphasis> to describe the environment. Beam is
the name of the Erlang virtual machine and, as far as we know,
from a packaging perspective all languages that run on Beam are
interchangeable. The things that do change, such as the build
system, are transparent to the users of the package, so we make
no distinction.
</para>
</section>
<section xml:id="build-tools">
<title>Build Tools</title>
<section xml:id="build-tools-rebar3">
<title>Rebar3</title>
<para>
By default, Rebar3 wants to manage its own dependencies. In the
normal, non-Nix world this is perfectly acceptable; in the Nix world it
is not. To support this we have created two versions of rebar3,
<literal>rebar3</literal> and <literal>rebar3-open</literal>. The
<literal>rebar3</literal> version has been patched to remove its
ability to download anything. If you are not running it in a
nix-shell or a nix-build, then it is probably not going to work for
you. <literal>rebar3-open</literal> is the normal, unmodified
rebar3. It should work exactly like any other version of
rebar3. Any Erlang package should rely on
<literal>rebar3</literal>, and that is really what you should be
using too.
</para>
</section>
<section xml:id="build-tools-other">
<title>Mix &amp; Erlang.mk</title>
<para>
Both Mix and Erlang.mk work exactly as you would expect. There
is a bootstrap process that needs to be run for both of
them. However, that is supported by the
<literal>buildMix</literal> and <literal>buildErlangMk</literal> derivations.
</para>
</section>
</section>
<section xml:id="how-to-install-beam-packages">
<title>How to install Beam packages</title>
<para>
Beam packages are not registered in the top level simply because
they are not relevant to the vast majority of Nix users. They are
installable using the <literal>beamPackages</literal> attribute
set.
You can list the available packages in the
<literal>beamPackages</literal> set with the following command:
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A beamPackages
beamPackages.esqlite esqlite-0.2.1
beamPackages.goldrush goldrush-0.1.7
beamPackages.ibrowse ibrowse-4.2.2
beamPackages.jiffy jiffy-0.14.5
beamPackages.lager lager-3.0.2
beamPackages.meck meck-0.8.3
beamPackages.rebar3-pc pc-1.1.0
</programlisting>
<para>
To install any of those packages into your profile, refer to them by
their attribute path (first column):
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
</programlisting>
<para>
The attribute path of any Beam package corresponds to the name
of that particular package in Hex or its OTP Application/Release name.
</para>
</section>
<section xml:id="packaging-beam-applications">
<title>Packaging Beam Applications</title>
<section xml:id="packaging-erlang-applications">
<title>Erlang Applications</title>
<section xml:id="rebar3-packages">
<title>Rebar3 Packages</title>
<para>
There is a Nix function called
<literal>buildRebar3</literal>. We use this function to make a
derivation that understands how to build a rebar3 project. For
example, the expression we use to build the <link
xlink:href="https://github.com/erlang-nix/hex2nix">hex2nix</link>
project follows.
</para>
<programlisting>
{stdenv, fetchFromGitHub, buildRebar3, ibrowse, jsx, erlware_commons }:
buildRebar3 rec {
name = "hex2nix";
version = "0.0.1";
src = fetchFromGitHub {
owner = "ericbmerritt";
repo = "hex2nix";
rev = "${version}";
sha256 = "1w7xjidz1l5yjmhlplfx7kphmnpvqm67w99hd2m7kdixwdxq0zqg";
};
beamDeps = [ ibrowse jsx erlware_commons ];
}
</programlisting>
<para>
The only visible difference between this derivation and
something like <literal>stdenv.mkDerivation</literal> is that we
have added <literal>beamDeps</literal> to the derivation. If
you add your Beam dependencies here, they will be correctly
handled by the system.
</para>
<para>
If your package needs to compile native code via Rebar's port
compilation mechanism, you should add <literal>compilePort =
true;</literal> to the derivation, as in the sketch below.
</para>
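<para>
For illustration only, here is a minimal sketch of such a derivation
(the package name and attribute values are placeholders):
</para>
<programlisting>
{ buildRebar3 }:

buildRebar3 {
  name = "my-port-pkg";   # hypothetical package with native code
  version = "0.1.0";
  src = ./.;              # project root, as in the shell.nix example below
  beamDeps = [ ];

  # Compile the native code through rebar's port compilation mechanism.
  compilePort = true;
}
</programlisting>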
</section>
<section xml:id="erlang-mk-packages">
<title>Erlang.mk Packages</title>
<para>
Erlang.mk functions almost identically to Rebar. The only real
difference is that <literal>buildErlangMk</literal> is called
instead of <literal>buildRebar3</literal>.
</para>
<programlisting>
{ stdenv, buildErlangMk, fetchHex, cowlib, ranch }:
buildErlangMk {
name = "cowboy";
version = "1.0.4";
src = fetchHex {
pkg = "cowboy";
version = "1.0.4";
sha256 =
"6a0edee96885fae3a8dd0ac1f333538a42e807db638a9453064ccfdaa6b9fdac";
};
beamDeps = [ cowlib ranch ];
meta = {
description = ''Small, fast, modular HTTP server written in
Erlang.'';
license = stdenv.lib.licenses.isc;
homepage = "https://github.com/ninenines/cowboy";
};
}
</programlisting>
</section>
<section xml:id="mix-packages">
<title>Mix Packages</title>
<para>
Mix functions almost identically to Rebar. The only real
difference is that <literal>buildMix</literal> is called
instead of <literal>buildRebar3</literal>.
</para>
<programlisting>
{ stdenv, buildMix, fetchHex, plug, absinthe }:
buildMix {
name = "absinthe_plug";
version = "1.0.0";
src = fetchHex {
pkg = "absinthe_plug";
version = "1.0.0";
sha256 =
"08459823fe1fd4f0325a8bf0c937a4520583a5a26d73b193040ab30a1dfc0b33";
};
beamDeps = [ plug absinthe ];
meta = {
description = ''A plug for Absinthe, an experimental GraphQL
toolkit'';
license = stdenv.lib.licenses.bsd3;
homepage = "https://github.com/CargoSense/absinthe_plug";
};
}
</programlisting>
</section>
</section>
</section>
<section xml:id="how-to-develop">
<title>How to develop</title>
<section xml:id="accessing-an-environment">
<title>Accessing an Environment</title>
<para>
Often, all you want to do is be able to access a valid
environment that contains a specific package and its
dependencies. We can do that with the <literal>env</literal>
part of a derivation. For example, let's say we want to access an
Erlang REPL with ibrowse loaded. We could do the following.
</para>
<programlisting>
~/w/nixpkgs nix-shell -A beamPackages.ibrowse.env --run "erl"
Erlang/OTP 18 [erts-7.0] [source] [64-bit] [smp:4:4] [async-threads:10] [hipe] [kernel-poll:false]
Eshell V7.0 (abort with ^G)
1> m(ibrowse).
Module: ibrowse
MD5: 3b3e0137d0cbb28070146978a3392945
Compiled: January 10 2016, 23:34
Object file: /nix/store/g1rlf65rdgjs4abbyj4grp37ry7ywivj-ibrowse-4.2.2/lib/erlang/lib/ibrowse-4.2.2/ebin/ibrowse.beam
Compiler options: [{outdir,"/tmp/nix-build-ibrowse-4.2.2.drv-0/hex-source-ibrowse-4.2.2/_build/default/lib/ibrowse/ebin"},
debug_info,debug_info,nowarn_shadow_vars,
warn_unused_import,warn_unused_vars,warnings_as_errors,
{i,"/tmp/nix-build-ibrowse-4.2.2.drv-0/hex-source-ibrowse-4.2.2/_build/default/lib/ibrowse/include"}]
Exports:
add_config/1 send_req_direct/7
all_trace_off/0 set_dest/3
code_change/3 set_max_attempts/3
get_config_value/1 set_max_pipeline_size/3
get_config_value/2 set_max_sessions/3
get_metrics/0 show_dest_status/0
get_metrics/2 show_dest_status/1
handle_call/3 show_dest_status/2
handle_cast/2 spawn_link_worker_process/1
handle_info/2 spawn_link_worker_process/2
init/1 spawn_worker_process/1
module_info/0 spawn_worker_process/2
module_info/1 start/0
rescan_config/0 start_link/0
rescan_config/1 stop/0
send_req/3 stop_worker_process/1
send_req/4 stream_close/1
send_req/5 stream_next/1
send_req/6 terminate/2
send_req_direct/4 trace_off/0
send_req_direct/5 trace_off/2
send_req_direct/6 trace_on/0
trace_on/2
ok
2>
</programlisting>
<para>
Notice the <literal>-A beamPackages.ibrowse.env</literal>. That
is the key to this functionality.
</para>
</section>
<section xml:id="creating-a-shell">
<title>Creating a Shell</title>
<para>
Getting access to an environment often isn't enough to do real
development. Many times we need to create a
<literal>shell.nix</literal> file and do our development inside
of the environment specified by that file. This file looks a lot
like the packaging described above. The main difference is that
<literal>src</literal> points to the project root and we call the
package directly.
</para>
<programlisting>
{ pkgs ? import &lt;nixpkgs&gt; {} }:
with pkgs;
let
f = { buildRebar3, ibrowse, jsx, erlware_commons }:
buildRebar3 {
name = "hex2nix";
version = "0.1.0";
src = ./.;
beamDeps = [ ibrowse jsx erlware_commons ];
};
drv = beamPackages.callPackage f {};
in
drv
</programlisting>
<section xml:id="building-in-a-shell">
<title>Building in a shell</title>
<para>
We can leverage the support of the derivation, regardless of
which build derivation is called, by calling the commands themselves.
</para>
<programlisting>
# =============================================================================
# Variables
# =============================================================================
NIX_TEMPLATES := "$(CURDIR)/nix-templates"
TARGET := "$(PREFIX)"
PROJECT_NAME := thorndyke
NIXPKGS=../nixpkgs
NIX_PATH=nixpkgs=$(NIXPKGS)
NIX_SHELL=nix-shell -I "$(NIX_PATH)" --pure
# =============================================================================
# Rules
# =============================================================================
.PHONY= all test clean repl shell build test analyze configure install \
test-nix-install publish plt analyze
all: build
guard-%:
@ if [ "${${*}}" == "" ]; then \
echo "Environment variable $* not set"; \
exit 1; \
fi
clean:
rm -rf _build
rm -rf .cache
repl:
$(NIX_SHELL) --run "iex -pa './_build/prod/lib/*/ebin'"
shell:
$(NIX_SHELL)
configure:
$(NIX_SHELL) --command 'eval "$$configurePhase"'
build: configure
$(NIX_SHELL) --command 'eval "$$buildPhase"'
install:
$(NIX_SHELL) --command 'eval "$$installPhase"'
test:
$(NIX_SHELL) --command 'mix test --no-start --no-deps-check'
plt:
$(NIX_SHELL) --run "mix dialyzer.plt --no-deps-check"
analyze: build plt
$(NIX_SHELL) --run "mix dialyzer --no-compile"
</programlisting>
<para>
If you add the <literal>shell.nix</literal> as described and
use rebar as above, things should simply work. Aside from the
<literal>test</literal>, <literal>plt</literal>, and
<literal>analyze</literal> targets, the tasks work just fine for all of
the build derivations.
</para>
</section>
</section>
</section>
<section xml:id="generating-packages-from-hex-with-hex2nix">
<title>Generating Packages from Hex with Hex2Nix</title>
<para>
Updating the Hex packages requires the use of the
<literal>hex2nix</literal> tool. Given the path to the Erlang
modules (usually
<literal>pkgs/development/erlang-modules</literal>), it will
happily dump a file called
<literal>hex-packages.nix</literal>. That file will contain all
the packages that use a recognized build system in Hex. However,
it can't know whether or not all those packages are buildable.
</para>
<para>
To make life easier for our users, it makes good sense to go
ahead and attempt to build all those packages and remove the
ones that don't build. To do that, simply run the following command in
the root of your <literal>nixpkgs</literal> repository:
</para>
<programlisting>
$ nix-build -A beamPackages
</programlisting>
<para>
That will build every package in
<literal>beamPackages</literal>. Then you can go through and
manually remove the ones that fail. Hopefully, someone will
improve <literal>hex2nix</literal> in the future to automate
that.
</para>
</section>
</chapter>


@ -251,16 +251,13 @@ bound to the variable name <varname>e2fsprogs</varname> in
<listitem><para>The version part of the <literal>name</literal>
attribute <emphasis>must</emphasis> start with a digit (following a
dash) — e.g., <literal>"hello-0.3-pre-r3910"</literal> instead of
<literal>"hello-svn-r3910"</literal>, as the latter would be seen as
a package named <literal>hello-svn</literal> by
<command>nix-env</command>.</para></listitem>
dash) — e.g., <literal>"hello-0.3.1rc2"</literal>.</para></listitem>
<listitem><para>If package is fetched from git's commit then
<listitem><para>If a package is not a release but a commit from a repository, then
the version part of the name <emphasis>must</emphasis> be the date of that
(fetched) commit. The date must be in <literal>"YYYY-MM-DD"</literal> format.
Also add <literal>"git"</literal> to the name - e.g.,
<literal>"pkgname-git-2014-09-23"</literal>.</para></listitem>
Also append <literal>"unstable"</literal> to the name - e.g.,
<literal>"pkgname-unstable-2014-09-23"</literal>.</para></listitem>
<listitem><para>Dashes in the package name should be preserved
in new variable names, rather than converted to underscores
@ -662,4 +659,22 @@ src = fetchFromGitHub {
</itemizedlist>
</para>
</section>
<section xml:id="sec-patches"><title>Patches</title>
<para>Only patches that are unique to <literal>nixpkgs</literal> should be
included in <literal>nixpkgs</literal> source.</para>
<para>Patches available online should be retrieved using
<literal>fetchpatch</literal>.</para>
<para>
<programlisting>
patches = [
(fetchpatch {
name = "fix-check-for-using-shared-freetype-lib.patch";
url = "http://git.ghostscript.com/?p=ghostpdl.git;a=patch;h=8f5d285";
sha256 = "1f0k043rng7f0rfl9hhb89qzvvksqmkrikmm38p61yfx51l325xr";
})
];
</programlisting>
</para>
</section>
</chapter>


@ -46,10 +46,10 @@ $ export NIXPKGS_ALLOW_UNFREE=1
allowUnfreePredicate = (pkg: ...);
</programlisting>
Example to allow flash player only:
Example to allow flash player and visual studio code only:
<programlisting>
allowUnfreePredicate = (pkg: pkgs.lib.hasPrefix "flashplayer-" pkg.name);
allowUnfreePredicate = with builtins; (pkg: elem (parseDrvName pkg.name).name [ "flashplayer" "vscode" ]);
</programlisting>
</para>


@ -1,14 +1,14 @@
with import ./.. { };
with lib;
let
sources = sourceFilesBySuffices ./. [".xml"];
pkgs = import ./.. { };
lib = pkgs.lib;
sources = lib.sourceFilesBySuffices ./. [".xml"];
sources-langs = ./languages-frameworks;
in
stdenv.mkDerivation {
pkgs.stdenv.mkDerivation {
name = "nixpkgs-manual";
buildInputs = [ pandoc libxml2 libxslt ];
buildInputs = with pkgs; [ pandoc libxml2 libxslt zip ];
xsltFlags = ''
--param section.autolabel 1
@ -26,7 +26,8 @@ stdenv.mkDerivation {
extraHeader = ''xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" '';
in ''
{
pandoc '${inputFile}' -w docbook ${optionalString useChapters "--chapters"} \
pandoc '${inputFile}' -w docbook ${lib.optionalString useChapters "--chapters"} \
--smart \
| sed -e 's|<ulink url=|<link xlink:href=|' \
-e 's|</ulink>|</link>|' \
-e 's|<sect. id=|<section xml:id=|' \
@ -52,38 +53,53 @@ stdenv.mkDerivation {
outputFile = "./languages-frameworks/python.xml";
}
+ toDocbook {
inputFile = ./haskell-users-guide.md;
outputFile = "haskell-users-guide.xml";
useChapters = true;
inputFile = ./languages-frameworks/haskell.md;
outputFile = "./languages-frameworks/haskell.xml";
}
+ toDocbook {
inputFile = ./../pkgs/development/idris-modules/README.md;
inputFile = ../pkgs/development/idris-modules/README.md;
outputFile = "languages-frameworks/idris.xml";
}
+ toDocbook {
inputFile = ./../pkgs/development/r-modules/README.md;
inputFile = ../pkgs/development/node-packages/README.md;
outputFile = "languages-frameworks/node.xml";
}
+ toDocbook {
inputFile = ../pkgs/development/r-modules/README.md;
outputFile = "languages-frameworks/r.xml";
}
+ ''
echo ${nixpkgsVersion} > .version
echo ${lib.nixpkgsVersion} > .version
# validate against relaxng schema
xmllint --nonet --xinclude --noxincludenode manual.xml --output manual-full.xml
${jing}/bin/jing ${docbook5}/xml/rng/docbook/docbook.rng manual-full.xml
${pkgs.jing}/bin/jing ${pkgs.docbook5}/xml/rng/docbook/docbook.rng manual-full.xml
dst=$out/share/doc/nixpkgs
mkdir -p $dst
xsltproc $xsltFlags --nonet --xinclude \
--output $dst/manual.html \
${docbook5_xsl}/xml/xsl/docbook/xhtml/docbook.xsl \
${pkgs.docbook5_xsl}/xml/xsl/docbook/xhtml/docbook.xsl \
./manual.xml
cp ${./style.css} $dst/style.css
mkdir -p $dst/images/callouts
cp "${docbook5_xsl}/xml/xsl/docbook/images/callouts/"*.gif $dst/images/callouts/
cp "${pkgs.docbook5_xsl}/xml/xsl/docbook/images/callouts/"*.gif $dst/images/callouts/
mkdir -p $out/nix-support
echo "doc manual $dst manual.html" >> $out/nix-support/hydra-build-products
xsltproc $xsltFlags --nonet --xinclude \
--output $dst/epub/ \
${pkgs.docbook5_xsl}/xml/xsl/docbook/epub/docbook.xsl \
./manual.xml
cp -r $dst/images $dst/epub/OEBPS
echo "application/epub+zip" > mimetype
manual="$dst/nixpkgs-manual.epub"
zip -0Xq "$manual" mimetype
cd $dst/epub && zip -Xr9D "$manual" *
rm -rf $dst/epub
'';
}


@ -89,20 +89,27 @@ in ...</programlisting>
<title>&lt;pkg&gt;.overrideDerivation</title>
<warning>
<para>Do not use this function in Nixpkgs. Because it breaks
package abstraction and doesnt provide error checking for
function arguments, it is only intended for ad-hoc customisation
(such as in <filename>~/.nixpkgs/config.nix</filename>).</para>
<para>Do not use this function in Nixpkgs as it evaluates a Derivation
before modifying it, which breaks package abstraction and removes
error-checking of function arguments. In addition, this
evaluation-per-function application incurs a performance penalty,
which can become a problem if many overrides are used.
It is only intended for ad-hoc customisation, such as in
<filename>~/.nixpkgs/config.nix</filename>.
</para>
</warning>
<para>
The function <varname>overrideDerivation</varname> is usually available for all the
derivations in the nixpkgs expression (<varname>pkgs</varname>).
The function <varname>overrideDerivation</varname> creates a new derivation
based on an existing one by overriding the original's attributes with
the attribute set produced by the specified function.
This function is available on all
derivations defined using the <varname>makeOverridable</varname> function.
Most standard derivation-producing functions, such as
<varname>stdenv.mkDerivation</varname>, are defined using this
function, which means most packages in the nixpkgs expression,
<varname>pkgs</varname>, have this function.
</para>
<para>
It is used to create a new derivation by overriding the attributes of
the original derivation according to the given function.
</para>
<para>
Example usage:
@ -118,9 +125,9 @@ in ...</programlisting>
</para>
<para>
In the above example, the name, src and patches of the derivation
will be overridden, while all other attributes will be retained from the
original derivation.
In the above example, the <varname>name</varname>, <varname>src</varname>,
and <varname>patches</varname> of the derivation will be overridden, while
all other attributes will be retained from the original derivation.
</para>
<para>
@ -128,6 +135,20 @@ in ...</programlisting>
the original derivation.
</para>
<note>
<para>
A package's attributes are evaluated <emphasis>before</emphasis> being modified by
the <varname>overrideDerivation</varname> function.
For example, the <varname>name</varname> attribute reference
in <varname>url = "mirror://gnu/hello/${name}.tar.gz";</varname>
is filled in <emphasis>before</emphasis> the <varname>overrideDerivation</varname> function
modifies the attribute set. This means that overriding the
<varname>name</varname> attribute, in this example, <emphasis>will not</emphasis> change the
value of the <varname>url</varname> attribute. Instead, we need to override
both the <varname>name</varname> <emphasis>and</emphasis> <varname>url</varname> attributes.
</para>
</note>
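<para>
As a minimal sketch of the note above, assume a hypothetical package
<varname>somePkg</varname> whose <varname>url</varname> attribute is computed
from its <varname>name</varname>; overriding only the name would leave the
old url in place, so both attributes are overridden together:
</para>
<programlisting>
somePkgNew = somePkg.overrideDerivation (oldAttrs: rec {
  # New name; "rec" lets the url below refer to it.
  name = "somepkg-2.0";
  # The original url was already computed from the old name, so it has
  # to be overridden explicitly as well.
  url = "mirror://hypothetical/${name}.tar.gz";
});
</programlisting>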
</section>
<section xml:id="sec-lib-makeOverridable">
@ -164,42 +185,18 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
<section xml:id="sec-fhs-environments">
<title>buildFHSChrootEnv/buildFHSUserEnv</title>
<title>buildFHSUserEnv</title>
<para>
<function>buildFHSChrootEnv</function> and
<function>buildFHSUserEnv</function> provide a way to build and run
FHS-compatible lightweight sandboxes. They get their own isolated root with
binded <filename>/nix/store</filename>, so their footprint in terms of disk
<function>buildFHSUserEnv</function> provides a way to build and run
FHS-compatible lightweight sandboxes. It creates an isolated root with
bound <filename>/nix/store</filename>, so its footprint in terms of disk
space needed is quite small. This allows one to run software which is hard or
unfeasible to patch for NixOS -- 3rd-party source trees with FHS assumptions,
games distributed as tarballs, software with integrity checking and/or external
self-updated binaries.
</para>
<para>
<function>buildFHSChrootEnv</function> allows to create persistent
environments, which can be constructed, deconstructed and entered by
multiple users at once. A downside is that it requires
<literal>root</literal> access for both those who create and destroy and
those who enter it. It can be useful to create environments for daemons that
one can enter and observe.
</para>
<para>
<function>buildFHSUserEnv</function> uses Linux namespaces feature to create
self-updated binaries. It uses the Linux namespaces feature to create
temporary lightweight environments which are destroyed after all child
processes exit. It does not require root access, and can be useful to create
sandboxes and wrap applications.
</para>
<para>
Those functions both rely on <function>buildFHSEnv</function>, which creates
an actual directory structure given a list of necessary packages and extra
build commands.
<function>buildFHSChrootEnv</function> and <function>buildFHSUserEnv</function>
both accept those arguments which are passed to
<function>buildFHSEnv</function>:
processes exit, without requiring root privileges. Accepted arguments are:
</para>
<variablelist>
@ -213,14 +210,16 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
<term><literal>targetPkgs</literal></term>
<listitem><para>Packages to be installed for the main host's architecture
(i.e. x86_64 on x86_64 installations).</para></listitem>
(i.e. x86_64 on x86_64 installations). Along with libraries, binaries are also
installed.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>multiPkgs</literal></term>
<listitem><para>Packages to be installed for all architectures supported by
a host (i.e. i686 and x86_64 on x86_64 installations).</para></listitem>
a host (i.e. i686 and x86_64 on x86_64 installations). Only libraries are
installed by default.</para></listitem>
</varlistentry>
<varlistentry>
@ -233,29 +232,33 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
<varlistentry>
<term><literal>extraBuildCommandsMulti</literal></term>
<listitem><para>Like <literal>extraBuildCommandsMulti</literal>, but
<listitem><para>Like <literal>extraBuildCommands</literal>, but
executed only on multilib architectures.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>extraOutputsToInstall</literal></term>
<listitem><para>Additional derivation outputs to be linked for both
target and multi-architecture packages.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>extraInstallCommands</literal></term>
<listitem><para>Additional commands to be executed for finalizing the
derivation with the runner script.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>runScript</literal></term>
<listitem><para>A command that would be executed inside the sandbox and
passed all the command line arguments. It defaults to
<literal>bash</literal>.</para></listitem>
</varlistentry>
</variablelist>
<para>
Additionally, <function>buildFHSUserEnv</function> accepts
<literal>runScript</literal> parameter, which is a command that would be
executed inside the sandbox and passed all the command line arguments. It
default to <literal>bash</literal>.
</para>
<para>
It also uses <literal>CHROOTENV_EXTRA_BINDS</literal> environment variable
for binding extra directories in the sandbox to outside places. The format of
the variable is <literal>/mnt=test-mnt:/data</literal>, where
<literal>/mnt</literal> would be mounted as <literal>/test-mnt</literal>
and <literal>/data</literal> would be mounted as <literal>/data</literal>.
<literal>extraBindMounts</literal> array argument to
<function>buildFHSUserEnv</function> function is prepended to this variable.
Latter entries take priority if defined several times -- i.e. in case of
<literal>/data=data1:/data=data2</literal> the actual bind path would be
<literal>/data2</literal>.
</para>
<para>
One can create a simple environment using a <literal>shell.nix</literal>
like this:
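</para>
<para>
What follows is a minimal, illustrative sketch: the environment name and
package choices are placeholders, and the <literal>env</literal> attribute is
assumed here to provide the nix-shell integration.
</para>
<programlisting>
{ pkgs ? import &lt;nixpkgs&gt; {} }:

(pkgs.buildFHSUserEnv {
  # Hypothetical environment name; the generated launcher uses it.
  name = "example-fhs-env";
  # Packages installed for the host architecture (libraries and binaries).
  targetPkgs = pkgs: [ pkgs.coreutils ];
  # Packages installed for all supported architectures (libraries only).
  multiPkgs = pkgs: [ pkgs.zlib ];
  # Command executed inside the sandbox.
  runScript = "bash";
}).env
</programlisting>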


@ -1,808 +0,0 @@
---
title: User's Guide for Haskell in Nixpkgs
author: Peter Simons
date: 2015-06-01
---
# User's Guide to the Haskell Infrastructure
## How to install Haskell packages
Nixpkgs distributes build instructions for all Haskell packages registered on
[Hackage](http://hackage.haskell.org/), but strangely enough normal Nix package
lookups don't seem to discover any of them, except for the default version of ghc, cabal-install, and stack:
$ nix-env -i alex
error: selector alex matches no derivations
$ nix-env -qa ghc
ghc-7.10.2
The Haskell package set is not registered in the top-level namespace because it
is *huge*. If all Haskell packages were visible to these commands, then
name-based search/install operations would be much slower than they are now. We
avoided that by keeping all Haskell-related packages in a separate attribute
set called `haskellPackages`, which the following command will list:
$ nix-env -f "<nixpkgs>" -qaP -A haskellPackages
haskellPackages.a50 a50-0.5
haskellPackages.abacate haskell-abacate-0.0.0.0
haskellPackages.abcBridge haskell-abcBridge-0.12
haskellPackages.afv afv-0.1.1
haskellPackages.alex alex-3.1.4
haskellPackages.Allure Allure-0.4.101.1
haskellPackages.alms alms-0.6.7
[... some 8000 entries omitted ...]
To install any of those packages into your profile, refer to them by their
attribute path (first column):
$ nix-env -f "<nixpkgs>" -iA haskellPackages.Allure ...
The attribute path of any Haskell packages corresponds to the name of that
particular package on Hackage: the package `cabal-install` has the attribute
`haskellPackages.cabal-install`, and so on. (Actually, this convention causes
trouble with packages like `3dmodels` and `4Blocks`, because these names are
invalid identifiers in the Nix language. The issue of how to deal with these
rare corner cases is currently unresolved.)
Haskell packages whose Nix name (second column) begins with a `haskell-` prefix
are packages that provide a library whereas packages without that prefix
provide just executables. Libraries may provide executables too, though: the
package `haskell-pandoc`, for example, installs both a library and an
application. You can install and use Haskell executables just like any other
program in Nixpkgs, but using Haskell libraries for development is a bit
trickier and we'll address that subject in great detail in section [How to
create a development environment].
Attribute paths are deterministic inside of Nixpkgs, but the path necessary to
reach Nixpkgs varies from system to system. We dodged that problem by giving
`nix-env` an explicit `-f "<nixpkgs>"` parameter, but if you call `nix-env`
without that flag, then chances are the invocation fails:
$ nix-env -iA haskellPackages.cabal-install
error: attribute haskellPackages in selection path
haskellPackages.cabal-install not found
On NixOS, for example, Nixpkgs does *not* exist in the top-level namespace by
default. To figure out the proper attribute path, it's easiest to query for the
path of a well-known Nixpkgs package, i.e.:
$ nix-env -qaP coreutils
nixos.coreutils coreutils-8.23
If your system responds like that (most NixOS installations will), then the
attribute path to `haskellPackages` is `nixos.haskellPackages`. Thus, if you
want to use `nix-env` without giving an explicit `-f` flag, then that's the way
to do it:
$ nix-env -qaP -A nixos.haskellPackages
$ nix-env -iA nixos.haskellPackages.cabal-install
Our current default compiler is GHC 7.10.x and the `haskellPackages` set
contains packages built with that particular version. Nixpkgs contains the
latest major release of every GHC since 6.10.4, however, and there is a whole
family of package sets available that defines Hackage packages built with each
of those compilers, too:
$ nix-env -f "<nixpkgs>" -qaP -A haskell.packages.ghc6123
$ nix-env -f "<nixpkgs>" -qaP -A haskell.packages.ghc763
The name `haskellPackages` is really just a synonym for
`haskell.packages.ghc7102`, because we prefer that package set internally and
recommend it to our users as their default choice, but ultimately you are free
to compile your Haskell packages with any GHC version you please. The following
command displays the complete list of available compilers:
$ nix-env -f "<nixpkgs>" -qaP -A haskell.compiler
haskell.compiler.ghc6104 ghc-6.10.4
haskell.compiler.ghc6123 ghc-6.12.3
haskell.compiler.ghc704 ghc-7.0.4
haskell.compiler.ghc722 ghc-7.2.2
haskell.compiler.ghc742 ghc-7.4.2
haskell.compiler.ghc763 ghc-7.6.3
haskell.compiler.ghc784 ghc-7.8.4
haskell.compiler.ghc7102 ghc-7.10.2
haskell.compiler.ghcHEAD ghc-7.11.20150402
haskell.compiler.ghcNokinds ghc-nokinds-7.11.20150704
haskell.compiler.ghcjs ghcjs-0.1.0
haskell.compiler.jhc jhc-0.8.2
haskell.compiler.uhc uhc-1.1.9.0
We have no package sets for `jhc` or `uhc` yet, unfortunately, but for every
version of GHC listed above, there exists a package set based on that compiler.
Also, the attributes `haskell.compiler.ghcXYC` and
`haskell.packages.ghcXYC.ghc` are synonymous for the sake of convenience.
## How to create a development environment
### How to install a compiler
A simple development environment consists of a Haskell compiler and one or both
of the tools `cabal-install` and `stack`. We saw in section
[How to install Haskell packages] how you can install those programs into your
user profile:
$ nix-env -f "<nixpkgs>" -iA haskellPackages.ghc haskellPackages.cabal-install
Instead of the default package set `haskellPackages`, you can also use the more
precise name `haskell.compiler.ghc7102`, which has the advantage that it refers
to the same GHC version regardless of what Nixpkgs considers "default" at any
given time.
Once you've made those tools available in `$PATH`, it's possible to build
Hackage packages the same way people without access to Nix do it all the time:
$ cabal get lens-4.11 && cd lens-4.11
$ cabal install -j --dependencies-only
$ cabal configure
$ cabal build
If you enjoy working with Cabal sandboxes, then that's entirely possible too:
just execute the command
$ cabal sandbox init
before installing the required dependencies.
The `nix-shell` utility makes it easy to switch to a different compiler
version; just enter the Nix shell environment with the command
$ nix-shell -p haskell.compiler.ghc784
to bring GHC 7.8.4 into `$PATH`. Alternatively, you can use Stack instead of
`nix-shell` directly to select compiler versions and other build tools
per-project. It uses `nix-shell` under the hood when Nix support is turned on.
See [How to build a Haskell project using Stack].
If you're using `cabal-install`, re-running `cabal configure` inside the spawned
shell switches your build to use that compiler instead. If you're working on
a project that doesn't depend on any additional system libraries outside of GHC,
then it's even sufficient to just run the `cabal configure` command inside of
the shell:
$ nix-shell -p haskell.compiler.ghc784 --command "cabal configure"
Afterwards, all other commands like `cabal build` work just fine in any shell
environment, because the configure phase recorded the absolute paths to all
required tools like GHC in its build configuration inside of the `dist/`
directory. Please note, however, that `nix-collect-garbage` can break such an
environment because the Nix store paths created by `nix-shell` aren't "alive"
anymore once `nix-shell` has terminated. If you find that your Haskell builds
no longer work after garbage collection, then you'll have to re-run `cabal
configure` inside of a new `nix-shell` environment.
### How to install a compiler with libraries
GHC expects to find all installed libraries inside of its own `lib` directory.
This approach works fine on traditional Unix systems, but it doesn't work for
Nix, because GHC's store path is immutable once it's built. We cannot install
additional libraries into that location. As a consequence, our copies of GHC
don't know any packages except their own core libraries, like `base`,
`containers`, `Cabal`, etc.
We can register additional libraries to GHC, however, using a special build
function called `ghcWithPackages`. That function expects one argument: a
function that maps from an attribute set of Haskell packages to a list of
packages, which determines the libraries known to that particular version of
GHC. For example, the Nix expression `ghcWithPackages (pkgs: [pkgs.mtl])`
generates a copy of GHC that has the `mtl` library registered in addition to
its normal core packages:
$ nix-shell -p "haskellPackages.ghcWithPackages (pkgs: [pkgs.mtl])"
[nix-shell:~]$ ghc-pkg list mtl
/nix/store/zy79...-ghc-7.10.2/lib/ghc-7.10.2/package.conf.d:
mtl-2.2.1
This function allows users to define their own development environment by means
of an override. After adding the following snippet to `~/.nixpkgs/config.nix`,
{
packageOverrides = super: let self = super.pkgs; in
{
myHaskellEnv = self.haskell.packages.ghc7102.ghcWithPackages
(haskellPackages: with haskellPackages; [
# libraries
arrows async cgi criterion
# tools
cabal-install haskintex
]);
};
}
it's possible to install that compiler with `nix-env -f "<nixpkgs>" -iA
myHaskellEnv`. If you'd like to switch that development environment to a
different version of GHC, just replace the `ghc7102` bit in the previous
definition with the appropriate name. Of course, it's also possible to define
any number of these development environments! (You can't install two of them
into the same profile at the same time, though, because that would result in
file conflicts.)
The generated `ghc` program is a wrapper script that re-directs the real
GHC executable to use a new `lib` directory --- one that we specifically
constructed to contain all those packages the user requested:
$ cat $(type -p ghc)
#! /nix/store/xlxj...-bash-4.3-p33/bin/bash -e
export NIX_GHC=/nix/store/19sm...-ghc-7.10.2/bin/ghc
export NIX_GHCPKG=/nix/store/19sm...-ghc-7.10.2/bin/ghc-pkg
export NIX_GHC_DOCDIR=/nix/store/19sm...-ghc-7.10.2/share/doc/ghc/html
export NIX_GHC_LIBDIR=/nix/store/19sm...-ghc-7.10.2/lib/ghc-7.10.2
exec /nix/store/j50p...-ghc-7.10.2/bin/ghc "-B$NIX_GHC_LIBDIR" "$@"
The variables `$NIX_GHC`, `$NIX_GHCPKG`, etc. point to the *new* store path
`ghcWithPackages` constructed specifically for this environment. The last line
of the wrapper script then executes the real `ghc`, but passes the path to the
new `lib` directory using GHC's `-B` flag.
The purpose of those environment variables is to work around an impurity in the
popular [ghc-paths](http://hackage.haskell.org/package/ghc-paths) library. That
library promises to give its users access to GHC's installation paths. Only,
the library can't possibly know that path when it's compiled, because the path
GHC considers its own is determined only much later, when the user configures
it through `ghcWithPackages`. So we [patched
ghc-paths](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/haskell-modules/patches/ghc-paths-nix.patch)
to return the paths found in those environment variables at run-time rather
than trying to guess them at compile-time.
To make sure that mechanism works properly all the time, we recommend that you
set those variables to meaningful values in your shell environment, too, i.e.
by adding the following code to your `~/.bashrc`:
if type >/dev/null 2>&1 -p ghc; then
eval "$(egrep ^export "$(type -p ghc)")"
fi
If you are certain that you'll use only one GHC environment which is located in
your user profile, then you can use the following code, too, which has the
advantage that it doesn't contain any paths from the Nix store, i.e. those
settings always remain valid even if a `nix-env -u` operation updates the GHC
environment in your profile:
if [ -e ~/.nix-profile/bin/ghc ]; then
export NIX_GHC="$HOME/.nix-profile/bin/ghc"
export NIX_GHCPKG="$HOME/.nix-profile/bin/ghc-pkg"
export NIX_GHC_DOCDIR="$HOME/.nix-profile/share/doc/ghc/html"
export NIX_GHC_LIBDIR="$HOME/.nix-profile/lib/ghc-$($NIX_GHC --numeric-version)"
fi
### How to install a compiler with libraries, hoogle and documentation indexes
If you plan to use your environment for interactive programming, not just
compiling random Haskell code, you might want to replace `ghcWithPackages` in
all the listings above with `ghcWithHoogle`.
This environment generator not only produces an environment with GHC and all
the specified libraries, but also generates `hoogle` and `haddock` indexes
for all the packages, and provides a wrapper script around the `hoogle` binary that
uses all those things. A precise name for this thing would be
"`ghcWithPackagesAndHoogleAndDocumentationIndexes`", which is, regrettably, too
long and scary.
For example, installing the following environment
{
packageOverrides = super: let self = super.pkgs; in
{
myHaskellEnv = self.haskellPackages.ghcWithHoogle
(haskellPackages: with haskellPackages; [
# libraries
arrows async cgi criterion
# tools
cabal-install haskintex
]);
};
}
allows one to browse a module documentation index [not too dissimilar to
this](https://downloads.haskell.org/~ghc/latest/docs/html/libraries/index.html)
for all the specified packages and their dependencies by directing a browser of
choice to `~/.nix-profiles/share/doc/hoogle/index.html` (or
`/run/current-system/sw/share/doc/hoogle/index.html` in case you put it in
`environment.systemPackages` in NixOS).
After you've marveled enough at that, try adding the following to your
`~/.ghc/ghci.conf`
:def hoogle \s -> return $ ":! hoogle search -cl --count=15 \"" ++ s ++ "\""
:def doc \s -> return $ ":! hoogle search -cl --info \"" ++ s ++ "\""
and test it by typing into `ghci`:
:hoogle a -> a
:doc a -> a
Be sure to note the links to `haddock` files in the output. With any modern and
properly configured terminal emulator you can just click those links to
navigate there.
Finally, you can run
hoogle server -p 8080
and navigate to http://localhost:8080/ for your own local
[Hoogle](https://www.haskell.org/hoogle/). Note, however, that Firefox and
possibly other browsers disallow navigation from `http:` to `file:` URIs for
security reasons, which might be quite an inconvenience. See [this
page](http://kb.mozillazine.org/Links_to_local_pages_do_not_work) for
workarounds.
### How to build a Haskell project using Stack
[Stack](http://haskellstack.org) is a popular build tool for Haskell projects.
It has first-class support for Nix. Stack can optionally use Nix to
automatically select the right version of GHC and other build tools to build,
test and execute apps in an existing project downloaded from somewhere on the
Internet. Pass the `--nix` flag to any `stack` command to do so, e.g.
$ git clone --recursive http://github.com/yesodweb/wai
$ cd wai
$ stack --nix build
If you want `stack` to use Nix by default, you can add a `nix` section to the
`stack.yaml` file, as explained in the [Stack documentation][stack-nix-doc]. For
example:
nix:
enable: true
packages: [pkgconfig zeromq zlib]
The example configuration snippet above tells Stack to create an ad hoc
environment for `nix-shell` as in the below section, in which the `pkgconfig`,
`zeromq` and `zlib` packages from Nixpkgs are available. All `stack` commands
will implicitly be executed inside this ad hoc environment.
Some projects have more sophisticated needs. For example, some ad hoc
environments might need to expose Nixpkgs packages compiled in a certain way, or
with extra environment variables. In these cases, you'll need a `shell` field
instead of `packages`:
nix:
enable: true
shell-file: shell.nix
For more on how to write a `shell.nix` file see the below section. You'll need
to express a derivation. Note that Nixpkgs ships with a convenience wrapper
function around `mkDerivation` called `haskell.lib.buildStackProject` to help you
create this derivation in exactly the way Stack expects. All of the same inputs
as `mkDerivation` can be provided. For example, to build a Stack project that
includes packages that link against a version of the R library compiled with
special options turned on:
with (import <nixpkgs> { });
let R = pkgs.R.override { enableStrictBarrier = true; };
in
haskell.lib.buildStackProject {
name = "HaskellR";
buildInputs = [ R zeromq zlib ];
}
[stack-nix-doc]: http://docs.haskellstack.org/en/stable/nix_integration.html
### How to create ad hoc environments for `nix-shell`
The easiest way to create an ad hoc development environment is to run
`nix-shell` with the appropriate GHC environment given on the command-line:
nix-shell -p "haskellPackages.ghcWithPackages (pkgs: with pkgs; [mtl pandoc])"
For more sophisticated use-cases, however, it's more convenient to save the
desired configuration in a file called `shell.nix` that looks like this:
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
let
inherit (nixpkgs) pkgs;
ghc = pkgs.haskell.packages.${compiler}.ghcWithPackages (ps: with ps; [
monad-par mtl
]);
in
pkgs.stdenv.mkDerivation {
name = "my-haskell-env-0";
buildInputs = [ ghc ];
shellHook = "eval $(egrep ^export ${ghc}/bin/ghc)";
}
Now run `nix-shell` --- or even `nix-shell --pure` --- to enter a shell
environment that has the appropriate compiler in `$PATH`. If you use `--pure`,
then add all other packages that your development environment needs into the
`buildInputs` attribute. If you'd like to switch to a different compiler
version, then pass an appropriate `compiler` argument to the expression, i.e.
`nix-shell --argstr compiler ghc784`.
If you need such an environment because you'd like to compile a Hackage package
outside of Nix --- i.e. because you're hacking on the latest version from Git
---, then the package set provides suitable nix-shell environments for you
already! Every Haskell package has an `env` attribute that provides a shell
environment suitable for compiling that particular package. If you'd like to
hack the `lens` library, for example, then you just have to check out the
source code and enter the appropriate environment:
$ cabal get lens-4.11 && cd lens-4.11
Downloading lens-4.11...
Unpacking to lens-4.11/
$ nix-shell "<nixpkgs>" -A haskellPackages.lens.env
[nix-shell:/tmp/lens-4.11]$
At this point, you can run `cabal configure`, `cabal build`, and all the other
development commands. Note that you need `cabal-install` installed in your
`$PATH` already to use it here --- the `nix-shell` environment does not provide
it.
## How to create Nix builds for your own private Haskell packages
If your own Haskell packages have build instructions for Cabal, then you can
convert those automatically into build instructions for Nix using the
`cabal2nix` utility, which you can install into your profile by running
`nix-env -i cabal2nix`.
### How to build a stand-alone project
For example, let's assume that you're working on a private project called
`foo`. To generate a Nix build expression for it, change into the project's
top-level directory and run the command:
$ cabal2nix . >foo.nix
Then write the following snippet into a file called `default.nix`:
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
nixpkgs.pkgs.haskell.packages.${compiler}.callPackage ./foo.nix { }
Finally, store the following code in a file called `shell.nix`:
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
(import ./default.nix { inherit nixpkgs compiler; }).env
At this point, you can run `nix-build` to have Nix compile your project and
install it into a Nix store path. The local directory will contain a symlink
called `result` after `nix-build` returns that points into that location. Of
course, passing the flag `--argstr compiler ghc763` allows switching the build
to any version of GHC currently supported.
Furthermore, you can call `nix-shell` to enter an interactive development
environment in which you can use `cabal configure` and `cabal build` to develop
your code. That environment will automatically contain a proper GHC derivation
with all the required libraries registered as well as all the system-level
libraries your package might need.
If your package does not depend on any system-level libraries, then it's
sufficient to run
$ nix-shell --command "cabal configure"
once to set up your build. `cabal-install` determines the absolute paths to all
resources required for the build and writes them into a config file in the
`dist/` directory. Once that's done, you can run `cabal build` and any other
command for that project even outside of the `nix-shell` environment. This
feature is particularly nice for those of us who like to edit their code with
an IDE, like Emacs' `haskell-mode`, because it's not necessary to start Emacs
inside of nix-shell just to make it find out the necessary settings for
building the project; `cabal-install` has already done that for us.
If you want to do some quick-and-dirty hacking and don't want to bother setting
up a `default.nix` and `shell.nix` file manually, then you can use the
`--shell` flag offered by `cabal2nix` to have it generate a stand-alone
`nix-shell` environment for you. With that feature, running
$ cabal2nix --shell . >shell.nix
$ nix-shell --command "cabal configure"
is usually enough to set up a build environment for any given Haskell package.
You can even use that generated file to run `nix-build`, too:
$ nix-build shell.nix
### How to build projects that depend on each other
If you have multiple private Haskell packages that depend on each other, then
you'll have to register those packages in the Nixpkgs set to make them visible
for the dependency resolution performed by `callPackage`. First of all, change
into each of your projects top-level directories and generate a `default.nix`
file with `cabal2nix`:
$ cd ~/src/foo && cabal2nix . >default.nix
$ cd ~/src/bar && cabal2nix . >default.nix
Then edit your `~/.nixpkgs/config.nix` file to register those builds in the
default Haskell package set:
{
packageOverrides = super: let self = super.pkgs; in
{
haskellPackages = super.haskellPackages.override {
overrides = self: super: {
foo = self.callPackage ../src/foo {};
bar = self.callPackage ../src/bar {};
};
};
};
}
Once that's accomplished, `nix-env -f "<nixpkgs>" -qA haskellPackages` will
show your packages like any other package from Hackage, and you can build them
$ nix-build "<nixpkgs>" -A haskellPackages.foo
or enter an interactive shell environment suitable for building them:
$ nix-shell "<nixpkgs>" -A haskellPackages.bar.env
## Miscellaneous Topics
### How to build with profiling enabled
Every Haskell package set takes a function called `overrides` that you can use
to manipulate the package as much as you please. One useful application of this
feature is to replace the default `mkDerivation` function with one that enables
library profiling for all packages. To accomplish that, add the
following snippet to your `~/.nixpkgs/config.nix` file:
{
packageOverrides = super: let self = super.pkgs; in
{
profiledHaskellPackages = self.haskellPackages.override {
overrides = self: super: {
mkDerivation = args: super.mkDerivation (args // {
enableLibraryProfiling = true;
});
};
};
};
}
Then, replace instances of `haskellPackages` in the `cabal2nix`-generated
`default.nix` or `shell.nix` files with `profiledHaskellPackages`.
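For illustration, a minimal sketch of what the resulting `default.nix` for the
private package `foo` from the earlier section might look like, assuming the
`profiledHaskellPackages` override shown above is present in
`~/.nixpkgs/config.nix`:

    { nixpkgs ? import <nixpkgs> {} }:

    # profiledHaskellPackages comes from the packageOverrides snippet above,
    # so foo and all of its Haskell dependencies are built with library
    # profiling enabled.
    nixpkgs.pkgs.profiledHaskellPackages.callPackage ./foo.nix { }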
### How to override package versions in a compiler-specific package set
Nixpkgs provides the latest version of
[`ghc-events`](http://hackage.haskell.org/package/ghc-events), which is 0.4.4.0
at the time of this writing. This is fine for users of GHC 7.10.x, but GHC
7.8.4 cannot compile that binary. Now, one way to solve that problem is to
register an older version of `ghc-events` in the 7.8.x-specific package set.
The first step is to generate Nix build instructions with `cabal2nix`:
$ cabal2nix cabal://ghc-events-0.4.3.0 >~/.nixpkgs/ghc-events-0.4.3.0.nix
Then add the override in `~/.nixpkgs/config.nix`:
{
packageOverrides = super: let self = super.pkgs; in
{
haskell = super.haskell // {
packages = super.haskell.packages // {
ghc784 = super.haskell.packages.ghc784.override {
overrides = self: super: {
ghc-events = self.callPackage ./ghc-events-0.4.3.0.nix {};
};
};
};
};
};
}
This code is a little crazy, no doubt, but it's necessary because the intuitive
version
haskell.packages.ghc784 = super.haskell.packages.ghc784.override {
overrides = self: super: {
ghc-events = self.callPackage ./ghc-events-0.4.3.0.nix {};
};
};
doesn't do what we want it to: that code replaces the `haskell` package set in
Nixpkgs with one that contains only one entry, `packages`, which contains only
one entry `ghc784`. This override loses the `haskell.compiler` set, and it
loses the `haskell.packages.ghcXYZ` sets for all compilers but GHC 7.8.4. To
avoid that problem, we have to perform the convoluted little dance from above,
iterating over each step in the hierarchy.
Once it's accomplished, however, we can install a variant of `ghc-events`
that's compiled with GHC 7.8.4:
nix-env -f "<nixpkgs>" -iA haskell.packages.ghc784.ghc-events
Unfortunately, it turns out that this build fails again while executing the
test suite! Apparently, the release archive on Hackage is missing some data
files that the test suite requires, so we cannot run it. We work around that by
re-generating the Nix expression with the `--no-check` flag:
$ cabal2nix --no-check cabal://ghc-events-0.4.3.0 >~/.nixpkgs/ghc-events-0.4.3.0.nix
Now the build succeeds.
Of course, in the concrete example of `ghc-events` this whole exercise is not
an ideal solution, because `ghc-events` can analyze the output emitted by any
version of GHC later than 6.12 regardless of the compiler version that was used
to build the `ghc-events` executable, so strictly speaking there's no reason to
prefer one built with GHC 7.8.x in the first place. However, for users who
cannot use GHC 7.10.x at all for some reason, the approach of downgrading to an
older version might be useful.
### How to recover from GHC's infamous non-deterministic library ID bug
GHC and distributed build farms don't get along well:
https://ghc.haskell.org/trac/ghc/ticket/4012
When you see an error like this one
package foo-0.7.1.0 is broken due to missing package
text-1.2.0.4-98506efb1b9ada233bb5c2b2db516d91
then you have to download and re-install `foo` and all its dependents from
scratch:
# nix-store -q --referrers /nix/store/*-haskell-text-1.2.0.4 \
| xargs -L 1 nix-store --repair-path --option binary-caches http://hydra.nixos.org
If you're using additional Hydra servers other than `hydra.nixos.org`, then it
might be necessary to purge the local caches that store data from those
machines to disable these binary channels for the duration of the previous
command, i.e. by running:
rm /nix/var/nix/binary-cache-v3.sqlite
rm /nix/var/nix/manifests/*
rm /nix/var/nix/channel-cache/*
### How to use the Haste Haskell-to-Javascript transpiler
Open a shell with `haste-compiler` and `haste-cabal-install` (you don't actually need
`node`, but it can be useful to test stuff):
$ nix-shell -p "haskellPackages.ghcWithPackages (self: with self; [haste-cabal-install haste-compiler])" -p nodejs
You may not need the following step, but if `haste-boot` fails to compile all
the packages it needs, this might do the trick:
$ haste-cabal update
`haste-boot` builds a set of core libraries so that they can be used from Javascript
transpiled programs:
$ haste-boot
Transpile and run a "Hello world" program:
$ echo 'module Main where main = putStrLn "Hello world"' > hello-world.hs
$ hastec --onexec hello-world.hs
$ node hello-world.js
Hello world
### Builds on Darwin fail with `math.h` not found
Users of GHC on Darwin have occasionally reported that builds fail, because the
compiler complains about a missing include file:
fatal error: 'math.h' file not found
The issue has been discussed at length in [ticket
6390](https://github.com/NixOS/nixpkgs/issues/6390), and so far no good
solution has been proposed. As a work-around, users who run into this problem
can configure the environment variables
export NIX_CFLAGS_COMPILE="-idirafter /usr/include"
export NIX_CFLAGS_LINK="-L/usr/lib"
in their `~/.bashrc` file to avoid the compiler error.
### Builds using Stack complain about missing system libraries
-- While building package zlib-0.5.4.2 using:
runhaskell -package=Cabal-1.22.4.0 -clear-package-db [... lots of flags ...]
Process exited with code: ExitFailure 1
Logs have been written to: /home/foo/src/stack-ide/.stack-work/logs/zlib-0.5.4.2.log
Configuring zlib-0.5.4.2...
Setup.hs: Missing dependency on a foreign library:
* Missing (or bad) header file: zlib.h
This problem can usually be solved by installing the system package that
provides this library (you may need the "-dev" version). If the library is
already installed but in a non-standard location then you can use the flags
--extra-include-dirs= and --extra-lib-dirs= to specify where it is.
If the header file does exist, it may contain errors that are caught by the C
compiler at the preprocessing stage. In this case you can re-run configure
with the verbosity flag -v3 to see the error messages.
When you run the build inside of the nix-shell environment, the system
is configured to find libz.so without any special flags -- the compiler
and linker "just know" how to find it. Consequently, Cabal won't record
any search paths for libz.so in the package description, which means
that the package works fine inside of nix-shell, but once you leave the
shell the shared object can no longer be found. That issue is by no
means specific to Stack: you'll have that problem with any other
Haskell package that's built inside of nix-shell but run outside of that
environment.
You can remedy this issue in several ways. The easiest is to add a `nix` section
to the `stack.yaml` like the following:
nix:
enable: true
packages: [ zlib ]
Stack's Nix support knows to add `${zlib}/lib` and `${zlib}/include` as
`--extra-lib-dirs` and `--extra-include-dirs` options, respectively. Alternatively, you
can achieve the same effect by hand. First of all, run
$ nix-build --no-out-link "<nixpkgs>" -A zlib
/nix/store/alsvwzkiw4b7ip38l4nlfjijdvg3fvzn-zlib-1.2.8
to find out the store path of the system's zlib library. Now, you can
1) add that path (plus a "/lib" suffix) to your $LD_LIBRARY_PATH
environment variable to make sure your system linker finds libz.so
automatically. It's not a pretty solution, but it will work.
2) As a variant of (1), you can also install any number of system
libraries into your user's profile (or some other profile) and point
$LD_LIBRARY_PATH to that profile instead, so that you don't have to
list dozens of those store paths all over the place.
3) The solution I prefer is to call stack with an appropriate
--extra-lib-dirs flag like so:
$ stack --extra-lib-dirs=/nix/store/alsvwzkiw4b7ip38l4nlfjijdvg3fvzn-zlib-1.2.8/lib build
Typically, you'll need --extra-include-dirs as well. It's possible
to add those flags to the project's "stack.yaml" or your user's
global "~/.stack/global/stack.yaml" file so that you don't have to
specify them manually every time. But again, you're likely better off using
Stack's Nix support instead.
The same thing applies to `cabal configure`, of course, if you're
building with `cabal-install` instead of Stack.
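For completeness, here is a sketch of the corresponding `cabal configure`
invocation; the zlib store path is the illustrative one from above and will
differ on your system:

$ cabal configure --extra-include-dirs=/nix/store/alsvwzkiw4b7ip38l4nlfjijdvg3fvzn-zlib-1.2.8/include \
                  --extra-lib-dirs=/nix/store/alsvwzkiw4b7ip38l4nlfjijdvg3fvzn-zlib-1.2.8/lib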
### Creating statically linked binaries
There are two levels of static linking. The first option is to configure the
build with the Cabal flag `--disable-executable-dynamic`. In Nix expressions,
this can be achieved by setting the attribute:
enableSharedExecutables = false;
That gives you a binary with statically linked Haskell libraries and
dynamically linked system libraries.
To link both Haskell libraries and system libraries statically, the additional
flags `--ghc-option=-optl=-static --ghc-option=-optl=-pthread` need to be used.
In Nix, this is accomplished with:
configureFlags = [ "--ghc-option=-optl=-static" "--ghc-option=-optl=-pthread" ];
It's important to realize, however, that most system libraries in Nix are built
as shared libraries only, i.e. there is just no static library available that
Cabal could link!
## Other resources
- The Youtube video [Nix Loves Haskell](https://www.youtube.com/watch?v=BsBhi_r-OeE)
provides an introduction to Haskell NG aimed at beginners. The slides are
available at http://cryp.to/nixos-meetup-3-slides.pdf and also -- in a form
ready for cut & paste -- at
https://github.com/NixOS/cabal2nix/blob/master/doc/nixos-meetup-3-slides.md.
- Another Youtube video is [Escaping Cabal Hell with Nix](https://www.youtube.com/watch?v=mQd3s57n_2Y),
which discusses the subject of Haskell development with Nix but also provides
a basic introduction to Nix as well, i.e. it's suitable for viewers with
almost no prior Nix experience.
- Oliver Charles wrote a very nice [Tutorial how to develop Haskell packages with Nix](http://wiki.ocharles.org.uk/Nix).
- The *Journey into the Haskell NG infrastructure* series of postings
describes the new Haskell infrastructure in great detail:
- [Part 1](http://lists.science.uu.nl/pipermail/nix-dev/2015-January/015591.html)
explains the differences between the old and the new code and gives
instructions how to migrate to the new setup.
- [Part 2](http://lists.science.uu.nl/pipermail/nix-dev/2015-January/015608.html)
looks in-depth at how to tweak and configure your setup by means of
overrides.
- [Part 3](http://lists.science.uu.nl/pipermail/nix-dev/2015-April/016912.html)
describes the infrastructure that keeps the Haskell package set in Nixpkgs
up-to-date.

View File

@ -0,0 +1,376 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-beam">
<title>Beam Languages (Erlang &amp; Elixir)</title>
<section xml:id="beam-introduction">
<title>Introduction</title>
<para>
In this document and related Nix expressions we use the term
<emphasis>Beam</emphasis> to describe the environment. Beam is
the name of the Erlang Virtual Machine and, as far as we know,
from a packaging perspective all languages that run on Beam are
interchangeable. The things that do change, like the build
system, are transparent to the users of the package. So we make
no distinction.
</para>
</section>
<section xml:id="build-tools">
<title>Build Tools</title>
<section xml:id="build-tools-rebar3">
<title>Rebar3</title>
<para>
By default Rebar3 wants to manage its own dependencies. In the
normal, non-Nix world this is perfectly acceptable, but in the Nix
world it is not. To support this we have created two versions of
rebar3, <literal>rebar3</literal> and <literal>rebar3-open</literal>.
The <literal>rebar3</literal> version has been patched to remove its
ability to download anything. If you are not running it in a
nix-shell or a nix-build, then it is probably not going to work for
you. <literal>rebar3-open</literal> is the normal, unmodified
rebar3. It should work exactly as would any other version of
rebar3. Any Erlang package should rely on
<literal>rebar3</literal>, and that's really what you should be
using too.
</para>
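<para>
  For example, a quick, illustrative way to try the patched
  <literal>rebar3</literal> (assuming the top-level
  <literal>erlang</literal> and <literal>rebar3</literal> attributes
  from your channel) is to enter a shell that provides both:
</para>
<programlisting>
$ nix-shell -p erlang rebar3 --run "rebar3 --version"
</programlisting>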
</section>
<section xml:id="build-tools-other">
<title>Mix &amp; Erlang.mk</title>
<para>
Both Mix and Erlang.mk work exactly as you would expect. There
is a bootstrap process that needs to be run for both of
them. However, that is supported by the
<literal>buildMix</literal> and <literal>buildErlangMk</literal> derivations.
</para>
</section>
</section>
<section xml:id="how-to-install-beam-packages">
<title>How to install Beam packages</title>
<para>
Beam packages are not registered in the top level simply because
they are not relevant to the vast majority of Nix users. They are
installable using the <literal>beamPackages</literal> attribute
set.
You can list the available packages in
<literal>beamPackages</literal> with the following command:
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A beamPackages
beamPackages.esqlite esqlite-0.2.1
beamPackages.goldrush goldrush-0.1.7
beamPackages.ibrowse ibrowse-4.2.2
beamPackages.jiffy jiffy-0.14.5
beamPackages.lager lager-3.0.2
beamPackages.meck meck-0.8.3
beamPackages.rebar3-pc pc-1.1.0
</programlisting>
<para>
To install any of those packages into your profile, refer to them by
their attribute path (first column):
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
</programlisting>
<para>
The attribute path of any Beam package corresponds to the name
of that particular package in Hex or its OTP Application/Release name.
</para>
</section>
<section xml:id="packaging-beam-applications">
<title>Packaging Beam Applications</title>
<section xml:id="packaging-erlang-applications">
<title>Erlang Applications</title>
<section xml:id="rebar3-packages">
<title>Rebar3 Packages</title>
<para>
There is a Nix function called
<literal>buildRebar3</literal>. We use this function to make a
derivation that understands how to build a rebar3 project. For
example, the expression we use to build the <link
xlink:href="https://github.com/erlang-nix/hex2nix">hex2nix</link>
project follows.
</para>
<programlisting>
{stdenv, fetchFromGitHub, buildRebar3, ibrowse, jsx, erlware_commons }:
buildRebar3 rec {
name = "hex2nix";
version = "0.0.1";
src = fetchFromGitHub {
owner = "ericbmerritt";
repo = "hex2nix";
rev = "${version}";
sha256 = "1w7xjidz1l5yjmhlplfx7kphmnpvqm67w99hd2m7kdixwdxq0zqg";
};
beamDeps = [ ibrowse jsx erlware_commons ];
}
</programlisting>
<para>
The only visible difference between this derivation and
something like <literal>stdenv.mkDerivation</literal> is that we
have added <literal>beamDeps</literal> to the derivation. If
you add your Beam dependencies here, they will be correctly
handled by the system.
</para>
<para>
If your package needs to compile native code via Rebar's port
compilation mechanism, you should add <literal>compilePort =
true;</literal> to the derivation.
</para>
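<para>
  As an illustration, a hypothetical package with native code (the
  name, version, dependency and hash below are placeholders) would
  simply add that attribute next to its other arguments:
</para>
<programlisting>
{ buildRebar3, fetchHex, some_dep }:

buildRebar3 rec {
  name = "my_nif_pkg";
  version = "0.1.0";

  src = fetchHex {
    pkg = name;
    inherit version;
    sha256 = "0000000000000000000000000000000000000000000000000000"; # placeholder
  };

  compilePort = true; # compile the native port code, as described above
  beamDeps = [ some_dep ];
}
</programlisting>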
</section>
<section xml:id="erlang-mk-packages">
<title>Erlang.mk Packages</title>
<para>
Erlang.mk functions almost identically to Rebar. The only real
difference is that <literal>buildErlangMk</literal> is called
instead of <literal>buildRebar3</literal>.
</para>
<programlisting>
{ stdenv, buildErlangMk, fetchHex, cowlib, ranch }:
buildErlangMk {
name = "cowboy";
version = "1.0.4";
src = fetchHex {
pkg = "cowboy";
version = "1.0.4";
sha256 =
"6a0edee96885fae3a8dd0ac1f333538a42e807db638a9453064ccfdaa6b9fdac";
};
beamDeps = [ cowlib ranch ];
meta = {
description = ''Small, fast, modular HTTP server written in
Erlang.'';
license = stdenv.lib.licenses.isc;
homepage = "https://github.com/ninenines/cowboy";
};
}
</programlisting>
</section>
<section xml:id="mix-packages">
<title>Mix Packages</title>
<para>
Mix functions almost identically to Rebar. The only real
difference is that <literal>buildMix</literal> is called
instead of <literal>buildRebar3</literal>.
</para>
<programlisting>
{ stdenv, buildMix, fetchHex, plug, absinthe }:
buildMix {
name = "absinthe_plug";
version = "1.0.0";
src = fetchHex {
pkg = "absinthe_plug";
version = "1.0.0";
sha256 =
"08459823fe1fd4f0325a8bf0c937a4520583a5a26d73b193040ab30a1dfc0b33";
};
beamDeps = [ plug absinthe ];
meta = {
description = ''A plug for Absinthe, an experimental GraphQL
toolkit'';
license = stdenv.lib.licenses.bsd3;
homepage = "https://github.com/CargoSense/absinthe_plug";
};
}
</programlisting>
</section>
</section>
</section>
<section xml:id="how-to-develop">
<title>How to develop</title>
<section xml:id="accessing-an-environment">
<title>Accessing an Environment</title>
<para>
Often, all you want to do is be able to access a valid
environment that contains a specific package and its
dependencies. We can do that with the <literal>env</literal>
part of a derivation. For example, let's say we want to access an
Erlang repl with ibrowse loaded up. We could do the following:
</para>
<programlisting>
~/w/nixpkgs nix-shell -A beamPackages.ibrowse.env --run "erl"
Erlang/OTP 18 [erts-7.0] [source] [64-bit] [smp:4:4] [async-threads:10] [hipe] [kernel-poll:false]
Eshell V7.0 (abort with ^G)
1> m(ibrowse).
Module: ibrowse
MD5: 3b3e0137d0cbb28070146978a3392945
Compiled: January 10 2016, 23:34
Object file: /nix/store/g1rlf65rdgjs4abbyj4grp37ry7ywivj-ibrowse-4.2.2/lib/erlang/lib/ibrowse-4.2.2/ebin/ibrowse.beam
Compiler options: [{outdir,"/tmp/nix-build-ibrowse-4.2.2.drv-0/hex-source-ibrowse-4.2.2/_build/default/lib/ibrowse/ebin"},
debug_info,debug_info,nowarn_shadow_vars,
warn_unused_import,warn_unused_vars,warnings_as_errors,
{i,"/tmp/nix-build-ibrowse-4.2.2.drv-0/hex-source-ibrowse-4.2.2/_build/default/lib/ibrowse/include"}]
Exports:
add_config/1 send_req_direct/7
all_trace_off/0 set_dest/3
code_change/3 set_max_attempts/3
get_config_value/1 set_max_pipeline_size/3
get_config_value/2 set_max_sessions/3
get_metrics/0 show_dest_status/0
get_metrics/2 show_dest_status/1
handle_call/3 show_dest_status/2
handle_cast/2 spawn_link_worker_process/1
handle_info/2 spawn_link_worker_process/2
init/1 spawn_worker_process/1
module_info/0 spawn_worker_process/2
module_info/1 start/0
rescan_config/0 start_link/0
rescan_config/1 stop/0
send_req/3 stop_worker_process/1
send_req/4 stream_close/1
send_req/5 stream_next/1
send_req/6 terminate/2
send_req_direct/4 trace_off/0
send_req_direct/5 trace_off/2
send_req_direct/6 trace_on/0
trace_on/2
ok
2>
</programlisting>
<para>
Notice the <literal>-A beamPackages.ibrowse.env</literal>. That
is the key to this functionality.
</para>
</section>
<section xml:id="creating-a-shell">
<title>Creating a Shell</title>
<para>
Getting access to an environment often isn't enough to do real
development. Many times we need to create a
<literal>shell.nix</literal> file and do our development inside
of the environment specified by that file. This file looks a lot
like the packageing described above. The main difference is that
<literal>src</literal> points to project root and we call the
package directly.
</para>
<programlisting>
{ pkgs ? import &lt;nixpkgs&gt; {} }:
with pkgs;
let
f = { buildRebar3, ibrowse, jsx, erlware_commons }:
buildRebar3 {
name = "hex2nix";
version = "0.1.0";
src = ./.;
beamDeps = [ ibrowse jsx erlware_commons ];
};
drv = beamPackages.callPackage f {};
in
drv
</programlisting>
<section xml:id="building-in-a-shell">
<title>Building in a shell</title>
<para>
We can leverage the support of the derivation, regardless of
which build derivation is called, by calling the commands themselves.
</para>
<programlisting>
# =============================================================================
# Variables
# =============================================================================
NIX_TEMPLATES := "$(CURDIR)/nix-templates"
TARGET := "$(PREFIX)"
PROJECT_NAME := thorndyke
NIXPKGS=../nixpkgs
NIX_PATH=nixpkgs=$(NIXPKGS)
NIX_SHELL=nix-shell -I "$(NIX_PATH)" --pure
# =============================================================================
# Rules
# =============================================================================
.PHONY= all test clean repl shell build test analyze configure install \
test-nix-install publish plt analyze
all: build
guard-%:
@ if [ "${${*}}" == "" ]; then \
echo "Environment variable $* not set"; \
exit 1; \
fi
clean:
rm -rf _build
rm -rf .cache
repl:
$(NIX_SHELL) --run "iex -pa './_build/prod/lib/*/ebin'"
shell:
$(NIX_SHELL)
configure:
$(NIX_SHELL) --command 'eval "$$configurePhase"'
build: configure
$(NIX_SHELL) --command 'eval "$$buildPhase"'
install:
$(NIX_SHELL) --command 'eval "$$installPhase"'
test:
$(NIX_SHELL) --command 'mix test --no-start --no-deps-check'
plt:
$(NIX_SHELL) --run "mix dialyzer.plt --no-deps-check"
analyze: build plt
$(NIX_SHELL) --run "mix dialyzer --no-compile"
</programlisting>
<para>
If you add the <literal>shell.nix</literal> as described and
use rebar as above, things should simply work. Aside from
<literal>test</literal>, <literal>plt</literal>, and
<literal>analyze</literal>, the targets work just fine for all of
the build derivations.
</para>
</section>
</section>
</section>
<section xml:id="generating-packages-from-hex-with-hex2nix">
<title>Generating Packages from Hex with Hex2Nix</title>
<para>
Updating the Hex packages requires the use of the
<literal>hex2nix</literal> tool. Given the path to the Erlang
modules (usually
<literal>pkgs/development/erlang-modules</literal>), it will
happily dump a file called
<literal>hex-packages.nix</literal>. That file will contain all
the packages that use a recognized build system in Hex. However,
it can't know whether or not all those packages are buildable.
</para>
<para>
To make life easier for our users, it makes good sense to go
ahead and attempt to build all those packages and remove the
ones that don't build. To do that, simply run the following
command in the root of your <literal>nixpkgs</literal> repository:
</para>
<programlisting>
$ nix-build -A beamPackages
</programlisting>
<para>
That will build every package in
<literal>beamPackages</literal>. Then you can go through and
manually remove the ones that fail. Hopefully, someone will
improve <literal>hex2nix</literal> in the future to automate
that.
</para>
</section>
</section>

View File

@ -5,27 +5,29 @@
<title>Go</title>
<para>The function <varname>buildGoPackage</varname> builds
standard Go packages.
standard Go programs.
</para>
<example xml:id='ex-buildGoPackage'><title>buildGoPackage</title>
<programlisting>
net = buildGoPackage rec {
name = "go.net-${rev}";
goPackagePath = "golang.org/x/net"; <co xml:id='ex-buildGoPackage-1' />
subPackages = [ "ipv4" "ipv6" ]; <co xml:id='ex-buildGoPackage-2' />
rev = "e0403b4e005";
deis = buildGoPackage rec {
name = "deis-${version}";
version = "1.13.0";
goPackagePath = "github.com/deis/deis"; <co xml:id='ex-buildGoPackage-1' />
subPackages = [ "client" ]; <co xml:id='ex-buildGoPackage-2' />
src = fetchFromGitHub {
inherit rev;
owner = "golang";
repo = "net";
sha256 = "1g7cjzw4g4301a3yqpbk8n1d4s97sfby2aysl275x04g0zh8jxqp";
owner = "deis";
repo = "deis";
rev = "v${version}";
sha256 = "1qv9lxqx7m18029lj8cw3k7jngvxs4iciwrypdy0gd2nnghc68sw";
};
goPackageAliases = [ "code.google.com/p/go.net" ]; <co xml:id='ex-buildGoPackage-3' />
propagatedBuildInputs = [ goPackages.text ]; <co xml:id='ex-buildGoPackage-4' />
buildFlags = "--tags release"; <co xml:id='ex-buildGoPackage-5' />
disabled = isGo13;<co xml:id='ex-buildGoPackage-6' />
};
goDeps = ./deps.nix; <co xml:id='ex-buildGoPackage-3' />
buildFlags = "--tags release"; <co xml:id='ex-buildGoPackage-4' />
}
</programlisting>
</example>
@ -47,50 +49,80 @@ the following arguments are of special significance to the function:
packages will be built.
</para>
<para>
In this example only <literal>code.google.com/p/go.net/ipv4</literal> and
<literal>code.google.com/p/go.net/ipv6</literal> will be built.
In this example only <literal>github.com/deis/deis/client</literal> will be built.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-3'>
<para>
<varname>goPackageAliases</varname> is a list of alternative import paths
that are valid for this library.
Packages that depend on this library will automatically rename
import paths that match any of the aliases to <literal>goPackagePath</literal>.
</para>
<para>
In this example imports will be renamed from
<literal>code.google.com/p/go.net</literal> to
<literal>golang.org/x/net</literal> in every package that depend on the
<literal>go.net</literal> library.
<varname>goDeps</varname> is where the Go dependencies of a Go program are listed
as a list of package sources identified by Go import path.
It could be imported as a separate <varname>deps.nix</varname> file for
readability. The dependency data structure is described below.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-4'>
<para>
<varname>propagatedBuildInputs</varname> is where the dependencies of a Go library are
listed. Only libraries should list <varname>propagatedBuildInputs</varname>. If a standalone
program is being built instead, use <varname>buildInputs</varname>. If a library's tests require
additional dependencies that are not propagated, they should be listed in <varname>buildInputs</varname>.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-5'>
<para>
<varname>buildFlags</varname> is a list of flags passed to the go build command.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-6'>
</calloutlist>
</para>
<para>The <varname>goDeps</varname> attribute can be imported from a separate
<varname>nix</varname> file that defines which Go libraries are needed and should
be included in <varname>GOPATH</varname> for <varname>buildPhase</varname>.
</para>
<example xml:id='ex-goDeps'><title>deps.nix</title>
<programlisting>
[ <co xml:id='ex-goDeps-1' />
{
goPackagePath = "gopkg.in/yaml.v2"; <co xml:id='ex-goDeps-2' />
fetch = {
type = "git"; <co xml:id='ex-goDeps-3' />
url = "https://gopkg.in/yaml.v2";
rev = "a83829b6f1293c91addabc89d0571c246397bbf4";
sha256 = "1m4dsmk90sbi17571h6pld44zxz7jc4lrnl4f27dpd1l8g5xvjhh";
};
}
{
goPackagePath = "github.com/docopt/docopt-go";
fetch = {
type = "git";
url = "https://github.com/docopt/docopt-go";
rev = "784ddc588536785e7299f7272f39101f7faccc3f";
sha256 = "0wwz48jl9fvl1iknvn9dqr4gfy1qs03gxaikrxxp9gry6773v3sj";
};
}
]
</programlisting>
</example>
<para>
<calloutlist>
<callout arearefs='ex-goDeps-1'>
<para>
If <varname>disabled</varname> is <literal>true</literal>,
nix will refuse to build this package.
<varname>goDeps</varname> is a list of Go dependencies.
</para>
</callout>
<callout arearefs='ex-goDeps-2'>
<para>
In this example the package will not be built for go 1.3. The <literal>isGo13</literal>
is an utility function that returns <literal>true</literal> if go used to build the
package has version 1.3.x.
<varname>goPackagePath</varname> specifies Go package import path.
</para>
</callout>
<callout arearefs='ex-goDeps-3'>
<para>
The <varname>fetch type</varname> that needs to be used to get the package source. If <varname>git</varname>
is used there should be <varname>url</varname>, <varname>rev</varname> and <varname>sha256</varname>
defined next to it.
</para>
</callout>
@ -99,12 +131,21 @@ the following arguments are of special significance to the function:
</para>
<para>
Reusable Go libraries may be found in the <varname>goPackages</varname> set. You can test
build a Go package as follows:
<varname>buildGoPackage</varname> produces <xref linkend='chap-multiple-output' xrefstyle="select: title" />
where <varname>bin</varname> includes program binaries. You can test build a Go binary as follows:
<screen>
$ nix-build -A goPackages.net
</screen>
<screen>
$ nix-build -A deis.bin
</screen>
or build all outputs with:
<screen>
$ nix-build -A deis.all
</screen>
The <varname>bin</varname> output will be installed by default with <varname>nix-env -i</varname>
or <varname>systemPackages</varname>.
</para>
@ -119,6 +160,7 @@ done
</screen>
</para>
<para>To extract dependency information from a Go package in automated way use <link xlink:href="https://github.com/kamilchm/go2nix">go2nix</link>.</para>
<para>To extract dependency information from a Go package in an automated way use <link xlink:href="https://github.com/kamilchm/go2nix">go2nix</link>.
It can produce a complete derivation and a <varname>goDeps</varname> file for Go programs.</para>
</section>

View File

@ -0,0 +1,825 @@
---
title: User's Guide for Haskell in Nixpkgs
author: Peter Simons
date: 2015-06-01
---
# User's Guide to the Haskell Infrastructure
## How to install Haskell packages
Nixpkgs distributes build instructions for all Haskell packages registered on
[Hackage](http://hackage.haskell.org/), but strangely enough normal Nix package
lookups don't seem to discover any of them, except for the default version of ghc, cabal-install, and stack:
$ nix-env -i alex
error: selector alex matches no derivations
$ nix-env -qa ghc
ghc-7.10.2
The Haskell package set is not registered in the top-level namespace because it
is *huge*. If all Haskell packages were visible to these commands, then
name-based search/install operations would be much slower than they are now. We
avoided that by keeping all Haskell-related packages in a separate attribute
set called `haskellPackages`, which the following command will list:
$ nix-env -f "<nixpkgs>" -qaP -A haskellPackages
haskellPackages.a50 a50-0.5
haskellPackages.abacate haskell-abacate-0.0.0.0
haskellPackages.abcBridge haskell-abcBridge-0.12
haskellPackages.afv afv-0.1.1
haskellPackages.alex alex-3.1.4
haskellPackages.Allure Allure-0.4.101.1
haskellPackages.alms alms-0.6.7
[... some 8000 entries omitted ...]
To install any of those packages into your profile, refer to them by their
attribute path (first column):
$ nix-env -f "<nixpkgs>" -iA haskellPackages.Allure ...
The attribute path of any Haskell package corresponds to the name of that
particular package on Hackage: the package `cabal-install` has the attribute
`haskellPackages.cabal-install`, and so on. (Actually, this convention causes
trouble with packages like `3dmodels` and `4Blocks`, because these names are
invalid identifiers in the Nix language. The issue of how to deal with these
rare corner cases is currently unresolved.)
Haskell packages whose Nix name (second column) begins with a `haskell-` prefix
are packages that provide a library whereas packages without that prefix
provide just executables. Libraries may provide executables too, though: the
package `haskell-pandoc`, for example, installs both a library and an
application. You can install and use Haskell executables just like any other
program in Nixpkgs, but using Haskell libraries for development is a bit
trickier and we'll address that subject in great detail in section [How to
create a development environment].
Attribute paths are deterministic inside of Nixpkgs, but the path necessary to
reach Nixpkgs varies from system to system. We dodged that problem by giving
`nix-env` an explicit `-f "<nixpkgs>"` parameter, but if you call `nix-env`
without that flag, then chances are the invocation fails:
$ nix-env -iA haskellPackages.cabal-install
error: attribute haskellPackages in selection path
haskellPackages.cabal-install not found
On NixOS, for example, Nixpkgs does *not* exist in the top-level namespace by
default. To figure out the proper attribute path, it's easiest to query for the
path of a well-known Nixpkgs package, i.e.:
$ nix-env -qaP coreutils
nixos.coreutils coreutils-8.23
If your system responds like that (most NixOS installations will), then the
attribute path to `haskellPackages` is `nixos.haskellPackages`. Thus, if you
want to use `nix-env` without giving an explicit `-f` flag, then that's the way
to do it:
$ nix-env -qaP -A nixos.haskellPackages
$ nix-env -iA nixos.haskellPackages.cabal-install
Our current default compiler is GHC 7.10.x and the `haskellPackages` set
contains packages built with that particular version. Nixpkgs contains the
latest major release of every GHC since 6.10.4, however, and there is a whole
family of package sets available that defines Hackage packages built with each
of those compilers, too:
$ nix-env -f "<nixpkgs>" -qaP -A haskell.packages.ghc6123
$ nix-env -f "<nixpkgs>" -qaP -A haskell.packages.ghc763
The name `haskellPackages` is really just a synonym for
`haskell.packages.ghc7102`, because we prefer that package set internally and
recommend it to our users as their default choice, but ultimately you are free
to compile your Haskell packages with any GHC version you please. The following
command displays the complete list of available compilers:
$ nix-env -f "<nixpkgs>" -qaP -A haskell.compiler
haskell.compiler.ghc6104 ghc-6.10.4
haskell.compiler.ghc6123 ghc-6.12.3
haskell.compiler.ghc704 ghc-7.0.4
haskell.compiler.ghc722 ghc-7.2.2
haskell.compiler.ghc742 ghc-7.4.2
haskell.compiler.ghc763 ghc-7.6.3
haskell.compiler.ghc784 ghc-7.8.4
haskell.compiler.ghc7102 ghc-7.10.2
haskell.compiler.ghcHEAD ghc-7.11.20150402
haskell.compiler.ghcNokinds ghc-nokinds-7.11.20150704
haskell.compiler.ghcjs ghcjs-0.1.0
haskell.compiler.jhc jhc-0.8.2
haskell.compiler.uhc uhc-1.1.9.0
We have no package sets for `jhc` or `uhc` yet, unfortunately, but for every
version of GHC listed above, there exists a package set based on that compiler.
Also, the attributes `haskell.compiler.ghcXYC` and
`haskell.packages.ghcXYC.ghc` are synonymous for the sake of convenience.
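Because of this, the following two commands should install the very same GHC
derivation (a quick illustration; pick any compiler version from the list
above):

$ nix-env -f "<nixpkgs>" -iA haskell.compiler.ghc784
$ nix-env -f "<nixpkgs>" -iA haskell.packages.ghc784.ghc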
## How to create a development environment
### How to install a compiler
A simple development environment consists of a Haskell compiler and one or both
of the tools `cabal-install` and `stack`. We saw in section
[How to install Haskell packages] how you can install those programs into your
user profile:
$ nix-env -f "<nixpkgs>" -iA haskellPackages.ghc haskellPackages.cabal-install
Instead of the default package set `haskellPackages`, you can also use the more
precise name `haskell.compiler.ghc7102`, which has the advantage that it refers
to the same GHC version regardless of what Nixpkgs considers "default" at any
given time.
Once you've made those tools available in `$PATH`, it's possible to build
Hackage packages the same way people without access to Nix do it all the time:
$ cabal get lens-4.11 && cd lens-4.11
$ cabal install -j --dependencies-only
$ cabal configure
$ cabal build
If you enjoy working with Cabal sandboxes, then that's entirely possible too:
just execute the command
$ cabal sandbox init
before installing the required dependencies.
The `nix-shell` utility makes it easy to switch to a different compiler
version; just enter the Nix shell environment with the command
$ nix-shell -p haskell.compiler.ghc784
to bring GHC 7.8.4 into `$PATH`. Alternatively, you can use Stack instead of
`nix-shell` directly to select compiler versions and other build tools
per-project. It uses `nix-shell` under the hood when Nix support is turned on.
See [How to build a Haskell project using Stack].
If you're using `cabal-install`, re-running `cabal configure` inside the spawned
shell switches your build to use that compiler instead. If you're working on
a project that doesn't depend on any additional system libraries outside of GHC,
then it's even sufficient to just run the `cabal configure` command inside of
the shell:
$ nix-shell -p haskell.compiler.ghc784 --command "cabal configure"
Afterwards, all other commands like `cabal build` work just fine in any shell
environment, because the configure phase recorded the absolute paths to all
required tools like GHC in its build configuration inside of the `dist/`
directory. Please note, however, that `nix-collect-garbage` can break such an
environment because the Nix store paths created by `nix-shell` aren't "alive"
anymore once `nix-shell` has terminated. If you find that your Haskell builds
no longer work after garbage collection, then you'll have to re-run `cabal
configure` inside of a new `nix-shell` environment.
### How to install a compiler with libraries
GHC expects to find all installed libraries inside of its own `lib` directory.
This approach works fine on traditional Unix systems, but it doesn't work for
Nix, because GHC's store path is immutable once it's built. We cannot install
additional libraries into that location. As a consequence, our copies of GHC
don't know any packages except their own core libraries, like `base`,
`containers`, `Cabal`, etc.
We can register additional libraries to GHC, however, using a special build
function called `ghcWithPackages`. That function expects one argument: a
function that maps from an attribute set of Haskell packages to a list of
packages, which determines the libraries known to that particular version of
GHC. For example, the Nix expression `ghcWithPackages (pkgs: [pkgs.mtl])`
generates a copy of GHC that has the `mtl` library registered in addition to
its normal core packages:
$ nix-shell -p "haskellPackages.ghcWithPackages (pkgs: [pkgs.mtl])"
[nix-shell:~]$ ghc-pkg list mtl
/nix/store/zy79...-ghc-7.10.2/lib/ghc-7.10.2/package.conf.d:
mtl-2.2.1
This function allows users to define their own development environment by means
of an override. After adding the following snippet to `~/.nixpkgs/config.nix`,
{
packageOverrides = super: let self = super.pkgs; in
{
myHaskellEnv = self.haskell.packages.ghc7102.ghcWithPackages
(haskellPackages: with haskellPackages; [
# libraries
arrows async cgi criterion
# tools
cabal-install haskintex
]);
};
}
it's possible to install that compiler with `nix-env -f "<nixpkgs>" -iA
myHaskellEnv`. If you'd like to switch that development environment to a
different version of GHC, just replace the `ghc7102` bit in the previous
definition with the appropriate name. Of course, it's also possible to define
any number of these development environments! (You can't install two of them
into the same profile at the same time, though, because that would result in
file conflicts.)
The generated `ghc` program is a wrapper script that re-directs the real
GHC executable to use a new `lib` directory --- one that we specifically
constructed to contain all those packages the user requested:
$ cat $(type -p ghc)
#! /nix/store/xlxj...-bash-4.3-p33/bin/bash -e
export NIX_GHC=/nix/store/19sm...-ghc-7.10.2/bin/ghc
export NIX_GHCPKG=/nix/store/19sm...-ghc-7.10.2/bin/ghc-pkg
export NIX_GHC_DOCDIR=/nix/store/19sm...-ghc-7.10.2/share/doc/ghc/html
export NIX_GHC_LIBDIR=/nix/store/19sm...-ghc-7.10.2/lib/ghc-7.10.2
exec /nix/store/j50p...-ghc-7.10.2/bin/ghc "-B$NIX_GHC_LIBDIR" "$@"
The variables `$NIX_GHC`, `$NIX_GHCPKG`, etc. point to the *new* store path
`ghcWithPackages` constructed specifically for this environment. The last line
of the wrapper script then executes the real `ghc`, but passes the path to the
new `lib` directory using GHC's `-B` flag.
The purpose of those environment variables is to work around an impurity in the
popular [ghc-paths](http://hackage.haskell.org/package/ghc-paths) library. That
library promises to give its users access to GHC's installation paths. Only,
the library can't possibly know that path when it's compiled, because the path
GHC considers its own is determined only much later, when the user configures
it through `ghcWithPackages`. So we [patched
ghc-paths](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/haskell-modules/patches/ghc-paths-nix.patch)
to return the paths found in those environment variables at run-time rather
than trying to guess them at compile-time.
To make sure that mechanism works properly all the time, we recommend that you
set those variables to meaningful values in your shell environment, too, i.e.
by adding the following code to your `~/.bashrc`:
if type >/dev/null 2>&1 -p ghc; then
eval "$(egrep ^export "$(type -p ghc)")"
fi
If you are certain that you'll use only one GHC environment which is located in
your user profile, then you can use the following code, too, which has the
advantage that it doesn't contain any paths from the Nix store, i.e. those
settings always remain valid even if a `nix-env -u` operation updates the GHC
environment in your profile:
if [ -e ~/.nix-profile/bin/ghc ]; then
export NIX_GHC="$HOME/.nix-profile/bin/ghc"
export NIX_GHCPKG="$HOME/.nix-profile/bin/ghc-pkg"
export NIX_GHC_DOCDIR="$HOME/.nix-profile/share/doc/ghc/html"
export NIX_GHC_LIBDIR="$HOME/.nix-profile/lib/ghc-$($NIX_GHC --numeric-version)"
fi
### How to install a compiler with libraries, hoogle and documentation indexes
If you plan to use your environment for interactive programming, not just
compiling random Haskell code, you might want to replace `ghcWithPackages` in
all the listings above with `ghcWithHoogle`.
This environment generator not only produces an environment with GHC and all
the specified libraries, but also generates `hoogle` and `haddock` indexes
for all the packages, and provides a wrapper script around the `hoogle` binary that
uses all those things. A precise name for this thing would be
"`ghcWithPackagesAndHoogleAndDocumentationIndexes`", which is, regrettably, too
long and scary.
For example, installing the following environment
{
packageOverrides = super: let self = super.pkgs; in
{
myHaskellEnv = self.haskellPackages.ghcWithHoogle
(haskellPackages: with haskellPackages; [
# libraries
arrows async cgi criterion
# tools
cabal-install haskintex
]);
};
}
allows one to browse a module documentation index [not too dissimilar to
this](https://downloads.haskell.org/~ghc/latest/docs/html/libraries/index.html)
for all the specified packages and their dependencies by directing a browser of
choice to `~/.nix-profile/share/doc/hoogle/index.html` (or
`/run/current-system/sw/share/doc/hoogle/index.html` in case you put it in
`environment.systemPackages` in NixOS).
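If you do want the system-wide variant on NixOS, a minimal sketch (assuming the
usual `{ config, pkgs, ... }:` module header in `/etc/nixos/configuration.nix`)
is to define the same override via `nixpkgs.config.packageOverrides` and add
the resulting environment to `environment.systemPackages`:

nixpkgs.config.packageOverrides = super: {
  myHaskellEnv = super.haskellPackages.ghcWithHoogle
    (haskellPackages: with haskellPackages; [ arrows async cgi criterion ]);
};
environment.systemPackages = with pkgs; [ myHaskellEnv ];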
After you've marveled enough at that, try adding the following to your
`~/.ghc/ghci.conf`:
:def hoogle \s -> return $ ":! hoogle search -cl --count=15 \"" ++ s ++ "\""
:def doc \s -> return $ ":! hoogle search -cl --info \"" ++ s ++ "\""
and test it by typing into `ghci`:
:hoogle a -> a
:doc a -> a
Be sure to note the links to `haddock` files in the output. With any modern and
properly configured terminal emulator you can just click those links to
navigate there.
Finally, you can run
hoogle server -p 8080
and navigate to http://localhost:8080/ for your own local
[Hoogle](https://www.haskell.org/hoogle/). Note, however, that Firefox and
possibly other browsers disallow navigation from `http:` to `file:` URIs for
security reasons, which might be quite an inconvenience. See [this
page](http://kb.mozillazine.org/Links_to_local_pages_do_not_work) for
workarounds.
### How to build a Haskell project using Stack
[Stack](http://haskellstack.org) is a popular build tool for Haskell projects.
It has first-class support for Nix. Stack can optionally use Nix to
automatically select the right version of GHC and other build tools to build,
test and execute apps in an existing project downloaded from somewhere on the
Internet. Pass the `--nix` flag to any `stack` command to do so, e.g.
$ git clone --recursive http://github.com/yesodweb/wai
$ cd wai
$ stack --nix build
If you want `stack` to use Nix by default, you can add a `nix` section to the
`stack.yaml` file, as explained in the [Stack documentation][stack-nix-doc]. For
example:
nix:
enable: true
packages: [pkgconfig zeromq zlib]
The example configuration snippet above tells Stack to create an ad hoc
environment for `nix-shell` as in the below section, in which the `pkgconfig`,
`zeromq` and `zlib` packages from Nixpkgs are available. All `stack` commands
will implicitly be executed inside this ad hoc environment.
Some projects have more sophisticated needs. For example, some ad hoc
environments might need to expose Nixpkgs packages compiled in a certain way, or
with extra environment variables. In these cases, you'll need a `shell` field
instead of `packages`:
nix:
enable: true
shell-file: shell.nix
For more on how to write a `shell.nix` file see the below section. You'll need
to express a derivation. Note that Nixpkgs ships with a convenience wrapper
function around `mkDerivation` called `haskell.lib.buildStackProject` to help you
create this derivation in exactly the way Stack expects. All of the same inputs
as `mkDerivation` can be provided. For example, to build a Stack project that
includes packages that link against a version of the R library compiled with
special options turned on:
with (import <nixpkgs> { });
let R = pkgs.R.override { enableStrictBarrier = true; };
in
haskell.lib.buildStackProject {
name = "HaskellR";
buildInputs = [ R zeromq zlib ];
}
You can select a particular GHC version to compile with by setting the
`ghc` attribute as an argument to `buildStackProject`. Better yet, let
Stack choose what GHC version it wants based on the snapshot specified
in `stack.yaml` (only works with Stack >= 1.1.3):
{nixpkgs ? import <nixpkgs> { }, ghc ? nixpkgs.ghc}:
with nixpkgs;
let R = pkgs.R.override { enableStrictBarrier = true; };
in
haskell.lib.buildStackProject {
name = "HaskellR";
buildInputs = [ R zeromq zlib ];
inherit ghc;
}
[stack-nix-doc]: http://docs.haskellstack.org/en/stable/nix_integration.html
### How to create ad hoc environments for `nix-shell`
The easiest way to create an ad hoc development environment is to run
`nix-shell` with the appropriate GHC environment given on the command-line:
nix-shell -p "haskellPackages.ghcWithPackages (pkgs: with pkgs; [mtl pandoc])"
For more sophisticated use-cases, however, it's more convenient to save the
desired configuration in a file called `shell.nix` that looks like this:
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
let
inherit (nixpkgs) pkgs;
ghc = pkgs.haskell.packages.${compiler}.ghcWithPackages (ps: with ps; [
monad-par mtl
]);
in
pkgs.stdenv.mkDerivation {
name = "my-haskell-env-0";
buildInputs = [ ghc ];
shellHook = "eval $(egrep ^export ${ghc}/bin/ghc)";
}
Now run `nix-shell` --- or even `nix-shell --pure` --- to enter a shell
environment that has the appropriate compiler in `$PATH`. If you use `--pure`,
then add all other packages that your development environment needs into the
`buildInputs` attribute. If you'd like to switch to a different compiler
version, then pass an appropriate `compiler` argument to the expression, i.e.
`nix-shell --argstr compiler ghc784`.
If you need such an environment because you'd like to compile a Hackage package
outside of Nix --- i.e. because you're hacking on the latest version from Git
---, then the package set provides suitable nix-shell environments for you
already! Every Haskell package has an `env` attribute that provides a shell
environment suitable for compiling that particular package. If you'd like to
hack the `lens` library, for example, then you just have to check out the
source code and enter the appropriate environment:
$ cabal get lens-4.11 && cd lens-4.11
Downloading lens-4.11...
Unpacking to lens-4.11/
$ nix-shell "<nixpkgs>" -A haskellPackages.lens.env
[nix-shell:/tmp/lens-4.11]$
At this point, you can run `cabal configure`, `cabal build`, and all the other
development commands. Note that you need `cabal-install` installed in your
`$PATH` already to use it here --- the `nix-shell` environment does not provide
it.
## How to create Nix builds for your own private Haskell packages
If your own Haskell packages have build instructions for Cabal, then you can
convert those automatically into build instructions for Nix using the
`cabal2nix` utility, which you can install into your profile by running
`nix-env -i cabal2nix`.
### How to build a stand-alone project
For example, let's assume that you're working on a private project called
`foo`. To generate a Nix build expression for it, change into the project's
top-level directory and run the command:
$ cabal2nix . >foo.nix
Then write the following snippet into a file called `default.nix`:
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
nixpkgs.pkgs.haskell.packages.${compiler}.callPackage ./foo.nix { }
Finally, store the following code in a file called `shell.nix`:
{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
(import ./default.nix { inherit nixpkgs compiler; }).env
At this point, you can run `nix-build` to have Nix compile your project and
install it into a Nix store path. The local directory will contain a symlink
called `result` after `nix-build` returns that points into that location. Of
course, passing the flag `--argstr compiler ghc763` allows switching the build
to any version of GHC currently supported.
Furthermore, you can call `nix-shell` to enter an interactive development
environment in which you can use `cabal configure` and `cabal build` to develop
your code. That environment will automatically contain a proper GHC derivation
with all the required libraries registered as well as all the system-level
libraries your package might need.
If your package does not depend on any system-level libraries, then it's
sufficient to run
$ nix-shell --command "cabal configure"
once to set up your build. `cabal-install` determines the absolute paths to all
resources required for the build and writes them into a config file in the
`dist/` directory. Once that's done, you can run `cabal build` and any other
command for that project even outside of the `nix-shell` environment. This
feature is particularly nice for those of us who like to edit their code with
an IDE, like Emacs' `haskell-mode`, because it's not necessary to start Emacs
inside of nix-shell just to make it find out the necessary settings for
building the project; `cabal-install` has already done that for us.
If you want to do some quick-and-dirty hacking and don't want to bother setting
up a `default.nix` and `shell.nix` file manually, then you can use the
`--shell` flag offered by `cabal2nix` to have it generate a stand-alone
`nix-shell` environment for you. With that feature, running
$ cabal2nix --shell . >shell.nix
$ nix-shell --command "cabal configure"
is usually enough to set up a build environment for any given Haskell package.
You can even use that generated file to run `nix-build`, too:
$ nix-build shell.nix
### How to build projects that depend on each other
If you have multiple private Haskell packages that depend on each other, then
you'll have to register those packages in the Nixpkgs set to make them visible
for the dependency resolution performed by `callPackage`. First of all, change
into each of your projects' top-level directories and generate a `default.nix`
file with `cabal2nix`:
$ cd ~/src/foo && cabal2nix . >default.nix
$ cd ~/src/bar && cabal2nix . >default.nix
Then edit your `~/.nixpkgs/config.nix` file to register those builds in the
default Haskell package set:
{
packageOverrides = super: let self = super.pkgs; in
{
haskellPackages = super.haskellPackages.override {
overrides = self: super: {
foo = self.callPackage ../src/foo {};
bar = self.callPackage ../src/bar {};
};
};
};
}
Once that's accomplished, `nix-env -f "<nixpkgs>" -qA haskellPackages` will
show your packages like any other package from Hackage, and you can build them
$ nix-build "<nixpkgs>" -A haskellPackages.foo
or enter an interactive shell environment suitable for building them:
$ nix-shell "<nixpkgs>" -A haskellPackages.bar.env
## Miscellaneous Topics
### How to build with profiling enabled
Every Haskell package set takes a function called `overrides` that you can use
to manipulate the package as much as you please. One useful application of this
feature is to replace the default `mkDerivation` function with one that enables
library profiling for all packages. To accomplish that, add the
following snippet to your `~/.nixpkgs/config.nix` file:
{
packageOverrides = super: let self = super.pkgs; in
{
profiledHaskellPackages = self.haskellPackages.override {
overrides = self: super: {
mkDerivation = args: super.mkDerivation (args // {
enableLibraryProfiling = true;
});
};
};
};
}
Then, replace instances of `haskellPackages` in the `cabal2nix`-generated
`default.nix` or `shell.nix` files with `profiledHaskellPackages`.
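With the `foo` package from the earlier example, a sketch of such an edited
`default.nix` would be:

{ nixpkgs ? import <nixpkgs> {} }:
nixpkgs.pkgs.profiledHaskellPackages.callPackage ./foo.nix { }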
### How to override package versions in a compiler-specific package set
Nixpkgs provides the latest version of
[`ghc-events`](http://hackage.haskell.org/package/ghc-events), which is 0.4.4.0
at the time of this writing. This is fine for users of GHC 7.10.x, but GHC
7.8.4 cannot compile that binary. Now, one way to solve that problem is to
register an older version of `ghc-events` in the 7.8.x-specific package set.
The first step is to generate Nix build instructions with `cabal2nix`:
$ cabal2nix cabal://ghc-events-0.4.3.0 >~/.nixpkgs/ghc-events-0.4.3.0.nix
Then add the override in `~/.nixpkgs/config.nix`:
{
packageOverrides = super: let self = super.pkgs; in
{
haskell = super.haskell // {
packages = super.haskell.packages // {
ghc784 = super.haskell.packages.ghc784.override {
overrides = self: super: {
ghc-events = self.callPackage ./ghc-events-0.4.3.0.nix {};
};
};
};
};
};
}
This code is a little crazy, no doubt, but it's necessary because the intuitive
version
haskell.packages.ghc784 = super.haskell.packages.ghc784.override {
overrides = self: super: {
ghc-events = self.callPackage ./ghc-events-0.4.3.0.nix {};
};
};
doesn't do what we want it to: that code replaces the `haskell` package set in
Nixpkgs with one that contains only one entry, `packages`, which contains only
one entry, `ghc784`. This override loses the `haskell.compiler` set, and it
loses the `haskell.packages.ghcXYZ` sets for all compilers but GHC 7.8.4. To
avoid that problem, we have to perform the convoluted little dance from above,
iterating over each step in the hierarchy.
Once it's accomplished, however, we can install a variant of `ghc-events`
that's compiled with GHC 7.8.4:
nix-env -f "<nixpkgs>" -iA haskell.packages.ghc784.ghc-events
Unfortunately, it turns out that this build fails again while executing the
test suite! Apparently, the release archive on Hackage is missing some data
files that the test suite requires, so we cannot run it. We can disable the
test suite by re-generating the Nix expression with the `--no-check` flag:
$ cabal2nix --no-check cabal://ghc-events-0.4.3.0 >~/.nixpkgs/ghc-events-0.4.3.0.nix
Now the build succeeds.
Of course, in the concrete example of `ghc-events` this whole exercise is not
an ideal solution, because `ghc-events` can analyze the output emitted by any
version of GHC later than 6.12 regardless of the compiler version that was used
to build the `ghc-events` executable, so strictly speaking there's no reason to
prefer one built with GHC 7.8.x in the first place. However, for users who
cannot use GHC 7.10.x at all for some reason, the approach of downgrading to an
older version might be useful.
### How to recover from GHC's infamous non-deterministic library ID bug
GHC and distributed build farms don't get along well:
https://ghc.haskell.org/trac/ghc/ticket/4012
When you see an error like this one
package foo-0.7.1.0 is broken due to missing package
text-1.2.0.4-98506efb1b9ada233bb5c2b2db516d91
then you have to download and re-install `foo` and all its dependents from
scratch:
# nix-store -q --referrers /nix/store/*-haskell-text-1.2.0.4 \
| xargs -L 1 nix-store --repair-path
If you're using additional Hydra servers other than `hydra.nixos.org`, then it
might be necessary to purge the local caches that store data from those
machines to disable these binary channels for the duration of the previous
command, i.e. by running:
rm /nix/var/nix/binary-cache-v3.sqlite
rm /nix/var/nix/manifests/*
rm /nix/var/nix/channel-cache/*
### How to use the Haste Haskell-to-Javascript transpiler
Open a shell with `haste-compiler` and `haste-cabal-install` (you don't actually need
`node`, but it can be useful to test stuff):
$ nix-shell -p "haskellPackages.ghcWithPackages (self: with self; [haste-cabal-install haste-compiler])" -p nodejs
You may not need the following step, but if `haste-boot` fails to compile all
the packages it needs, this might do the trick:
$ haste-cabal update
`haste-boot` builds a set of core libraries so that they can be used from Javascript
transpiled programs:
$ haste-boot
Transpile and run a "Hello world" program:
$ echo 'module Main where main = putStrLn "Hello world"' > hello-world.hs
$ hastec --onexec hello-world.hs
$ node hello-world.js
Hello world
### Builds on Darwin fail with `math.h` not found
Users of GHC on Darwin have occasionally reported that builds fail, because the
compiler complains about a missing include file:
fatal error: 'math.h' file not found
The issue has been discussed at length in [ticket
6390](https://github.com/NixOS/nixpkgs/issues/6390), and so far no good
solution has been proposed. As a work-around, users who run into this problem
can configure the environment variables
export NIX_CFLAGS_COMPILE="-idirafter /usr/include"
export NIX_CFLAGS_LINK="-L/usr/lib"
in their `~/.bashrc` file to avoid the compiler error.
### Builds using Stack complain about missing system libraries
-- While building package zlib-0.5.4.2 using:
runhaskell -package=Cabal-1.22.4.0 -clear-package-db [... lots of flags ...]
Process exited with code: ExitFailure 1
Logs have been written to: /home/foo/src/stack-ide/.stack-work/logs/zlib-0.5.4.2.log
Configuring zlib-0.5.4.2...
Setup.hs: Missing dependency on a foreign library:
* Missing (or bad) header file: zlib.h
This problem can usually be solved by installing the system package that
provides this library (you may need the "-dev" version). If the library is
already installed but in a non-standard location then you can use the flags
--extra-include-dirs= and --extra-lib-dirs= to specify where it is.
If the header file does exist, it may contain errors that are caught by the C
compiler at the preprocessing stage. In this case you can re-run configure
with the verbosity flag -v3 to see the error messages.
When you run the build inside of the nix-shell environment, the system
is configured to find libz.so without any special flags -- the compiler
and linker "just know" how to find it. Consequently, Cabal won't record
any search paths for libz.so in the package description, which means
that the package works fine inside of nix-shell, but once you leave the
shell the shared object can no longer be found. That issue is by no
means specific to Stack: you'll have that problem with any other
Haskell package that's built inside of nix-shell but run outside of that
environment.
You can remedy this issue in several ways. The easiest is to add a `nix` section
to the `stack.yaml` like the following:
nix:
enable: true
packages: [ zlib ]
Stack's Nix support knows to add `${zlib.out}/lib` and `${zlib.dev}/include` as
`--extra-lib-dirs` and `--extra-include-dirs` options, respectively. Alternatively, you
can achieve the same effect by hand. First of all, run
$ nix-build --no-out-link "<nixpkgs>" -A zlib
/nix/store/alsvwzkiw4b7ip38l4nlfjijdvg3fvzn-zlib-1.2.8
to find out the store path of the system's zlib library. Now, you can
1) add that path (plus a "/lib" suffix) to your $LD_LIBRARY_PATH
environment variable to make sure your system linker finds libz.so
automatically. It's not a pretty solution, but it will work.
2) As a variant of (1), you can also install any number of system
libraries into your user's profile (or some other profile) and point
$LD_LIBRARY_PATH to that profile instead, so that you don't have to
list dozens of those store paths all over the place.
3) The solution I prefer is to call stack with an appropriate
--extra-lib-dirs flag like so:
$ stack --extra-lib-dirs=/nix/store/alsvwzkiw4b7ip38l4nlfjijdvg3fvzn-zlib-1.2.8/lib build
Typically, you'll need --extra-include-dirs as well. It's possible
to add those flags to the project's "stack.yaml" or your user's
global "~/.stack/global/stack.yaml" file so that you don't have to
specify them manually every time. But again, you're likely better off using
Stack's Nix support instead.
The same thing applies to `cabal configure`, of course, if you're
building with `cabal-install` instead of Stack.
### Creating statically linked binaries
There are two levels of static linking. The first option is to configure the
build with the Cabal flag `--disable-executable-dynamic`. In Nix expressions,
this can be achieved by setting the attribute:
enableSharedExecutables = false;
That gives you a binary with statically linked Haskell libraries and
dynamically linked system libraries.
To link both Haskell libraries and system libraries statically, the additional
flags `--ghc-option=-optl=-static --ghc-option=-optl=-pthread` need to be used.
In Nix, this is accomplished with:
configureFlags = [ "--ghc-option=-optl=-static" "--ghc-option=-optl=-pthread" ];
It's important to realize, however, that most system libraries in Nix are built
as shared libraries only, i.e. there is just no static library available that
Cabal could link!
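Putting the two options together, one way to request a fully static build of
the `foo` package from the earlier examples is to apply both settings with
`haskell.lib.overrideCabal` (a sketch only, and subject to the caveat above
about shared-only system libraries):

{ nixpkgs ? import <nixpkgs> {} }:
let
  pkgs = nixpkgs.pkgs;
  foo = pkgs.haskellPackages.callPackage ./foo.nix { };
in
  pkgs.haskell.lib.overrideCabal foo (drv: {
    enableSharedExecutables = false;
    configureFlags = (drv.configureFlags or []) ++
      [ "--ghc-option=-optl=-static" "--ghc-option=-optl=-pthread" ];
  })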
## Other resources
- The Youtube video [Nix Loves Haskell](https://www.youtube.com/watch?v=BsBhi_r-OeE)
provides an introduction to Haskell NG aimed at beginners. The slides are
available at http://cryp.to/nixos-meetup-3-slides.pdf and also -- in a form
ready for cut & paste -- at
https://github.com/NixOS/cabal2nix/blob/master/doc/nixos-meetup-3-slides.md.
- Another Youtube video is [Escaping Cabal Hell with Nix](https://www.youtube.com/watch?v=mQd3s57n_2Y),
which discusses the subject of Haskell development with Nix but also provides
a basic introduction to Nix as well, i.e. it's suitable for viewers with
almost no prior Nix experience.
- Oliver Charles wrote a very nice [Tutorial how to develop Haskell packages with Nix](http://wiki.ocharles.org.uk/Nix).
- The *Journey into the Haskell NG infrastructure* series of postings
describes the new Haskell infrastructure in great detail:
- [Part 1](http://lists.science.uu.nl/pipermail/nix-dev/2015-January/015591.html)
explains the differences between the old and the new code and gives
instructions on how to migrate to the new setup.
- [Part 2](http://lists.science.uu.nl/pipermail/nix-dev/2015-January/015608.html)
looks in-depth at how to tweak and configure your setup by means of
overrides.
- [Part 3](http://lists.science.uu.nl/pipermail/nix-dev/2015-April/016912.html)
describes the infrastructure that keeps the Haskell package set in Nixpkgs
up-to-date.

View File

@ -13,19 +13,21 @@ in Nixpkgs to easily build packages for other programming languages,
such as Perl or Haskell. These are described in this chapter.</para>
<xi:include href="perl.xml" />
<xi:include href="python.xml" />
<xi:include href="ruby.xml" />
<xi:include href="beam.xml" />
<xi:include href="bower.xml" />
<xi:include href="coq.xml" />
<xi:include href="go.xml" />
<xi:include href="haskell.xml" />
<xi:include href="idris.xml" /> <!-- generated from ../../pkgs/development/idris-modules/README.md -->
<xi:include href="java.xml" />
<xi:include href="lua.xml" />
<xi:include href="coq.xml" />
<xi:include href="idris.xml" /> <!-- generated from ../../pkgs/development/idris-modules/README.md -->
<xi:include href="r.xml" /> <!-- generated from ../../pkgs/development/r-modules/README.md -->
<xi:include href="node.xml" /> <!-- generated from ../../pkgs/development/node-packages/README.md -->
<xi:include href="perl.xml" />
<xi:include href="python.xml" />
<xi:include href="qt.xml" />
<xi:include href="r.xml" /> <!-- generated from ../../pkgs/development/r-modules/README.md -->
<xi:include href="ruby.xml" />
<xi:include href="texlive.xml" />
<xi:include href="bower.xml" />
</chapter>

View File

@ -157,16 +157,16 @@ expression on standard output. For example:
<screen>
$ nix-generate-from-cpan XML::Simple
XMLSimple = buildPerlPackage {
name = "XML-Simple-2.20";
XMLSimple = buildPerlPackage rec {
name = "XML-Simple-2.22";
src = fetchurl {
url = mirror://cpan/authors/id/G/GR/GRANTM/XML-Simple-2.20.tar.gz;
sha256 = "5cff13d0802792da1eb45895ce1be461903d98ec97c9c953bc8406af7294434a";
url = "mirror://cpan/authors/id/G/GR/GRANTM/${name}.tar.gz";
sha256 = "b9450ef22ea9644ae5d6ada086dc4300fa105be050a2030ebd4efd28c198eb49";
};
propagatedBuildInputs = [ XMLNamespaceSupport XMLSAX XMLSAXExpat ];
meta = {
description = "Easily read/write XML (esp config files)";
license = "perl";
description = "An API for simple XML files";
license = with stdenv.lib.licenses; [ artistic1 gpl1Plus ];
};
};
</screen>

View File

@ -78,18 +78,16 @@ containing
```nix
with import <nixpkgs> {};
(pkgs.python35.buildEnv.override {
extraLibs = with pkgs.python35Packages; [ numpy toolz ];
}).env
(pkgs.python35.withPackages (ps: [ps.numpy ps.toolz])).env
```
executing `nix-shell` again gives you a Nix shell from which you can run Python.
What's happening here?
1. We begin with importing the Nix Packages collection. `import <nixpkgs>` imports the `<nixpkgs>` function, `{}` calls it and the `with` statement brings all attributes of `nixpkgs` into the local scope. Therefore we can now use `pkgs`.
2. Then we create a Python 3.5 environment with `pkgs.buildEnv`. Because we want to use it with a custom set of Python packages, we override it.
3. The `extraLibs` argument of the original `buildEnv` function can be used to specify which packages should be included. We want `numpy` and `toolz`. Again, we use the `with` statement to bring a set of attributes into the local scope.
4. And finally, for interactive use we return the environment.
2. Then we create a Python 3.5 environment with the `withPackages` function.
3. The `withPackages` function expects us to provide a function as an argument that takes the set of all python packages and returns a list of packages to include in the environment. Here, we select the packages `numpy` and `toolz` from the package set.
4. And finally, for interactive use we return the environment by using the `env` attribute.
### Developing with Python
@ -187,10 +185,7 @@ with import <nixpkgs> {};
};
};
in pkgs.python35.buildEnv.override rec {
extraLibs = [ pkgs.python35Packages.numpy toolz ];
}
in pkgs.python35.withPackages (ps: [ps.numpy toolz])
).env
```
@ -199,8 +194,11 @@ locally defined package as well as `numpy` which is build according to the
definition in Nixpkgs. What did we do here? Well, we took the Nix expression
that we used earlier to build a Python environment, and said that we wanted to
include our own version of `toolz`. To introduce our own package in the scope of
`buildEnv.override` we used a
`withPackages` we used a
[`let`](http://nixos.org/nix/manual/#sec-constructs) expression.
You can see that we used `ps.numpy` to select numpy from the nixpkgs package set (`ps`).
But we do not take `toolz` from the nixpkgs package set this time.
Instead, `toolz` will resolve to our local definition that we introduced with `let`.
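Putting these pieces together, a complete `shell.nix` along these lines could look as follows. This is a sketch: the `toolz` override is reduced to building from a hypothetical local checkout in `./toolz` instead of a fetched release.

```nix
with import <nixpkgs> {};

( let
    # hypothetical: build our own toolz from a local source tree
    toolz = pkgs.python35Packages.buildPythonPackage {
      name = "toolz-dev";
      src = ./toolz;
    };
  in pkgs.python35.withPackages (ps: [ ps.numpy toolz ])
).env
```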
### Handling dependencies
@ -293,8 +291,8 @@ pyfftw = buildPythonPackage rec {
# Tests cannot import pyfftw. pyfftw works fine though.
doCheck = false;
LDFLAGS="-L${pkgs.fftw}/lib -L${pkgs.fftwFloat}/lib -L${pkgs.fftwLongDouble}/lib"
CFLAGS="-I${pkgs.fftw}/include -I${pkgs.fftwFloat}/include -I${pkgs.fftwLongDouble}/include"
LDFLAGS="-L${pkgs.fftw.dev}/lib -L${pkgs.fftwFloat.out}/lib -L${pkgs.fftwLongDouble.out}/lib"
CFLAGS="-I${pkgs.fftw.dev}/include -I${pkgs.fftwFloat.dev}/include -I${pkgs.fftwLongDouble.dev}/include"
'';
meta = {
@ -359,7 +357,7 @@ own packages. The important functions here are `import` and `callPackage`.
### Including a derivation using `callPackage`
Earlier we created a Python environment using `buildEnv`, and included the
Earlier we created a Python environment using `withPackages`, and included the
`toolz` package via a `let` expression.
Let's split the package definition from the environment definition.
@ -394,9 +392,7 @@ with import <nixpkgs> {};
( let
toolz = pkgs.callPackage ~/path/to/toolz/release.nix { pkgs=pkgs; buildPythonPackage=pkgs.python35Packages.buildPythonPackage; };
in pkgs.python35.buildEnv.override rec {
extraLibs = [ pkgs.python35Packages.numpy toolz ];
}
in pkgs.python35.withPackages (ps: [ ps.numpy toolz ])
).env
```
@ -450,6 +446,7 @@ Each interpreter has the following attributes:
- `libPrefix`. Name of the folder in `${python}/lib/` for corresponding interpreter.
- `interpreter`. Alias for `${python}/bin/${executable}`.
- `buildEnv`. Function to build python interpreter environments with extra packages bundled together. See section *python.buildEnv function* for usage and documentation.
- `withPackages`. Simpler interface to `buildEnv`. See section *python.withPackages function* for usage and documentation.
- `sitePackages`. Alias for `lib/${libPrefix}/site-packages`.
- `executable`. Name of the interpreter executable, e.g. `python3.4`.
@ -484,7 +481,7 @@ and the aliases
#### `buildPythonPackage` function
The `buildPythonPackage` function is implemented in
`pkgs/development/python-modules/generic/default.nix`
`pkgs/development/interpreters/python/build-python-package.nix`
and can be used as:
@ -506,9 +503,12 @@ and can be used as:
The `buildPythonPackage` mainly does four things:
* In the `buildPhase`, it calls `${python.interpreter} setup.py bdist_wheel` to build a wheel binary zipfile.
* In the `buildPhase`, it calls `${python.interpreter} setup.py bdist_wheel` to
build a wheel binary zipfile.
* In the `installPhase`, it installs the wheel file using `pip install *.whl`.
* In the `postFixup` phase, the `wrapPythonPrograms` bash function is called to wrap all programs in the `$out/bin/*` directory to include `$PYTHONPATH` and `$PATH` environment variables.
* In the `postFixup` phase, the `wrapPythonPrograms` bash function is called to
wrap all programs in the `$out/bin/*` directory to include `$PATH`
environment variable and add dependent libraries to script's `sys.path`.
* In the `installCheck` phase, `${python.interpreter} setup.py test` is run.
As in Perl, dependencies on other Python packages can be specified in the
@ -534,6 +534,9 @@ All parameters from `mkDerivation` function are still supported.
* `postShellHook`: Hook to execute commands after `shellHook`.
* `makeWrapperArgs`: A list of strings. Arguments to be passed to `makeWrapper`, which wraps generated binaries. By default, the arguments to `makeWrapper` set `PATH` and `PYTHONPATH` environment variables before calling the binary. Additional arguments here can allow a developer to set environment variables which will be available when the binary is run. For example, `makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]`.
* `installFlags`: A list of strings. Arguments to be passed to `pip install`. To pass options to `python setup.py install`, use `--install-option`. E.g., `installFlags=["--install-option='--cpp_implementation'"]`.
* `format`: Format of the source. Options are `setup` for when the source has a `setup.py` and `setuptools` is used to build a wheel, and `wheel` in case the source is already a binary wheel. The default value is `setup`.
* `catchConflicts`: If `true`, abort the package build if a package name appears more than once in the dependency tree. The default is `true`.
* `checkInputs`: Dependencies needed for running the `checkPhase`. These are added to `buildInputs` when `doCheck = true`.
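For illustration, a hypothetical local project could combine several of these parameters like so (a sketch, not a description of a real package; the name and the use of `pytest` are only illustrative):

```nix
with import <nixpkgs> {};

pythonPackages.buildPythonPackage {
  name = "example-0.1.0";                         # hypothetical project
  src = ./.;                                      # local source tree containing setup.py
  format = "setup";                               # the default: build a wheel via setuptools
  checkInputs = with pythonPackages; [ pytest ];  # only needed for the test suite
  doCheck = true;                                 # run the tests in the installCheck phase
  catchConflicts = true;                          # abort on duplicate names in the dependency tree
}
```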
#### `buildPythonApplication` function
@ -547,7 +550,7 @@ Python environments can be created using the low-level `pkgs.buildEnv` function.
This example shows how to create an environment that has the Pyramid Web Framework.
Saving the following as `default.nix`
with import {};
with import <nixpkgs> {};
python.buildEnv.override {
extraLibs = [ pkgs.pythonPackages.pyramid ];
@ -564,10 +567,10 @@ You can also use the `env` attribute to create local environments with needed
packages installed. This is somewhat comparable to `virtualenv`. For example,
running `nix-shell` with the following `shell.nix`
with import {};
with import <nixpkgs> {};
(python3.buildEnv.override {
extraLibs = with python3Packages; [ numpy requests ];
extraLibs = with python3Packages; [ numpy requests2 ];
}).env
will drop you into a shell where Python will have the
@ -580,6 +583,37 @@ specified packages in its path.
* `postBuild`: Shell command executed after the build of environment.
* `ignoreCollisions`: Ignore file collisions inside the environment (default is `false`).
#### python.withPackages function
The `python.withPackages` function provides a simpler interface to the `python.buildEnv` functionality.
It takes a function as an argument that is passed the set of python packages and returns the list
of the packages to be included in the environment. Using the `withPackages` function, the previous
example for the Pyramid Web Framework environment can be written like this:
with import <nixpkgs> {};
python.withPackages (ps: [ps.pyramid])
`withPackages` passes the correct package set for the specific interpreter version as an
argument to the function. In the above example, `ps` equals `pythonPackages`.
But you can also easily switch to using python3:
with import <nixpkgs> {};
python3.withPackages (ps: [ps.pyramid])
Now, `ps` is set to `python3Packages`, matching the version of the interpreter.
As `python.withPackages` simply uses `python.buildEnv` under the hood, it also supports the `env`
attribute. The `shell.nix` file from the previous section can thus also be written like this:
with import <nixpkgs> {};
(python3.withPackages (ps: [ps.numpy ps.requests2])).env
In contrast to `python.buildEnv`, `python.withPackages` does not support the more advanced options
such as `ignoreCollisions = true` or `postBuild`. If you need them, you have to use `python.buildEnv`.
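For example, a `shell.nix` that needs `ignoreCollisions` could fall back to `buildEnv` like this (a sketch based on the earlier example):

```nix
with import <nixpkgs> {};

(python3.buildEnv.override {
  extraLibs = with python3Packages; [ numpy requests2 ];
  ignoreCollisions = true;   # not available through withPackages
}).env
```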
### Development mode
Development or editable mode is supported. To develop Python packages
@ -590,7 +624,7 @@ Warning: `shellPhase` is executed only if `setup.py` exists.
Given a `default.nix`:
with import {};
with import <nixpkgs> {};
buildPythonPackage { name = "myproject";
@ -599,7 +633,7 @@ Given a `default.nix`:
src = ./.; }
Running `nix-shell` with no arguments should give you
the environment in which the package would be build with
the environment in which the package would be built with
`nix-build`.
Shortcut to setup environments with C headers/libraries and python packages:
@ -619,6 +653,56 @@ community to help save time. No tool is preferred at the moment.
## FAQ
### How can I install a working Python environment?
As explained in the user's guide, installing individual Python packages
imperatively with `nix-env -i` or declaratively in `environment.systemPackages`
is not supported. However, it is possible to install a Python environment with packages (`python.buildEnv`).
In the following examples we create an environment with Python 3.5, `numpy` and `ipython`.
As you might imagine, there is one limitation here: you can install only one
such environment at a time. You will notice complaints about collisions when
you try to install a second environment.
#### Environment defined in separate `.nix` file
Create a file, e.g. `build.nix`, with the following expression
```nix
with import <nixpkgs> {};
with python35Packages;
python.withPackages (ps: with ps; [ numpy ipython ])
```
and install it in your profile with
```
nix-env -if build.nix
```
Now you can use the Python interpreter, as well as the extra packages that you added to the environment.
#### Environment defined in `~/.nixpkgs/config.nix`
If you prefer to, you could also add the environment as a package override to the Nixpkgs set.
```
packageOverrides = pkgs: with pkgs; with python35Packages; {
myEnv = python.withPackages (ps: with ps; [ numpy ipython ]);
};
```
and install it in your profile with
```
nix-env -iA nixos.myEnv
```
Note that I'm using the attribute path here.
#### Environment defined in `/etc/nixos/configuration.nix`
For the sake of completeness, here's another example of how to install the environment system-wide.
```nix
environment.systemPackages = with pkgs; [
(python35Packages.python.withPackages (ps: callPackage ../packages/common-python-packages.nix { pythonPackages = ps; }))
];
```
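The file `../packages/common-python-packages.nix` referenced above is not part of Nixpkgs; a minimal sketch of what such a file could contain is:

```nix
# hypothetical packages/common-python-packages.nix
{ pythonPackages }:

with pythonPackages; [ numpy ipython ]
```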
### How to solve circular dependencies?
Consider the packages `A` and `B` that depend on each other. When packaging `B`,
@ -632,8 +716,8 @@ Python attribute sets are created for each interpreter version. We will therefor
In the following example we change the name of the package `pandas` to `foo`.
```
newpkgs = pkgs.overridePackages(self: super: rec {
python35Packages = super.python35Packages.override {
self = python35Packages // { pandas = python35Packages.pandas.override{name="foo";};};
python35Packages = (super.python35Packages.override { self = python35Packages;})
// { pandas = super.python35Packages.pandas.override {name = "foo";};
};
});
```
@ -644,13 +728,12 @@ with import <nixpkgs> {};
(let
newpkgs = pkgs.overridePackages(self: super: rec {
python35Packages = super.python35Packages.override {
self = python35Packages // { pandas = python35Packages.pandas.override{name="foo";};};
python35Packages = (super.python35Packages.override { self = python35Packages;})
// { pandas = super.python35Packages.pandas.override {name = "foo";};
};
});
in newpkgs.python35.buildEnv.override{
extraLibs = [newpkgs.python35Packages.blaze ];
}).env
in newpkgs.python35.withPackages (ps: [ps.blaze])
).env
```
A typical use case is to switch to another version of a certain package. For example, in the Nixpkgs repository we have multiple versions of `django` and `scipy`.
In the following example we use a different version of `scipy`. All packages in `newpkgs` will now use the updated `scipy` version.
@ -661,15 +744,49 @@ with import <nixpkgs> {};
newpkgs = pkgs.overridePackages(self: super: rec {
python35Packages = super.python35Packages.override {
self = python35Packages // { scipy = python35Packages.scipy_0_16;};
self = python35Packages // { scipy = python35Packages.scipy_0_17;};
};
});
in pkgs.python35.buildEnv.override{
extraLibs = [newpkgs.python35Packages.blaze ];
}).env
in newpkgs.python35.withPackages (ps: [ps.blaze])
).env
```
The requested package `blaze` depends upon `pandas` which itself depends on `scipy`.
A similar example but now using `django`
```
with import <nixpkgs> {};
(let
newpkgs = pkgs.overridePackages(self: super: rec {
python27Packages = (super.python27Packages.override {self = python27Packages;})
// { django = super.python27Packages.django_1_9; };
});
in newpkgs.python27.withPackages (ps: [ps.django_guardian ])
).env
```
### `python setup.py bdist_wheel` cannot create .whl
Executing `python setup.py bdist_wheel` in a `nix-shell` fails with
```
ValueError: ZIP does not support timestamps before 1980
```
This is because files are included that depend on items in the Nix store, which have a timestamp corresponding to January the 1st, 1970 at 00:00:00. And as the error informs you, ZIP does not support that.
The command `bdist_wheel` takes into account `SOURCE_DATE_EPOCH`, and `nix-shell` sets this to 1. By setting it to a value corresponding to 1980 or later, or by unsetting it, it is possible to build wheels.
Use 1980 as timestamp:
```
nix-shell --run "SOURCE_DATE_EPOCH=315532800 python3 setup.py bdist_wheel"
```
or the current time:
```
nix-shell --run "SOURCE_DATE_EPOCH=$(date +%s) python3 setup.py bdist_wheel"
```
or unset:
```
nix-shell --run "unset SOURCE_DATE_EPOCH; python3 setup.py bdist_wheel"
```
### `install_data` / `data_files` problems

View File

@ -20,8 +20,6 @@
<xi:include href="package-notes.xml" />
<xi:include href="coding-conventions.xml" />
<xi:include href="submitting-changes.xml" />
<xi:include href="haskell-users-guide.xml" />
<xi:include href="beam-users-guide.xml" />
<xi:include href="contributing.xml" />
</book>

View File

@ -29,15 +29,15 @@
<section><title>Using a split package</title>
<para>In the Nix language the individual outputs can be reached explicitly as attributes, e.g. <varname>coreutils.info</varname>, but the typical case is just using packages as build inputs.</para>
<para>When a multiple-output derivation gets into a build input of another derivation, the first output is added (<varname>.dev</varname> by convention) and also <varname>propagatedBuildOutputs</varname> of that package which by default contain <varname>$outputBin</varname> and <varname>$outputLib</varname>. (See <xref linkend="multiple-output-file-type-groups" />.)</para>
<para>When a multiple-output derivation gets into a build input of another derivation, the <varname>dev</varname> output is added if it exists, otherwise the first output is added. In addition to that, <varname>propagatedBuildOutputs</varname> of that package which by default contain <varname>$outputBin</varname> and <varname>$outputLib</varname> are also added. (See <xref linkend="multiple-output-file-type-groups" />.)</para>
</section>
<section><title>Writing a split derivation</title>
<para>Here you find how to write a derivation that produces multiple outputs.</para>
<para>In nixpkgs there is a framework supporting multiple-output derivations. It tries to cover most cases with its default behavior. You can find the source separated in &lt;<filename>nixpkgs/pkgs/build-support/setup-hooks/multiple-outputs.sh</filename>&gt;; it's relatively readable. The whole machinery is triggered by defining the <varname>outputs</varname> attribute to contain the list of desired output names (strings).</para>
<programlisting>outputs = [ "dev" "out" "bin" "doc" ];</programlisting>
<para>Often such a single line is enough. For each output an equally named environment variable is passed to the builder and contains the path in nix store for that output. By convention, the first output should usually be <varname>dev</varname>; typically you also want to have the main <varname>out</varname> output, as it catches any files that didn't get elsewhere.</para>
<programlisting>outputs = [ "bin" "dev" "out" "doc" ];</programlisting>
<para>Often such a single line is enough. For each output an equally named environment variable is passed to the builder and contains the path in nix store for that output. By convention, the first output should contain the executable programs provided by the package as that output is used by Nix in string conversions, allowing references to binaries like <literal>${pkgs.perl}/bin/perl</literal> to always work. Typically you also want to have the main <varname>out</varname> output, as it catches any files that didn't get elsewhere.</para>
<note><para>There is a special handling of the <varname>debug</varname> output, described at <xref linkend="stdenv-separateDebugInfo" />.</para></note>
@ -63,7 +63,7 @@
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputDocdev</varname></term><listitem><para>
is for <emphasis>developer</emphasis> documentation. Currently we count gtk-doc and man3 pages in there. It goes to <varname>docdev</varname> or is removed (!) by default. This is because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
is for <emphasis>developer</emphasis> documentation. Currently we count gtk-doc and man3 pages in there. It goes to <varname>devdoc</varname> or is removed (!) by default. This is because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputMan</varname></term><listitem><para>

View File

@ -1,14 +0,0 @@
Semi-automatic source information updating using "update-upstream-data.sh" script and "src-{,info-}for-*.nix"
1. Recognizing when a pre-existing package uses this mechanism.
Packages using this automatic update mechanism have src-info-for-default.nix and src-for-default.nix next to default.nix. src-info-for-default.nix describes getting the freshest source from the upstream web site; src-for-default.nix is a generated file with the current data about the used source. Both files define a simple attrSet.
src-info-for-default.nix (for a file grabbed via http) contains at least a downloadPage attribute - it is the page we need to look at to find out the latest version. It also contains baseName, which is used for automatic generation of a package name containing the version. It can contain extra data for trickier cases.
src-for-default.nix will contain advertisedUrl (raw URL chosen on the site; its change prompts regeneration of source data), url for fetchurl, hash, version retrieved from the download URL and suggested package name.
2. Updating a package
The nixpkgs/pkgs/build-support/upstream-updater directory contains some scripts. The worker script is called update-upstream-data.sh. This script requires the main expression name (e.g. default.nix). It can optionally accept a second parameter, a URL, which will be used instead of getting one by parsing the downloadPage (version extraction, mirror URL creation etc. will still be run). After running the script, check src-for-default.nix (or replace default.nix with the expression name, if there are several expressions in the directory) for new version information.

View File

@ -557,8 +557,8 @@ script) if it exists.</para>
<varlistentry>
<term><varname>configureFlags</varname></term>
<listitem><para>Additional arguments passed to the configure
script.</para></listitem>
<listitem><para>A list of strings passed as additional arguments to the
configure script.</para></listitem>
</varlistentry>
<varlistentry>
@ -658,7 +658,7 @@ nothing.</para>
<varlistentry>
<term><varname>makeFlags</varname></term>
<listitem><para>Additional flags passed to
<listitem><para>A list of strings passed as additional flags to
<command>make</command>. These flags are also used by the default
install and check phase. For setting make flags specific to the
build phase, use <varname>buildFlags</varname> (see
@ -685,7 +685,7 @@ makeFlagsArray=(CFLAGS="-O0 -g" LDFLAGS="-lfoo -lbar")
<varlistentry>
<term><varname>buildFlags</varname> / <varname>buildFlagsArray</varname></term>
<listitem><para>Additional flags passed to
<listitem><para>A list of strings passed as additional flags to
<command>make</command>. Like <varname>makeFlags</varname> and
<varname>makeFlagsArray</varname>, but only used by the build
phase.</para></listitem>
@ -753,7 +753,7 @@ doCheck = true;</programlisting>
<varlistentry>
<term><varname>checkFlags</varname> / <varname>checkFlagsArray</varname></term>
<listitem><para>Additional flags passed to
<listitem><para>A list of strings passed as additional flags to
<command>make</command>. Like <varname>makeFlags</varname> and
<varname>makeFlagsArray</varname>, but only used by the check
phase.</para></listitem>
@ -808,7 +808,7 @@ installTargets = "install-bin install-doc";</programlisting>
<varlistentry>
<term><varname>installFlags</varname> / <varname>installFlagsArray</varname></term>
<listitem><para>Additional flags passed to
<listitem><para>A list of strings passed as additional flags to
<command>make</command>. Like <varname>makeFlags</varname> and
<varname>makeFlagsArray</varname>, but only used by the install
phase.</para></listitem>
@ -988,6 +988,41 @@ set debug-file-directory ~/.nix-profile/lib/debug
</section>
<section xml:id="ssec-installCheck-phase"><title>The installCheck phase</title>
<para>The installCheck phase checks whether the package was installed
correctly by running its test suite against the installed directories.
The default <function>installCheck</function> calls <command>make
installcheck</command>.</para>
<variablelist>
<title>Variables controlling the installCheck phase</title>
<varlistentry>
<term><varname>doInstallCheck</varname></term>
<listitem><para>If set to a non-empty string, the installCheck phase is
executed, otherwise it is skipped (default). Thus you should set
<programlisting>doInstallCheck = true;</programlisting>
in the derivation to enable install checks.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>preInstallCheck</varname></term>
<listitem><para>Hook executed at the start of the installCheck
phase.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>postInstallCheck</varname></term>
<listitem><para>Hook executed at the end of the installCheck
phase.</para></listitem>
</varlistentry>
</variablelist>
</section>
<section xml:id="ssec-distribution-phase"><title>The distribution
phase</title>
@ -1196,10 +1231,24 @@ echo @foo@
<term><function>stripHash</function>
<replaceable>path</replaceable></term>
<listitem><para>Strips the directory and hash part of a store
path, and prints (on standard output) only the name part. For
instance, <literal>stripHash
/nix/store/68afga4khv0w...-coreutils-6.12</literal> print
<literal>coreutils-6.12</literal>.</para></listitem>
path, storing the name part in the environment variable
<literal>strippedName</literal>. For example:
<programlisting>
stripHash "/nix/store/9s9r019176g7cvn2nvcw41gsp862y6b4-coreutils-8.24"
# prints coreutils-8.24
echo $strippedName
</programlisting>
If you wish to store the result in another variable, then the
following idiom may be useful:
<programlisting>
name="/nix/store/9s9r019176g7cvn2nvcw41gsp862y6b4-coreutils-8.24"
someVar=$(stripHash $name; echo $strippedName)
</programlisting>
</para></listitem>
</varlistentry>
@ -1305,6 +1354,25 @@ echo @foo@
<envar>GST_PLUGIN_SYSTEM_PATH</envar> environment variable.</para></listitem>
</varlistentry>
<varlistentry>
<term>paxctl</term>
<listitem><para>Defines the <varname>paxmark</varname> helper for
setting per-executable PaX flags on Linux (where it is available by
default; on all other platforms, <varname>paxmark</varname> is a no-op).
For example, to disable secure memory protections on the executable
<replaceable>foo</replaceable>:
<programlisting>
postFixup = ''
paxmark m $out/bin/<replaceable>foo</replaceable>
'';
</programlisting>
The <literal>m</literal> flag is the most common flag and is typically
required for applications that employ JIT compilation or otherwise need to
execute code generated at run-time. Disabling PaX protections should be
considered a last resort: if possible, problematic features should be
disabled or patched to work with PaX.</para></listitem>
</varlistentry>
</variablelist>
</para>
@ -1327,6 +1395,209 @@ in the default system locations.</para>
</section>
<section xml:id="sec-hardening-in-nixpkgs"><title>Hardening in Nixpkgs</title>
<para>There are flags available to harden packages at compile or link-time.
These can be toggled using the <varname>stdenv.mkDerivation</varname> parameters
<varname>hardeningDisable</varname> and <varname>hardeningEnable</varname>.
</para>
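<para>For example, a package that cannot be built with the default
<varname>format</varname> hardening (described below) and that should
additionally be built as a position independent executable could be declared
roughly like this; a minimal sketch with a hypothetical package name and
source:</para>

<programlisting>
stdenv.mkDerivation {
  name = "example-1.0";
  src = ./example-1.0.tar.gz;
  hardeningDisable = [ "format" ];
  hardeningEnable = [ "pie" ];
}
</programlisting>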
<para>The following flags are enabled by default and might require disabling
if the program to package is incompatible.
</para>
<variablelist>
<varlistentry>
<term><varname>format</varname></term>
<listitem><para>Adds the <option>-Wformat -Wformat-security
-Werror=format-security</option> compiler options. At present,
this warns about calls to <varname>printf</varname> and
<varname>scanf</varname> functions where the format string is
not a string literal and there are no format arguments, as in
<literal>printf(foo);</literal>. This may be a security hole
if the format string came from untrusted input and contains
<literal>%n</literal>.</para>
<para>This needs to be turned off or fixed for errors similar to:</para>
<programlisting>
/tmp/nix-build-zynaddsubfx-2.5.2.drv-0/zynaddsubfx-2.5.2/src/UI/guimain.cpp:571:28: error: format not a string literal and no format arguments [-Werror=format-security]
printf(help_message);
^
cc1plus: some warnings being treated as errors
</programlisting></listitem>
</varlistentry>
<varlistentry>
<term><varname>stackprotector</varname></term>
<listitem>
<para>Adds the <option>-fstack-protector-strong
--param ssp-buffer-size=4</option>
compiler options. This adds safety checks against stack overwrites
rendering many potential code injection attacks into aborting situations.
In the best case this turns code injection vulnerabilities into denial
of service or into non-issues (depending on the application).</para>
<para>This needs to be turned off or fixed for errors similar to:</para>
<programlisting>
bin/blib.a(bios_console.o): In function `bios_handle_cup':
/tmp/nix-build-ipxe-20141124-5cbdc41.drv-0/ipxe-5cbdc41/src/arch/i386/firmware/pcbios/bios_console.c:86: undefined reference to `__stack_chk_fail'
</programlisting></listitem>
</varlistentry>
<varlistentry>
<term><varname>fortify</varname></term>
<listitem>
<para>Adds the <option>-O2 -D_FORTIFY_SOURCE=2</option> compiler
options. During code generation the compiler knows a great deal of
information about buffer sizes (where possible), and attempts to replace
insecure unlimited length buffer function calls with length-limited ones.
This is especially useful for old, crufty code. Additionally, format
strings in writable memory that contain '%n' are blocked. If an application
depends on such a format string, it will need to be worked around.
</para>
<para>Additionally, some warnings are enabled which might trigger build
failures if compiler warnings are treated as errors in the package build.
In this case, set <option>NIX_CFLAGS_COMPILE</option> to
<option>-Wno-error=warning-type</option>.</para>
<para>This needs to be turned off or fixed for errors similar to:</para>
<programlisting>
malloc.c:404:15: error: return type is an incomplete type
malloc.c:410:19: error: storage size of 'ms' isn't known
</programlisting>
<programlisting>
strdup.h:22:1: error: expected identifier or '(' before '__extension__'
</programlisting>
<programlisting>
strsep.c:65:23: error: register name not specified for 'delim'
</programlisting>
<programlisting>
installwatch.c:3751:5: error: conflicting types for '__open_2'
</programlisting>
<programlisting>
fcntl2.h:50:4: error: call to '__open_missing_mode' declared with attribute error: open with O_CREAT or O_TMPFILE in second argument needs 3 arguments
</programlisting>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>pic</varname></term>
<listitem>
<para>Adds the <option>-fPIC</option> compiler option. This option adds
support for position-independent code in shared libraries, thus making
ASLR possible.</para>
<para>Most notably, the Linux kernel, kernel modules and other code
not running in an operating system environment like boot loaders won't
build with PIC enabled. The compiler will in most cases complain that
PIC is not supported for a specific build.
</para>
<para>This needs to be turned off or fixed for assembler errors similar to:</para>
<programlisting>
ccbLfRgg.s: Assembler messages:
ccbLfRgg.s:33: Error: missing or invalid displacement expression `private_key_len@GOTOFF'
</programlisting>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>strictoverflow</varname></term>
<listitem>
<para>Signed integer overflow is undefined behaviour according to the C
standard. If it happens, it is an error in the program as it should check
for overflow before it can happen, not afterwards. GCC provides built-in
functions to perform arithmetic with overflow checking, which are correct
and faster than any custom implementation. As a workaround, the option
<option>-fno-strict-overflow</option> makes gcc behave as if signed
integer overflows were defined.
</para>
<para>This flag should not trigger any build or runtime errors.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>relro</varname></term>
<listitem>
<para>Adds the <option>-z relro</option> linker option. During program
load, several ELF memory sections need to be written to by the linker,
but can be turned read-only before turning over control to the program.
This prevents some GOT (and .dtors) overwrite attacks, but at least the
part of the GOT used by the dynamic linker (.got.plt) is still vulnerable.
</para>
<para>This flag can break dynamic shared object loading. For instance, the
module systems of Xorg and OpenCV are incompatible with this flag. In almost
all cases the <varname>bindnow</varname> flag must also be disabled and
incompatible programs typically fail with similar errors at runtime.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>bindnow</varname></term>
<listitem>
<para>Adds the <option>-z bindnow</option> linker option. During program
load, all dynamic symbols are resolved, allowing for the complete GOT to
be marked read-only (due to <varname>relro</varname>). This prevents GOT
overwrite attacks. For very large applications, this can incur some
performance loss during initial load while symbols are resolved, but this
shouldn't be an issue for daemons.
</para>
<para>This flag can break dynamic shared object loading. For instance, the
module systems of Xorg and PHP are incompatible with this flag. Programs
incompatible with this flag often fail at runtime due to missing symbols,
like:</para>
<programlisting>
intel_drv.so: undefined symbol: vgaHWFreeHWRec
</programlisting>
</listitem>
</varlistentry>
</variablelist>
<para>The following flags are disabled by default and should be enabled
for packages that take untrusted input, like network services.
</para>
<variablelist>
<varlistentry>
<term><varname>pie</varname></term>
<listitem>
<para>Adds the <option>-fPIE</option> compiler and <option>-pie</option>
linker options. Position Independent Executables are needed to take
advantage of Address Space Layout Randomization, supported by modern
kernel versions. While ASLR can already be enforced for data areas in
the stack and heap (brk and mmap), the code areas must be compiled as
position-independent. Shared libraries already do this with the
<varname>pic</varname> flag, so they gain ASLR automatically, but binary
.text regions need to be built with <varname>pie</varname> to gain ASLR.
When this happens, ROP attacks are much harder since there are no static
locations to bounce off of during a memory corruption attack.
</para>
</listitem>
</varlistentry>
</variablelist>
<para>For more in-depth information on these hardening flags and hardening in
general, refer to the
<link xlink:href="https://wiki.debian.org/Hardening">Debian Wiki</link>,
<link xlink:href="https://wiki.ubuntu.com/Security/Features">Ubuntu Wiki</link>,
<link xlink:href="https://wiki.gentoo.org/wiki/Project:Hardened">Gentoo Wiki</link>,
and the <link xlink:href="https://wiki.archlinux.org/index.php/DeveloperWiki:Security">
Arch Wiki</link>.
</para>
</section>
</chapter>

View File

@ -296,12 +296,17 @@ rec {
/* Converts a store path to a fake derivation. */
toDerivation = path:
let path' = builtins.storePath path; in
{ type = "derivation";
name = builtins.unsafeDiscardStringContext (builtins.substring 33 (-1) (baseNameOf path'));
outPath = path';
outputs = [ "out" ];
};
let
path' = builtins.storePath path;
res =
{ type = "derivation";
name = builtins.unsafeDiscardStringContext (builtins.substring 33 (-1) (baseNameOf path'));
outPath = path';
outputs = [ "out" ];
out = res;
outputName = "out";
};
in res;
/* If `cond' is true, return the attribute set `as',
@ -438,28 +443,27 @@ rec {
overrideExisting = old: new:
old // listToAttrs (map (attr: nameValuePair attr (attrByPath [attr] old.${attr} new)) (attrNames old));
/* Try given attributes in order. If no attributes are found, return
attribute list itself.
/* Get a package output.
If no output is found, fallback to `.out` and then to the default.
Example:
tryAttrs ["a" "b"] { a = 1; b = 2; }
=> 1
tryAttrs ["a" "b"] { c = 3; }
=> { c = 3; }
getOutput "dev" pkgs.openssl
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-dev"
*/
tryAttrs = allAttrs: set:
let tryAttrs_ = attrs:
if attrs == [] then set
else
(let h = head attrs; in
if hasAttr h set then getAttr h set
else tryAttrs_ (tail attrs));
in tryAttrs_ allAttrs;
getOutput = output: pkg:
if pkg.outputUnspecified or false
then pkg.${output} or pkg.out or pkg
else pkg;
getBin = getOutput "bin";
getLib = getOutput "lib";
getDev = getOutput "dev";
/* Pick the outputs of packages to place in buildInputs */
chooseDevOutputs = drvs: builtins.map getDev drvs;
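  /* A sketch of typical usage (the package names are only illustrative):

       chooseDevOutputs [ pkgs.openssl pkgs.zlib ]
       => [ pkgs.openssl.dev pkgs.zlib.dev ]

     Packages without a dev output fall back to `.out` or to the package
     itself, as implemented by `getOutput` above. */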
/*** deprecated stuff ***/
deepSeqAttrs = throw "removed 2016-02-29 because unused and broken";
zipWithNames = zipAttrsWithNames;
zip = builtins.trace
"lib.zip is deprecated, use lib.zipAttrsWith instead" zipAttrsWith;

View File

@ -50,7 +50,7 @@ let inherit (lib) nv nvs; in
# nice features:
# declaring "optional featuers" is modular. For instance:
# flags.curl = {
# configureFlags = ["--with-curl=${curl}" "--with-curlwrappers"];
# configureFlags = ["--with-curl=${curl.dev}" "--with-curlwrappers"];
# buildInputs = [curl openssl];
# };
# flags.other = { .. }

View File

@ -19,6 +19,10 @@ rec {
traceXMLVal = x: trace (builtins.toXML x) x;
traceXMLValMarked = str: x: trace (str + builtins.toXML x) x;
# strict trace functions (traced structure is fully evaluated and printed)
traceSeq = x: y: trace (builtins.deepSeq x x) y;
traceValSeq = v: traceVal (builtins.deepSeq v v);
# this can help debug your code as well - designed to not produce thousands of lines
traceShowVal = x : trace (showVal x) x;
traceShowValMarked = str: x: trace (str + showVal x) x;
@ -69,27 +73,9 @@ rec {
# usage: { testX = allTrue [ true ]; }
testAllTrue = expr : { inherit expr; expected = map (x: true) expr; };
# evaluate everything once so that errors will occur earlier
# hacky: traverse attrs by adding a dummy
# ignores functions (should this behavior change?) See strictf
#
# Note: This should be a primop! Something like seq of haskell would be nice to
# have as well. It's used for debugging only anyway
strict = x :
let
traverse = x :
if isString x then true
else if isAttrs x then
if x ? outPath then true
else all id (mapAttrsFlatten (n: traverse) x)
else if isList x then
all id (map traverse x)
else if isBool x then true
else if isFunction x then true
else if isInt x then true
else if x == null then true
else true; # a (store) path?
in if traverse x then x else throw "else never reached";
strict = v:
trace "Warning: strict is deprecated and will be removed in the next release"
(builtins.seq v v);
# example: (traceCallXml "myfun" id 3) will output something like
# calling myfun arg 1: 3 result: 3

View File

@ -1,27 +1,46 @@
let
let
# trivial, often used functions
trivial = import ./trivial.nix;
# datatypes
attrsets = import ./attrsets.nix;
lists = import ./lists.nix;
strings = import ./strings.nix;
stringsWithDeps = import ./strings-with-deps.nix;
attrsets = import ./attrsets.nix;
# packaging
customisation = import ./customisation.nix;
maintainers = import ./maintainers.nix;
meta = import ./meta.nix;
sources = import ./sources.nix;
# module system
modules = import ./modules.nix;
options = import ./options.nix;
types = import ./types.nix;
meta = import ./meta.nix;
debug = import ./debug.nix;
misc = import ./deprecated.nix;
maintainers = import ./maintainers.nix;
# constants
licenses = import ./licenses.nix;
platforms = import ./platforms.nix;
systems = import ./systems.nix;
customisation = import ./customisation.nix;
licenses = import ./licenses.nix;
# misc
debug = import ./debug.nix;
misc = import ./deprecated.nix;
# domain-specific
sandbox = import ./sandbox.nix;
fetchers = import ./fetchers.nix;
in
{ inherit trivial lists strings stringsWithDeps attrsets sources options
modules types meta debug maintainers licenses platforms systems sandbox;
{ inherit trivial
attrsets lists strings stringsWithDeps
customisation maintainers meta sources
modules options types
licenses platforms systems
debug misc
sandbox fetchers;
}
# !!! don't include everything at top-level; perhaps only the most
# commonly used functions.

12
lib/fetchers.nix Normal file
View File

@ -0,0 +1,12 @@
# snippets that can be shared by multiple fetchers (pkgs/build-support)
{
proxyImpureEnvVars = [
# We borrow these environment variables from the caller to allow
# easy proxy configuration. This is impure, but a fixed-output
# derivation like fetchurl is allowed to do so since its result is
# by definition pure.
"http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
];
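  # A sketch of typical usage in a fixed-output fetcher derivation
  # (the extra variable is only an example):
  #   impureEnvVars = lib.fetchers.proxyImpureEnvVars ++ [ "GIT_PROXY_COMMAND" ];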
}

View File

@ -65,6 +65,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Boost Software License 1.0";
};
beerware = spdx {
spdxId = "Beerware";
fullName = ''Beerware License'';
};
bsd2 = spdx {
spdxId = "BSD-2-Clause";
fullName = ''BSD 2-clause "Simplified" License'';
@ -188,13 +193,24 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fdl13 = spdx {
spdxId = "GFDL-1.3";
fullName = "GNU Free Documentation License v1.2";
fullName = "GNU Free Documentation License v1.3";
};
free = {
fullName = "Unspecified free software license";
};
g4sl = {
fullName = "Geant4 Software License";
url = https://geant4.web.cern.ch/geant4/license/LICENSE.html;
};
geogebra = {
fullName = "GeoGebra Non-Commercial License Agreement";
url = https://www.geogebra.org/license;
free = false;
};
gpl1 = spdx {
spdxId = "GPL-1.0";
fullName = "GNU General Public License v1.0 only";
@ -459,6 +475,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "The Unlicense";
};
upl = {
fullName = "Universal Permissive License";
url = "https://oss.oracle.com/licenses/upl/";
};
vim = spdx {
spdxId = "Vim";
fullName = "Vim License";

View File

@ -24,7 +24,7 @@ rec {
Example:
concat = fold (a: b: a + b) "z"
concat [ "a" "b" "c" ]
=> "abcnul"
=> "abcz"
*/
fold = op: nul: list:
let
@ -68,18 +68,7 @@ rec {
imap (i: v: "${v}-${toString i}") ["a" "b"]
=> [ "a-1" "b-2" ]
*/
imap =
if builtins ? genList then
f: list: genList (n: f (n + 1) (elemAt list n)) (length list)
else
f: list:
let
len = length list;
imap' = n:
if n == len
then []
else [ (f (n + 1) (elemAt list n)) ] ++ imap' (n + 1);
in imap' 0;
imap = f: list: genList (n: f (n + 1) (elemAt list n)) (length list);
/* Map and concatenate the result.
@ -100,7 +89,7 @@ rec {
*/
flatten = x:
if isList x
then foldl' (x: y: x ++ (flatten y)) [] x
then concatMap (y: flatten y) x
else [x];
/* Remove elements equal to 'e' from a list. Useful for buildInputs.
@ -216,17 +205,11 @@ rec {
range 3 2
=> [ ]
*/
range =
if builtins ? genList then
first: last:
if first > last
then []
else genList (n: first + n) (last - first + 1)
range = first: last:
if first > last then
[]
else
first: last:
if last < first
then []
else [first] ++ range (first + 1) last;
genList (n: first + n) (last - first + 1);
/* Splits the elements of a list in two lists, `right' and
`wrong', depending on the evaluation of a predicate.
@ -235,12 +218,12 @@ rec {
partition (x: x > 2) [ 5 1 2 3 4 ]
=> { right = [ 5 3 4 ]; wrong = [ 1 2 ]; }
*/
partition = pred:
partition = builtins.partition or (pred:
fold (h: t:
if pred h
then { right = [h] ++ t.right; wrong = t.wrong; }
else { right = t.right; wrong = [h] ++ t.wrong; }
) { right = []; wrong = []; };
) { right = []; wrong = []; });
/* Merges two lists of the same size together. If the sizes aren't the same
the merging stops at the shortest. How both lists are merged is defined
@ -250,19 +233,9 @@ rec {
zipListsWith (a: b: a + b) ["h" "l"] ["e" "o"]
=> ["he" "lo"]
*/
zipListsWith =
if builtins ? genList then
f: fst: snd: genList (n: f (elemAt fst n) (elemAt snd n)) (min (length fst) (length snd))
else
f: fst: snd:
let
len = min (length fst) (length snd);
zipListsWith' = n:
if n != len then
[ (f (elemAt fst n) (elemAt snd n)) ]
++ zipListsWith' (n + 1)
else [];
in zipListsWith' 0;
zipListsWith = f: fst: snd:
genList
(n: f (elemAt fst n) (elemAt snd n)) (min (length fst) (length snd));
/* Merges two lists of the same size together. If the sizes aren't the same
the merging stops at the shortest.
@ -280,11 +253,88 @@ rec {
reverseList [ "b" "o" "j" ]
=> [ "j" "o" "b" ]
*/
reverseList =
if builtins ? genList then
xs: let l = length xs; in genList (n: elemAt xs (l - n - 1)) l
else
fold (e: acc: acc ++ [ e ]) [];
reverseList = xs:
let l = length xs; in genList (n: elemAt xs (l - n - 1)) l;
/* Depth-First Search (DFS) for lists `list != []`.
`before a b == true` means that `b` depends on `a` (there's an
edge from `b` to `a`).
Examples:
listDfs true hasPrefix [ "/home/user" "other" "/" "/home" ]
== { minimal = "/"; # minimal element
visited = [ "/home/user" ]; # seen elements (in reverse order)
rest = [ "/home" "other" ]; # everything else
}
listDfs true hasPrefix [ "/home/user" "other" "/" "/home" "/" ]
== { cycle = "/"; # cycle encountered at this element
loops = [ "/" ]; # and continues to these elements
visited = [ "/" "/home/user" ]; # elements leading to the cycle (in reverse order)
rest = [ "/home" "other" ]; # everything else
*/
listDfs = stopOnCycles: before: list:
let
dfs' = us: visited: rest:
let
c = filter (x: before x us) visited;
b = partition (x: before x us) rest;
in if stopOnCycles && (length c > 0)
then { cycle = us; loops = c; inherit visited rest; }
else if length b.right == 0
then # nothing is before us
{ minimal = us; inherit visited rest; }
else # grab the first one before us and continue
dfs' (head b.right)
([ us ] ++ visited)
(tail b.right ++ b.wrong);
in dfs' (head list) [] (tail list);
/* Sort a list based on a partial ordering using DFS. This
implementation is O(N^2), if your ordering is linear, use `sort`
instead.
`before a b == true` means that `b` should be after `a`
in the result.
Examples:
toposort hasPrefix [ "/home/user" "other" "/" "/home" ]
== { result = [ "/" "/home" "/home/user" "other" ]; }
toposort hasPrefix [ "/home/user" "other" "/" "/home" "/" ]
== { cycle = [ "/home/user" "/" "/" ]; # path leading to a cycle
loops = [ "/" ]; } # loops back to these elements
toposort hasPrefix [ "other" "/home/user" "/home" "/" ]
== { result = [ "other" "/" "/home" "/home/user" ]; }
toposort (a: b: a < b) [ 3 2 1 ] == { result = [ 1 2 3 ]; }
*/
toposort = before: list:
let
dfsthis = listDfs true before list;
toporest = toposort before (dfsthis.visited ++ dfsthis.rest);
in
if length list < 2
then # finish
{ result = list; }
else if dfsthis ? "cycle"
then # there's a cycle, starting from the current vertex, return it
{ cycle = reverseList ([ dfsthis.cycle ] ++ dfsthis.visited);
inherit (dfsthis) loops; }
else if toporest ? "cycle"
then # there's a cycle somewhere else in the graph, return it
toporest
# Slow, but short. Can be made a bit faster with an explicit stack.
else # there are no cycles
{ result = [ dfsthis.minimal ] ++ toporest.result; };
/* Sort a list based on a comparator function which compares two
elements and returns true if the first argument is strictly below
@ -320,19 +370,7 @@ rec {
take 2 [ ]
=> [ ]
*/
take =
if builtins ? genList then
count: sublist 0 count
else
count: list:
let
len = length list;
take' = n:
if n == len || n == count
then []
else
[ (elemAt list n) ] ++ take' (n + 1);
in take' 0;
take = count: sublist 0 count;
/* Remove the first (at most) N elements of a list.
@ -342,19 +380,7 @@ rec {
drop 2 [ ]
=> [ ]
*/
drop =
if builtins ? genList then
count: list: sublist count (length list) list
else
count: list:
let
len = length list;
drop' = n:
if n == -1 || n < count
then []
else
drop' (n - 1) ++ [ (elemAt list n) ];
in drop' (len - 1);
drop = count: list: sublist count (length list) list;
/* Return a list consisting of at most count elements of list,
starting at index start.
@ -428,8 +454,4 @@ rec {
*/
subtractLists = e: filter (x: !(elem x e));
/*** deprecated stuff ***/
deepSeqList = throw "removed 2016-02-29 because unused and broken";
}

View File

@ -11,10 +11,13 @@
abaldeau = "Andreas Baldeau <andreas@baldeau.net>";
abbradar = "Nikolay Amiantov <ab@fmap.me>";
aboseley = "Adam Boseley <adam.boseley@gmail.com>";
abuibrahim = "Ruslan Babayev <ruslan@babayev.com>";
acowley = "Anthony Cowley <acowley@gmail.com>";
adev = "Adrien Devresse <adev@adev.name>";
Adjective-Object = "Maxwell Huang-Hobbs <mhuan13@gmail.com>";
aespinosa = "Allan Espinosa <allan.espinosa@outlook.com>";
adnelson = "Allen Nelson <ithinkican@gmail.com>";
adolfogc = "Adolfo E. García Castro <adolfo.garcia.cr@gmail.com>";
aespinosa = "Allan Espinosa <allan.espinosa@outlook.com>";
aflatter = "Alexander Flatter <flatter@fastmail.fm>";
aforemny = "Alexander Foremny <alexanderforemny@googlemail.com>";
afranchuk = "Alex Franchuk <alex.franchuk@gmail.com>";
@ -31,18 +34,23 @@
anderspapitto = "Anders Papitto <anderspapitto@gmail.com>";
andres = "Andres Loeh <ksnixos@andres-loeh.de>";
andrewrk = "Andrew Kelley <superjoe30@gmail.com>";
andsild = "Anders Sildnes <andsild@gmail.com>";
aneeshusa = "Aneesh Agrawal <aneeshusa@gmail.com>";
antono = "Antono Vasiljev <self@antono.info>";
apeyroux = "Alexandre Peyroux <alex@px.io>";
ardumont = "Antoine R. Dumont <eniotna.t@gmail.com>";
aristid = "Aristid Breitkreuz <aristidb@gmail.com>";
arobyn = "Alexei Robyn <shados@shados.net>";
artuuge = "Artur E. Ruuge <artuuge@gmail.com>";
ashalkhakov = "Artyom Shalkhakov <artyom.shalkhakov@gmail.com>";
aske = "Kirill Boltaev <aske@fmap.me>";
asppsa = "Alastair Pharo <asppsa@gmail.com>";
astsmtl = "Alexander Tsamutali <astsmtl@yandex.ru>";
aszlig = "aszlig <aszlig@redmoonstudios.org>";
auntie = "Jonathan Glines <auntieNeo@gmail.com>";
avnik = "Alexander V. Nikolaev <avn@avnik.info>";
aycanirican = "Aycan iRiCAN <iricanaycan@gmail.com>";
bachp = "Pascal Bach <pascal.bach@nextrem.ch>";
badi = "Badi' Abdul-Wahid <abdulwahidc@gmail.com>";
balajisivaraman = "Balaji Sivaraman<sivaraman.balaji@gmail.com>";
Baughn = "Svein Ove Aas <sveina@gmail.com>";
@ -69,13 +77,19 @@
c0dehero = "CodeHero <codehero@nerdpol.ch>";
calrama = "Moritz Maxeiner <moritz@ucworks.org>";
campadrenalin = "Philip Horger <campadrenalin@gmail.com>";
carlsverre = "Carl Sverre <accounts@carlsverre.com>";
cdepillabout = "Dennis Gosnell <cdep.illabout@gmail.com>";
cfouche = "Chaddaï Fouché <chaddai.fouche@gmail.com>";
chaoflow = "Florian Friesdorf <flo@chaoflow.net>";
chattered = "Phil Scott <me@philscotted.com>";
choochootrain = "Hurshal Patel <hurshal@imap.cc>";
chris-martin = "Chris Martin <ch.martin@gmail.com>";
chrisjefferson = "Christopher Jefferson <chris@bubblescope.net>";
christopherpoole = "Christopher Mark Poole <mail@christopherpoole.net>";
cko = "Christine Koppelt <christine.koppelt@gmail.com>";
cleverca22 = "Michael Bishop <cleverca22@gmail.com>";
cmcdragonkai = "Roger Qiu <roger.qiu@matrix.ai>";
cmfwyp = "cmfwyp <cmfwyp@riseup.net>";
coconnor = "Corey O'Connor <coreyoconnor@gmail.com>";
codsl = "codsl <codsl@riseup.net>";
codyopel = "Cody Opel <codyopel@gmail.com>";
@ -84,14 +98,17 @@
coroa = "Jonas Hörsch <jonas@chaoflow.net>";
couchemar = "Andrey Pavlov <couchemar@yandex.ru>";
cransom = "Casey Ransom <cransom@hubns.net>";
cryptix = "Henry Bubert <cryptix@riseup.net>";
CrystalGamma = "Jona Stubbe <nixos@crystalgamma.de>";
cstrahan = "Charles Strahan <charles.c.strahan@gmail.com>";
cstrahan = "Charles Strahan <charles@cstrahan.com>";
cwoac = "Oliver Matthews <oliver@codersoffortune.net>";
DamienCassou = "Damien Cassou <damien@cassou.me>";
dasuxullebt = "Christoph-Simon Senjak <christoph.senjak@googlemail.com>";
danbst = "Danylo Hlynskyi <abcz2.uprola@gmail.com>";
davidak = "David Kleuker <post@davidak.de>";
davidrusu = "David Rusu <davidrusu.me@gmail.com>";
dbohdan = "Danyil Bohdan <danyil.bohdan@gmail.com>";
dbrock = "Daniel Brockman <daniel@brockman.se>";
deepfire = "Kosyrev Serge <_deepfire@feelingofgreen.ru>";
demin-dmitriy = "Dmitriy Demin <demindf@gmail.com>";
DerGuteMoritz = "Moritz Heidkamp <moritz@twoticketsplease.de>";
@ -103,6 +120,7 @@
dmalikov = "Dmitry Malikov <malikov.d.y@gmail.com>";
dochang = "Desmond O. Chang <dochang@gmail.com>";
doublec = "Chris Double <chris.double@double.co.nz>";
drets = "Dmytro Rets <dmitryrets@gmail.com>";
drewkett = "Andrew Burkett <burkett.andrew@gmail.com>";
ebzzry = "Rommel Martinez <ebzzry@gmail.com>";
ederoyd46 = "Matthew Brown <matt@ederoyd.co.uk>";
@ -119,7 +137,8 @@
ericbmerritt = "Eric Merritt <eric@afiniate.com>";
ericsagnes = "Eric Sagnes <eric.sagnes@gmail.com>";
erikryb = "Erik Rybakken <erik.rybakken@math.ntnu.no>";
ertes = "Ertugrul Söylemez <ertesx@gmx.de>";
ertes = "Ertugrul Söylemez <esz@posteo.de>";
ethercrow = "Dmitry Ivanov <ethercrow@gmail.com>";
exi = "Reno Reckling <nixos@reckling.org>";
exlevan = "Alexey Levan <exlevan@gmail.com>";
expipiplus1 = "Joe Hermaszewski <nix@monoid.al>";
@ -127,6 +146,7 @@
falsifian = "James Cook <james.cook@utoronto.ca>";
flosse = "Markus Kohlhase <mail@markus-kohlhase.de>";
fluffynukeit = "Daniel Austin <dan@fluffynukeit.com>";
fmthoma = "Franz Thoma <f.m.thoma@googlemail.com>";
forkk = "Andrew Okin <forkk@forkk.net>";
fornever = "Friedrich von Never <friedrich@fornever.me>";
fpletz = "Franz Pletz <fpletz@fnordicwalking.de>";
@ -147,20 +167,24 @@
giogadi = "Luis G. Torres <lgtorres42@gmail.com>";
gleber = "Gleb Peregud <gleber.p@gmail.com>";
globin = "Robin Gloster <mail@glob.in>";
gpyh = "Yacine Hmito <yacine.hmito@gmail.com>";
gnidorah = "Alex Ivanov <yourbestfriend@opmbx.org>";
goibhniu = "Cillian de Róiste <cillian.deroiste@gmail.com>";
Gonzih = "Max Gonzih <gonzih@gmail.com>";
gpyh = "Yacine Hmito <yacine.hmito@gmail.com>";
grahamc = "Graham Christensen <graham@grahamc.com>";
gridaphobe = "Eric Seidel <eric@seidel.io>";
guibert = "David Guibert <david.guibert@gmail.com>";
hakuch = "Jesse Haber-Kucharsky <hakuch@gmail.com>";
havvy = "Ryan Scheel <ryan.havvy@gmail.com>";
hbunke = "Hendrik Bunke <bunke.hendrik@gmail.com>";
hce = "Hans-Christian Esperer <hc@hcesperer.org>";
henrytill = "Henry Till <henrytill@gmail.com>";
hiberno = "Christian Lask <hiberno@hiberno.net>";
hinton = "Tom Hinton <t@larkery.com>";
hrdinka = "Christoph Hrdinka <c.nix@hrdinka.at>";
iand675 = "Ian Duncan <ian@iankduncan.com>";
ianwookim = "Ian-Woo Kim <ianwookim@gmail.com>";
iElectric = "Domen Kozar <domen@dev.si>";
domenkozar = "Domen Kozar <domen@dev.si>";
igsha = "Igor Sharonov <igor.sharonov@gmail.com>";
ikervagyok = "Balázs Lengyel <ikervagyok@gmail.com>";
j-keck = "Jürgen Keck <jhyphenkeck@gmail.com>";
@ -177,8 +201,11 @@
joamaki = "Jussi Maki <joamaki@gmail.com>";
joelmo = "Joel Moberg <joel.moberg@gmail.com>";
joelteon = "Joel Taylor <me@joelt.io>";
joko = "Ioannis Koutras <ioannis.koutras@gmail.com>";
jonafato = "Jon Banafato <jon@jonafato.com>";
jpbernardy = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jraygauthier = "Raymond Gauthier <jraygauthier@gmail.com>";
juliendehos = "Julien Dehos <dehos@lisic.univ-littoral.fr>";
jwiegley = "John Wiegley <johnw@newartisans.com>";
jwilberding = "Jordan Wilberding <jwilberding@afiniate.com>";
jzellner = "Jeff Zellner <jeffz@eml.cc>";
@ -204,9 +231,11 @@
linquize = "Linquize <linquize@yahoo.com.hk>";
linus = "Linus Arver <linusarver@gmail.com>";
lnl7 = "Daiderd Jordan <daiderd@gmail.com>";
loskutov = "Ignat Loskutov <ignat.loskutov@gmail.com>";
lovek323 = "Jason O'Conal <jason@oconal.id.au>";
lowfatcomputing = "Andreas Wagner <andreas.wagner@lowfatcomputing.org>";
lsix = "Lancelot SIX <lsix@lancelotsix.com>";
lucas8 = "Luc Chabassier <luc.linux@mailoo.org>";
ludo = "Ludovic Courtès <ludo@gnu.org>";
luispedro = "Luis Pedro Coelho <luis@luispedro.org>";
lukasepple = "Lukas Epple <post@lukasepple.de>";
@ -221,28 +250,37 @@
marcweber = "Marc Weber <marco-oweber@gmx.de>";
markus1189 = "Markus Hauck <markus1189@gmail.com>";
markWot = "Markus Wotringer <markus@wotringer.de>";
martijnvermaat = "Martijn Vermaat <martijn@vermaat.name>";
martingms = "Martin Gammelsæter <martin@mg.am>";
matejc = "Matej Cotman <cotman.matej@gmail.com>";
mathnerd314 = "Mathnerd314 <mathnerd314.gph+hs@gmail.com>";
matthiasbeyer = "Matthias Beyer <mail@beyermatthias.de>";
mbauer = "Matthew Bauer <mjbauer95@gmail.com>";
maurer = "Matthew Maurer <matthew.r.maurer+nix@gmail.com>";
mbakke = "Marius Bakke <ymse@tuta.io>";
mbakke = "Marius Bakke <mbakke@fastmail.com>";
matthewbauer = "Matthew Bauer <mjbauer95@gmail.com>";
mbe = "Brandon Edens <brandonedens@gmail.com>";
mboes = "Mathieu Boespflug <mboes@tweag.net>";
mcmtroffaes = "Matthias C. M. Troffaes <matthias.troffaes@gmail.com>";
meditans = "Carlo Nucera <meditans@gmail.com>";
meisternu = "Matt Miemiec <meister@krutt.org>";
mic92 = "Jörg Thalheim <joerg@higgsboson.tk>";
michaelpj = "Michael Peyton Jones <michaelpj@gmail.com>";
michalrus = "Michal Rus <m@michalrus.com>";
michelk = "Michel Kuhlmann <michel@kuhlmanns.info>";
mimadrid = "Miguel Madrid <mimadrid@ucm.es>";
mingchuan = "Ming Chuan <ming@culpring.com>";
mirdhyn = "Merlin Gaillard <mirdhyn@gmail.com>";
mirrexagon = "Andrew Abbott <mirrexagon@mirrexagon.com>";
modulistic = "Pablo Costa <modulistic@gmail.com>";
mog = "Matthew O'Gorman <mog-lists@rldn.net>";
moosingin3space = "Nathan Moos <moosingin3space@gmail.com>";
moretea = "Maarten Hoogendoorn <maarten@moretea.nl>";
mornfall = "Petr Ročkai <me@mornfall.net>";
MostAwesomeDude = "Corbin Simpson <cds@corbinsimpson.com>";
mounium = "Katona László <muoniurn@gmail.com>";
MP2E = "Cray Elliott <MP2E@archlinux.us>";
mpscholten = "Marc Scholten <marc@mpscholten.de>";
mpsyco = "Francis St-Amour <fr.st-amour@gmail.com>";
msackman = "Matthew Sackman <matthew@wellquite.org>";
mschristiansen = "Mikkel Christiansen <mikkel@rheosystems.com>";
msteen = "Matthijs Steen <emailmatthijs@gmail.com>";
@ -250,19 +288,24 @@
mudri = "James Wood <lamudri@gmail.com>";
muflax = "Stefan Dorn <mail@muflax.com>";
myrl = "Myrl Hex <myrl.0xf@gmail.com>";
nand0p = "Fernando Jose Pando <nando@hex7.com>";
nathan-gs = "Nathan Bijnens <nathan@nathan.gs>";
Nate-Devv = "Nathan Moore <natedevv@gmail.com>";
nckx = "Tobias Geerinckx-Rice <tobias.geerinckx.rice@gmail.com>";
nequissimus = "Tim Steinbach <tim@nequissimus.com>";
nfjinjing = "Jinjing Wang <nfjinjing@gmail.com>";
nhooyr = "Anmol Sethi <anmol@aubble.com>";
nico202 = "Nicolò Balzarotti <anothersms@gmail.com>";
notthemessiah = "Brian Cohen <brian.cohen.88@gmail.com>";
NikolaMandic = "Ratko Mladic <nikola@mandic.email>";
np = "Nicolas Pouillard <np.nix@nicolaspouillard.fr>";
nslqqq = "Nikita Mikhailov <nslqqq@gmail.com>";
obadz = "obadz <nixos@obadz.com>";
obadz = "obadz <obadz-nixos@obadz.com>";
ocharles = "Oliver Charles <ollie@ocharles.org.uk>";
odi = "Oliver Dunkl <oliver.dunkl@gmail.com>";
offline = "Jaka Hudoklin <jakahudoklin@gmail.com>";
olcai = "Erik Timan <dev@timan.info>";
olejorgenb = "Ole Jørgen Brønner <olejorgenb@yahoo.no>";
orbitz = "Malcolm Matalka <mmatalka@gmail.com>";
osener = "Ozan Sener <ozan@ozansener.com>";
otwieracz = "Slawomir Gonet <slawek@otwiera.cz>";
@ -272,8 +315,10 @@
pakhfn = "Fedor Pakhomov <pakhfn@gmail.com>";
palo = "Ingolf Wanger <palipalo9@googlemail.com>";
pashev = "Igor Pashev <pashev.igor@gmail.com>";
pawelpacana = "Paweł Pacana <pawel.pacana@gmail.com>";
pesterhazy = "Paulus Esterhazy <pesterhazy@gmail.com>";
peterhoeg = "Peter Hoeg <peter@hoeg.com>";
peti = "Peter Simons <simons@cryp.to>";
philandstuff = "Philip Potter <philip.g.potter@gmail.com>";
phile314 = "Philipp Hausmann <nix@314.ch>";
Phlogistique = "Noé Rubinstein <noe.rubinstein@gmail.com>";
@ -289,17 +334,23 @@
pmiddend = "Philipp Middendorf <pmidden@secure.mailbox.org>";
prikhi = "Pavan Rikhi <pavan.rikhi@gmail.com>";
profpatsch = "Profpatsch <mail@profpatsch.de>";
proglodyte = "Proglodyte <proglodyte23@gmail.com>";
pshendry = "Paul Hendry <paul@pshendry.com>";
psibi = "Sibi <sibi@psibi.in>";
pSub = "Pascal Wittmann <mail@pascal-wittmann.de>";
puffnfresh = "Brian McKenna <brian@brianmckenna.org>";
pxc = "Patrick Callahan <patrick.callahan@latitudeengineering.com>";
qknight = "Joachim Schiele <js@lastlog.de>";
ragge = "Ragnar Dahlen <r.dahlen@gmail.com>";
ralith = "Benjamin Saunders <ben.e.saunders@gmail.com>";
ramkromberg = "Ram Kromberg <ramkromberg@mail.com>";
rardiol = "Ricardo Ardissone <ricardo.ardissone@gmail.com>";
rasendubi = "Alexey Shmalko <rasen.dubi@gmail.com>";
raskin = "Michael Raskin <7c6f434c@mail.ru>";
redbaron = "Maxim Ivanov <ivanov.maxim@gmail.com>";
redvers = "Redvers Davies <red@infect.me>";
refnil = "Martin Lavoie <broemartino@gmail.com>";
regnat = "Théophane Hufschmitt <regnat@regnat.ovh>";
relrod = "Ricky Elrod <ricky@elrod.me>";
renzo = "Renzo Carbonara <renzocarbonara@gmail.com>";
retrry = "Tadas Barzdžius <retrry@gmail.com>";
@ -310,6 +361,7 @@
robberer = "Longrin Wischnewski <robberer@freakmail.de>";
robbinch = "Robbin C. <robbinch33@gmail.com>";
robgssp = "Rob Glossop <robgssp@gmail.com>";
roblabla = "Robin Lambertz <robinlambertz+dev@gmail.com>";
roconnor = "Russell O'Connor <roconnor@theorem.ca>";
romildo = "José Romildo Malaquias <malaquias@gmail.com>";
rszibele = "Richard Szibele <richard_szibele@hotmail.com>";
@ -318,19 +370,23 @@
rvlander = "Gaëtan André <rvlander@gaetanandre.eu>";
ryanartecona = "Ryan Artecona <ryanartecona@gmail.com>";
ryantm = "Ryan Mulligan <ryan@ryantm.com>";
ryansydnor = "Ryan Sydnor <ryan.t.sydnor@gmail.com>";
rycee = "Robert Helgesson <robert@rycee.net>";
ryneeverett = "Ryne Everett <ryneeverett@gmail.com>";
s1lvester = "Markus Silvester <s1lvester@bockhacker.me>";
samuelrivas = "Samuel Rivas <samuelrivas@gmail.com>";
sander = "Sander van der Burg <s.vanderburg@tudelft.nl>";
schmitthenner = "Fabian Schmitthenner <development@schmitthenner.eu>";
schneefux = "schneefux <schneefux+nixos_pkg@schneefux.xyz>";
schristo = "Scott Christopher <schristopher@konputa.com>";
scolobb = "Sergiu Ivanov <sivanov@colimite.fr>";
sepi = "Raffael Mancini <raffael@mancini.lu>";
seppeljordan = "Sebastian Jordan <sebastian.jordan.mail@googlemail.com>";
sheenobu = "Sheena Artrip <sheena.artrip@gmail.com>";
sheganinans = "Aistis Raulinaitis <sheganinans@gmail.com>";
shell = "Shell Turner <cam.turn@gmail.com>";
shlevy = "Shea Levy <shea@shealevy.com>";
simons = "Peter Simons <simons@cryp.to>";
siddharthist = "Langston Barrett <langston.barrett@gmail.com>";
simonvandel = "Simon Vandel Sillesen <simon.vandel@gmail.com>";
sjagoe = "Simon Jagoe <simon@simonjagoe.com>";
sjmackenzie = "Stewart Mackenzie <setori88@gmail.com>";
@ -338,14 +394,17 @@
skeidel = "Sven Keidel <svenkeidel@gmail.com>";
skrzyp = "Jakub Skrzypnik <jot.skrzyp@gmail.com>";
sleexyz = "Sean Lee <freshdried@gmail.com>";
smironov = "Sergey Mironov <ierton@gmail.com>";
solson = "Scott Olson <scott@solson.me>";
smironov = "Sergey Mironov <grrwlf@gmail.com>";
spacefrogg = "Michael Raitza <spacefrogg-nixos@meterriblecrew.net>";
spencerjanssen = "Spencer Janssen <spencerjanssen@gmail.com>";
spinus = "Tomasz Czyż <tomasz.czyz@gmail.com>";
sprock = "Roger Mason <rmason@mun.ca>";
spwhitt = "Spencer Whitt <sw@swhitt.me>";
SShrike = "Severen Redwood <severen@shrike.me>";
stephenmw = "Stephen Weinberg <stephen@q5comm.com>";
steveej = "Stefan Junker <mail@stefanjunker.de>";
swarren83 = "Shawn Warren <shawn.w.warren@gmail.com>";
swistak35 = "Rafał Łasocha <me@swistak35.com>";
szczyp = "Szczyp <qb@szczyp.com>";
sztupi = "Attila Sztupak <attila.sztupak@gmail.com>";
@ -353,6 +412,7 @@
tailhook = "Paul Colomiets <paul@colomiets.name>";
taktoa = "Remy Goldschmidt <taktoa@gmail.com>";
tavyc = "Octavian Cerna <octavian.cerna@gmail.com>";
teh = "Tom Hunger <tehunger@gmail.com>";
telotortium = "Robert Irelan <rirelan@gmail.com>";
thall = "Niclas Thall <niclas.thall@gmail.com>";
thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
@ -370,6 +430,7 @@
ttuegel = "Thomas Tuegel <ttuegel@gmail.com>";
tv = "Tomislav Viljetić <tv@shackspace.de>";
tvestelind = "Tomas Vestelind <tomas.vestelind@fripost.org>";
tvorog = "Marsel Zaripov <marszaripov@gmail.com>";
twey = "James Twey Kay <twey@twey.co.uk>";
uralbash = "Svintsov Dmitry <root@uralbash.ru>";
urkud = "Yury G. Kudryashov <urkud+nix@ya.ru>";
@ -395,10 +456,13 @@
wscott = "Wayne Scott <wsc9tt@gmail.com>";
wyvie = "Elijah Rum <elijahrum@gmail.com>";
yarr = "Dmitry V. <savraz@gmail.com>";
yurrriq = "Eric Bailey <eric@ericb.me>";
z77z = "Marco Maggesi <maggesi@math.unifi.it>";
zagy = "Christian Zagrodnick <cz@flyingcircus.io>";
zef = "Zef Hemel <zef@zef.me>";
zimbatm = "zimbatm <zimbatm@zimbatm.com>";
zohl = "Al Zohali <zohl@fmap.me>";
zoomulator = "Kim Simmons <zoomulator@gmail.com>";
amiloradovsky = "Andrew Miloradovsky <miloradovsky@gmail.com>";
yochai = "Yochai <yochai@titat.info>";
}
@ -1,4 +1,5 @@
with import ./lists.nix;
with import ./strings.nix;
with import ./trivial.nix;
with import ./attrsets.nix;
with import ./options.nix;
@ -105,8 +106,12 @@ rec {
/* Massage a module into canonical form, that is, a set consisting
of options, config and imports attributes. */
unifyModuleSyntax = file: key: m:
let metaSet = if m ? meta
then { meta = m.meta; }
else {};
in
if m ? config || m ? options then
let badAttrs = removeAttrs m ["imports" "options" "config" "key" "_file"]; in
let badAttrs = removeAttrs m ["imports" "options" "config" "key" "_file" "meta"]; in
if badAttrs != {} then
throw "Module `${key}' has an unsupported attribute `${head (attrNames badAttrs)}'. This is caused by assignments to the top-level attributes `config' or `options'."
else
@ -114,14 +119,14 @@ rec {
key = toString m.key or key;
imports = m.imports or [];
options = m.options or {};
config = m.config or {};
config = mkMerge [ (m.config or {}) metaSet ];
}
else
{ file = m._file or file;
key = toString m.key or key;
imports = m.require or [] ++ m.imports or [];
options = {};
config = removeAttrs m ["key" "_file" "require" "imports"];
config = mkMerge [ (removeAttrs m ["key" "_file" "require" "imports"]) metaSet ];
};
applyIfFunction = key: f: args@{ config, options, lib, ... }: if isFunction f then
@ -503,19 +508,25 @@ rec {
/* Return a module that causes a warning to be shown if the
specified option is defined. For example,
mkRemovedOptionModule [ "boot" "loader" "grub" "bootDevice" ]
mkRemovedOptionModule [ "boot" "loader" "grub" "bootDevice" ] "<replacement instructions>"
causes a warning if the user defines boot.loader.grub.bootDevice.
replacementInstructions is a string that provides instructions on
how to achieve the same functionality without the removed option,
or alternatively a reason why the functionality is not needed.
replacementInstructions SHOULD be provided!
*/
mkRemovedOptionModule = optionName:
mkRemovedOptionModule = optionName: replacementInstructions:
{ options, ... }:
{ options = setAttrByPath optionName (mkOption {
visible = false;
});
config.warnings =
let opt = getAttrFromPath optionName options; in
optional opt.isDefined
"The option definition `${showOption optionName}' in ${showFiles opt.files} no longer has any effect; please remove it.";
optional opt.isDefined ''
The option definition `${showOption optionName}' in ${showFiles opt.files} no longer has any effect; please remove it.
${replacementInstructions}'';
};
/* Return a module that causes a warning to be shown if the
@ -535,6 +546,84 @@ rec {
use = builtins.trace "Obsolete option `${showOption from}' is used. It was renamed to `${showOption to}'.";
};
/* Return a module that causes a warning to be shown if any of the "from"
options are defined; the defined values can be used in the "mergeFn" to set
the "to" value.
This function can be used to merge multiple options into one that has a
different type.
"mergeFn" takes the module "config" as a parameter and must return a value
of "to" option type.
mkMergedOptionModule
[ [ "a" "b" "c" ]
[ "d" "e" "f" ] ]
[ "x" "y" "z" ]
(config:
let value = p: getAttrFromPath p config;
in
if (value [ "a" "b" "c" ]) == true then "foo"
else if (value [ "d" "e" "f" ]) == true then "bar"
else "baz")
- options.a.b.c is a removed boolean option
- options.d.e.f is a removed boolean option
- options.x.y.z is a new str option that combines a.b.c and d.e.f
functionality
This shows a warning if either a.b.c or d.e.f is set, and sets the value of
x.y.z to the result of the merge function
*/
mkMergedOptionModule = from: to: mergeFn:
{ config, options, ... }:
{
options = foldl recursiveUpdate {} (map (path: setAttrByPath path (mkOption {
visible = false;
# To use the value in mergeFn without triggering errors
default = "_mkMergedOptionModule";
})) from);
config = {
warnings = filter (x: x != "") (map (f:
let val = getAttrFromPath f config;
opt = getAttrFromPath f options;
in
optionalString
(val != "_mkMergedOptionModule")
"The option `${showOption f}' defined in ${showFiles opt.files} has been changed to `${showOption to}' that has a different type. Please read `${showOption to}' documentation and update your configuration accordingly."
) from);
} // setAttrByPath to (mkMerge
(optional
(any (f: (getAttrFromPath f config) != "_mkMergedOptionModule") from)
(mergeFn config)));
};
/* Single "from" version of mkMergedOptionModule.
Return a module that causes a warning to be shown if the "from" option is
defined; the defined value can be used in the "changeFn" to set the "to"
value.
This function can be used to change an option into another that has a
different type.
"mergeFn" takes the module "config" as a parameter and must return a value of
"to" option type.
mkChangedOptionModule [ "a" "b" "c" ] [ "x" "y" "z" ]
(config:
let value = getAttrFromPath [ "a" "b" "c" ] config;
in
if value > 100 then "high"
else "normal")
- options.a.b.c is a removed int option
- options.x.y.z is a new str option that supersedes a.b.c
This shows a warning if a.b.c is set, and sets the value of x.y.z to the
result of the change function
*/
mkChangedOptionModule = from: to: changeFn:
mkMergedOptionModule [ from ] to changeFn;
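/* Illustrative sketch: like the other compatibility helpers above, the module
   returned by mkChangedOptionModule is meant to be listed in another module's
   `imports`, e.g. (reusing the hypothetical option paths from the comment above)

     imports = [
       (mkChangedOptionModule [ "a" "b" "c" ] [ "x" "y" "z" ]
         (config:
           let value = getAttrFromPath [ "a" "b" "c" ] config;
           in if value > 100 then "high" else "normal"))
     ];
*/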
/* Like mkRenamedOptionModule, but doesn't show a warning. */
mkAliasOptionModule = from: to: doRename {
inherit from to;
@ -554,12 +643,10 @@ rec {
apply = x: use (toOf config);
});
config = {
/*
warnings =
let opt = getAttrFromPath from options; in
optional (warn && opt.isDefined)
"The option `${showOption from}' defined in ${showFiles opt.files} has been renamed to `${showOption to}'.";
*/
} // setAttrByPath to (mkAliasDefinitions (getAttrFromPath from options));
};
@ -4,6 +4,11 @@ let lib = import ./default.nix; in
rec {
# Returns the type of a path: regular (for file), symlink, or directory
pathType = p: with builtins; getAttr (baseNameOf p) (readDir (dirOf p));
# Returns true if the path exists and is a directory, false otherwise
pathIsDirectory = p: if builtins.pathExists p then (pathType p) == "directory" else false;
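# Illustrative examples (assuming evaluation from within this directory):
#   pathIsDirectory ./.            => true
#   pathIsDirectory ./default.nix  => false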
# Bring in a path as a source, filtering out all Subversion and CVS
# directories, as well as backup files (*~).
@ -15,7 +20,9 @@ rec {
lib.hasSuffix "~" baseName ||
# Filter out generated files.
lib.hasSuffix ".o" baseName ||
lib.hasSuffix ".so" baseName
lib.hasSuffix ".so" baseName ||
# Filter out nix-build result symlinks
(type == "symlink" && lib.hasPrefix "result" baseName)
);
in src: builtins.filterSource filter src;
@ -29,4 +36,32 @@ rec {
in type == "directory" || lib.any (ext: lib.hasSuffix ext base) exts;
in builtins.filterSource filter path;
# Get the commit id of a git repo
# Example: commitIdFromGitRepo <nixpkgs/.git>
commitIdFromGitRepo =
let readCommitFromFile = path: file:
with builtins;
let fileName = toString path + "/" + file;
packedRefsName = toString path + "/packed-refs";
in if lib.pathExists fileName
then
let fileContent = lib.fileContents fileName;
# Sometimes git stores the commitId directly in the file but
# sometimes it stores something like: «ref: refs/heads/branch-name»
matchRef = match "^ref: (.*)$" fileContent;
in if isNull matchRef
then fileContent
else readCommitFromFile path (lib.head matchRef)
# Sometimes, the file isn't there at all and has been packed away in the
# packed-refs file, so we have to grep through it:
else if lib.pathExists packedRefsName
then
let fileContent = readFile packedRefsName;
matchRef = match (".*\n([^\n ]*) " + file + "\n.*") fileContent;
in if isNull matchRef
then throw ("Could not find " + file + " in " + packedRefsName)
else lib.head matchRef
else throw ("Not a .git directory: " + path);
in lib.flip readCommitFromFile "HEAD";
}
@ -16,11 +16,7 @@ rec {
concatStrings ["foo" "bar"]
=> "foobar"
*/
concatStrings =
if builtins ? concatStringsSep then
builtins.concatStringsSep ""
else
lib.foldl' (x: y: x + y) "";
concatStrings = builtins.concatStringsSep "";
/* Map a function over a list and concatenate the resulting strings.
@ -88,15 +84,14 @@ rec {
makeSearchPath = subDir: packages:
concatStringsSep ":" (map (path: path + "/" + subDir) packages);
/* Construct a Unix-style search path, given trying outputs in order.
/* Construct a Unix-style search path, using the given package output.
If no output is found, fall back to `.out` and then to the default.
Example:
makeSearchPathOutputs "bin" ["bin"] [ pkgs.openssl pkgs.zlib ]
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-bin/bin:/nix/store/wwh7mhwh269sfjkm6k5665b5kgp7jrk2-zlib-1.2.8/bin"
makeSearchPathOutput "dev" "bin" [ pkgs.openssl pkgs.zlib ]
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-dev/bin:/nix/store/wwh7mhwh269sfjkm6k5665b5kgp7jrk2-zlib-1.2.8/bin"
*/
makeSearchPathOutputs = subDir: outputs: pkgs:
makeSearchPath subDir (map (pkg: if pkg.outputUnspecified or false then lib.tryAttrs (outputs ++ ["out"]) pkg else pkg) pkgs);
makeSearchPathOutput = output: subDir: pkgs: makeSearchPath subDir (map (lib.getOutput output) pkgs);
/* Construct a library search path (such as RPATH) containing the
libraries for a set of packages
@ -108,9 +103,7 @@ rec {
makeLibraryPath [ pkgs.openssl pkgs.zlib ]
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r/lib:/nix/store/wwh7mhwh269sfjkm6k5665b5kgp7jrk2-zlib-1.2.8/lib"
*/
makeLibraryPath = pkgs: makeSearchPath "lib"
# try to guess the right output of each pkg
(map (pkg: if pkg.outputUnspecified or false then pkg.lib or (pkg.out or pkg) else pkg) pkgs);
makeLibraryPath = makeSearchPathOutput "lib" "lib";
/* Construct a binary search path (such as $PATH) containing the
binaries for a set of packages.
@ -119,8 +112,7 @@ rec {
makeBinPath ["/root" "/usr" "/usr/local"]
=> "/root/bin:/usr/bin:/usr/local/bin"
*/
makeBinPath = pkgs: makeSearchPath "bin"
(map (pkg: if pkg.outputUnspecified or false then pkg.bin or (pkg.out or pkg) else pkg) pkgs);
makeBinPath = makeSearchPathOutput "bin" "bin";
/* Construct a perl search path (such as $PERL5LIB)
@ -132,8 +124,7 @@ rec {
makePerlPath [ pkgs.perlPackages.NetSMTP ]
=> "/nix/store/n0m1fk9c960d8wlrs62sncnadygqqc6y-perl-Net-SMTP-1.25/lib/perl5/site_perl"
*/
makePerlPath = pkgs: makeSearchPath "lib/perl5/site_perl"
(map (pkg: if pkg.outputUnspecified or false then pkg.lib or (pkg.out or pkg) else pkg) pkgs);
makePerlPath = makeSearchPathOutput "lib" "lib/perl5/site_perl";
/* Depending on the boolean `cond', return either the given string
or the empty string. Useful to concatenate against a bigger string.
@ -165,12 +156,12 @@ rec {
hasSuffix "foo" "barfoo"
=> true
*/
hasSuffix = suff: str:
hasSuffix = suffix: content:
let
lenStr = stringLength str;
lenSuff = stringLength suff;
in lenStr >= lenSuff &&
substring (lenStr - lenSuff) lenStr str == suff;
lenContent = stringLength content;
lenSuffix = stringLength suffix;
in lenContent >= lenSuffix &&
substring (lenContent - lenSuffix) lenContent content == suffix;
/* Convert a string to a list of characters (i.e. singleton strings).
This allows you to, e.g., map a function over each character. However,
@ -212,13 +203,21 @@ rec {
*/
escape = list: replaceChars list (map (c: "\\${c}") list);
/* Escape all characters that have special meaning in the Bourne shell.
/* Quote string to be used safely within the Bourne shell.
Example:
escapeShellArg "so([<>])me"
=> "so\\(\\[\\<\\>\\]\\)me"
escapeShellArg "esc'ape\nme"
=> "'esc'\\''ape\nme'"
*/
escapeShellArg = lib.escape (stringToCharacters "\\ ';$`()|<>\t*[]");
escapeShellArg = arg: "'${replaceStrings ["'"] ["'\\''"] (toString arg)}'";
/* Quote all arguments to be safely passed to the Bourne shell.
Example:
escapeShellArgs ["one" "two three" "four'five"]
=> "'one' 'two three' 'four'\\''five'"
*/
escapeShellArgs = concatMapStringsSep " " escapeShellArg;
/* Obsolete - use replaceStrings instead. */
replaceChars = builtins.replaceStrings or (
@ -249,7 +248,7 @@ rec {
/* Converts an ASCII string to upper-case.
Example:
toLower "home"
toUpper "home"
=> "HOME"
*/
toUpper = replaceChars lowerChars upperChars;
@ -373,7 +372,12 @@ rec {
getVersion pkgs.youtube-dl
=> "2016.01.01"
*/
getVersion = x: (builtins.parseDrvName (x.name or x)).version;
getVersion = x:
let
parse = drv: (builtins.parseDrvName drv).version;
in if isString x
then parse x
else x.version or (parse x.name);
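/* Illustrative sketch of the new behaviour: plain name strings are parsed and
   an explicit `version` attribute takes precedence, e.g.

     getVersion "youtube-dl-2016.01.01"                    => "2016.01.01"
     getVersion { name = "hello-2.10"; version = "2.10"; } => "2.10"
*/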
/* Extract name with version from URL. Ask for separator which is
supposed to start extension.
@ -480,4 +484,14 @@ rec {
absolutePaths = builtins.map (path: builtins.toPath (root + "/" + path)) relativePaths;
in
absolutePaths;
/* Read the contents of a file removing the trailing \n
Example:
$ echo "1.0" > ./version
fileContents ./version
=> "1.0"
*/
fileContents = file: removeSuffix "\n" (builtins.readFile file);
}
@ -1,6 +1,6 @@
{ nixpkgs }:
with import ./../.. { };
with import ../.. { };
with lib;
stdenv.mkDerivation {
@ -62,14 +62,16 @@ rec {
isInt add sub lessThan
seq deepSeq genericClosure;
inherit (import ./strings.nix) fileContents;
# Return the Nixpkgs version number.
nixpkgsVersion =
let suffixFile = ../.version-suffix; in
readFile ../.version
+ (if pathExists suffixFile then readFile suffixFile else "pre-git");
fileContents ../.version
+ (if pathExists suffixFile then fileContents suffixFile else "pre-git");
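# For illustration: in a plain checkout, where no .version-suffix file has been
# generated, this evaluates to "17.03pre-git" given the current .version file.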
# Whether we're being called by nix-shell.
inNixShell = builtins.getEnv "IN_NIX_SHELL" == "1";
inNixShell = builtins.getEnv "IN_NIX_SHELL" != "";
# Return minimum/maximum of two numbers.
min = x: y: if x < y then x else y;
@ -96,4 +98,19 @@ rec {
*/
importJSON = path:
builtins.fromJSON (builtins.readFile path);
/* See https://github.com/NixOS/nix/issues/749. Eventually we'd like these
to expand to Nix builtins that carry metadata so that Nix can filter out
the INFO messages without parsing the message string.
Usage:
{
foo = lib.warn "foo is deprecated" oldFoo;
}
TODO: figure out a clever way to integrate location information from
something like __unsafeGetAttrPos.
*/
warn = msg: builtins.trace "WARNING: ${msg}";
info = msg: builtins.trace "INFO: ${msg}";
}
@ -100,6 +100,10 @@ rec {
in if isDerivation res then res else toDerivation res;
};
shellPackage = package // {
check = x: (package.check x) && (hasAttr "shellPath" x);
};
path = mkOptionType {
name = "path";
# Hacky: there is no isPath primop.
@ -114,13 +118,17 @@ rec {
name = "list of ${elemType.name}s";
check = isList;
merge = loc: defs:
map (x: x.value) (filter (x: x ? value) (concatLists (imap (n: def: imap (m: def':
(mergeDefinitions
(loc ++ ["[definition ${toString n}-entry ${toString m}]"])
elemType
[{ inherit (def) file; value = def'; }]
).optionalValue
) def.value) defs)));
map (x: x.value) (filter (x: x ? value) (concatLists (imap (n: def:
if isList def.value then
imap (m: def':
(mergeDefinitions
(loc ++ ["[definition ${toString n}-entry ${toString m}]"])
elemType
[{ inherit (def) file; value = def'; }]
).optionalValue
) def.value
else
throw "The option value `${showOption loc}' in `${def.file}' is not a list.") defs)));
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["*"]);
getSubModules = elemType.getSubModules;
substSubModules = m: listOf (elemType.substSubModules m);
@ -253,7 +261,7 @@ rec {
# declarations from the options attribute of containing option
# declaration.
optionSet = mkOptionType {
name = /* builtins.trace "types.optionSet is deprecated; use types.submodule instead" */ "option set";
name = builtins.trace "types.optionSet is deprecated; use types.submodule instead" "option set";
};
# Augment the given type with an additional type check function.

# content-addressed cache used by fetchurl as a fallback for when
# upstream tarballs disappear or change. Usage:
#
# 1) To upload a single file:
# 1) To upload one or more files:
#
# $ copy-tarballs.pl --file /path/to/tarball.tar.gz
#
@ -22,12 +22,38 @@ use JSON;
use Net::Amazon::S3;
use Nix::Store;
isValidPath("/nix/store/foo"); # FIXME: forces Nix::Store initialisation
isValidPath("/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-foo"); # FIXME: forces Nix::Store initialisation
sub usage {
die "Syntax: $0 [--dry-run] [--exclude REGEXP] [--expr EXPR | --file FILES...]\n";
}
my $dryRun = 0;
my $expr;
my @fileNames;
my $exclude;
while (@ARGV) {
my $flag = shift @ARGV;
if ($flag eq "--expr") {
$expr = shift @ARGV or die "--expr requires an argument";
} elsif ($flag eq "--file") {
@fileNames = @ARGV;
last;
} elsif ($flag eq "--dry-run") {
$dryRun = 1;
} elsif ($flag eq "--exclude") {
$exclude = shift @ARGV or die "--exclude requires an argument";
} else {
usage();
}
}
# S3 setup.
my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'} or die;
my $aws_secret_access_key = $ENV{'AWS_SECRET_ACCESS_KEY'} or die;
my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'} or die "AWS_ACCESS_KEY_ID not set\n";
my $aws_secret_access_key = $ENV{'AWS_SECRET_ACCESS_KEY'} or die "AWS_SECRET_ACCESS_KEY not set\n";
my $s3 = Net::Amazon::S3->new(
{ aws_access_key_id => $aws_access_key_id,
@ -37,12 +63,15 @@ my $s3 = Net::Amazon::S3->new(
my $bucket = $s3->bucket("nixpkgs-tarballs") or die;
my $cacheFile = "/tmp/copy-tarballs-cache";
my $doWrite = 0;
my $cacheFile = ($ENV{"HOME"} or die "\$HOME is not set") . "/.cache/nix/copy-tarballs";
my %cache;
$cache{$_} = 1 foreach read_file($cacheFile, err_mode => 'quiet', chomp => 1);
$doWrite = 1;
END() {
write_file($cacheFile, map { "$_\n" } keys %cache);
File::Path::mkpath(dirname($cacheFile), 0, 0755);
write_file($cacheFile, map { "$_\n" } keys %cache) if $doWrite;
}
sub alreadyMirrored {
@ -87,11 +116,9 @@ sub uploadFile {
$cache{$mainKey} = 1;
}
my $op = shift @ARGV;
if ($op eq "--file") {
if (scalar @fileNames) {
my $res = 0;
foreach my $fn (@ARGV) {
foreach my $fn (@fileNames) {
eval {
if (alreadyMirrored("sha512", hashFile("sha512", 0, $fn))) {
print STDERR "$fn is already mirrored\n";
@ -100,17 +127,16 @@ if ($op eq "--file") {
}
};
if ($@) {
warn "$@\n";
warn "$@";
$res = 1;
}
}
exit $res;
}
elsif ($op eq "--expr") {
elsif (defined $expr) {
# Evaluate find-tarballs.nix.
my $expr = $ARGV[0] // die "$0: --expr requires a Nix expression\n";
my $pid = open(JSON, "-|", "nix-instantiate", "--eval", "--json", "--strict",
"<nixpkgs/maintainers/scripts/find-tarballs.nix>",
"--arg", "expr", $expr);
@ -126,7 +152,7 @@ elsif ($op eq "--expr") {
# Check every fetchurl call discovered by find-tarballs.nix.
my $mirrored = 0;
my $have = 0;
foreach my $fetch (@{$fetches}) {
foreach my $fetch (sort { $a->{url} cmp $b->{url} } @{$fetches}) {
my $url = $fetch->{url};
my $algo = $fetch->{type};
my $hash = $fetch->{hash};
@ -142,6 +168,8 @@ elsif ($op eq "--expr") {
next;
}
next if defined $exclude && $url =~ /$exclude/;
if (alreadyMirrored($algo, $hash)) {
$have++;
next;
@ -151,7 +179,10 @@ elsif ($op eq "--expr") {
print STDERR "mirroring $url ($storePath)...\n";
next if $ENV{DRY_RUN};
if ($dryRun) {
$mirrored++;
next;
}
# Substitute the output.
if (!isValidPath($storePath)) {
@ -184,5 +215,5 @@ elsif ($op eq "--expr") {
}
else {
die "Syntax: $0 --file FILENAMES... | --expr EXPR\n";
usage();
}
@ -0,0 +1,60 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p coreutils findutils gnused nix wget
set -efuo pipefail
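# Overview: given either a directory containing a fetch.sh, or a fetch script
# itself, as $1, mirror the upstream download tree with wget, hash every
# tarball with nix-hash, and regenerate the corresponding srcs.nix with
# fetchurl entries.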
SRCS=
if [ -d "$1" ]; then
SRCS="$(pwd)/$1/srcs.nix"
. "$1/fetch.sh"
else
SRCS="$(pwd)/$(dirname $1)/srcs.nix"
. "$1"
fi
tmp=$(mktemp -d)
pushd $tmp >/dev/null
wget -nH -r -c --no-parent "${WGET_ARGS[@]}" >/dev/null
csv=$(mktemp)
find . -type f | while read src; do
# Sanitize file name
filename=$(basename "$src" | tr '@' '_')
nameVersion="${filename%.tar.*}"
name=$(echo "$nameVersion" | sed -e 's,-[[:digit:]].*,,' | sed -e 's,-opensource-src$,,')
version=$(echo "$nameVersion" | sed -e 's,^\([[:alpha:]][[:alnum:]]*-\)\+,,')
echo "$name,$version,$src,$filename" >>$csv
done
cat >"$SRCS" <<EOF
# DO NOT EDIT! This file is generated automatically by fetch-kde-qt.sh
{ fetchurl, mirror }:
{
EOF
gawk -F , "{ print \$1 }" $csv | sort | uniq | while read name; do
versions=$(gawk -F , "/^$name,/ { print \$2 }" $csv)
latestVersion=$(echo "$versions" | sort -rV | head -n 1)
src=$(gawk -F , "/^$name,$latestVersion,/ { print \$3 }" $csv)
filename=$(gawk -F , "/^$name,$latestVersion,/ { print \$4 }" $csv)
url="${src:2}"
sha256=$(nix-hash --type sha256 --base32 --flat "$src")
cat >>"$SRCS" <<EOF
$name = {
version = "$latestVersion";
src = fetchurl {
url = "\${mirror}/$url";
sha256 = "$sha256";
name = "$filename";
};
};
EOF
done
echo "}" >>"$SRCS"
popd >/dev/null
rm -fr $tmp >/dev/null
rm -f $csv >/dev/null
@ -1,7 +1,7 @@
{ stdenv, makeWrapper, perl, perlPackages }:
stdenv.mkDerivation {
name = "nix-generate-from-cpan-2";
name = "nix-generate-from-cpan-3";
buildInputs = with perlPackages; [
makeWrapper perl CPANMeta GetoptLongDescriptive CPANPLUS Readonly Log4Perl
@ -20,5 +20,6 @@ stdenv.mkDerivation {
meta = {
maintainers = with stdenv.lib.maintainers; [ eelco rycee ];
description = "Utility to generate a Nix expression for a Perl package from CPAN";
platforms = stdenv.lib.platforms.unix;
};
}
@ -278,13 +278,13 @@ sub get_deps {
foreach my $n ( $deps->required_modules ) {
next if $n eq "perl";
# Hacky way to figure out if this module is part of Perl.
if ( $n !~ /^JSON/ && $n !~ /^YAML/ && $n !~ /^Module::Pluggable/ && $n !~ /^if$/ ) {
eval "use $n;";
if ( !$@ ) {
DEBUG("skipping Perl-builtin module $n");
next;
}
# Figure out whether the module is a core module by attempting
# to `use` the module in a pure Perl interpreter and checking
# whether it succeeded. Note, $^X is a magic variable holding
# the path to the running Perl interpreter.
if ( system("env -i $^X -M$n -e1 >/dev/null 2>&1") == 0 ) {
DEBUG("skipping Perl-builtin module $n");
next;
}
my $pkg = module_to_pkg( $cb, $n );
@ -395,15 +395,20 @@ my $meta = read_meta($pkg_path);
DEBUG( "metadata: ", encode_json( $meta->as_struct ) ) if defined $meta;
my @runtime_deps = sort( uniq( get_deps( $cb, $meta, "runtime" ) ) );
INFO("runtime deps: @runtime_deps");
my @build_deps = sort( uniq(
get_deps( $cb, $meta, "configure" ),
get_deps( $cb, $meta, "build" ),
get_deps( $cb, $meta, "test" )
) );
INFO("build deps: @build_deps");
my @runtime_deps = sort( uniq( get_deps( $cb, $meta, "runtime" ) ) );
INFO("runtime deps: @runtime_deps");
# Filter out runtime dependencies since those are already handled.
my %in_runtime_deps = map { $_ => 1 } @runtime_deps;
@build_deps = grep { not $in_runtime_deps{$_} } @build_deps;
INFO("build deps: @build_deps");
my $homepage = $meta ? $meta->resources->{homepage} : undef;
INFO("homepage: $homepage") if defined $homepage;

meta = {
maintainers = [ stdenv.lib.maintainers.eelco ];
description = "A utility for Nixpkgs contributors to check Nixpkgs for common errors";
platforms = stdenv.lib.platforms.unix;
};
}
@ -1,46 +1,76 @@
#! /usr/bin/env bash
set -e
export NIX_CURL_FLAGS=-sS
while test -n "$1"; do
if [[ $1 == nix ]]; then
echo "=== Installing Nix..."
# Install Nix
bash <(curl -sS https://nixos.org/nix/install)
source $HOME/.nix-profile/etc/profile.d/nix.sh
# tell Travis to use folding
echo -en "travis_fold:start:$1\r"
# Make sure we can use hydra's binary cache
sudo mkdir /etc/nix
sudo sh -c 'echo "build-max-jobs = 4" > /etc/nix/nix.conf'
case $1 in
# Verify evaluation
echo "=== Verifying that nixpkgs evaluates..."
nix-env -f. -qa --json >/dev/null
elif [[ $1 == nox ]]; then
echo "=== Installing nox..."
git clone -q https://github.com/madjar/nox
pip --quiet install -e nox
elif [[ $1 == build ]]; then
source $HOME/.nix-profile/etc/profile.d/nix.sh
nixpkgs-verify)
echo "=== Verifying that nixpkgs evaluates..."
echo "=== Checking tarball creation"
nix-build pkgs/top-level/release.nix -A tarball
nix-env --file $TRAVIS_BUILD_DIR --query --available --json > /dev/null
;;
if [[ $TRAVIS_PULL_REQUEST == false ]]; then
echo "=== Not a pull request"
else
echo "=== Checking PR"
nixos-options)
echo "=== Checking NixOS options"
if ! nox-review pr ${TRAVIS_PULL_REQUEST}; then
if sudo dmesg | egrep 'Out of memory|Killed process' > /tmp/oom-log; then
echo "=== The build failed due to running out of memory:"
cat /tmp/oom-log
echo "=== Please disregard the result of this Travis build."
nix-build $TRAVIS_BUILD_DIR/nixos/release.nix --attr options --show-trace
;;
nixos-manual)
echo "=== Checking NixOS manuals"
nix-build $TRAVIS_BUILD_DIR/nixos/release.nix --attr manual --show-trace
;;
nixpkgs-manual)
echo "=== Checking nixpkgs manuals"
nix-build $TRAVIS_BUILD_DIR/pkgs/top-level/release.nix --attr manual --show-trace
;;
nixpkgs-tarball)
echo "=== Checking nixpkgs tarball creation"
nix-build $TRAVIS_BUILD_DIR/pkgs/top-level/release.nix --attr tarball --show-trace
;;
nixpkgs-lint)
echo "=== Checking nixpkgs lint"
nix-shell --packages nixpkgs-lint --run "nixpkgs-lint -f $TRAVIS_BUILD_DIR"
;;
nox)
echo "=== Fetching Nox from binary cache"
# build nox silently so it's not in the log
nix-build "<nixpkgs>" -A nox -A stdenv
;;
pr)
if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then
echo "=== No pull request found"
else
echo "=== Building pull request #$TRAVIS_PULL_REQUEST"
token=""
if [ -n "$GITHUB_TOKEN" ]; then
token="--token $GITHUB_TOKEN"
fi
nix-shell --packages nox --run "nox-review pr --slug $TRAVIS_REPO_SLUG $token $TRAVIS_PULL_REQUEST"
fi
exit 1
fi
fi
else
echo "$0: Unknown option $1" >&2
false
fi
;;
*)
echo "Skipping unknown option $1"
;;
esac
echo -en "travis_fold:end:$1\r"
shift
done

# Add github usernames
if [ -n "$NIXPKGS_GITHUB_NAME_CACHE" ]; then
github_adder_script="$(echo "$github_name_list" |
github_adder_script="$(mktemp)"
echo "$github_name_list" |
grep -E "$(echo "$name_list_canonical" | cut -f 2 |
tr '\n' '|' )" |
sort | uniq |
sed -re 's/(.*)\t(.*)/s| \1$| \1\t\2|g;/' |
denormalize_name
)"
denormalize_name > "$github_adder_script"
else
github_adder_script=''
github_adder_script='/dev/null'
fi
echo "$name_list" | denormalize_name
@ -118,5 +118,5 @@ echo
echo "$git_data" | cut -f 1 |
sed -e "$cleaner_script" |
sort | uniq -c | sort -k1n | sed -re "$github_adder_script" |
sort | uniq -c | sort -k1n | sed -rf "$github_adder_script" |
sed -re 's/^ *([0-9]+) /\1\t/'

background:
<screen>
$ systemctl start nix-gc.service
# systemctl start nix-gc.service
</screen>
You can tell NixOS in <filename>configuration.nix</filename> to run
@ -59,4 +59,4 @@ $ nix-store --optimise
Since this command needs to read the entire Nix store, it can take
quite a while to finish.</para>
</chapter>
</chapter>
@ -13,7 +13,7 @@ create</literal>, it gets it own private IPv4 address in the range
address as follows:
<screen>
$ nixos-container show-ip foo
# nixos-container show-ip foo
10.233.4.2
$ ping -c1 10.233.4.2
@ -47,4 +47,4 @@ where <literal>eth0</literal> should be replaced with the desired
external interface. Note that <literal>ve-+</literal> is a wildcard
that matches all container interfaces.</para>
</section>
</section>
@ -7,11 +7,15 @@
<title>Imperative Container Management</title>
<para>We'll cover imperative container management using
<command>nixos-container</command> first. You create a container with
<command>nixos-container</command> first.
Be aware that container management is currently only possible
as <literal>root</literal>.</para>
<para>You create a container with
identifier <literal>foo</literal> as follows:
<screen>
$ nixos-container create foo
# nixos-container create foo
</screen>
This creates the container's root directory in
@ -25,7 +29,7 @@ line. For instance, to create a container that has
<literal>root</literal>:
<screen>
$ nixos-container create foo --config 'services.openssh.enable = true; \
# nixos-container create foo --config 'services.openssh.enable = true; \
users.extraUsers.root.openssh.authorizedKeys.keys = ["ssh-dss AAAAB3N…"];'
</screen>
@ -35,7 +39,7 @@ $ nixos-container create foo --config 'services.openssh.enable = true; \
run:
<screen>
$ nixos-container start foo
# nixos-container start foo
</screen>
This command will return as soon as the container has booted and has
@ -46,7 +50,7 @@ Thus, if something went wrong, you can get status info using
<command>systemctl</command>:
<screen>
$ systemctl status container@foo
# systemctl status container@foo
</screen>
</para>
@ -55,7 +59,7 @@ $ systemctl status container@foo
root using the <command>root-login</command> operation:
<screen>
$ nixos-container root-login foo
# nixos-container root-login foo
[root@foo:~]#
</screen>
@ -65,7 +69,7 @@ authentication). You can also get a regular login prompt using the
the host:
<screen>
$ nixos-container login foo
# nixos-container login foo
foo login: alice
Password: ***
</screen>
@ -74,7 +78,7 @@ With <command>nixos-container run</command>, you can execute arbitrary
commands in the container:
<screen>
$ nixos-container run foo -- uname -a
# nixos-container run foo -- uname -a
Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
</screen>
@ -86,17 +90,17 @@ container. First, on the host, you can edit
and run
<screen>
$ nixos-container update foo
# nixos-container update foo
</screen>
This will build and activate the new configuration. You can also
specify a new configuration on the command line:
<screen>
$ nixos-container update foo --config 'services.httpd.enable = true; \
# nixos-container update foo --config 'services.httpd.enable = true; \
services.httpd.adminAddr = "foo@example.org";'
$ curl http://$(nixos-container show-ip foo)/
# curl http://$(nixos-container show-ip foo)/
&lt;!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">…
</screen>
@ -116,9 +120,9 @@ start</literal>, respectively, or by using
destroy a container, including its file system, do
<screen>
$ nixos-container destroy foo
# nixos-container destroy foo
</screen>
</para>
</section>
</section>
@ -9,10 +9,10 @@
<para>You can enter rescue mode by running:
<screen>
$ systemctl rescue</screen>
# systemctl rescue</screen>
This will eventually give you a single-user root shell. Systemd will
stop (almost) all system services. To get out of maintenance mode,
just exit from the rescue shell.</para>
</section>
</section>
@ -18,14 +18,14 @@ You can disable the use of the binary cache by adding <option>--option
use-binary-caches false</option>, e.g.
<screen>
$ nixos-rebuild switch --option use-binary-caches false
# nixos-rebuild switch --option use-binary-caches false
</screen>
If you have an alternative binary cache at your disposal, you can use
it instead:
<screen>
$ nixos-rebuild switch --option binary-caches http://my-cache.example.org/
# nixos-rebuild switch --option binary-caches http://my-cache.example.org/
</screen>
</para>
@ -10,7 +10,7 @@
doing:
<screen>
$ shutdown
# shutdown
</screen>
This is equivalent to running <command>systemctl
@ -19,7 +19,7 @@ poweroff</command>.</para>
<para>To reboot the system, run
<screen>
$ reboot
# reboot
</screen>
which is equivalent to <command>systemctl reboot</command>.
@ -28,7 +28,7 @@ Alternatively, you can quickly reboot the system using
the new kernel into memory:
<screen>
$ systemctl kexec
# systemctl kexec
</screen>
</para>
@ -41,4 +41,4 @@ $ systemctl kexec
i.e. on a virtual console or in X11; otherwise, the user is asked for
authentication.</para>
</chapter>
</chapter>
@ -19,7 +19,7 @@ fails to boot. After the system has booted, you can make the selected
configuration the default for subsequent boots:
<screen>
$ /run/current-system/bin/switch-to-configuration boot</screen>
# /run/current-system/bin/switch-to-configuration boot</screen>
</para>
@ -27,12 +27,12 @@ $ /run/current-system/bin/switch-to-configuration boot</screen>
system:
<screen>
$ nixos-rebuild switch --rollback</screen>
# nixos-rebuild switch --rollback</screen>
This is equivalent to running:
<screen>
$ /nix/var/nix/profiles/system-<replaceable>N</replaceable>-link/bin/switch-to-configuration switch</screen>
# /nix/var/nix/profiles/system-<replaceable>N</replaceable>-link/bin/switch-to-configuration switch</screen>
where <replaceable>N</replaceable> is the number of the NixOS system
configuration. To get a list of the available configurations, do:
@ -45,4 +45,4 @@ lrwxrwxrwx 1 root root 78 Aug 12 13:54 /nix/var/nix/profiles/system-268-link ->
</para>
</section>
</section>
@ -66,9 +66,9 @@ messages from the service.
<para>Units can be stopped, started or restarted:
<screen>
$ systemctl stop postgresql.service
$ systemctl start postgresql.service
$ systemctl restart postgresql.service
# systemctl stop postgresql.service
# systemctl start postgresql.service
# systemctl restart postgresql.service
</screen>
These operations are synchronous: they wait until the service has
@ -18,7 +18,7 @@ you may be able to fix it automatically.</para>
system configuration, you can fix it by doing
<screen>
$ nixos-rebuild switch --repair
# nixos-rebuild switch --repair
</screen>
This will cause Nix to check every path in the closure, and if its
@ -28,10 +28,10 @@ the path is rebuilt or redownloaded.</para>
<para>You can also scan the entire Nix store for corrupt paths:
<screen>
$ nix-store --verify --check-contents --repair
# nix-store --verify --check-contents --repair
</screen>
Any corrupt paths will be redownloaded if they're available in a
binary cache; otherwise, they cannot be repaired.</para>
</section>
</section>
@ -45,9 +45,9 @@ track of this, you can terminate a session in a way that ensures that
all the sessions processes are gone:
<screen>
$ loginctl terminate-session c3
# loginctl terminate-session c3
</screen>
</para>
</chapter>
</chapter>
@ -31,7 +31,7 @@ and you run <command>nixos-rebuild</command>, specifying your own
Nixpkgs tree:
<screen>
$ nixos-rebuild switch -I nixpkgs=/path/to/my/nixpkgs</screen>
# nixos-rebuild switch -I nixpkgs=/path/to/my/nixpkgs</screen>
</para>
@ -24,13 +24,9 @@ effect after you run <command>nixos-rebuild</command>.</para>
<xi:include href="networking.xml" />
<xi:include href="linux-kernel.xml" />
<!-- FIXME: auto-include NixOS module docs -->
<xi:include href="postgresql.xml" />
<xi:include href="gitlab.xml" />
<xi:include href="taskserver.xml" />
<xi:include href="acme.xml" />
<xi:include href="input-methods.xml" />
<xi:include href="modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
<!-- Apache; libvirtd virtualisation -->
</part>
@ -12,8 +12,15 @@ can disable IPv6 support globally by setting:
<programlisting>
networking.enableIPv6 = false;
</programlisting>
</programlisting></para>
<para>You can disable IPv6 on a single interface using a normal sysctl (in this
example, we use interface <varname>eth0</varname>):
<programlisting>
boot.kernel.sysctl."net.ipv6.conf.eth0.disable_ipv6" = true;
</programlisting>
</para>
</section>
@ -19,7 +19,7 @@ kernel.</para>
<para>The default Linux kernel configuration should be fine for most users. You can see the configuration of your current kernel with the following command:
<programlisting>
cat /proc/config.gz | gunzip
zcat /proc/config.gz
</programlisting>
If you want to change the kernel configuration, you can use the
<option>packageOverrides</option> feature (see <xref
@ -66,4 +66,25 @@ boot.kernel.sysctl."net.ipv4.tcp_keepalive_time" = 120;
sets the kernel's TCP keepalive time to 120 seconds. To see the
available parameters, run <command>sysctl -a</command>.</para>
<section>
<title>Developing kernel modules</title>
<para>When developing kernel modules it's often convenient to run
the edit-compile-run loop as quickly as possible.
The snippet below shows an example of developing the <literal>mellanox</literal>
drivers.
</para>
<screen><![CDATA[
$ nix-build '<nixpkgs>' -A linuxPackages.kernel.dev
$ nix-shell '<nixpkgs>' -A linuxPackages.kernel
$ unpackPhase
$ cd linux-*
$ make -C $dev/lib/modules/*/build M=$(pwd)/drivers/net/ethernet/mellanox modules
# insmod ./drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.ko
]]></screen>
</section>
</chapter>
@ -9,23 +9,23 @@
<para>NixOS supports file systems that are encrypted using
<emphasis>LUKS</emphasis> (Linux Unified Key Setup). For example,
here is how you create an encrypted Ext4 file system on the device
<filename>/dev/sda2</filename>:
<filename>/dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d</filename>:
<screen>
$ cryptsetup luksFormat /dev/sda2
# cryptsetup luksFormat /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d
WARNING!
========
This will overwrite data on /dev/sda2 irrevocably.
This will overwrite data on /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d irrevocably.
Are you sure? (Type uppercase yes): YES
Enter LUKS passphrase: ***
Verify passphrase: ***
$ cryptsetup luksOpen /dev/sda2 crypted
Enter passphrase for /dev/sda2: ***
# cryptsetup luksOpen /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d crypted
Enter passphrase for /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d: ***
$ mkfs.ext4 /dev/mapper/crypted
# mkfs.ext4 /dev/mapper/crypted
</screen>
To ensure that this file system is automatically mounted at boot time
@ -33,7 +33,7 @@ as <filename>/</filename>, add the following to
<filename>configuration.nix</filename>:
<programlisting>
boot.initrd.luks.devices = [ { device = "/dev/sda2"; name = "crypted"; } ];
boot.initrd.luks.devices.crypted.device = "/dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d";
fileSystems."/".device = "/dev/mapper/crypted";
</programlisting>
@ -16,12 +16,22 @@ networking.networkmanager.enable = true;
some desktop managers (e.g., GNOME) enable NetworkManager
automatically for you.</para>
<para>All users that should have permission to change network settings
must belong to the <code>networkmanager</code> group.</para>
<para>All users that should have permission to change network settings must
belong to the <code>networkmanager</code> group:
<programlisting>
users.extraUsers.youruser.extraGroups = [ "networkmanager" ];
</programlisting>
</para>
<para>NetworkManager is controlled using either <command>nmcli</command> or
<command>nmtui</command> (curses-based terminal user interface). See their
manual pages for details on their usage. Some desktop environments (GNOME, KDE)
have their own configuration tools for NetworkManager.</para>
<note><para><code>networking.networkmanager</code> and
<code>networking.wireless</code> can not be enabled at the same time:
you can still connect to the wireless networks using
<code>networking.wireless</code> (WPA Supplicant) cannot be enabled at the same
time: you can still connect to the wireless networks using
NetworkManager.</para></note>
</section>
@ -63,14 +63,14 @@ commands such as <command>useradd</command>,
account named <literal>alice</literal>:
<screen>
$ useradd -m alice</screen>
# useradd -m alice</screen>
To make all Nix tools available to this new user, use `su - USER`, which
opens a login shell (i.e. a shell that loads the profile) for the given user.
This will create the ~/.nix-defexpr symlink. So run:
<screen>
$ su - alice -c "true"</screen>
# su - alice -c "true"</screen>
The flag <option>-m</option> causes the creation of a home directory
@ -79,7 +79,7 @@ have an initial password and therefore cannot log in. A password can
be set using the <command>passwd</command> utility:
<screen>
$ passwd alice
# passwd alice
Enter new UNIX password: ***
Retype new UNIX password: ***
</screen>
@ -87,7 +87,7 @@ Retype new UNIX password: ***
A user can be deleted using <command>userdel</command>:
<screen>
$ userdel -r alice</screen>
# userdel -r alice</screen>
The flag <option>-r</option> deletes the user's home directory.
Accounts can be modified using <command>usermod</command>. Unix
@ -41,13 +41,13 @@ If you are using WPA2 the <command>wpa_passphrase</command> tool might be useful
to generate the <literal>wpa_supplicant.conf</literal>.
<screen>
$ wpa_passphrase ESSID PSK > /etc/wpa_supplicant.conf</screen>
# wpa_passphrase ESSID PSK > /etc/wpa_supplicant.conf</screen>
After you have edited the <literal>wpa_supplicant.conf</literal>,
you need to restart the wpa_supplicant service.
<screen>
$ systemctl restart wpa_supplicant.service</screen>
# systemctl restart wpa_supplicant.service</screen>
</para>
</section>
@ -5,7 +5,7 @@
xml:id="sec-x11">
<title>X Window System</title>
<para>The X Window System (X11) provides the basis of NixOS' graphical
user interface. It can be enabled as follows:
<programlisting>
@ -48,7 +48,7 @@ services.xserver.autorun = false;
</programlisting>
The X server can then be started manually:
<screen>
$ systemctl start display-manager.service
# systemctl start display-manager.service
</screen>
</para>
@ -115,5 +115,14 @@ services.xserver.synaptics.twoFingerScroll = true;
</simplesect>
<simplesect><title>GTK/Qt themes</title>
<para>GTK themes can be installed either to a user profile or system-wide (via
<literal>environment.systemPackages</literal>). To make Qt 5 applications look similar
to GTK2 ones, you can install the <literal>qt5.qtbase.gtk</literal> package into your
system environment. It should work for all Qt 5 library versions.
</para>
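<para>For example (assuming the <literal>qt5.qtbase.gtk</literal> attribute is
available in your Nixpkgs revision), the system-wide variant might look
like:</para>
<programlisting>
environment.systemPackages = [ pkgs.qt5.qtbase.gtk ];
</programlisting>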
</simplesect>
</chapter>
@ -1,27 +1,27 @@
{ pkgs, options, version, revision, extraSources ? [] }:
{ pkgs, options, config, version, revision, extraSources ? [] }:
with pkgs;
with pkgs.lib;
let
lib = pkgs.lib;
# Remove invisible and internal options.
optionsList = filter (opt: opt.visible && !opt.internal) (optionAttrSetToDocList options);
optionsList = lib.filter (opt: opt.visible && !opt.internal) (lib.optionAttrSetToDocList options);
# Replace functions by the string <function>
substFunction = x:
if builtins.isAttrs x then mapAttrs (name: substFunction) x
if builtins.isAttrs x then lib.mapAttrs (name: substFunction) x
else if builtins.isList x then map substFunction x
else if builtins.isFunction x then "<function>"
else x;
# Clean up declaration sites to not refer to the NixOS source tree.
optionsList' = flip map optionsList (opt: opt // {
optionsList' = lib.flip map optionsList (opt: opt // {
declarations = map stripAnyPrefixes opt.declarations;
}
// optionalAttrs (opt ? example) { example = substFunction opt.example; }
// optionalAttrs (opt ? default) { default = substFunction opt.default; }
// optionalAttrs (opt ? type) { type = substFunction opt.type; });
// lib.optionalAttrs (opt ? example) { example = substFunction opt.example; }
// lib.optionalAttrs (opt ? default) { default = substFunction opt.default; }
// lib.optionalAttrs (opt ? type) { type = substFunction opt.type; });
# We need to strip references to /nix/store/* from options,
# including any `extraSources` if some modules came from elsewhere,
@ -30,7 +30,7 @@ let
# E.g. if some `options` came from modules in ${pkgs.customModules}/nix,
# you'd need to include `extraSources = [ pkgs.customModules ]`
prefixesToStrip = map (p: "${toString p}/") ([ ../../.. ] ++ extraSources);
stripAnyPrefixes = flip (fold removePrefix) prefixesToStrip;
stripAnyPrefixes = lib.flip (lib.fold lib.removePrefix) prefixesToStrip;
# Convert the list of options into an XML file.
optionsXML = builtins.toFile "options.xml" (builtins.toXML optionsList');
@ -49,17 +49,21 @@ let
-o $out ${./options-to-docbook.xsl} $optionsXML
'';
sources = sourceFilesBySuffices ./. [".xml"];
sources = lib.sourceFilesBySuffices ./. [".xml"];
modulesDoc = builtins.toFile "modules.xml" ''
<section xmlns:xi="http://www.w3.org/2001/XInclude" id="modules">
${(lib.concatMapStrings (path: ''
<xi:include href="${path}" />
'') (lib.catAttrs "value" config.meta.doc))}
</section>
'';
copySources =
''
cp -prd $sources/* . # */
chmod -R u+w .
cp ${../../modules/services/databases/postgresql.xml} configuration/postgresql.xml
cp ${../../modules/services/misc/gitlab.xml} configuration/gitlab.xml
cp ${../../modules/services/misc/taskserver/doc.xml} configuration/taskserver.xml
cp ${../../modules/security/acme.xml} configuration/acme.xml
cp ${../../modules/i18n/input-method/default.xml} configuration/input-methods.xml
ln -s ${modulesDoc} configuration/modules.xml
ln -s ${optionsDocBook} options-db.xml
echo "${version}" > version
'';
@ -90,14 +94,11 @@ let
"--stringparam chunk.toc ${toc}"
];
olinkDB = stdenv.mkDerivation {
name = "manual-olinkdb";
inherit sources;
buildInputs = [ libxml2 libxslt ];
buildCommand = ''
olinkDB = runCommand "manual-olinkdb"
{ inherit sources;
buildInputs = [ libxml2 libxslt ];
}
''
${copySources}
xsltproc \
@ -122,46 +123,41 @@ let
<targetset>
<targetsetinfo>
Allows for cross-referencing olinks between the manpages
and the HTML/PDF manuals.
and manual.
</targetsetinfo>
<document targetdoc="manual">&manualtargets;</document>
</targetset>
EOF
'';
};
in rec {
# The NixOS options in JSON format.
optionsJSON = stdenv.mkDerivation {
name = "options-json";
buildCommand = ''
optionsJSON = runCommand "options-json"
{ meta.description = "List of NixOS options in JSON format";
}
''
# Export list of options in different format.
dst=$out/share/doc/nixos
mkdir -p $dst
cp ${builtins.toFile "options.json" (builtins.unsafeDiscardStringContext (builtins.toJSON
(listToAttrs (map (o: { name = o.name; value = removeAttrs o ["name" "visible" "internal"]; }) optionsList'))))
(builtins.listToAttrs (map (o: { name = o.name; value = removeAttrs o ["name" "visible" "internal"]; }) optionsList'))))
} $dst/options.json
mkdir -p $out/nix-support
echo "file json $dst/options.json" >> $out/nix-support/hydra-build-products
''; # */
meta.description = "List of NixOS options in JSON format";
};
# Generate the NixOS manual.
manual = stdenv.mkDerivation {
name = "nixos-manual";
inherit sources;
buildInputs = [ libxml2 libxslt ];
buildCommand = ''
manual = runCommand "nixos-manual"
{ inherit sources;
buildInputs = [ libxml2 libxslt ];
meta.description = "The NixOS manual in HTML format";
allowedReferences = ["out"];
}
''
${copySources}
# Check the validity of the manual sources.
@ -188,42 +184,49 @@ in rec {
echo "doc manual $dst" >> $out/nix-support/hydra-build-products
''; # */
meta.description = "The NixOS manual in HTML format";
allowedReferences = ["out"];
};
manualPDF = stdenv.mkDerivation {
name = "nixos-manual-pdf";
inherit sources;
buildInputs = [ libxml2 libxslt dblatex dblatex.tex ];
buildCommand = ''
manualEpub = runCommand "nixos-manual-epub"
{ inherit sources;
buildInputs = [ libxml2 libxslt zip ];
}
''
${copySources}
# Check the validity of the manual sources.
xmllint --noout --nonet --xinclude --noxincludenode \
--relaxng ${docbook5}/xml/rng/docbook/docbook.rng \
manual.xml
# Generate the epub manual.
dst=$out/share/doc/nixos
mkdir -p $dst
xmllint --xinclude manual.xml | dblatex -o $dst/manual.pdf - \
-P target.database.document="${olinkDB}/olinkdb.xml" \
-P doc.collab.show=0 \
-P latex.output.revhistory=0
xsltproc \
${manualXsltprocOptions} \
--stringparam target.database.document "${olinkDB}/olinkdb.xml" \
--nonet --xinclude --output $dst/epub/ \
${docbook5_xsl}/xml/xsl/docbook/epub/docbook.xsl ./manual.xml
mkdir -p $dst/epub/OEBPS/images/callouts
cp -r ${docbook5_xsl}/xml/xsl/docbook/images/callouts/*.gif $dst/epub/OEBPS/images/callouts
echo "application/epub+zip" > mimetype
manual="$dst/nixos-manual.epub"
zip -0Xq "$manual" mimetype
cd $dst/epub && zip -Xr9D "$manual" *
rm -rf $dst/epub
mkdir -p $out/nix-support
echo "doc-pdf manual $dst/manual.pdf" >> $out/nix-support/hydra-build-products
echo "doc-epub manual $manual" >> $out/nix-support/hydra-build-products
'';
};
# Generate the NixOS manpages.
manpages = stdenv.mkDerivation {
name = "nixos-manpages";
inherit sources;
buildInputs = [ libxml2 libxslt ];
buildCommand = ''
manpages = runCommand "nixos-manpages"
{ inherit sources;
buildInputs = [ libxml2 libxslt ];
allowedReferences = ["out"];
}
''
${copySources}
# Check the validity of the man pages sources.
@ -243,7 +246,4 @@ in rec {
./man-pages.xml
'';
allowedReferences = ["out"];
};
}

View File

@ -25,8 +25,8 @@ $ nix-build -A config.system.build.isoImage -I nixos-config=modules/installer/cd
suggested by the following command:
<screen>
$ mount -o loop -t iso9660 ./result/iso/cd.iso /mnt/iso</screen>
# mount -o loop -t iso9660 ./result/iso/cd.iso /mnt/iso</screen>
</para>
</chapter>
</chapter>

View File

@ -94,8 +94,8 @@ $ nix-build -A 'config.systemd.units."httpd.service".unit'
<screen>
$ cp $(nix-build -A 'config.systemd.units."httpd.service".unit')/httpd.service \
/run/systemd/system/tmp-httpd.service
$ systemctl daemon-reload
$ systemctl start tmp-httpd.service
# systemctl daemon-reload
# systemctl start tmp-httpd.service
</screen>
Note that the unit must not have the same name as any unit in
@ -110,4 +110,4 @@ $ systemctl start tmp-httpd.service
</para>
</chapter>
</chapter>

View File

@ -17,5 +17,6 @@ NixOS.</para>
<xi:include href="building-nixos.xml" />
<xi:include href="nixos-tests.xml" />
<xi:include href="testing-installer.xml" />
<xi:include href="reviewing-contributions.xml" />
</part>

View File

@ -0,0 +1,62 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-meta-attributes">
<title>Meta Attributes</title>
<para>Like Nix packages, NixOS modules can declare meta-attributes to provide
extra information. Module meta attributes are defined in the
<filename
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/misc/meta.nix">meta.nix</filename>
special module.</para>
<para><literal>meta</literal> is a top level attribute like
<literal>options</literal> and <literal>config</literal>. Available
meta-attributes are <literal>maintainers</literal> and
<literal>doc</literal>.</para>
<para>Each of the meta-attributes must be defined at most once per module
file.</para>
<programlisting>
{ config, lib, pkgs, ... }:
{
options = {
...
};
config = {
...
};
meta = {
maintainers = with lib.maintainers; [ ericsagnes ]; <co
xml:id='modules-meta-1' />
doc = ./default.xml; <co xml:id='modules-meta-2' />
};
}
</programlisting>
<calloutlist>
<callout arearefs='modules-meta-1'>
<para>
<varname>maintainers</varname> contains a list of the module maintainers.
</para>
</callout>
<callout arearefs='modules-meta-2'>
<para>
<varname>doc</varname> points to a valid DocBook file containing the module
documentation. Its contents are automatically added to <xref
linkend="ch-configuration"/>.
Changes to module documentation have to be checked to make sure they do not
break building the NixOS manual:
</para>
<programlisting>$ nix-build nixos/release.nix -A manual</programlisting>
</callout>
</calloutlist>
</section>

View File

@ -7,8 +7,8 @@
<title>Option Declarations</title>
<para>An option declaration specifies the name, type and description
of a NixOS configuration option. It is illegal to define an option
that hasn't been declared in any module. A option declaration
of a NixOS configuration option. It is invalid to define an option
that hasn't been declared in any module. An option declaration
generally looks like this:
<programlisting>
@ -31,9 +31,9 @@ options = {
<varlistentry>
<term><varname>type</varname></term>
<listitem>
<para>The type of the option (see below). It may be omitted,
but that's not advisable since it may lead to errors that are
hard to diagnose.</para>
<para>The type of the option (see <xref linkend='sec-option-types' />).
It may be omitted, but that's not advisable since it may lead to errors
that are hard to diagnose.</para>
</listitem>
</varlistentry>
@ -42,7 +42,7 @@ options = {
<listitem>
<para>The default value used if no value is defined by any
module. A default is not required; in that case, if the option
value is ever used, an error will be thrown.</para>
value is never used, an error will be thrown.</para>
</listitem>
</varlistentry>
@ -65,86 +65,4 @@ options = {
</para>
<para>Here is a non-exhaustive list of option types:
<variablelist>
<varlistentry>
<term><varname>types.bool</varname></term>
<listitem>
<para>A Boolean.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>types.int</varname></term>
<listitem>
<para>An integer.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>types.str</varname></term>
<listitem>
<para>A string.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>types.lines</varname></term>
<listitem>
<para>A string. If there are multiple definitions, they are
concatenated, with newline characters in between.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>types.path</varname></term>
<listitem>
<para>A path, defined as anything that, when coerced to a
string, starts with a slash. This includes derivations.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>types.package</varname></term>
<listitem>
<para>A derivation (such as <literal>pkgs.hello</literal>) or a
store path (such as
<filename>/nix/store/1ifi1cfbfs5iajmvwgrbmrnrw3a147h9-hello-2.10</filename>).</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>types.listOf</varname> <replaceable>t</replaceable></term>
<listitem>
<para>A list of elements of type <replaceable>t</replaceable>
(e.g., <literal>types.listOf types.str</literal> is a list of
strings). Multiple definitions are concatenated together.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>types.attrsOf</varname> <replaceable>t</replaceable></term>
<listitem>
<para>A set of elements of type <replaceable>t</replaceable>
(e.g., <literal>types.attrsOf types.int</literal> is a set of
name/value pairs, the values being integers).</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>types.nullOr</varname> <replaceable>t</replaceable></term>
<listitem>
<para>Either the value <literal>null</literal> or something of
type <replaceable>t</replaceable>.</para>
</listitem>
</varlistentry>
</variablelist>
You can also create new types using the function
<varname>mkOptionType</varname>. See
<filename>lib/types.nix</filename> in Nixpkgs for details.</para>
</section>

View File

@ -0,0 +1,394 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-option-types">
<title>Options Types</title>
<para>Option types are a way to put constraints on the values a module option
can take.
Types are also responsible for how values are merged in case of multiple
value definitions.</para>
<section><title>Basic Types</title>
<para>Basic types are the simplest available types in the module system.
Basic types include multiple string types that mainly differ in how
definition merging is handled.</para>
<variablelist>
<varlistentry>
<term><varname>types.bool</varname></term>
<listitem><para>A boolean; its values can be <literal>true</literal> or
<literal>false</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.int</varname></term>
<listitem><para>An integer.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.path</varname></term>
<listitem><para>A filesystem path, defined as anything that, when coerced to
a string, starts with a slash. Even though derivations can be considered
paths, the more specific <literal>types.package</literal> should be
preferred.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.package</varname></term>
<listitem><para>A derivation or a store path.</para></listitem>
</varlistentry>
</variablelist>
<para>String related types:</para>
<variablelist>
<varlistentry>
<term><varname>types.str</varname></term>
<listitem><para>A string. Multiple definitions cannot be
merged.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.lines</varname></term>
<listitem><para>A string. Multiple definitions are concatenated with a
newline <literal>"\n"</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.commas</varname></term>
<listitem><para>A string. Multiple definitions are concatenated with a comma
<literal>","</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.envVar</varname></term>
<listitem><para>A string. Multiple definitions are concatenated with a
colon <literal>":"</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.separatedString</varname>
<replaceable>sep</replaceable></term>
<listitem><para>A string with a custom separator
<replaceable>sep</replaceable>, e.g. <literal>types.separatedString
"|"</literal>.</para></listitem>
</varlistentry>
</variablelist>
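<para>As a brief illustration (the option names below are hypothetical, and
<literal>mkOption</literal> and <literal>types</literal> are assumed to be in
scope, e.g. via <literal>with lib;</literal>), basic types are used in option
declarations like this:</para>
<programlisting>
# Sketch only: hypothetical options using some basic types.
options.example = {
  enable = mkOption {
    type = types.bool;
    default = false;
    description = "Whether to enable the hypothetical example service.";
  };
  port = mkOption {
    type = types.int;
    default = 8080;
    description = "Port for the hypothetical service to listen on.";
  };
  extraConfig = mkOption {
    type = types.lines;
    default = "";
    description = "Extra configuration appended verbatim.";
  };
};
</programlisting>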
</section>
<section><title>Composed Types</title>
<para>Composed types allow the creation of complex types by taking one or more
types or values as parameters.
It is possible to compose types multiple times, e.g. <literal>with types;
nullOr (enum [ "left" "right" ])</literal>.</para>
<variablelist>
<varlistentry>
<term><varname>types.listOf</varname> <replaceable>t</replaceable></term>
<listitem><para>A list of elements of type <replaceable>t</replaceable>, e.g.
<literal>types.listOf types.int</literal>. Multiple definitions are merged
with list concatenation.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.attrsOf</varname> <replaceable>t</replaceable></term>
<listitem><para>An attribute set where all the values are of type
<replaceable>t</replaceable>. Multiple definitions result in the
joined attribute set.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.loaOf</varname> <replaceable>t</replaceable></term>
<listitem><para>An attribute set or a list of elements of type
<replaceable>t</replaceable>. Multiple definitions are merged according to the
value.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.loeOf</varname> <replaceable>t</replaceable></term>
<listitem><para>A list of, or a single element of, type
<replaceable>t</replaceable>. Multiple definitions are merged according to the
values.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.nullOr</varname> <replaceable>t</replaceable></term>
<listitem><para><literal>null</literal> or type
<replaceable>t</replaceable>. Multiple definitions are merged according
to type <replaceable>t</replaceable>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.uniq</varname> <replaceable>t</replaceable></term>
<listitem><para>Ensures that type <replaceable>t</replaceable> cannot be
merged. It is used to ensure option definitions are declared only
once.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.enum</varname> <replaceable>l</replaceable></term>
<listitem><para>One element of the list <replaceable>l</replaceable>, e.g.
<literal>types.enum [ "left" "right" ]</literal>. Multiple definitions
cannot be merged.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.either</varname> <replaceable>t1</replaceable>
<replaceable>t2</replaceable></term>
<listitem><para>Type <replaceable>t1</replaceable> or type
<replaceable>t2</replaceable>, e.g. <literal>with types; either int
str</literal>. Multiple definitions cannot be
merged.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.submodule</varname> <replaceable>o</replaceable></term>
<listitem><para>A set of sub options <replaceable>o</replaceable>.
<replaceable>o</replaceable> can be an attribute set or a function
returning an attribute set. Submodules are used in composed types to
create modular options. Submodules are detailed in <xref
linkend='section-option-types-submodule' />.</para></listitem>
</varlistentry>
</variablelist>
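<para>As an illustrative sketch (hypothetical option names, with
<literal>mkOption</literal> and <literal>types</literal> assumed to be in
scope), composed types are used like this:</para>
<programlisting>
# Sketch only: hypothetical options using composed types.
options.example = {
  allowedUsers = mkOption {
    type = with types; listOf str;   # list of strings; definitions are concatenated
    default = [ ];
  };
  instances = mkOption {
    type = with types; attrsOf int;  # named integer values, merged into one set
    default = { };
  };
  logFile = mkOption {
    type = with types; nullOr path;  # either null or a path
    default = null;
  };
};
</programlisting>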
</section>
<section xml:id='section-option-types-submodule'><title>Submodule</title>
<para>Submodule is a very powerful type that defines a set of sub-options that
are handled like a separate module.
It is especially interesting when used with composed types like
<literal>attrsOf</literal> or <literal>listOf</literal>.</para>
<para>The submodule type takes a parameter <replaceable>o</replaceable>, which
should be a set, or a function returning a set, with an
<literal>options</literal> key defining the sub-options.
The option set can be defined directly (<xref linkend='ex-submodule-direct'
/>) or as a reference (<xref linkend='ex-submodule-reference' />).</para>
<para>Submodule option definitions are type-checked according to the option
declarations. It is possible to declare submodule options inside a submodule's
sub-options for even higher modularity.</para>
<example xml:id='ex-submodule-direct'><title>Directly defined submodule</title>
<screen>
options.mod = mkOption {
name = "mod";
description = "submodule example";
type = with types; listOf (submodule {
options = {
foo = mkOption {
type = int;
};
bar = mkOption {
type = str;
};
};
});
};</screen></example>
<example xml:id='ex-submodule-reference'><title>Submodule defined as a
reference</title>
<screen>
let
modOptions = {
options = {
foo = mkOption {
type = int;
};
bar = mkOption {
type = int;
};
};
};
in
options.mod = mkOption {
description = "submodule example";
type = with types; listOf (submodule modOptions);
};</screen></example>
<section><title>Composed with <literal>listOf</literal></title>
<para>When composed with <literal>listOf</literal>, submodule allows multiple
definitions of the submodule option set.</para>
<example xml:id='ex-submodule-listof-declaration'><title>Declaration of a list
of submodules</title>
<screen>
options.mod = mkOption {
description = "submodule example";
type = with types; listOf (submodule {
options = {
foo = mkOption {
type = int;
};
bar = mkOption {
type = str;
};
};
});
};</screen></example>
<example xml:id='ex-submodule-listof-definition'><title>Definition of a list of
submodules</title>
<screen>
config.mod = [
{ foo = 1; bar = "one"; }
{ foo = 2; bar = "two"; }
];</screen></example>
</section>
<section><title>Composed with <literal>attrsOf</literal></title>
<para>When composed with <literal>attrsOf</literal>, submodule allows multiple
named definitions of the submodule option set.</para>
<example xml:id='ex-submodule-attrsof-declaration'><title>Declaration of
attribute sets of submodules</title>
<screen>
options.mod = mkOption {
description = "submodule example";
type = with types; attrsOf (submodule {
options = {
foo = mkOption {
type = int;
};
bar = mkOption {
type = str;
};
};
});
};</screen></example>
<example xml:id='ex-submodule-attrsof-definition'><title>Definition of
attribute sets of submodules</title>
<screen>
config.mod.one = { foo = 1; bar = "one"; };
config.mod.two = { foo = 2; bar = "two"; };</screen></example>
</section>
</section>
<section><title>Extending types</title>
<para>Types are mainly characterized by their <literal>check</literal> and
<literal>merge</literal> functions.</para>
<variablelist>
<varlistentry>
<term><varname>check</varname></term>
<listitem><para>The function to type check the value. Takes a value as
parameter and returns a boolean.
It is possible to extend a type check with the
<literal>addCheck</literal> function (<xref
linkend='ex-extending-type-check-1' />), or to fully override the
check function (<xref linkend='ex-extending-type-check-2' />).</para>
<example xml:id='ex-extending-type-check-1'><title>Adding a type check</title>
<screen>
byte = mkOption {
description = "An integer between 0 and 255.";
type = addCheck (x: x &gt;= 0 &amp;&amp; x &lt;= 255) types.int;
};</screen></example>
<example xml:id='ex-extending-type-check-2'><title>Overriding a type
check</title>
<screen>
nixThings = mkOption {
description = "words that start with 'nix'";
type = types.str // {
check = x: lib.hasPrefix "nix" x;
};
};</screen></example>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>merge</varname></term>
<listitem><para>Function to merge the option's values when multiple values
are set.
The function takes two parameters: <literal>loc</literal>, the option path as a
list of strings, and <literal>defs</literal>, the list of defined values.
It is possible to override a type's merge function for custom
needs.</para></listitem>
</varlistentry>
</variablelist>
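<para>For instance (a sketch only, not part of the module system itself), a
string type whose merge function simply keeps the last definition could be
written as:</para>
<programlisting>
# Hypothetical example: override the merge function of types.str.
# `defs` is the list of { file; value; } sets described above.
lastStr = types.str // {
  merge = loc: defs: (lib.last defs).value;
};
</programlisting>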
</section>
<section><title>Custom Types</title>
<para>Custom types can be created with the <literal>mkOptionType</literal>
function.
As type creation includes some more complex topics such as submodule handling,
it is recommended to get familiar with the <filename
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/types.nix">types.nix</filename>
code before creating a new type.</para>
<para>The only required parameter is <literal>name</literal>.</para>
<variablelist>
<varlistentry>
<term><varname>name</varname></term>
<listitem><para>A string representation of the type function name. The name
usually changes according to the parameters passed to the
type.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>check</varname></term>
<listitem><para>A function to type check the definition value. Takes the
definition value as a parameter and returns a boolean indicating the
type check result, <literal>true</literal> for success and
<literal>false</literal> for failure.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>merge</varname></term>
<listitem><para>A function to merge the values of multiple definitions. Takes two
parameters:</para>
<variablelist>
<varlistentry>
<term><replaceable>loc</replaceable></term>
<listitem><para>The option path as a list of strings, e.g.
<literal>["boot" "loader "grub"
"enable"]</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><replaceable>defs</replaceable></term>
<listitem><para>The list of sets of defined <literal>value</literal>
and <literal>file</literal> where the value was defined, e.g.
<literal>[ { file = "/foo.nix"; value = 1; } { file = "/bar.nix";
value = 2; } ]</literal>. The <literal>merge</literal> function
should return the merged value or throw an error in case the
values are impossible or not meant to be merged.</para></listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>getSubOptions</varname></term>
<listitem><para>For composed types that can take a submodule as type
parameter, this function generates sub-options documentation. It takes
the current option prefix as a list and returns the set of sub-options.
Usually defined in a recursive manner by adding a term to the prefix,
e.g. <literal>prefix: elemType.getSubOptions (prefix ++
[<replaceable>"prefix"</replaceable>])</literal> where
<replaceable>"prefix"</replaceable> is the newly added
prefix.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>getSubModules</varname></term>
<listitem><para>For composed types that can take a submodule as type
parameter, this function should return the type parameter's submodules.
If the type parameter is called <literal>elemType</literal>, the
function should just recursively look into submodules by returning
<literal>elemType.getSubModules;</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>substSubModules</varname></term>
<listitem><para>For composed types that can take a submodule as type
parameter, this function can be used to substitute the parameter of a
submodule type. It takes a module as parameter and returns the type with
the submodule options substituted. It is usually defined as a type
function call with a recursive call to
<literal>substSubModules</literal>, e.g. for a type
<literal>composedType</literal> that takes an <literal>elemType</literal>
type parameter, this function should be defined as <literal>m:
composedType (elemType.substSubModules m)</literal>.</para></listitem>
</varlistentry>
</variablelist>
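<para>As a minimal sketch (the type below is made up for illustration and is
not part of Nixpkgs), a simple custom type could look like this:</para>
<programlisting>
# Hypothetical custom type: a positive integer.
positiveInt = lib.mkOptionType {
  name = "positiveInt";
  description = "positive integer";
  check = x: builtins.isInt x &amp;&amp; x &gt; 0;
  # merge is omitted here, so the default merge behaviour is used.
};
</programlisting>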
</section>
</section>

View File

@ -0,0 +1,393 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-reviewing-contributions">
<title>Reviewing contributions</title>
<warning>
<para>The following section is a draft and reviewing policy is still being
discussed.</para>
</warning>
<para>The nixpkgs project receives a fairly high number of contributions via
GitHub pull-requests. Reviewing and approving these is an important task and a
way to contribute to the project.</para>
<para>The high change rate of nixpkgs makes any pull request that is open for
long enough subject to conflicts that will require extra work from the
submitter or the merger. Reviewing pull requests in a timely manner and being
responsive to comments is the key to avoiding these. GitHub provides sort
filters that can be used to see the <link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most
recently</link> and the <link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least
recently</link> updated pull-requests.</para>
<para>When reviewing a pull request, please always be nice and polite.
Controversial changes can lead to controversial opinions, but it is important
to respect every community member and their work.</para>
<para>GitHub provides reactions; they are a simple and quick way to provide
feedback on pull-requests or any comments. The thumbs-down reaction should be
used with care and, if possible, accompanied by some explanation so the
submitter has directions to improve their contribution.</para>
<para>Pull-request reviews should include a list of what has been reviewed in a
comment, so other reviewers and mergers can know the state of the
review.</para>
<para>All the review template samples provided in this section are generic and
meant as examples. Their usage is optional, and the reviewer is free to adapt
them to their liking.</para>
<section><title>Package updates</title>
<para>A package update is the most trivial and common type of pull-request.
These pull-requests mainly consist of updating the version part of the package
name and the source hash.</para>
<para>It can happen that non-trivial updates include patches or more complex
changes.</para>
<para>Reviewing process:</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem><para><literal>8.has: package (update)</literal> and any topic
label that fits the updated package.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the package versioning fits the
guidelines.</para></listitem>
<listitem><para>Ensure that the commit text fits the
guidelines.</para></listitem>
<listitem><para>Ensure that the package maintainers are notified.</para>
<itemizedlist>
<listitem><para>mention-bot usually notifies GitHub users based on the
submitted changes, but it can happen that it misses some of the
package maintainers.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the meta field contains correct
information.</para>
<itemizedlist>
<listitem><para>The license can change with version updates, so it should be
checked against the upstream license.</para></listitem>
<listitem><para>If the package has no maintainer, a maintainer must be
set. This can be the update submitter or a community member who
agrees to take maintainership of the package.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the code contains no typos.</para></listitem>
<listitem><para>Building the package locally.</para>
<itemizedlist>
<listitem><para>Pull-requests are often targeted to the master or staging
branch, so building the pull-request locally as it is submitted can
trigger a large number of source builds.</para>
<para>It is possible to rebase the changes on nixos-unstable or
nixpkgs-unstable for easier review by running the following commands
from a nixpkgs clone.
<screen>
$ git remote add channels https://github.com/NixOS/nixpkgs-channels.git <co
xml:id='reviewing-rebase-1' />
$ git fetch channels nixos-unstable <co xml:id='reviewing-rebase-2' />
$ git fetch origin pull/PRNUMBER/head <co xml:id='reviewing-rebase-3' />
$ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD <co
xml:id='reviewing-rebase-4' />
</screen>
<calloutlist>
<callout arearefs='reviewing-rebase-1'>
<para>This should be done only once to be able to fetch channel
branches from the nixpkgs-channels repository.</para>
</callout>
<callout arearefs='reviewing-rebase-2'>
<para>Fetching the nixos-unstable branch.</para>
</callout>
<callout arearefs='reviewing-rebase-3'>
<para>Fetching the pull-request changes, <varname>PRNUMBER</varname>
is the number at the end of the pull-request title and
<varname>BASEBRANCH</varname> the base branch of the
pull-request.</para>
</callout>
<callout arearefs='reviewing-rebase-4'>
<para>Rebasing the pull-request changes to the nixos-unstable
branch.</para>
</callout>
</calloutlist>
</para>
</listitem>
<listitem>
<para>The <link xlink:href="https://github.com/madjar/nox">nox</link>
tool can be used to review a pull-request's content in a single command.
It doesn't rebase on a channel branch so it might trigger multiple
source builds. <varname>PRNUMBER</varname> should be replaced by the
number at the end of the pull-request title.</para>
<screen>
$ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
</screen>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Running every binary.</para></listitem>
</itemizedlist>
<example><title>Sample template for a package update review</title>
<screen>
##### Reviewed points
- [ ] package name fits guidelines
- [ ] package version fits guidelines
- [ ] package builds on ARCHITECTURE
- [ ] executables tested on ARCHITECTURE
- [ ] all dependent packages build
##### Possible improvements
##### Comments
</screen></example>
</section>
<section><title>New packages</title>
<para>New packages are a common type of pull-request. These pull requests
consist of adding a new Nix expression for a package.</para>
<para>Reviewing process:</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem><para><literal>8.has: package (new)</literal> and any topic
label that fits the new package.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the package versioning fits the
guidelines.</para></listitem>
<listitem><para>Ensure that the commit name fits the
guidelines.</para></listitem>
<listitem><para>Ensure that the meta field contains correct
information.</para>
<itemizedlist>
<listitem><para>The license must be checked against the upstream
license.</para></listitem>
<listitem><para>Platforms should be set or the package will not get binary
substitutes.</para></listitem>
<listitem><para>A maintainer must be set; this can be the package
submitter or a community member who agrees to take maintainership of
the package.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the code contains no typos.</para></listitem>
<listitem><para>Ensure that the package source is appropriate.</para>
<itemizedlist>
<listitem><para>Mirror URLs should be used when
available.</para></listitem>
<listitem><para>The most appropriate fetcher function should be used (e.g.
packages from GitHub should use
<literal>fetchFromGitHub</literal>; see the sketch after this list).</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Building the package locally.</para></listitem>
<listitem><para>Running every binary.</para></listitem>
</itemizedlist>
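<para>For instance (a sketch with placeholder values, not a real package), a
source fetched from GitHub typically looks like:</para>
<programlisting>
src = fetchFromGitHub {
  owner = "OWNER";   # placeholder
  repo  = "REPO";    # placeholder
  rev   = "v1.2.3";  # a tag or commit hash
  # placeholder hash; replace with the real one reported by the build
  sha256 = "0000000000000000000000000000000000000000000000000000";
};
</programlisting>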
<example><title>Sample template for a new package review</title>
<screen>
##### Reviewed points
- [ ] package path fits guidelines
- [ ] package name fits guidelines
- [ ] package version fits guidelines
- [ ] package builds on ARCHITECTURE
- [ ] executables tested on ARCHITECTURE
- [ ] `meta.description` is set and fits guidelines
- [ ] `meta.license` fits upstream license
- [ ] `meta.platforms` is set
- [ ] `meta.maintainers` is set
- [ ] build time only dependencies are declared in `nativeBuildInputs`
- [ ] source is fetched using the appropriate function
- [ ] phases are respected
- [ ] patches that are remotely available are fetched with `fetchpatch`
##### Possible improvements
##### Comments
</screen></example>
</section>
<section><title>Module updates</title>
<para>Module updates are submissions that change modules in some way. They often
contain changes to the options or introduce new options.</para>
<para>Reviewing process:</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem><para><literal>8.has: module (update)</literal> and any topic
label that fits the module.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module maintainers are notified.</para>
<itemizedlist>
<listitem><para>Mention-bot notifies GitHub users based on the submitted
changes, but it can happen that it misses some of the module
maintainers.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module tests, if any, are
succeeding.</para></listitem>
<listitem><para>Ensure that the introduced options are correct.</para>
<itemizedlist>
<listitem><para>Type should be appropriate (string related types differ
in their merging capabilities; the <literal>optionSet</literal> and
<literal>string</literal> types are deprecated).</para></listitem>
<listitem><para>Description, default and example should be
provided.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that option changes are backward compatible.</para>
<itemizedlist>
<listitem><para>The <literal>mkRenamedOptionModule</literal> and
<literal>mkAliasOptionModule</literal> functions provide a way to make
option changes backward compatible (see the sketch after this list).</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that removed options are declared with
<literal>mkRemovedOptionModule</literal>.</para></listitem>
<listitem><para>Ensure that changes that are not backward compatible are
mentioned in release notes.</para></listitem>
<listitem><para>Ensure that documentation affected by the change is
updated.</para></listitem>
</itemizedlist>
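<para>For instance (a sketch with hypothetical option paths), a renamed option
can be kept backward compatible by adding something like the following to the
module's <literal>imports</literal>:</para>
<programlisting>
imports = [
  # Hypothetical example: definitions of the old option name are
  # forwarded to the new one.
  (lib.mkRenamedOptionModule [ "services" "foo" "user" ] [ "services" "foo" "runAsUser" ])
];
</programlisting>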
<example><title>Sample template for a module update review</title>
<screen>
##### Reviewed points
- [ ] changes are backward compatible
- [ ] removed options are declared with `mkRemovedOptionModule`
- [ ] changes that are not backward compatible are documented in release notes
- [ ] module tests succeed on ARCHITECTURE
- [ ] options types are appropriate
- [ ] options description is set
- [ ] options example is provided
- [ ] documentation affected by the changes is updated
##### Possible improvements
##### Comments
</screen></example>
</section>
<section><title>New modules</title>
<para>New module submissions introduce a new module to NixOS.</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem><para><literal>8.has: module (new)</literal> and any topic label
that fits the module.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module tests, if any, are
succeeding.</para></listitem>
<listitem><para>Ensure that the introduced options are correct.</para>
<itemizedlist>
<listitem><para>Type should be appropriate (string related types differ
in their merging capabilities; the <literal>optionSet</literal> and
<literal>string</literal> types are deprecated).</para></listitem>
<listitem><para>Description, default and example should be
provided.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module's <literal>meta</literal> field is
present.</para>
<itemizedlist>
<listitem><para>Maintainers should be declared in
<literal>meta.maintainers</literal>.</para></listitem>
<listitem><para>Module documentation should be declared with
<literal>meta.doc</literal>.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module respects other modules'
functionality.</para>
<itemizedlist>
<listitem><para>For example, enabling a module should not open firewall
ports by default.</para></listitem>
</itemizedlist>
</listitem>
</itemizedlist>
<example><title>Sample template for a new module review</title>
<screen>
##### Reviewed points
- [ ] module path fits the guidelines
- [ ] module tests succeed on ARCHITECTURE
- [ ] options have appropriate types
- [ ] options have default
- [ ] options have example
- [ ] options have descriptions
- [ ] No unneeded package is added to environment.systemPackages
- [ ] meta.maintainers is set
- [ ] module documentation is declared in meta.doc
##### Possible improvements
##### Comments
</screen></example>
</section>
<section><title>Other submissions</title>
<para>Other types of submissions require different reviewing steps.</para>
<para>If you consider that you have enough knowledge and experience in a topic and
would like to be a long-term reviewer for related submissions, please contact
the current reviewers for that topic. They will give you information about the
reviewing process.
The main reviewers for a topic can be hard to find as there is no list, but
checking past pull-requests to see who reviewed or git-blaming the code to see
who committed to that topic can give some hints.</para>
<para>Container system, boot system and library changes are some examples of the
pull requests fitting this category.</para>
</section>
<section><title>Merging pull-requests</title>
<para>It is possible for community members that have enough knowledge and
experience on a specific topic to contribute by merging pull requests.</para>
<para>TODO: add the procedure to request merging rights.</para>
<!--
The following paragraph about how to deal with inactive contributors is just a
proposal and should be modified to whatever the community agrees to be the right
policy.
<para>Please note that contributors with commit rights who are inactive for more than
three months will have their commit rights revoked.</para>
-->
<para>In case a contributor definitively leaves the Nix community, they should
create an issue or notify the mailing list with references to the packages and
modules they maintain so that maintainership can be taken over by other
contributors.</para>
</section>
</chapter>

View File

@ -70,7 +70,7 @@ sources, you need to tell <command>nixos-rebuild</command> about them
using the <option>-I</option> flag:
<screen>
$ nixos-rebuild switch -I nixpkgs=<replaceable>/my/sources</replaceable>/nixpkgs
# nixos-rebuild switch -I nixpkgs=<replaceable>/my/sources</replaceable>/nixpkgs
</screen>
</para>

View File

@ -12,16 +12,16 @@ properly:
<screen>
$ nix-build -A config.system.build.nixos-install
$ mount -t tmpfs none /mnt
$ ./result/bin/nixos-install</screen>
# mount -t tmpfs none /mnt
# ./result/bin/nixos-install</screen>
To start a login shell in the new NixOS installation in
<filename>/mnt</filename>:
<screen>
$ ./result/bin/nixos-install --chroot
# ./result/bin/nixos-install --chroot
</screen>
</para>
</chapter>
</chapter>

View File

@ -176,6 +176,8 @@ in {
</example>
<xi:include href="option-declarations.xml" />
<xi:include href="option-types.xml" />
<xi:include href="option-def.xml" />
<xi:include href="meta-attributes.xml" />
</chapter>

View File

@ -10,7 +10,7 @@ contains the current configuration of your machine. Whenever you've
changed something to that file, you should do
<screen>
$ nixos-rebuild switch</screen>
# nixos-rebuild switch</screen>
to build the new configuration, make it the default configuration for
booting, and try to realise the configuration in the running system
@ -23,7 +23,7 @@ either run them from a root shell or by prefixing them with
<para>You can also do
<screen>
$ nixos-rebuild test</screen>
# nixos-rebuild test</screen>
to build the configuration and switch the running system to it, but
without making it the boot default. So if (say) the configuration
@ -33,7 +33,7 @@ configuration.</para>
<para>There is also
<screen>
$ nixos-rebuild boot</screen>
# nixos-rebuild boot</screen>
to build the configuration and make it the boot default, but not
switch to it now (so it will only take effect after the next
@ -44,7 +44,7 @@ of the GRUB 2 boot screen by giving it a different <emphasis>profile
name</emphasis>, e.g.
<screen>
$ nixos-rebuild switch -p test </screen>
# nixos-rebuild switch -p test </screen>
which causes the new configuration (and previous ones created using
<literal>-p test</literal>) to show up in the GRUB submenu “NixOS -

View File

@ -5,7 +5,7 @@
xml:id="sec-uefi-installation">
<title>UEFI Installation</title>
<para>NixOS can also be installed on UEFI systems. The procedure
is by and large the same as a BIOS installation, with the following
changes:
@ -26,7 +26,7 @@ changes:
<literal>vfat</literal> filesystem.</para>
</listitem>
<listitem>
<para>You must set <option>boot.loader.gummiboot.enable</option> to
<para>You must set <option>boot.loader.systemd-boot.enable</option> to
<literal>true</literal>. <command>nixos-generate-config</command>
should do this automatically for new configurations when booted in
UEFI mode.</para>
@ -38,7 +38,7 @@ changes:
</listitem>
<listitem>
<para>You may want to look at the options starting with
<option>boot.loader.efi</option> and <option>boot.loader.gummiboot</option>
<option>boot.loader.efi</option> and <option>boot.loader.systemd-boot</option>
as well.</para>
</listitem>
</itemizedlist>

View File

@ -0,0 +1,89 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-instaling-virtualbox-guest">
<title>Installing in a Virtualbox guest</title>
<para>
Installing NixOS into a Virtualbox guest is convenient for users who want to
try NixOS without installing it on bare metal. If you want to use a pre-made
Virtualbox appliance, it is available at <link
xlink:href="https://nixos.org/nixos/download.html">the downloads page</link>.
If you want to set up a Virtualbox guest manually, follow these instructions:
</para>
<orderedlist>
<listitem><para>Add a New Machine in Virtualbox with OS Type "Linux / Other
Linux"</para></listitem>
<listitem><para>Base Memory Size: 768 MB or higher.</para></listitem>
<listitem><para>New Hard Disk of 8 GB or higher.</para></listitem>
<listitem><para>Mount the CD-ROM with the NixOS ISO (by clicking on
CD/DVD-ROM)</para></listitem>
<listitem><para>Click on Settings / System / Processor and enable
PAE/NX</para></listitem>
<listitem><para>Click on Settings / System / Acceleration and enable
"VT-x/AMD-V" acceleration</para></listitem>
<listitem><para>Save the settings, start the virtual machine, and continue
installation like normal</para></listitem>
</orderedlist>
<para>
There are a few modifications you should make to configuration.nix. Enable
the VirtualBox guest service in the main block:
</para>
<programlisting>
virtualisation.virtualbox.guest.enable = true;
</programlisting>
<para>
Enable booting:
</para>
<programlisting>
boot.loader.grub.device = "/dev/sda";
</programlisting>
<para>
Also remove the fsck that runs at startup. It will always fail to run,
stopping your boot until you press <literal>*</literal>.
</para>
<programlisting>
boot.initrd.checkJournalingFS = false;
</programlisting>
<para>
Shared folders can be given a name and a path in the host system in the
VirtualBox settings (Machine / Settings / Shared Folders, then click on the
"Add" icon). Add the following to the
<literal>/etc/nixos/configuration.nix</literal> to auto-mount them:
</para>
<programlisting>
{ config, pkgs, ...} :
{
...
fileSystems."/virtualboxshare" = {
fsType = "vboxsf";
device = "nameofthesharedfolder";
options = [ "rw" ];
};
}
</programlisting>
<para>
The folder will be available directly under the root directory.
</para>
</section>

View File

@ -22,7 +22,7 @@
(with empty password).</para></listitem>
<listitem><para>If you downloaded the graphical ISO image, you can
run <command>start display-manager</command> to start KDE. If you
run <command>systemctl start display-manager</command> to start KDE. If you
want to continue on the terminal, you can use
<command>loadkeys</command> to switch to your preferred keyboard layout.
(We even provide neo2 via <command>loadkeys de neo</command>!)</para></listitem>
@ -54,7 +54,7 @@
changes. For example:
<screen>
$ mkfs.ext4 -L nixos /dev/sda1</screen>
# mkfs.ext4 -L nixos /dev/sda1</screen>
</para></listitem>
@ -66,10 +66,10 @@ $ mkfs.ext4 -L nixos /dev/sda1</screen>
<listitem><para>For creating LVM volumes, the LVM commands, e.g.,
<screen>
$ pvcreate /dev/sda1 /dev/sdb1
$ vgcreate MyVolGroup /dev/sda1 /dev/sdb1
$ lvcreate --size 2G --name bigdisk MyVolGroup
$ lvcreate --size 1G --name smalldisk MyVolGroup</screen>
# pvcreate /dev/sda1 /dev/sdb1
# vgcreate MyVolGroup /dev/sda1 /dev/sdb1
# lvcreate --size 2G --name bigdisk MyVolGroup
# lvcreate --size 1G --name smalldisk MyVolGroup</screen>
</para></listitem>
@ -84,7 +84,7 @@ $ lvcreate --size 1G --name smalldisk MyVolGroup</screen>
be installed on <filename>/mnt</filename>, e.g.
<screen>
$ mount /dev/disk/by-label/nixos /mnt
# mount /dev/disk/by-label/nixos /mnt
</screen>
</para></listitem>
@ -113,14 +113,14 @@ $ mount /dev/disk/by-label/nixos /mnt
generate an initial configuration file for you:
<screen>
$ nixos-generate-config --root /mnt</screen>
# nixos-generate-config --root /mnt</screen>
You should then edit
<filename>/mnt/etc/nixos/configuration.nix</filename> to suit your
needs:
<screen>
$ nano /mnt/etc/nixos/configuration.nix
# nano /mnt/etc/nixos/configuration.nix
</screen>
If you're using the graphical ISO image, other editors may be
@ -162,7 +162,7 @@ $ nano /mnt/etc/nixos/configuration.nix
<listitem><para>Do the installation:
<screen>
$ nixos-install</screen>
# nixos-install</screen>
Cross fingers. If this fails due to a temporary problem (such as
a network issue while downloading binaries from the NixOS binary
@ -186,7 +186,7 @@ Retype new UNIX password: ***
<listitem><para>If everything went well:
<screen>
$ reboot</screen>
# reboot</screen>
</para></listitem>
@ -235,15 +235,15 @@ drive (here <filename>/dev/sda</filename>). <xref linkend="ex-config"
<example xml:id='ex-install-sequence'><title>Commands for Installing NixOS on <filename>/dev/sda</filename></title>
<screen>
$ fdisk /dev/sda # <lineannotation>(or whatever device you want to install on)</lineannotation>
$ mkfs.ext4 -L nixos /dev/sda1
$ mkswap -L swap /dev/sda2
$ swapon /dev/sda2
$ mount /dev/disk/by-label/nixos /mnt
$ nixos-generate-config --root /mnt
$ nano /mnt/etc/nixos/configuration.nix
$ nixos-install
$ reboot</screen>
# fdisk /dev/sda # <lineannotation>(or whatever device you want to install on)</lineannotation>
# mkfs.ext4 -L nixos /dev/sda1
# mkswap -L swap /dev/sda2
# swapon /dev/sda2
# mount /dev/disk/by-label/nixos /mnt
# nixos-generate-config --root /mnt
# nano /mnt/etc/nixos/configuration.nix
# nixos-install
# reboot</screen>
</example>
<example xml:id='ex-config'><title>NixOS Configuration</title>
@ -271,5 +271,6 @@ $ reboot</screen>
<xi:include href="installing-uefi.xml" />
<xi:include href="installing-usb.xml" />
<xi:include href="installing-pxe.xml" />
<xi:include href="installing-virtualbox-guest.xml" />
</chapter>

View File

@ -60,33 +60,33 @@ the <literal>nixos-14.12</literal> channel. To see which NixOS
channel you're subscribed to, run the following as root:
<screen>
$ nix-channel --list | grep nixos
# nix-channel --list | grep nixos
nixos https://nixos.org/channels/nixos-unstable
</screen>
To switch to a different NixOS channel, do
<screen>
$ nix-channel --add https://nixos.org/channels/<replaceable>channel-name</replaceable> nixos
# nix-channel --add https://nixos.org/channels/<replaceable>channel-name</replaceable> nixos
</screen>
(Be sure to include the <literal>nixos</literal> parameter at the
end.) For instance, to use the NixOS 14.12 stable channel:
<screen>
$ nix-channel --add https://nixos.org/channels/nixos-14.12 nixos
# nix-channel --add https://nixos.org/channels/nixos-14.12 nixos
</screen>
If you have a server, you may want to use the “small” channel instead:
<screen>
$ nix-channel --add https://nixos.org/channels/nixos-14.12-small nixos
# nix-channel --add https://nixos.org/channels/nixos-14.12-small nixos
</screen>
And if you want to live on the bleeding edge:
<screen>
$ nix-channel --add https://nixos.org/channels/nixos-unstable nixos
# nix-channel --add https://nixos.org/channels/nixos-unstable nixos
</screen>
</para>
@ -95,7 +95,7 @@ $ nix-channel --add https://nixos.org/channels/nixos-unstable nixos
channel by running
<screen>
$ nixos-rebuild switch --upgrade
# nixos-rebuild switch --upgrade
</screen>
which is equivalent to the more verbose <literal>nix-channel --update

View File

@ -1,7 +1,7 @@
<refentry xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude">
<refmeta>
<refentrytitle><filename>configuration.nix</filename></refentrytitle>
<manvolnum>5</manvolnum>
@ -34,5 +34,5 @@ therein.</para>
<xi:include href="options-db.xml" />
</refsection>
</refentry>

View File

@ -1,7 +1,7 @@
<refentry xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude">
<refmeta>
<refentrytitle><command>nixos-build-vms</command></refentrytitle>
<manvolnum>8</manvolnum>
@ -42,10 +42,10 @@ points to the generated virtual network.
services.openssh.enable = true;
nixpkgs.system = "i686-linux";
deployment.targetHost = "test1.example.net";
# Other NixOS options
};
test2 = {pkgs, config, ...}:
{
services.openssh.enable = true;
@ -53,7 +53,7 @@ points to the generated virtual network.
environment.systemPackages = [ pkgs.lynx ];
nixpkgs.system = "x86_64-linux";
deployment.targetHost = "test2.example.net";
# Other NixOS options
};
}

View File

@ -113,8 +113,8 @@
<varlistentry>
<term><option>--no-filesystems</option></term>
<listitem>
<para>Omit everything concerning file system information
(which includes swap devices) from the hardware configuration.</para>
<para>Omit everything concerning file systems and swap devices
from the hardware configuration.</para>
</listitem>
</varlistentry>

View File

@ -25,6 +25,19 @@
<arg choice='plain'><option>--root</option></arg>
<replaceable>root</replaceable>
</arg>
<arg>
<arg choice='plain'><option>--closure</option></arg>
<replaceable>closure</replaceable>
</arg>
<arg>
<arg choice='plain'><option>--no-channel-copy</option></arg>
</arg>
<arg>
<arg choice='plain'><option>--no-root-passwd</option></arg>
</arg>
<arg>
<arg choice='plain'><option>--no-bootloader</option></arg>
</arg>
<arg>
<group choice='req'>
<arg choice='plain'><option>--max-jobs</option></arg>
@ -71,12 +84,13 @@ the following steps:
<filename>/mnt/etc/nixos/configuration.nix</filename>.</para></listitem>
<listitem><para>It installs the GRUB boot loader on the device
specified in the option <option>boot.loader.grub.device</option>,
specified in the option <option>boot.loader.grub.device</option>
(unless <option>--no-bootloader</option> is specified),
and generates a GRUB configuration file that boots into the NixOS
configuration just installed.</para></listitem>
<listitem><para>It prompts you for a password for the root
account.</para></listitem>
<listitem><para>It prompts you for a password for the root account
(unless <option>--no-root-passwd</option> is specified).</para></listitem>
</itemizedlist>
@ -103,6 +117,19 @@ it.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><option>--closure</option></term>
<listitem>
<para>If this option is provided, <command>nixos-install</command> will install the specified closure
rather than attempt to build one from <filename>/mnt/etc/nixos/configuration.nix</filename>.</para>
<para>The closure must be an appropriately configured NixOS system, with boot loader and partition
configuration that fits the target host. Such a closure is typically obtained with a command such as
<command>nix-build -I nixos-config=./configuration.nix '&lt;nixos&gt;' -A system --no-out-link</command>
</para>
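<para>For example (an illustrative sketch), the two steps can be combined
like this:</para>
<screen>
# nixos-install --closure $(nix-build -I nixos-config=./configuration.nix '&lt;nixos&gt;' -A system --no-out-link)
</screen>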
</listitem>
</varlistentry>
<varlistentry>
<term><option>-I</option></term>
<listitem>

View File

@ -1,7 +1,7 @@
<refentry xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude">
<refmeta>
<refentrytitle><command>nixos-option</command></refentrytitle>
<manvolnum>8</manvolnum>
@ -64,7 +64,7 @@ $ nixos-option boot.loader.grub.enable
Value:
true
Default:
Default:
true
Description:

View File

@ -29,7 +29,7 @@
</group>
<sbr />
<arg><option>--upgrade</option></arg>
<arg><option>--install-grub</option></arg>
<arg><option>--install-bootloader</option></arg>
<arg><option>--no-build-nix</option></arg>
<arg><option>--fast</option></arg>
<arg><option>--rollback</option></arg>
@ -212,12 +212,11 @@ $ ./result/bin/run-*-vm
</varlistentry>
<varlistentry>
<term><option>--install-grub</option></term>
<term><option>--install-bootloader</option></term>
<listitem>
<para>Causes the GRUB boot loader to be (re)installed on the
device specified by the
<varname>boot.loader.grub.device</varname> configuration
option.</para>
<para>Causes the boot loader to be (re)installed on the
device specified by the relevant configuration options.
</para>
</listitem>
</varlistentry>

View File

@ -0,0 +1,97 @@
<refentry xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude">
<refmeta>
<refentrytitle><command>nixos-version</command></refentrytitle>
<manvolnum>8</manvolnum>
<refmiscinfo class="source">NixOS</refmiscinfo>
</refmeta>
<refnamediv>
<refname><command>nixos-version</command></refname>
<refpurpose>show the NixOS version</refpurpose>
</refnamediv>
<refsynopsisdiv>
<cmdsynopsis>
<command>nixos-version</command>
<arg><option>--hash</option></arg>
<arg><option>--revision</option></arg>
</cmdsynopsis>
</refsynopsisdiv>
<refsection><title>Description</title>
<para>This command shows the version of the currently active NixOS
configuration. For example:
<screen>$ nixos-version
16.03.1011.6317da4 (Emu)
</screen>
The version consists of the following elements:
<variablelist>
<varlistentry>
<term><literal>16.03</literal></term>
<listitem><para>The NixOS release, indicating the year and month
in which it was released (e.g. March 2016).</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>1011</literal></term>
<listitem><para>The number of commits in the Nixpkgs Git
repository between the start of the release branch and the commit
from which this version was built. This ensures that NixOS
versions are monotonically increasing. It is
<literal>git</literal> when the current NixOS configuration was
built from a checkout of the Nixpkgs Git repository rather than
from a NixOS channel.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>6317da4</literal></term>
<listitem><para>The first 7 characters of the commit in the
Nixpkgs Git repository from which this version was
built.</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>Emu</literal></term>
<listitem><para>The code name of the NixOS release. The first
letter of the code name indicates that this is the N'th stable
NixOS release; for example, Emu is the fifth
release.</para></listitem>
</varlistentry>
</variablelist>
</para>
</refsection>
<refsection><title>Options</title>
<para>This command accepts the following options:</para>
<variablelist>
<varlistentry>
<term><option>--hash</option></term>
<term><option>--revision</option></term>
<listitem>
<para>Show the full SHA1 hash of the Git commit from which this
configuration was built, e.g.
<screen>$ nixos-version --hash
6317da40006f6bc2480c6781999c52d88dde2acf
</screen>
</para>
</listitem>
</varlistentry>
</variablelist>
</refsection>
</refentry>

View File

@ -27,5 +27,6 @@
<xi:include href="man-nixos-install.xml" />
<xi:include href="man-nixos-option.xml" />
<xi:include href="man-nixos-rebuild.xml" />
<xi:include href="man-nixos-version.xml" />
</reference>

View File

@ -3,7 +3,7 @@
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="book-nixos-manual">
<info>
<title>NixOS Manual</title>
<subtitle>Version <xi:include href="version" parse="text" /></subtitle>
@ -26,6 +26,9 @@
xlink:href="https://github.com/NixOS/nixpkgs/issues">NixOS GitHub
issue tracker</link>.</para>
<note><para>Commands prefixed with <literal>#</literal> have to be run as
root, either by logging in as the root user or by temporarily switching
to it using <literal>sudo</literal>, for example.</para></note>
</preface>
<xi:include href="installation/installation.xml" />

View File

@ -11,6 +11,7 @@
<xsl:output method='xml' encoding="UTF-8" />
<xsl:param name="revision" />
<xsl:param name="program" />
<xsl:template match="/expr/list">
@ -188,7 +189,7 @@
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="$revision != 'local' and contains(@value, 'nixops') and contains(@value, '/nix/')">
<xsl:when test="$revision != 'local' and $program = 'nixops' and contains(@value, '/nix/')">
<xsl:attribute name="xlink:href">https://github.com/NixOS/nixops/blob/<xsl:value-of select="$revision"/>/nix/<xsl:value-of select="substring-after(@value, '/nix/')"/></xsl:attribute>
</xsl:when>
<xsl:otherwise>

View File

@ -9,6 +9,7 @@
<para>This section lists the release notes for each stable version of NixOS
and current unstable revision.</para>
<xi:include href="rl-1703.xml" />
<xi:include href="rl-1609.xml" />
<xi:include href="rl-1603.xml" />
<xi:include href="rl-1509.xml" />

View File

@ -471,7 +471,7 @@ in
<listitem> <para>
A newly packaged TeX Live 2015 is provided in <literal>pkgs.texlive</literal>,
split into 6500 nix packages. For basic user documentation see
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/release-15.09/pkgs/tools/typesetting/tex/texlive-new/default.nix#L1"
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/release-15.09/pkgs/tools/typesetting/tex/texlive/default.nix#L1"
>the source</link>.
Beware of <link xlink:href="https://github.com/NixOS/nixpkgs/issues/9757"
>an issue</link> when installing a too large package set.

View File

@ -279,7 +279,7 @@ fileSystems."/example" = {
<listitem>
<para><literal>services.xserver.vaapiDrivers</literal> has been removed. Use
<literal>services.hardware.opengl.extraPackages{,32}</literal> instead. You can
<literal>hardware.opengl.extraPackages{,32}</literal> instead. You can
also specify VDPAU drivers there.</para>
</listitem>
@ -385,6 +385,41 @@ services.syncthing = {
the github issue</link>.
</para>
</listitem>
<listitem>
<para>
The <literal>services.xserver.startGnuPGAgent</literal> option has been removed.
GnuPG 2.1.x changed the way the gpg-agent works, and that new approach no
longer requires (or even supports) the "start everything as a child of the
agent" scheme we've implemented in NixOS for older versions.
To configure the gpg-agent for your X session, add the following code to
<filename>~/.bashrc</filename> or some file that's sourced when your shell is started:
<programlisting>
GPG_TTY=$(tty)
export GPG_TTY
</programlisting>
If you want to use gpg-agent for SSH, too, add the following to your session
initialization (e.g. <literal>displayManager.sessionCommands</literal>)
<programlisting>
gpg-connect-agent /bye
unset SSH_AGENT_PID
export SSH_AUTH_SOCK="''${HOME}/.gnupg/S.gpg-agent.ssh"
</programlisting>
and make sure that
<programlisting>
enable-ssh-support
</programlisting>
is included in your <filename>~/.gnupg/gpg-agent.conf</filename>.
You will need to use <command>ssh-add</command> to re-add your ssh keys.
If gpg's automatic transformation of the private keys to the new format fails,
you will need to re-import your private keyring as well:
<programlisting>
gpg --import ~/.gnupg/secring.gpg
</programlisting>
The <command>gpg-agent(1)</command> man page has more details about this subject,
e.g. in the "EXAMPLES" section.
</para>
</listitem>
</itemizedlist>

View File

@ -4,7 +4,7 @@
version="5.0"
xml:id="sec-release-16.09">
<title>Release 16.09 (“Flounder”, 2016/09/??)</title>
<title>Release 16.09 (“Flounder”, 2016/09/30)</title>
<para>In addition to numerous new and upgraded packages, this release
has the following highlights: </para>
@ -12,36 +12,228 @@ has the following highlights: </para>
<itemizedlist>
<listitem>
<para>PXE "netboot" media has landed in <link xlink:href="https://github.com/NixOS/nixpkgs/pull/14740" />.
See <xref linkend="sec-booting-from-pxe" /> for documentation.</para>
<para>Many NixOS configurations and Nix packages now use
significantly less disk space, thanks to the <link
xlink:href="https://github.com/NixOS/nixpkgs/issues/7117">extensive
work on closure size reduction</link>. For example, the closure
size of a minimal NixOS container went down from ~424 MiB in 16.03
to ~212 MiB in 16.09, while the closure size of Firefox went from
~651 MiB to ~259 MiB.</para>
</listitem>
<listitem>
<para>To improve security, packages are now <link
xlink:href="https://github.com/NixOS/nixpkgs/pull/12895">built
using various hardening features</link>. See the Nixpkgs manual
for more information.</para>
</listitem>
<listitem>
<para>Support for PXE netboot. See <xref
linkend="sec-booting-from-pxe" /> for documentation.</para>
</listitem>
<listitem>
<para>X.org server 1.18. If you use the
<literal>ati_unfree</literal> driver, 1.17 is still used due to an
ABI incompatibility.</para>
</listitem>
<listitem>
<para>This release is based on Glibc 2.24, GCC 5.4.0 and systemd
231. The default Linux kernel remains 4.4.</para>
</listitem>
</itemizedlist>
<para>The following new services were added since the last release:</para>
<itemizedlist>
<listitem><para><literal>(this will get automatically generated at release time)</literal></para></listitem>
</itemizedlist>
<itemizedlist>
<listitem><para><literal>(this will get automatically generated at release time)</literal></para></listitem>
</itemizedlist>
<para>When upgrading from a previous release, please be aware of the
following incompatible changes:</para>
<itemizedlist>
<listitem>
<para>todo</para>
<para>A large number of packages have been converted to use the multiple-outputs
feature of Nix to greatly reduce the amount of required disk space, as
mentioned above. This may require changes to custom packages to make them
build again; see the relevant chapter in the Nixpkgs manual for more
information. (An additional caveat for packagers: some packaging conventions
related to multiple-output packages
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/14766">were changed</link>
late in the release cycle (August 2016) and differ from the initial
introduction of multiple outputs.)
</para>
</listitem>
<listitem>
<para>Previous versions of Nixpkgs had support for all versions of the LTS
Haskell package set. That support has been dropped. The previously provided
<literal>haskell.packages.lts-x_y</literal> package sets still exist in
name to avoid breaking user code, but these package sets don't actually
contain the versions mandated by the corresponding LTS release. Instead,
our package set is loosely based on the latest available LTS release, i.e.
LTS 7.x at the time of this writing. New releases of NixOS and Nixpkgs will
drop those old names entirely. <link
xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2016-June/020585.html">The
motivation for this change</link> has been discussed at length on the
<literal>nix-dev</literal> mailing list and in <link
xlink:href="https://github.com/NixOS/nixpkgs/issues/14897">Github issue
#14897</link>. Development strategies for Haskell hackers who want to rely
on Nix and NixOS have been described in <link
xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2016-June/020642.html">another
nix-dev article</link>.</para>
</listitem>
<listitem>
<para>Shell aliases for systemd sub-commands
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/15598">were dropped</link>:
<command>start</command>, <command>stop</command>,
<command>restart</command>, <command>status</command>.</para>
</listitem>
<listitem>
<para>Redis now binds to 127.0.0.1 only instead of listening to all network interfaces. This is the default
behavior of Redis 3.2.</para>
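    <para>If your setup relied on Redis accepting remote connections, you can
    opt back in explicitly. A minimal sketch, assuming your version of the
    module exposes the <literal>bind</literal> option:
<programlisting>
services.redis.enable = true;
# Assumption: services.redis.bind takes the listen address; setting it to
# 0.0.0.0 restores the old listen-on-all-interfaces behaviour.
services.redis.bind = "0.0.0.0";
</programlisting>
    </para>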
</listitem>
<listitem>
<para>
<literal>/var/empty</literal> is now immutable. The activation script runs <command>chattr +i</command>
to forbid any modifications inside the folder. See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/18365">
the pull request</link> for the bugs that motivated this change.
</para>
</listitem>
<listitem>
<para>Gitlab's maintenance script
<command>gitlab-runner</command> was removed and split into the
clearer <command>gitlab-run</command> and
<command>gitlab-rake</command> scripts, because
<command>gitlab-runner</command> is a component of Gitlab
CI.</para>
</listitem>
<listitem>
<para><literal>services.xserver.libinput.accelProfile</literal> default
changed from <literal>flat</literal> to <literal>adaptive</literal>,
as per <link xlink:href="https://wayland.freedesktop.org/libinput/doc/latest/group__config.html#gad63796972347f318b180e322e35cee79">
official documentation</link>.</para>
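    <para>For example, to keep the previous behaviour, set the old value
    explicitly in your configuration:
<programlisting>
# Restore the pre-16.09 default acceleration profile.
services.xserver.libinput.accelProfile = "flat";
</programlisting>
    </para>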
</listitem>
<listitem>
<para><literal>fonts.fontconfig.ultimate.rendering</literal> was removed
because our presets were obsolete for some time. New presets are hardcoded
into FreeType; you can select a preset via <literal>fonts.fontconfig.ultimate.preset</literal>.
You can customize those presets via ordinary environment variables, using
<literal>environment.variables</literal>.</para>
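    <para>For example (the preset name below is only a placeholder; check the
    <literal>fonts.fontconfig.ultimate</literal> module for the values it
    actually accepts):
<programlisting>
fonts.fontconfig.ultimate.preset = "default"; # placeholder value
</programlisting>
    </para>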
</listitem>
<listitem>
<para>The <literal>audit</literal> service is no longer enabled by default.
Use <literal>security.audit.enable = true</literal> to explicitly enable it.</para>
</listitem>
<listitem>
<para>
<literal>pkgs.linuxPackages.virtualbox</literal> now contains only the
kernel modules instead of the VirtualBox user space binaries.
If you want to reference the user space binaries, you have to use the new
<literal>pkgs.virtualbox</literal> instead.
</para>
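    <para>A minimal sketch of referencing the user space package from a
    configuration (whether you want it in
    <literal>environment.systemPackages</literal> depends on your setup):
<programlisting>
# The kernel modules stay in linuxPackages; the user space tools now live here.
environment.systemPackages = [ pkgs.virtualbox ];
</programlisting>
    </para>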
</listitem>
<listitem>
<para><literal>goPackages</literal> was replaced with separate Go
applications in the appropriate <literal>nixpkgs</literal>
categories. Each Go package now uses its own dependency set. A new
<literal>go2nix</literal> tool was also introduced to generate a
Go package definition from its Go source automatically.</para>
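    <para>A hypothetical package in the new per-application style might look
    roughly like the sketch below; the project name, import path and hash are
    placeholders, and the exact <literal>buildGoPackage</literal> arguments
    may differ:
<programlisting>
{ buildGoPackage, fetchFromGitHub }:

buildGoPackage rec {
  name = "hello-go-${version}";
  version = "0.1.0";                             # placeholder
  goPackagePath = "github.com/example/hello-go"; # hypothetical import path

  src = fetchFromGitHub {
    owner = "example";                           # hypothetical repository
    repo = "hello-go";
    rev = "v${version}";
    sha256 = "0000000000000000000000000000000000000000000000000000"; # placeholder
  };
}
</programlisting>
    </para>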
</listitem>
<listitem>
<para><literal>services.mongodb.extraConfig</literal> configuration format
was changed to YAML.</para>
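    <para>For example, extra settings are now written as YAML inside the Nix
    string (the option shown below is just an illustration):
<programlisting>
services.mongodb.extraConfig = ''
  # YAML, as consumed by mongod
  storage:
    journal:
      enabled: false
'';
</programlisting>
    </para>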
</listitem>
<listitem>
<para>
PHP has been upgraded to 7.0.
</para>
</listitem>
</itemizedlist>
<para>Other notable improvements:</para>
<itemizedlist>
<listitem><para>Revamped grsecurity/PaX support. There is now only a single
general-purpose distribution kernel and the configuration interface has been
streamlined. Desktop users should be able to simply set
<programlisting>security.grsecurity.enable = true;</programlisting> to get
a reasonably secure system without having to sacrifice too much
functionality. See <xref linkend="sec-grsecurity" /> for documentation.
</para></listitem>
<listitem><para>Special filesystems, like <literal>/proc</literal>,
<literal>/run</literal> and others, now have the same mount options
as recommended by systemd and are unified across different places in
NixOS. Mount options are updated during <command>nixos-rebuild
switch</command> if possible. One benefit from this is improved
security — most such filesystems are now mounted with
<literal>noexec</literal>, <literal>nodev</literal> and/or
<literal>nosuid</literal> options.</para></listitem>
<listitem><para>The reverse path filter was interfering with DHCPv4 server
operation in the past. An exception for DHCPv4 was added, along with a new
option to log packets dropped by the reverse path filter
(<literal>networking.firewall.logReversePathDrops</literal>), to make
debugging easier.</para></listitem>
<listitem><para>Containers configuration within
<literal>containers.&lt;name&gt;.config</literal> is <link
xlink:href="https://github.com/NixOS/nixpkgs/pull/17365">now
properly typed and checked</link>. In particular, partial
configurations are merged correctly; see the example after this list.</para></listitem>
<listitem>
<para>todo</para>
<para>The directory containing setuid wrapper programs,
<filename>/var/setuid-wrappers</filename>, <link
xlink:href="https://github.com/NixOS/nixpkgs/pull/18124">is now
updated atomically to prevent failures if the switch to a new
configuration is interrupted.</link></para>
</listitem>
<listitem>
<para><literal>services.xserver.startGnuPGAgent</literal>
has been removed due to the GnuPG 2.1.x bump. See <link
xlink:href="https://github.com/NixOS/nixpkgs/commit/5391882ebd781149e213e8817fba6ac3c503740c">
how to achieve similar behavior</link>. You might need to
<literal>pkill gpg-agent</literal> after the upgrade
to prevent a stale agent from getting in the way.
</para>
</listitem>
<listitem><para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/e561edc322d275c3687fec431935095cfc717147">
Declarative users could share the same uid due to a bug in
the script's conflict-resolution handling; this has been fixed.
</link>
</para></listitem>
<listitem><para>
Gummiboot has been replaced by systemd-boot.
</para></listitem>
<listitem><para>
A Hydra package and a NixOS module were added for convenience.
</para></listitem>
</itemizedlist>
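  <para>As an illustration of the typed container configuration mentioned
  above, a declarative container is written as an ordinary NixOS module
  fragment (the services used here are only an example):
<programlisting>
containers.webserver = {
  config = { config, pkgs, ... }: {
    services.httpd.enable = true;
    services.httpd.adminAddr = "root@example.org";
  };
};
</programlisting>
  </para>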

View File

@ -0,0 +1,58 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-17.03">
<title>Release 17.03 (“XXX”, 2017/03/??)</title>
<para>In addition to numerous new and upgraded packages, this release
has the following highlights: </para>
<itemizedlist>
<listitem>
<para></para>
</listitem>
</itemizedlist>
<para>The following new services were added since the last release:</para>
<itemizedlist>
<listitem>
<para></para>
</listitem>
</itemizedlist>
<para>When upgrading from a previous release, please be aware of the
following incompatible changes:</para>
<itemizedlist>
<listitem>
<para>
The <literal>gnome</literal> alias has been removed along with
<literal>gtk</literal>, <literal>gtkmm</literal>, and several others.
You now need to use versioned attributes, such as <literal>gnome3</literal>.
</para>
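    <para>For example, where a configuration previously used the unversioned
    aliases, it now needs the versioned attributes (the package names below
    are only illustrative):
<programlisting>
# before: environment.systemPackages = [ pkgs.gnome.gedit pkgs.gtk ];
environment.systemPackages = [ pkgs.gnome3.gedit pkgs.gtk3 ];
</programlisting>
    </para>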
</listitem>
<listitem>
<para>
The attribute name of the Radicale daemon has been changed from
<literal>pythonPackages.radicale</literal> to
<literal>radicale</literal>.
</para>
</listitem>
</itemizedlist>
<para>Other notable improvements:</para>
<itemizedlist>
<listitem>
<para></para>
</listitem>
</itemizedlist>
</section>

View File

@ -12,6 +12,9 @@
# directly.
partitioned ? true
# Whether to invoke switch-to-configuration boot during image creation
, installBootLoader ? true
, # The root file system type.
fsType ? "ext4"
@ -64,40 +67,24 @@ pkgs.vmTools.runInLinuxVM (
mkdir /mnt
mount $rootDisk /mnt
# The initrd expects these directories to exist.
mkdir /mnt/dev /mnt/proc /mnt/sys
mount -o bind /proc /mnt/proc
mount -o bind /dev /mnt/dev
mount -o bind /sys /mnt/sys
# Copy all paths in the closure to the filesystem.
storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
mkdir -p /mnt/nix/store
echo "copying everything (will take a while)..."
set -f
cp -prd $storePaths /mnt/nix/store/
# Register the paths in the Nix database.
printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
chroot /mnt ${config.nix.package.out}/bin/nix-store --load-db --option build-users-group ""
${config.nix.package.out}/bin/nix-store --load-db --option build-users-group ""
# Add missing size/hash fields to the database. FIXME:
# exportReferencesGraph should provide these directly.
chroot /mnt ${config.nix.package.out}/bin/nix-store --verify --check-contents
${config.nix.package.out}/bin/nix-store --verify --check-contents --option build-users-group ""
# Create the system profile to allow nixos-rebuild to work.
chroot /mnt ${config.nix.package.out}/bin/nix-env --option build-users-group "" \
-p /nix/var/nix/profiles/system --set ${config.system.build.toplevel}
# In case the bootloader tries to write to /dev/sda…
ln -s vda /dev/xvda
ln -s vda /dev/sda
# `nixos-rebuild' requires an /etc/NIXOS.
mkdir -p /mnt/etc
touch /mnt/etc/NIXOS
# `switch-to-configuration' requires a /bin/sh
mkdir -p /mnt/bin
ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
# Install the closure onto the image
USER=root ${config.system.build.nixos-install}/bin/nixos-install \
--closure ${config.system.build.toplevel} \
--no-channel-copy \
--no-root-passwd \
${optionalString (!installBootLoader) "--no-bootloader"}
# Install a configuration.nix.
mkdir -p /mnt/etc/nixos
@ -105,12 +92,9 @@ pkgs.vmTools.runInLinuxVM (
cp ${configFile} /mnt/etc/nixos/configuration.nix
''}
# Generate the GRUB menu.
ln -s vda /dev/xvda
ln -s vda /dev/sda
chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
# Remove /etc/machine-id so that each machine cloning this image will get its own id
rm -f /mnt/etc/machine-id
umount /mnt/proc /mnt/dev /mnt/sys
umount /mnt
# Do a fsck to make sure resize2fs works.

View File

@ -3,6 +3,7 @@ package Logger;
use strict;
use Thread::Queue;
use XML::Writer;
use Encode qw(decode encode);
sub new {
my ($class) = @_;
@ -56,7 +57,8 @@ sub nest {
sub sanitise {
my ($s) = @_;
$s =~ s/[[:cntrl:]\xff]//g;
return $s;
$s = decode('UTF-8', $s, Encode::FB_DEFAULT);
return encode('UTF-8', $s, Encode::FB_CROAK);
}
sub log {

View File

@ -382,9 +382,17 @@ sub waitForUnit {
my $state = $info->{ActiveState};
die "unit $unit reached state $state\n" if $state eq "failed";
if ($state eq "inactive") {
# If there are no pending jobs, then assume this unit
# will never reach active state.
my ($status, $jobs) = $self->execute("systemctl list-jobs --full 2>&1");
die "unit $unit is inactive and there are no pending jobs\n"
if $jobs =~ /No jobs/; # FIXME: fragile
if ($jobs =~ /No jobs/) { # FIXME: fragile
# Handle the case where the unit may have started
# between the previous getUnitInfo() and
# list-jobs.
my $info2 = $self->getUnitInfo($unit);
die "unit $unit is inactive and there are no pending jobs\n"
if $info2->{ActiveState} eq $state;
}
}
return 1 if $state eq "active";
};

View File

@ -29,7 +29,7 @@ rec {
cp ${./test-driver/Logger.pm} $libDir/Logger.pm
wrapProgram $out/bin/nixos-test-driver \
--prefix PATH : "${qemu_kvm}/bin:${vde2}/bin:${netpbm}/bin:${coreutils}/bin" \
--prefix PATH : "${lib.makeBinPath [ qemu_kvm vde2 netpbm coreutils ]}" \
--prefix PERL5LIB : "${with perlPackages; lib.makePerlPath [ TermReadLineGnu XMLWriter IOTty FileSlurp ]}:$out/lib/perl5/site_perl"
'';
};
@ -113,14 +113,14 @@ rec {
--add-flags "$vms" \
${lib.optionalString enableOCR "--prefix PATH : '${ocrProg}/bin'"} \
--run "testScript=\"\$(cat $out/test-script)\"" \
--set testScript '"$testScript"' \
--set VLANS '"${toString vlans}"'
--set testScript '$testScript' \
--set VLANS '${toString vlans}'
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
wrapProgram $out/bin/nixos-run-vms \
--add-flags "$vms" \
${lib.optionalString enableOCR "--prefix PATH : '${ocrProg}/bin'"} \
--set tests '"startAll; joinAll;"' \
--set VLANS '"${toString vlans}"' \
--set tests 'startAll; joinAll;' \
--set VLANS '${toString vlans}' \
${lib.optionalString (builtins.length vms == 1) "--set USE_SERIAL 1"}
''; # "
@ -157,9 +157,7 @@ rec {
${coreutils}/bin/mkdir -p $TMPDIR
cd $TMPDIR
$origBuilder $origArgs
exit $?
exec $origBuilder $origArgs
'';
testScript = ''
@ -172,9 +170,22 @@ rec {
'';
vmRunCommand = writeText "vm-run" ''
xchg=vm-state-client/xchg
${coreutils}/bin/mkdir $out
${coreutils}/bin/mkdir -p vm-state-client/xchg
export > vm-state-client/xchg/saved-env
${coreutils}/bin/mkdir -p $xchg
for i in $passAsFile; do
i2=''${i}Path
_basename=$(${coreutils}/bin/basename ''${!i2})
${coreutils}/bin/cp ''${!i2} $xchg/$_basename
eval $i2=/tmp/xchg/$_basename
${coreutils}/bin/ls -la $xchg
done
unset i i2 _basename
export | ${gnugrep}/bin/grep -v '^xchg=' > $xchg/saved-env
unset xchg
export tests='${testScript}'
${testDriver}/bin/nixos-test-driver ${vm.config.system.build.vm}/bin/run-*-vm
''; # */

View File

@ -2,10 +2,27 @@ pkgs: with pkgs.lib;
rec {
# Check whether fileSystem is needed for boot
fsNeededForBoot = fs: fs.neededForBoot
|| elem fs.mountPoint [ "/" "/nix" "/nix/store" "/var" "/var/log" "/var/lib" "/etc" ];
# Check whether `b` depends on `a` as a fileSystem
# FIXME: it's incorrect to simply use hasPrefix here: "/dev/a" is not a parent of "/dev/ab"
fsBefore = a: b: ((any (x: elem x [ "bind" "move" ]) b.options) && (a.mountPoint == b.device))
|| (hasPrefix a.mountPoint b.mountPoint);
# Escape a path according to the systemd rules, e.g. /dev/xyzzy
# becomes dev-xyzzy. FIXME: slow.
escapeSystemdPath = s:
replaceChars ["/" "-" " "] ["-" "\\x2d" "\\x20"]
(if hasPrefix "/" s then substring 1 (stringLength s) s else s);
# Returns a system path for a given shell package
toShellPath = shell:
if types.shellPackage.check shell then
"/run/current-system/sw${shell.shellPath}"
else if types.package.check shell then
throw "${shell} is not a shell package"
else
shell;
}
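# Illustration only (not part of the file): assuming the set above is loaded
# as `utils`, the helpers behave roughly like this in nix repl:
#
#   utils.escapeSystemdPath "/dev/disk/by-label/root"
#     => "dev-disk-by\x2dlabel-root"
#
#   utils.fsBefore { mountPoint = "/"; } { mountPoint = "/home"; options = []; device = "/dev/sda2"; }
#     => true   # "/" has to be mounted before "/home"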

Some files were not shown because too many files have changed in this diff.