Merge remote-tracking branch 'upstream/master' into haskell-no-rec

This commit is contained in:
John Ericson 2018-09-24 20:09:25 -04:00
commit 4adf621798
1589 changed files with 83711 additions and 36849 deletions


@ -1 +1 @@
18.09
19.03


@ -325,7 +325,7 @@
};
};
}
</screen>
<para>
To install it into our environment, you can just run <literal>nix-env -iA
@ -347,7 +347,7 @@
};
};
}
</screen>
<para>
<literal>pathsToLink</literal> tells Nixpkgs to only link the paths listed
@ -383,7 +383,7 @@
};
};
}
</screen>
<para>
This provides us with some useful documentation for using our packages.
@ -395,15 +395,15 @@
{
packageOverrides = pkgs: with pkgs; rec {
myProfile = writeText "my-profile" ''
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
'';
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
(runCommand "profile" {} ''
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
'')
aspell
bc
@ -421,7 +421,7 @@ cp ${myProfile} $out/etc/profile.d/my-profile.sh
};
};
}
</screen>
<para>
For this to work fully, you must also have this script sourced when you are
@ -438,7 +438,7 @@ if [ -d $HOME/.nix-profile/etc/profile.d ]; then
fi
done
fi
</screen>
<para>
Now just run <literal>source $HOME/.profile</literal> and you can starting
@ -459,16 +459,16 @@ fi
{
packageOverrides = pkgs: with pkgs; rec {
myProfile = writeText "my-profile" ''
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
export INFOPATH=$HOME/.nix-profile/share/info:/nix/var/nix/profiles/default/share/info:/usr/share/info
'';
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
(runCommand "profile" {} ''
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
'')
aspell
bc
@ -485,17 +485,17 @@ cp ${myProfile} $out/etc/profile.d/my-profile.sh
pathsToLink = [ "/share/man" "/share/doc" "/share/info" "/bin" "/etc" ];
extraOutputsToInstall = [ "man" "doc" "info" ];
postBuild = ''
if [ -x $out/bin/install-info -a -w $out/share/info ]; then
shopt -s nullglob
for i in $out/share/info/*.info $out/share/info/*.info.gz; do
$out/bin/install-info $i $out/share/info/dir
done
fi
'';
};
};
}
</screen>
<para>
<literal>postBuild</literal> tells Nixpkgs to run a command after building


@ -1,7 +1,7 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="chap-functions">
<title>Functions reference</title>
<para>
The nixpkgs repository has several utility functions to manipulate Nix
@ -31,12 +31,16 @@
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<programlisting>import pkgs.path { overlays = [ (self: super: {
foo = super.foo.override { barSupport = true ; };
})]};</programlisting>
<programlisting>mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
}</programlisting>
<programlisting>
import pkgs.path { overlays = [ (self: super: {
foo = super.foo.override { barSupport = true ; };
})]};
</programlisting>
<programlisting>
mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
}
</programlisting>
</para>
<para>
@ -61,9 +65,11 @@
<para>
Example usage:
<programlisting>helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});</programlisting>
<programlisting>
helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});
</programlisting>
</para>
<para>
@ -134,14 +140,16 @@
<para>
Example usage:
<programlisting>mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});</programlisting>
<programlisting>
mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});
</programlisting>
</para>
<para>
@ -181,8 +189,10 @@
<para>
Example usage:
<programlisting>f = { a, b }: { result = a+b; }
c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
<programlisting>
f = { a, b }: { result = a+b; };
c = lib.makeOverridable f { a = 1; b = 2; };
</programlisting>
</para>
<para>
@ -482,29 +492,29 @@ merge:"diff3"
<example xml:id='ex-dockerTools-buildImage'>
<title>Docker build</title>
<programlisting>
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${stdenv.shell}
mkdir -p /data
'';
config = { <co xml:id='ex-dockerTools-buildImage-8' />
Cmd = [ "/bin/redis-server" ];
WorkingDir = "/data";
Volumes = {
"/data" = {};
};
}; };
} };
</programlisting> }
</programlisting>
</example>
<para>
@ -628,6 +638,48 @@ merge:"diff3"
<literal>pkgs.cacert</literal> to <varname>contents</varname>.
</para>
</note>
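As a hedged illustration only (assuming the dockerTools API shown above, with an image name and command invented for the example), a minimal image that bundles <literal>pkgs.cacert</literal> next to curl so TLS connections can be verified might look like:
```
pkgs.dockerTools.buildImage {
  name = "curl-with-certs";  # illustrative name
  tag = "latest";
  # cacert provides the CA bundle under /etc/ssl so HTTPS endpoints verify.
  contents = [ pkgs.curl pkgs.cacert ];
  config.Cmd = [ "/bin/curl" "--verbose" "https://nixos.org" ];
}
```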
<example xml:id="example-pkgs-dockerTools-buildImage-creation-date">
<title>Impurely Defining a Docker Layer's Creation Date</title>
<para>
By default <function>buildImage</function> will use a static
date of one second past the UNIX Epoch. This allows
<function>buildImage</function> to produce binary reproducible
images. When listing images with <command>docker list
images</command>, the newly created images will be listed like
this:
</para>
<screen><![CDATA[
$ docker image list
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest 08c791c7846e 48 years ago 25.2MB
]]></screen>
<para>
You can break binary reproducibility but have a sorted,
meaningful <literal>CREATED</literal> column by setting
<literal>created</literal> to <literal>now</literal>.
</para>
<programlisting><![CDATA[
pkgs.dockerTools.buildImage {
name = "hello";
tag = "latest";
created = "now";
contents = pkgs.hello;
config.Cmd = [ "/bin/hello" ];
}
]]></programlisting>
<para>
and now the Docker CLI will display a reasonable date and
sort the images as expected:
<screen><![CDATA[
$ docker image list
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest de2bf4786de6 About a minute ago 25.2MB
]]></screen>
however, the produced images will not be binary reproducible.
</para>
</example>
</section>
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry">
@ -647,15 +699,15 @@ merge:"diff3"
<example xml:id='ex-dockerTools-pullImage'>
<title>Docker pull</title>
<programlisting>
pullImage {
imageName = "nixos/nix"; <co xml:id='ex-dockerTools-pullImage-1' />
imageDigest = "sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b"; <co xml:id='ex-dockerTools-pullImage-2' />
finalImageTag = "1.11"; <co xml:id='ex-dockerTools-pullImage-3' />
sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8"; <co xml:id='ex-dockerTools-pullImage-4' />
os = "linux"; <co xml:id='ex-dockerTools-pullImage-5' />
arch = "x86_64"; <co xml:id='ex-dockerTools-pullImage-6' />
}
</programlisting>
</example>
<calloutlist>
@ -677,9 +729,9 @@ merge:"diff3"
exactly which image you want. By default it will match the OS and
architecture of the host the command is run on.
<programlisting>
$ nix-shell --packages skopeo jq --command "skopeo --override-os linux --override-arch x86_64 inspect docker://docker.io/nixos/nix:1.11 | jq -r '.Digest'"
sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b
</programlisting>
This argument is required.
</para>
</callout>
@ -737,13 +789,13 @@ merge:"diff3"
<example xml:id='ex-dockerTools-exportImage'>
<title>Docker export</title>
<programlisting>
exportImage {
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
name = someLayeredImage.name;
}
</programlisting>
</example>
@ -774,19 +826,19 @@ merge:"diff3"
<example xml:id='ex-dockerTools-shadowSetup'>
<title>Shadow base files</title>
<programlisting>
buildImage {
name = "shadow-basic";
runAsRoot = ''
#!${stdenv.shell}
${shadowSetup}
groupadd -r redis
useradd -r -g redis redis
mkdir /data
chown redis:redis /data
'';
}
</programlisting>
</example>
<para>


@ -64,9 +64,6 @@ When the `Cargo.lock`, provided by upstream, is not in sync with the
added in `cargoPatches` will also be prepended to the patches in `patches` at
build-time.
To install crates with nix there is also an experimental project called
[nixcrates](https://github.com/fractalide/nixcrates).
## Compiling Rust crates using Nix instead of Cargo
### Simple operation


@ -5,11 +5,17 @@ date: 2016-06-25
---
# User's Guide to Vim Plugins/Addons/Bundles/Scripts in Nixpkgs
You'll get a vim(-your-suffix) in PATH also loading the plugins you want.
Both Neovim and Vim can be configured to include your favorite plugins
and additional libraries.
Loading can be deferred; see examples.
Vim packages, VAM (=vim-addon-manager) and Pathogen are supported to load
packages.
At the moment we support three different methods for managing plugins:
- Vim packages (*recommended*)
- VAM (=vim-addon-manager)
- Pathogen
- vim-plug
## Custom configuration
@ -25,7 +31,19 @@ vim_configurable.customize {
}
```
## Vim packages
For Neovim the `configure` argument can be overridden to achieve the same:
```
neovim.override {
configure = {
customRC = ''
# here your custom configuration goes!
'';
};
}
```
## Managing plugins with Vim packages
To store your plugins in Vim packages the following example can be used:
@ -38,13 +56,79 @@ vim_configurable.customize {
opt = [ phpCompletion elm-vim ];
# To automatically load a plugin when opening a filetype, add vimrc lines like:
# autocmd FileType php :packadd phpCompletion
}
};
};
}
```
## VAM
### dependencies by Vim plugins
For Neovim the syntax is:
```
neovim.override {
configure = {
customRC = ''
# here your custom configuration goes!
'';
packages.myVimPackage = with pkgs.vimPlugins; {
# see examples below how to use custom packages
start = [ ];
opt = [ ];
};
};
}
```
The resulting package can be added to `packageOverrides` in `~/.nixpkgs/config.nix` to make it installable:
```
{
packageOverrides = pkgs: with pkgs; {
myVim = vim_configurable.customize {
name = "vim-with-plugins";
# add here code from the example section
};
myNeovim = neovim.override {
configure = {
# add here code from the example section
};
};
};
}
```
After that you can install your special grafted `myVim` or `myNeovim` packages.
## Managing plugins with vim-plug
To use [vim-plug](https://github.com/junegunn/vim-plug) to manage your Vim
plugins the following example can be used:
```
vim_configurable.customize {
vimrcConfig.packages.myVimPackage = with pkgs.vimPlugins; {
# loaded on launch
plug.plugins = [ youcompleteme fugitive phpCompletion elm-vim ];
};
}
```
For Neovim the syntax is:
```
neovim.override {
configure = {
customRC = ''
# here your custom configuration goes!
'';
plug.plugins = with pkgs.vimPlugins; [
vim-go
];
};
}
```
## Managing plugins with VAM
### Handling dependencies of Vim plugins
VAM introduced .json files supporting dependencies without versioning
assuming that "using latest version" is ok most of the time.
@ -125,6 +209,18 @@ Sample output2:
]
## Adding new plugins to nixpkgs
In `pkgs/misc/vim-plugins/vim-plugin-names` we store the plugin names
for all vim plugins we automatically generate plugins for.
The format of this file is `github username/github repository`:
For example https://github.com/scrooloose/nerdtree becomes `scrooloose/nerdtree`.
After adding your plugin to this file run the `./update.py` script in the same folder.
This will update a file called `generated.nix` and make your plugin accessible in the
`vimPlugins` attribute set (`vimPlugins.nerdtree` in our example).
If additional steps to the build process of the plugin are required, add an
override to the `pkgs/misc/vim-plugins/default.nix` in the same directory.
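For illustration only (assuming the generated `nerdtree` attribute from the example above), the new plugin can then be referenced like any other `vimPlugins` entry:

```
vim_configurable.customize {
  name = "vim-with-nerdtree";
  vimrcConfig.packages.myVimPackage = with pkgs.vimPlugins; {
    # loaded on launch
    start = [ nerdtree ];
  };
}
```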
## Important repositories
- [vim-pi](https://bitbucket.org/vimcommunity/vim-pi) is a plugin repository


@ -671,6 +671,8 @@ overrides = self: super: rec {
plugins = with availablePlugins; [ python perl ];
}
}</programlisting>
If the <literal>configure</literal> function returns an attrset without the <literal>plugins</literal>
attribute, <literal>availablePlugins</literal> will be used automatically.
</para>
<para>
@ -704,6 +706,55 @@
}; }
</programlisting>
</para>
<para>
WeeChat allows setting defaults on startup using the <literal>--run-command</literal> option.
The <literal>configure</literal> method can be used to pass commands to the program:
<programlisting>weechat.override {
configure = { availablePlugins, ... }: {
init = ''
/set foo bar
/server add freenode chat.freenode.org
'';
};
}</programlisting>
Further values can be added to the list of commands when running
<literal>weechat --run-command "your-commands"</literal>.
</para>
<para>
Additionally it's possible to specify scripts to be loaded when starting <literal>weechat</literal>.
These will be loaded before the commands from <literal>init</literal>:
<programlisting>weechat.override {
configure = { availablePlugins, ... }: {
scripts = with pkgs.weechatScripts; [
weechat-xmpp weechat-matrix-bridge wee-slack
];
init = ''
/set plugins.var.python.jabber.key "val"
'';
};
}</programlisting>
</para>
<para>
In <literal>nixpkgs</literal> there's a subpackage which contains derivations for
WeeChat scripts. Such derivations expect a <literal>passthru.scripts</literal> attribute
which contains a list of all scripts inside the store path. Furthermore all scripts
have to live in <literal>$out/share</literal>. An exemplary derivation looks like this:
<programlisting>{ stdenv, fetchurl }:
stdenv.mkDerivation {
name = "exemplary-weechat-script";
src = fetchurl {
url = "https://scripts.tld/your-scripts.tar.gz";
sha256 = "...";
};
passthru.scripts = [ "foo.py" "bar.lua" ];
installPhase = ''
mkdir -p $out/share
cp foo.py $out/share
cp bar.lua $out/share
'';
}</programlisting>
</para>
</section>
<section xml:id="sec-citrix">
<title>Citrix Receiver</title>
@ -763,4 +814,64 @@ citrix_receiver.override {
</para>
</section>
</section>
<section xml:id="sec-ibus-typing-booster">
<title>ibus-engines.typing-booster</title>
<para>This package is an ibus-based completion method to speed up typing.</para>
<section xml:id="sec-ibus-typing-booster-activate">
<title>Activating the engine</title>
<para>
IBus needs to be configured accordingly to activate <literal>typing-booster</literal>. The configuration
depends on the desktop manager in use. For detailed instructions, please refer to the
<link xlink:href="https://mike-fabian.github.io/ibus-typing-booster/documentation.html">upstream docs</link>.
</para>
<para>
On NixOS you need to explicitly enable <literal>ibus</literal> with given engines
before customizing your desktop to use <literal>typing-booster</literal>. This can be achieved
using the <literal>ibus</literal> module:
<programlisting>{ pkgs, ... }: {
i18n.inputMethod = {
enabled = "ibus";
ibus.engines = with pkgs.ibus-engines; [ typing-booster ];
};
}</programlisting>
</para>
</section>
<section xml:id="sec-ibus-typing-booster-customize-hunspell">
<title>Using custom hunspell dictionaries</title>
<para>
The IBus engine is based on <literal>hunspell</literal> to support completion in many languages.
By default the dictionaries <literal>de-de</literal>, <literal>en-us</literal>, <literal>es-es</literal>,
<literal>it-it</literal>, <literal>sv-se</literal> and <literal>sv-fi</literal>
are in use. To add another dictionary, the package can be overridden like this:
<programlisting>ibus-engines.typing-booster.override {
langs = [ "de-at" "en-gb" ];
}</programlisting>
</para>
<para>
<emphasis>Note: each language passed to <literal>langs</literal> must be an attribute name in
<literal>pkgs.hunspellDicts</literal>.</emphasis>
</para>
</section>
<section xml:id="sec-ibus-typing-booster-emoji-picker">
<title>Built-in emoji picker</title>
<para>
The <literal>ibus-engines.typing-booster</literal> package contains a program
named <literal>emoji-picker</literal>. To display all emojis correctly,
a special font such as <literal>noto-fonts-emoji</literal> is needed:
</para>
<para>
On NixOS it can be installed using the following expression:
<programlisting>{ pkgs, ... }: {
fonts.fonts = with pkgs; [ noto-fonts-emoji ];
}</programlisting>
</para>
</section>
</section>
</chapter>


@ -2129,7 +2129,7 @@ someVar=$(stripHash $name)
The most typical use of the setup hook is actually to add other hooks which
are then run (i.e. after all the setup hooks) on each dependency. For
example, the C compiler wrapper's setup hook feeds itself flags for each
dependency that contains relevant libaries and headers. This is done by
dependency that contains relevant libraries and headers. This is done by
defining a bash function, and appending its name to one of
<envar>envBuildBuildHooks</envar>`, <envar>envBuildHostHooks</envar>`,
<envar>envBuildTargetHooks</envar>`, <envar>envHostHostHooks</envar>`,

44
lib/asserts.nix Normal file

@ -0,0 +1,44 @@
{ lib }:
rec {
/* Print a trace message if pred is false.
Intended to be used to augment asserts with helpful error messages.
Example:
assertMsg false "nope"
=> false
stderr> trace: nope
assert (assertMsg ("foo" == "bar") "foo is not bar, silly"); ""
stderr> trace: foo is not bar, silly
stderr> assert failed at
Type:
assertMsg :: Bool -> String -> Bool
*/
# TODO(Profpatsch): add tests that check stderr
assertMsg = pred: msg:
if pred
then true
else builtins.trace msg false;
/* Specialized `assertMsg` for checking if val is one of the elements
of a list. Useful for checking enums.
Example:
let sslLibrary = "libressl"
in assertOneOf "sslLibrary" sslLibrary [ "openssl" "bearssl" ]
=> false
stderr> trace: sslLibrary must be one of "openssl", "bearssl", but is: "libressl"
Type:
assertOneOf :: String -> ComparableVal -> List ComparableVal -> Bool
*/
assertOneOf = name: val: xs: assertMsg
(lib.elem val xs)
"${name} must be one of ${
lib.generators.toPretty {} xs}, but is: ${
lib.generators.toPretty {} val}";
}
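# A hedged usage sketch (not part of this file): pairing assertMsg with a
# plain `assert` so a failing check prints a readable message. `lib` and the
# `checkPort` helper are illustrative assumptions, not code from this commit.
#
#   let
#     checkPort = port:
#       assert lib.assertMsg (port > 0 && port < 65536)
#         "port must be in 1..65535, but is: ${toString port}";
#       port;
#   in checkPort 8080
#   => 8080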


@ -435,12 +435,15 @@ rec {
useful for deep-overriding.
Example:
x = { a = { b = 4; c = 3; }; }
overrideExisting x { a = { b = 6; d = 2; }; }
=> { a = { b = 6; d = 2; }; }
overrideExisting {} { a = 1; }
=> {}
overrideExisting { b = 2; } { a = 1; }
=> { b = 2; }
overrideExisting { a = 3; b = 2; } { a = 1; }
=> { a = 1; b = 2; }
*/
overrideExisting = old: new:
old // listToAttrs (map (attr: nameValuePair attr (attrByPath [attr] old.${attr} new)) (attrNames old));
mapAttrs (name: value: new.${name} or value) old;
/* Get a package output.
If no output is found, fallback to `.out` and then to the default.


@ -38,10 +38,11 @@ let
systems = callLibs ./systems; systems = callLibs ./systems;
# misc # misc
asserts = callLibs ./asserts.nix;
debug = callLibs ./debug.nix; debug = callLibs ./debug.nix;
generators = callLibs ./generators.nix; generators = callLibs ./generators.nix;
misc = callLibs ./deprecated.nix; misc = callLibs ./deprecated.nix;
# domain-specific # domain-specific
fetchers = callLibs ./fetchers.nix; fetchers = callLibs ./fetchers.nix;
@ -60,7 +61,6 @@ let
boolToString mergeAttrs flip mapNullable inNixShell min max boolToString mergeAttrs flip mapNullable inNixShell min max
importJSON warn info nixpkgsVersion version mod compare importJSON warn info nixpkgsVersion version mod compare
splitByAndCompare functionArgs setFunctionArgs isFunction; splitByAndCompare functionArgs setFunctionArgs isFunction;
inherit (fixedPoints) fix fix' extends composeExtensions inherit (fixedPoints) fix fix' extends composeExtensions
makeExtensible makeExtensibleWithCustomName; makeExtensible makeExtensibleWithCustomName;
inherit (attrsets) attrByPath hasAttrByPath setAttrByPath inherit (attrsets) attrByPath hasAttrByPath setAttrByPath
@ -117,6 +117,8 @@ let
unknownModule mkOption; unknownModule mkOption;
inherit (types) isType setType defaultTypeMerge defaultFunctor inherit (types) isType setType defaultTypeMerge defaultFunctor
isOptionType mkOptionType; isOptionType mkOptionType;
inherit (asserts)
assertMsg assertOneOf;
inherit (debug) addErrorContextToAttrs traceIf traceVal traceValFn inherit (debug) addErrorContextToAttrs traceIf traceVal traceValFn
traceXMLVal traceXMLValMarked traceSeq traceSeqN traceValSeq traceXMLVal traceXMLValMarked traceSeq traceSeqN traceValSeq
traceValSeqFn traceValSeqN traceValSeqNFn traceShowVal traceValSeqFn traceValSeqN traceValSeqNFn traceShowVal


@ -355,6 +355,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Independent JPEG Group License"; fullName = "Independent JPEG Group License";
}; };
imagemagick = spdx {
fullName = "ImageMagick License";
spdxId = "imagemagick";
};
inria-compcert = { inria-compcert = {
fullName = "INRIA Non-Commercial License Agreement for the CompCert verified compiler"; fullName = "INRIA Non-Commercial License Agreement for the CompCert verified compiler";
url = "http://compcert.inria.fr/doc/LICENSE"; url = "http://compcert.inria.fr/doc/LICENSE";
@ -546,6 +551,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Public Domain"; fullName = "Public Domain";
}; };
purdueBsd = {
fullName = " Purdue BSD-Style License"; # also know as lsof license
url = https://enterprise.dejacode.com/licenses/public/purdue-bsd;
};
qpl = spdx { qpl = spdx {
spdxId = "QPL-1.0"; spdxId = "QPL-1.0";
fullName = "Q Public License 1.0"; fullName = "Q Public License 1.0";


@ -509,7 +509,8 @@ rec {
=> 3
*/
last = list:
assert list != []; elemAt list (length list - 1);
assert lib.assertMsg (list != []) "lists.last: list must not be empty!";
elemAt list (length list - 1);
/* Return all elements but the last
@ -517,7 +518,9 @@
init [ 1 2 3 ]
=> [ 1 2 ]
*/
init = list: assert list != []; take (length list - 1) list;
init = list:
assert lib.assertMsg (list != []) "lists.init: list must not be empty!";
take (length list - 1) list;
/* return the image of the cross product of some lists by a function


@ -410,7 +410,7 @@ rec {
components = splitString "/" url;
filename = lib.last components;
name = builtins.head (splitString sep filename);
in assert name != filename; name;
/* Create an --{enable,disable}-<feat> string that can be passed to
standard GNU Autoconf scripts.
@ -468,7 +468,10 @@ rec {
strw = lib.stringLength str;
reqWidth = width - (lib.stringLength filler);
in
assert strw <= width;
assert lib.assertMsg (strw <= width)
"fixedWidthString: requested string length (${
toString width}) must not be shorter than actual length (${
toString strw})";
if strw == width then str else filler + fixedWidthString reqWidth filler str;
/* Format a number adding leading zeroes up to fixed width.
@ -501,7 +504,7 @@ rec {
isStorePath = x:
isCoercibleToString x
&& builtins.substring 0 1 (toString x) == "/"
&& dirOf (builtins.toPath x) == builtins.storeDir;
&& dirOf x == builtins.storeDir;
/* Convert string to int
Obviously, it is a bit hacky to use fromJSON that way.
@ -537,11 +540,10 @@ rec {
*/
readPathsFromFile = rootPath: file:
let
root = toString rootPath;
lines = lib.splitString "\n" (builtins.readFile file);
removeComments = lib.filter (line: line != "" && !(lib.hasPrefix "#" line));
relativePaths = removeComments lines;
absolutePaths = builtins.map (path: builtins.toPath (root + "/" + path)) relativePaths;
absolutePaths = builtins.map (path: rootPath + "/${path}") relativePaths;
in
absolutePaths;

7
lib/tests/check-eval.nix Normal file

@ -0,0 +1,7 @@
# Throws an error if any of our lib tests fail.
let tests = [ "misc" "systems" ];
all = builtins.concatLists (map (f: import (./. + "/${f}.nix")) tests);
in if all == []
then null
else throw (builtins.toJSON all)
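# One possible way to use this file (an assumption, not shown in the diff):
# force its evaluation from another Nix expression; it throws if any lib
# test fails and yields null otherwise.
#
#   let result = import ./check-eval.nix;
#   in builtins.seq result "lib tests passed"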


@ -112,7 +112,7 @@ runTests {
storePathAppendix = isStorePath storePathAppendix = isStorePath
"${goodPath}/bin/python"; "${goodPath}/bin/python";
nonAbsolute = isStorePath (concatStrings (tail (stringToCharacters goodPath))); nonAbsolute = isStorePath (concatStrings (tail (stringToCharacters goodPath)));
asPath = isStorePath (builtins.toPath goodPath); asPath = isStorePath goodPath;
otherPath = isStorePath "/something/else"; otherPath = isStorePath "/something/else";
otherVals = { otherVals = {
attrset = isStorePath {}; attrset = isStorePath {};
@ -236,6 +236,20 @@ runTests {
}; };
}; };
testOverrideExistingEmpty = {
expr = overrideExisting {} { a = 1; };
expected = {};
};
testOverrideExistingDisjoint = {
expr = overrideExisting { b = 2; } { a = 1; };
expected = { b = 2; };
};
testOverrideExistingOverride = {
expr = overrideExisting { a = 3; b = 2; } { a = 1; };
expected = { a = 1; b = 2; };
};
# GENERATORS # GENERATORS
# these tests assume attributes are converted to lists # these tests assume attributes are converted to lists
@ -357,7 +371,7 @@ runTests {
int = 42; int = 42;
bool = true; bool = true;
string = ''fno"rd''; string = ''fno"rd'';
path = /. + "/foo"; # toPath returns a string path = /. + "/foo";
null_ = null; null_ = null;
function = x: x; function = x: x;
functionArgs = { arg ? 4, foo }: arg; functionArgs = { arg ? 4, foo }: arg;


@ -36,18 +36,18 @@ rec {
/* bitwise and */
bitAnd = builtins.bitAnd
or import ./zip-int-bits.nix
(a: b: if a==1 && b==1 then 1 else 0);
or (import ./zip-int-bits.nix
(a: b: if a==1 && b==1 then 1 else 0));
/* bitwise or */
bitOr = builtins.bitOr
or import ./zip-int-bits.nix
(a: b: if a==1 || b==1 then 1 else 0);
or (import ./zip-int-bits.nix
(a: b: if a==1 || b==1 then 1 else 0));
/* bitwise xor */
bitXor = builtins.bitXor
or import ./zip-int-bits.nix
(a: b: if a!=b then 1 else 0);
or (import ./zip-int-bits.nix
(a: b: if a!=b then 1 else 0));
/* bitwise not */
bitNot = builtins.sub (-1);
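# A quick hedged sanity check of these operations (illustrative only; the
# expected values follow from ordinary two's-complement bitwise semantics):
#
#   lib.bitAnd 12 10  => 8
#   lib.bitOr  12 10  => 14
#   lib.bitXor 12 10  => 6
#   lib.bitNot 0      => -1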
@ -171,7 +171,7 @@ rec {
builtins.fromJSON (builtins.readFile path);
## Warnings and asserts
## Warnings
/* See https://github.com/NixOS/nix/issues/749. Eventually we'd like these
to expand to Nix builtins that carry metadata so that Nix can filter out


@ -119,7 +119,9 @@ rec {
let
betweenDesc = lowest: highest:
"${toString lowest} and ${toString highest} (both inclusive)";
between = lowest: highest: assert lowest <= highest;
between = lowest: highest:
assert lib.assertMsg (lowest <= highest)
"ints.between: lowest must be smaller than highest";
addCheck int (x: x >= lowest && x <= highest) // {
name = "intBetween";
description = "integer between ${betweenDesc lowest highest}";
@ -439,7 +441,9 @@ rec {
# Either value of type `finalType` or `coercedType`, the latter is
# converted to `finalType` using `coerceFunc`.
coercedTo = coercedType: coerceFunc: finalType:
assert coercedType.getSubModules == null;
assert lib.assertMsg (coercedType.getSubModules == null)
"coercedTo: coercedType must not have submodules (it's a ${
coercedType.description})";
mkOptionType rec {
name = "coercedTo";
description = "${finalType.description} or ${coercedType.description} convertible to it";


@ -18,6 +18,11 @@
for an example on how to work with this data. for an example on how to work with this data.
*/ */
{ {
"1000101" = {
email = "jan.hrnko@satoshilabs.com";
github = "1000101";
name = "Jan Hrnko";
};
a1russell = { a1russell = {
email = "adamlr6+pub@gmail.com"; email = "adamlr6+pub@gmail.com";
github = "a1russell"; github = "a1russell";
@ -227,7 +232,7 @@
name = "Andrew Morsillo"; name = "Andrew Morsillo";
}; };
AndersonTorres = { AndersonTorres = {
email = "torres.anderson.85@gmail.com"; email = "torres.anderson.85@protonmail.com";
github = "AndersonTorres"; github = "AndersonTorres";
name = "Anderson Torres"; name = "Anderson Torres";
}; };
@ -376,6 +381,16 @@
github = "auntie"; github = "auntie";
name = "Jonathan Glines"; name = "Jonathan Glines";
}; };
avaq = {
email = "avaq+nixos@xs4all.nl";
github = "avaq";
name = "Aldwin Vlasblom";
};
avery = {
email = "averyl+nixos@protonmail.com";
github = "AveryLychee";
name = "Avery Lychee";
};
avnik = { avnik = {
email = "avn@avnik.info"; email = "avn@avnik.info";
github = "avnik"; github = "avnik";
@ -678,6 +693,11 @@
github = "Chaddai"; github = "Chaddai";
name = "Chaddaï Fouché"; name = "Chaddaï Fouché";
}; };
chaduffy = {
email = "charles@dyfis.net";
github = "charles-dyfis-net";
name = "Charles Duffy";
};
changlinli = { changlinli = {
email = "mail@changlinli.com"; email = "mail@changlinli.com";
github = "changlinli"; github = "changlinli";
@ -1847,6 +1867,16 @@
github = "jerith666"; github = "jerith666";
name = "Matt McHenry"; name = "Matt McHenry";
}; };
jeschli = {
email = "jeschli@gmail.com";
github = "jeschli";
name = "Markus Hihn";
};
jethro = {
email = "jethrokuan95@gmail.com";
github = "jethrokuan";
name = "Jethro Kuan";
};
jfb = { jfb = {
email = "james@yamtime.com"; email = "james@yamtime.com";
github = "tftio"; github = "tftio";
@ -2808,6 +2838,11 @@
github = "muflax"; github = "muflax";
name = "Stefan Dorn"; name = "Stefan Dorn";
}; };
mvnetbiz = {
email = "mvnetbiz@gmail.com";
github = "mvnetbiz";
name = "Matt Votava";
};
myrl = { myrl = {
email = "myrl.0xf@gmail.com"; email = "myrl.0xf@gmail.com";
github = "myrl"; github = "myrl";
@ -3396,6 +3431,11 @@
github = "relrod"; github = "relrod";
name = "Ricky Elrod"; name = "Ricky Elrod";
}; };
renatoGarcia = {
email = "fgarcia.renato@gmail.com";
github = "renatoGarcia";
name = "Renato Garcia";
};
renzo = { renzo = {
email = "renzocarbonara@gmail.com"; email = "renzocarbonara@gmail.com";
github = "k0001"; github = "k0001";
@ -3888,6 +3928,11 @@
github = "StillerHarpo"; github = "StillerHarpo";
name = "Florian Engel"; name = "Florian Engel";
}; };
stites = {
email = "sam@stites.io";
github = "stites";
name = "Sam Stites";
};
stumoss = { stumoss = {
email = "samoss@gmail.com"; email = "samoss@gmail.com";
github = "stumoss"; github = "stumoss";
@ -4153,6 +4198,11 @@
github = "tomsmeets"; github = "tomsmeets";
name = "Tom Smeets"; name = "Tom Smeets";
}; };
toonn = {
email = "nnoot@toonn.io";
github = "toonn";
name = "Toon Nolten";
};
travisbhartwell = { travisbhartwell = {
email = "nafai@travishartwell.net"; email = "nafai@travishartwell.net";
github = "travisbhartwell"; github = "travisbhartwell";
@ -4508,6 +4558,11 @@
github = "y0no"; github = "y0no";
name = "Yoann Ono"; name = "Yoann Ono";
}; };
yarny = {
email = "41838844+Yarny0@users.noreply.github.com";
github = "Yarny0";
name = "Yarny";
};
yarr = { yarr = {
email = "savraz@gmail.com"; email = "savraz@gmail.com";
github = "Eternity-Yarr"; github = "Eternity-Yarr";


@ -90,7 +90,9 @@ let
fi
${buildPackages.libxslt.bin}/bin/xsltproc \
--stringparam revision '${revision}' \
-o $out ${./options-to-docbook.xsl} $optionsXML
-o intermediate.xml ${./options-to-docbook.xsl} $optionsXML
${buildPackages.libxslt.bin}/bin/xsltproc \
-o "$out" ${./postprocess-option-descriptions.xsl} intermediate.xml
'';
sources = lib.sourceFilesBySuffices ./. [".xml"];
@ -250,7 +252,7 @@ in rec {
''; # */
# Generate the NixOS manual.
manual = runCommand "nixos-manual"
manualHTML = runCommand "nixos-manual-html"
{ inherit sources;
nativeBuildInputs = [ buildPackages.libxml2.bin buildPackages.libxslt.bin ];
meta.description = "The NixOS manual in HTML format";
@ -279,6 +281,11 @@ in rec {
echo "doc manual $dst" >> $out/nix-support/hydra-build-products echo "doc manual $dst" >> $out/nix-support/hydra-build-products
''; # */ ''; # */
# Alias for backward compatibility. TODO(@oxij): remove eventually.
manual = manualHTML;
# Index page of the NixOS manual.
manualHTMLIndex = "${manualHTML}/share/doc/nixos/index.html";
manualEpub = runCommand "nixos-manual-epub" manualEpub = runCommand "nixos-manual-epub"
{ inherit sources; { inherit sources;


@ -34,7 +34,7 @@ $ nix-build -A system</screen>
</varlistentry>
<varlistentry>
<term>
<varname>system.build.manual.manual</varname>
<varname>system.build.manual.manualHTML</varname>
</term>
<listitem>
<para>


@ -19,6 +19,7 @@ starting VDE switch for network 1
&gt; startAll
&gt; testScript
&gt; $machine->succeed("touch /tmp/foo")
&gt; print($machine->succeed("pwd"), "\n") # Show stdout of command
</screen>
The function <command>testScript</command> executes the entire test script
and drops you back into the test driver command line upon its completion.
@ -33,8 +34,11 @@ $ nix-build nixos/tests/login.nix -A driver
$ ./result/bin/nixos-run-vms
</screen>
The script <command>nixos-run-vms</command> starts the virtual machines
defined by test. The root file system of the VMs is created on the fly and
kept across VM restarts in
<filename>./</filename><varname>hostname</varname><filename>.qcow2</filename>.
defined by test.
</para>
<para>
The machine state is kept across VM restarts in
<filename>/tmp/vm-state-</filename><varname>machinename</varname>.
</para>
</section>


@ -108,7 +108,7 @@ xlink:href="https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/virtualis
<programlisting>
$machine->start;
$machine->waitForUnit("default.target");
$machine->succeed("uname") =~ /Linux/;
die unless $machine->succeed("uname") =~ /Linux/;
</programlisting>
The first line is actually unnecessary; machines are implicitly started when
you first execute an action on them (such as <literal>waitForUnit</literal>


@ -52,10 +52,13 @@
</listitem>
</itemizedlist>
To see what channels are available, go to
<link
xlink:href="https://nixos.org/channels"/>. (Note that the URIs of the
<link xlink:href="https://nixos.org/channels"/>. (Note that the URIs of the
various channels redirect to a directory that contains the channels latest
version and includes ISO images and VirtualBox appliances.)
version and includes ISO images and VirtualBox appliances.) Please note that
during the release process, channels that are not yet released will be
present here as well. See the Getting NixOS page
<link xlink:href="https://nixos.org/nixos/download.html"/> to find the newest
supported stable release.
</para>
<para>
When you first install NixOS, you're automatically subscribed to the NixOS


@ -4,6 +4,7 @@
xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:str="http://exslt.org/strings" xmlns:str="http://exslt.org/strings"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:nixos="tag:nixos.org"
xmlns="http://docbook.org/ns/docbook" xmlns="http://docbook.org/ns/docbook"
extension-element-prefixes="str" extension-element-prefixes="str"
> >
@ -30,10 +31,12 @@
<listitem> <listitem>
<para> <nixos:option-description>
<xsl:value-of disable-output-escaping="yes" <para>
select="attr[@name = 'description']/string/@value" /> <xsl:value-of disable-output-escaping="yes"
</para> select="attr[@name = 'description']/string/@value" />
</para>
</nixos:option-description>
<xsl:if test="attr[@name = 'type']"> <xsl:if test="attr[@name = 'type']">
<para> <para>


@ -0,0 +1,115 @@
<?xml version="1.0"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:str="http://exslt.org/strings"
xmlns:exsl="http://exslt.org/common"
xmlns:db="http://docbook.org/ns/docbook"
xmlns:nixos="tag:nixos.org"
extension-element-prefixes="str exsl">
<xsl:output method='xml' encoding="UTF-8" />
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()" />
</xsl:copy>
</xsl:template>
<xsl:template name="break-up-description">
<xsl:param name="input" />
<xsl:param name="buffer" />
<!-- Every time we have two newlines following each other, we want to
break it into </para><para>. -->
<xsl:variable name="parbreak" select="'&#xa;&#xa;'" />
<!-- Similar to "(head:tail) = input" in Haskell. -->
<xsl:variable name="head" select="$input[1]" />
<xsl:variable name="tail" select="$input[position() &gt; 1]" />
<xsl:choose>
<xsl:when test="$head/self::text() and contains($head, $parbreak)">
<!-- If the haystack provided to str:split() directly starts or
ends with $parbreak, it doesn't generate a <token/> for that,
so we are doing this here. -->
<xsl:variable name="splitted-raw">
<xsl:if test="starts-with($head, $parbreak)"><token /></xsl:if>
<xsl:for-each select="str:split($head, $parbreak)">
<token><xsl:value-of select="node()" /></token>
</xsl:for-each>
<!-- Something like ends-with($head, $parbreak), but there is
no ends-with() in XSLT, so we need to use substring(). -->
<xsl:if test="
substring($head, string-length($head) -
string-length($parbreak) + 1) = $parbreak
"><token /></xsl:if>
</xsl:variable>
<xsl:variable name="splitted"
select="exsl:node-set($splitted-raw)/token" />
<!-- The buffer we had so far didn't contain any text nodes that
contain a $parbreak, so we can put the buffer along with the
first token of $splitted into a para element. -->
<para xmlns="http://docbook.org/ns/docbook">
<xsl:apply-templates select="exsl:node-set($buffer)" />
<xsl:apply-templates select="$splitted[1]/node()" />
</para>
<!-- We have already emitted the first splitted result, so the
last result is going to be set as the new $buffer later
because its contents may not be directly followed up by a
$parbreak. -->
<xsl:for-each select="$splitted[position() &gt; 1
and position() &lt; last()]">
<para xmlns="http://docbook.org/ns/docbook">
<xsl:apply-templates select="node()" />
</para>
</xsl:for-each>
<xsl:call-template name="break-up-description">
<xsl:with-param name="input" select="$tail" />
<xsl:with-param name="buffer" select="$splitted[last()]/node()" />
</xsl:call-template>
</xsl:when>
<!-- Either non-text node or one without $parbreak, which we just
want to buffer and continue recursing. -->
<xsl:when test="$input">
<xsl:call-template name="break-up-description">
<xsl:with-param name="input" select="$tail" />
<!-- This essentially appends $head to $buffer. -->
<xsl:with-param name="buffer">
<xsl:if test="$buffer">
<xsl:for-each select="exsl:node-set($buffer)">
<xsl:apply-templates select="." />
</xsl:for-each>
</xsl:if>
<xsl:apply-templates select="$head" />
</xsl:with-param>
</xsl:call-template>
</xsl:when>
<!-- No more $input, just put the remaining $buffer in a para. -->
<xsl:otherwise>
<para xmlns="http://docbook.org/ns/docbook">
<xsl:apply-templates select="exsl:node-set($buffer)" />
</para>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="nixos:option-description">
<xsl:choose>
<!--
Only process nodes that are comprised of a single <para/> element,
because if that's not the case the description already contains
</para><para> in between and we need no further processing.
-->
<xsl:when test="count(db:para) > 1">
<xsl:apply-templates select="node()" />
</xsl:when>
<xsl:otherwise>
<xsl:call-template name="break-up-description">
<xsl:with-param name="input"
select="exsl:node-set(db:para/node())" />
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>


@ -8,6 +8,7 @@
This section lists the release notes for each stable version of NixOS and This section lists the release notes for each stable version of NixOS and
current unstable revision. current unstable revision.
</para> </para>
<xi:include href="rl-1903.xml" />
<xi:include href="rl-1809.xml" /> <xi:include href="rl-1809.xml" />
<xi:include href="rl-1803.xml" /> <xi:include href="rl-1803.xml" />
<xi:include href="rl-1709.xml" /> <xi:include href="rl-1709.xml" />


@ -91,7 +91,7 @@ $ nix-instantiate -E '(import &lt;nixpkgsunstable&gt; {}).gitFull'
<para>
When enabled the <literal>iproute2</literal> will copy the files expected
by ip route (e.g., <filename>rt_tables</filename>) in
<filename>/run/iproute2</filename>. This allows to write aliases for
<filename>/etc/iproute2</filename>. This allows to write aliases for
routing tables for instance.
</para>
</listitem>


@ -0,0 +1,118 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.03">
<title>Release 19.03 (“Koi”, 2019/03/??)</title>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.03-highlights">
<title>Highlights</title>
<para>
In addition to numerous new and upgraded packages, this release has the
following highlights:
</para>
<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.03-new-services">
<title>New Services</title>
<para>
The following new services were added since the last release:
</para>
<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.03-incompatibilities">
<title>Backward Incompatibilities</title>
<para>
When upgrading from a previous release, please be aware of the following
incompatible changes:
</para>
<itemizedlist>
<listitem>
<para>
The minimum version of Nix required to evaluate Nixpkgs is now 2.0.
</para>
<itemizedlist>
<listitem>
<para>
For users of NixOS 18.03 and 19.03, NixOS defaults to Nix 2.0, but
supports using Nix 1.11 by setting <literal>nix.package =
pkgs.nix1;</literal>. If this option is set to a Nix 1.11 package, you
will need to either unset the option or upgrade it to Nix 2.0.
</para>
</listitem>
<listitem>
<para>
For users of NixOS 17.09, you will first need to upgrade Nix by setting
<literal>nix.package = pkgs.nixStable2;</literal> and run
<command>nixos-rebuild switch</command> as the <literal>root</literal>
user.
</para>
</listitem>
<listitem>
<para>
For users of a daemon-less Nix installation on Linux or macOS, you can
upgrade Nix by running <command>curl https://nixos.org/nix/install |
sh</command>, or prior to doing a channel update, running
<command>nix-env -iA nix</command>.
</para>
<para>
If you have already run a channel update and Nix is no longer able to
evaluate Nixpkgs, the error message printed should provide adequate
directions for upgrading Nix.
</para>
</listitem>
<listitem>
<para>
For users of the Nix daemon on macOS, you can upgrade Nix by running
<command>sudo -i sh -c 'nix-channel --update &amp;&amp; nix-env -iA
nixpkgs.nix'; sudo launchctl stop org.nixos.nix-daemon; sudo launchctl
start org.nixos.nix-daemon</command>.
</para>
</listitem>
</itemizedlist>
</listitem>
</itemizedlist>
</section>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.03-notable-changes">
<title>Other Notable Changes</title>
<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>
</section>


@ -28,7 +28,7 @@ rec {
modules = configurations ++
[ ../modules/virtualisation/qemu-vm.nix
../modules/testing/test-instrumentation.nix # !!! should only get added for automated test runs
{ key = "no-manual"; services.nixosManual.enable = false; }
{ key = "no-manual"; documentation.nixos.enable = false; }
{ key = "qemu"; system.build.qemu = qemu; }
] ++ optional minimal ../modules/testing/minimal-kernel.nix;
extraArgs = { inherit nodes; };


@ -28,7 +28,7 @@
let extraArgs_ = extraArgs; pkgs_ = pkgs;
extraModules = let e = builtins.getEnv "NIXOS_EXTRA_MODULE_PATH";
in if e == "" then [] else [(import (builtins.toPath e))];
in if e == "" then [] else [(import e)];
in
let


@ -4,20 +4,29 @@ with lib;
let
cfg = config.networking.iproute2;
confDir = "/run/iproute2";
in
{
options.networking.iproute2.enable = mkEnableOption "copy IP route configuration files";
config = mkMerge [
({ nixpkgs.config.iproute2.confDir = confDir; })
(mkIf cfg.enable {
system.activationScripts.iproute2 = ''
cp -R ${pkgs.iproute}/etc/iproute2 ${confDir}
chmod -R 664 ${confDir}
chmod +x ${confDir}
'';
})
];
options.networking.iproute2 = {
enable = mkEnableOption "copy IP route configuration files";
rttablesExtraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Verbatim lines to add to /etc/iproute2/rt_tables
'';
};
};
config = mkIf cfg.enable {
environment.etc."iproute2/bpf_pinning" = { mode = "0644"; text = fileContents "${pkgs.iproute}/etc/iproute2/bpf_pinning"; };
environment.etc."iproute2/ematch_map" = { mode = "0644"; text = fileContents "${pkgs.iproute}/etc/iproute2/ematch_map"; };
environment.etc."iproute2/group" = { mode = "0644"; text = fileContents "${pkgs.iproute}/etc/iproute2/group"; };
environment.etc."iproute2/nl_protos" = { mode = "0644"; text = fileContents "${pkgs.iproute}/etc/iproute2/nl_protos"; };
environment.etc."iproute2/rt_dsfield" = { mode = "0644"; text = fileContents "${pkgs.iproute}/etc/iproute2/rt_dsfield"; };
environment.etc."iproute2/rt_protos" = { mode = "0644"; text = fileContents "${pkgs.iproute}/etc/iproute2/rt_protos"; };
environment.etc."iproute2/rt_realms" = { mode = "0644"; text = fileContents "${pkgs.iproute}/etc/iproute2/rt_realms"; };
environment.etc."iproute2/rt_scopes" = { mode = "0644"; text = fileContents "${pkgs.iproute}/etc/iproute2/rt_scopes"; };
environment.etc."iproute2/rt_tables" = { mode = "0644"; text = (fileContents "${pkgs.iproute}/etc/iproute2/rt_tables")
+ (optionalString (cfg.rttablesExtraConfig != "") "\n\n${cfg.rttablesExtraConfig}"); };
};
}
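# A hedged configuration sketch using the options defined above (the routing
# table number and name are made up for illustration):
#
#   networking.iproute2 = {
#     enable = true;
#     # Appended verbatim to /etc/iproute2/rt_tables
#     rttablesExtraConfig = ''
#       200 custom_table
#     '';
#   };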


@ -163,15 +163,24 @@ in
/bin/sh /bin/sh
''; '';
# For resetting environment with `. /etc/set-environment` when needed
# and discoverability (see motivation of #30418).
environment.etc."set-environment".source = config.system.build.setEnvironment;
system.build.setEnvironment = pkgs.writeText "set-environment" system.build.setEnvironment = pkgs.writeText "set-environment"
'' ''
${exportedEnvVars} # DO NOT EDIT -- this file has been generated automatically.
${cfg.extraInit} # Prevent this file from being sourced by child shells.
export __NIXOS_SET_ENVIRONMENT_DONE=1
# ~/bin if it exists overrides other bin directories. ${exportedEnvVars}
export PATH="$HOME/bin:$PATH"
''; ${cfg.extraInit}
# ~/bin if it exists overrides other bin directories.
export PATH="$HOME/bin:$PATH"
'';
system.activationScripts.binsh = stringAfter [ "stdio" ] system.activationScripts.binsh = stringAfter [ "stdio" ]
'' ''
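Since the generated environment file is now linked at /etc/set-environment, scripts that need the full NixOS environment can simply source it. A hypothetical sketch with a made-up systemd service name:

{
  systemd.services.my-task = {
    # Re-export the system environment before running the payload.
    script = ''
      . /etc/set-environment
      printenv PATH
    '';
  };
}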

View File

@ -13,7 +13,7 @@ let
pkgs.attr pkgs.attr
pkgs.bashInteractive # bash with ncurses support pkgs.bashInteractive # bash with ncurses support
pkgs.bzip2 pkgs.bzip2
pkgs.coreutils pkgs.coreutils-full
pkgs.cpio pkgs.cpio
pkgs.curl pkgs.curl
pkgs.diffutils pkgs.diffutils

View File

@ -23,12 +23,12 @@ with lib;
]; ];
environment.extraSetup = '' environment.extraSetup = ''
if [ -w $out/share/mime ]; then if [ -w $out/share/mime ] && [ -d $out/share/mime/packages ]; then
XDG_DATA_DIRS=$out/share ${pkgs.shared-mime-info}/bin/update-mime-database -V $out/share/mime > /dev/null XDG_DATA_DIRS=$out/share ${pkgs.shared-mime-info}/bin/update-mime-database -V $out/share/mime > /dev/null
fi fi
if [ -w $out/share/applications ]; then if [ -w $out/share/applications ]; then
${pkgs.desktop-file-utils}/bin/update-desktop-database $out/share/applications ${pkgs.desktop-file-utils}/bin/update-desktop-database $out/share/applications
fi fi
''; '';
}; };

View File

@ -0,0 +1,49 @@
# This module contains the basic configuration for building a graphical NixOS
# installation CD.
{ config, lib, pkgs, ... }:
with lib;
{
imports = [ ./installation-cd-base.nix ];
services.xserver = {
enable = true;
# Don't start the X server by default.
autorun = mkForce false;
# Automatically login as root.
displayManager.slim = {
enable = true;
defaultUser = "root";
autoLogin = true;
};
};
# Provide networkmanager for easy wireless configuration.
networking.networkmanager.enable = true;
networking.wireless.enable = mkForce false;
# KDE complains if power management is disabled (to be precise, if
# there is no power management backend such as upower).
powerManagement.enable = true;
environment.systemPackages = [
# Include gparted for partitioning disks.
pkgs.gparted
# Include some editors.
pkgs.vim
pkgs.bvi # binary editor
pkgs.joe
# Firefox for reading the manual.
pkgs.firefox
pkgs.glxinfo
];
}
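This new profile collects everything the graphical install CDs share (X with slim auto-login as root, NetworkManager, gparted, editors, Firefox); the GNOME and Plasma ISOs below just import it and add a desktop. A hedged sketch of reusing it for a custom ISO -- the desktop choice is illustrative and the build command in the comment is the conventional isoImage invocation:

# custom-iso.nix
# build: nix-build '<nixpkgs/nixos>' -A config.system.build.isoImage -I nixos-config=./custom-iso.nix
{ config, pkgs, ... }:
{
  imports = [ <nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix> ];
  services.xserver.desktopManager.xfce.enable = true;  # illustrative desktop
}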

View File

@ -6,47 +6,11 @@
with lib; with lib;
{ {
imports = [ ./installation-cd-base.nix ]; imports = [ ./installation-cd-graphical-base.nix ];
services.xserver = { services.xserver.desktopManager.gnome3.enable = true;
enable = true;
# GDM doesn't start in virtual machines with ISO
displayManager.slim = {
enable = true;
defaultUser = "root";
autoLogin = true;
};
desktopManager.gnome3 = {
enable = true;
extraGSettingsOverrides = ''
[org.gnome.desktop.background]
show-desktop-icons=true
[org.gnome.nautilus.desktop] services.xserver.displayManager.slim.enable = mkForce false;
trash-icon-visible=false
volumes-visible=false
home-icon-visible=false
network-icon-visible=false
'';
extraGSettingsOverridePackages = [ pkgs.gnome3.nautilus ];
};
};
environment.systemPackages =
[ # Include gparted for partitioning disks.
pkgs.gparted
# Include some editors.
pkgs.vim
pkgs.bvi # binary editor
pkgs.joe
pkgs.glxinfo
];
# Don't start the X server by default.
services.xserver.autorun = mkForce false;
# Auto-login as root. # Auto-login as root.
services.xserver.displayManager.gdm.autoLogin = { services.xserver.displayManager.gdm.autoLogin = {
@ -54,25 +18,4 @@ with lib;
user = "root"; user = "root";
}; };
system.activationScripts.installerDesktop = let
# Must be executable
desktopFile = pkgs.writeScript "nixos-manual.desktop" ''
[Desktop Entry]
Version=1.0
Type=Link
Name=NixOS Manual
URL=${config.system.build.manual.manual}/share/doc/nixos/index.html
Icon=system-help
'';
# use cp and chmod +x, we must be sure the apps are in the nix store though
in ''
mkdir -p /root/Desktop
ln -sfT ${desktopFile} /root/Desktop/nixos-manual.desktop
cp ${pkgs.gnome3.gnome-terminal}/share/applications/gnome-terminal.desktop /root/Desktop/gnome-terminal.desktop
chmod a+rx /root/Desktop/gnome-terminal.desktop
cp ${pkgs.gparted}/share/applications/gparted.desktop /root/Desktop/gparted.desktop
chmod a+rx /root/Desktop/gparted.desktop
'';
} }

View File

@ -1,23 +1,14 @@
# This module defines a NixOS installation CD that contains X11 and # This module defines a NixOS installation CD that contains X11 and
# KDE 5. # Plasma5.
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib; with lib;
{ {
imports = [ ./installation-cd-base.nix ]; imports = [ ./installation-cd-graphical-base.nix ];
services.xserver = { services.xserver = {
enable = true;
# Automatically login as root.
displayManager.slim = {
enable = true;
defaultUser = "root";
autoLogin = true;
};
desktopManager.plasma5 = { desktopManager.plasma5 = {
enable = true; enable = true;
enableQt4Support = false; enableQt4Support = false;
@ -27,45 +18,25 @@ with lib;
synaptics.enable = true; synaptics.enable = true;
}; };
environment.systemPackages = environment.systemPackages = with pkgs; [
[ pkgs.glxinfo # Graphical text editor
kate
# Include gparted for partitioning disks. ];
pkgs.gparted
# Firefox for reading the manual.
pkgs.firefox
# Include some editors.
pkgs.vim
pkgs.bvi # binary editor
pkgs.joe
];
# Provide networkmanager for easy wireless configuration.
networking.networkmanager.enable = true;
networking.wireless.enable = mkForce false;
# KDE complains if power management is disabled (to be precise, if
# there is no power management backend such as upower).
powerManagement.enable = true;
# Don't start the X server by default.
services.xserver.autorun = mkForce false;
system.activationScripts.installerDesktop = let system.activationScripts.installerDesktop = let
desktopFile = pkgs.writeText "nixos-manual.desktop" ''
manualDesktopFile = pkgs.writeScript "nixos-manual.desktop" ''
[Desktop Entry] [Desktop Entry]
Version=1.0 Version=1.0
Type=Application Type=Application
Name=NixOS Manual Name=NixOS Manual
Exec=firefox ${config.system.build.manual.manual}/share/doc/nixos/index.html Exec=firefox ${config.system.build.manual.manualHTMLIndex}
Icon=text-html Icon=text-html
''; '';
in '' in ''
mkdir -p /root/Desktop mkdir -p /root/Desktop
ln -sfT ${desktopFile} /root/Desktop/nixos-manual.desktop ln -sfT ${manualDesktopFile} /root/Desktop/nixos-manual.desktop
ln -sfT ${pkgs.konsole}/share/applications/org.kde.konsole.desktop /root/Desktop/org.kde.konsole.desktop ln -sfT ${pkgs.konsole}/share/applications/org.kde.konsole.desktop /root/Desktop/org.kde.konsole.desktop
ln -sfT ${pkgs.gparted}/share/applications/gparted.desktop /root/Desktop/gparted.desktop ln -sfT ${pkgs.gparted}/share/applications/gparted.desktop /root/Desktop/gparted.desktop
''; '';

View File

@ -233,7 +233,7 @@ let
" "
# Make our own efi program, we can't rely on "grub-install" since it seems to # Make our own efi program, we can't rely on "grub-install" since it seems to
# probe for devices, even with --skip-fs-probe. # probe for devices, even with --skip-fs-probe.
${pkgs.grub2_efi}/bin/grub-mkimage -o $out/EFI/boot/${if targetArch == "x64" then "bootx64" else "bootx32"}.efi -p /EFI/boot -O ${if targetArch == "x64" then "x86_64" else "i386"}-efi \ ${pkgs.grub2_efi}/bin/grub-mkimage -o $out/EFI/boot/${if targetArch == "x64" then "bootx64" else "bootia32"}.efi -p /EFI/boot -O ${if targetArch == "x64" then "x86_64" else "i386"}-efi \
$MODULES $MODULES
cp ${pkgs.grub2_efi}/share/grub/unicode.pf2 $out/EFI/boot/ cp ${pkgs.grub2_efi}/share/grub/unicode.pf2 $out/EFI/boot/

View File

@ -137,7 +137,7 @@ in
# Setting vesa, we don't get the nvidia driver, which can't work in arm. # Setting vesa, we don't get the nvidia driver, which can't work in arm.
services.xserver.videoDrivers = [ "vesa" ]; services.xserver.videoDrivers = [ "vesa" ];
services.nixosManual.enable = false; documentation.nixos.enable = false;
# Include the firmware for various wireless cards. # Include the firmware for various wireless cards.
networking.enableRalinkFirmware = true; networking.enableRalinkFirmware = true;

View File

@ -1,6 +1,6 @@
{ {
x86_64-linux = "/nix/store/0d60i73mcv8z1m8d2m74yfn84980gfsa-nix-2.0.4"; x86_64-linux = "/nix/store/mxg4bbblxfns96yrz0nalxyiyjl7gj98-nix-2.1.2";
i686-linux = "/nix/store/6ssafj2s5a2g9x28yld7b70vwd6vw6lb-nix-2.0.4"; i686-linux = "/nix/store/bgjgmbwirx63mwwychpikd7yc4k4lbjv-nix-2.1.2";
aarch64-linux = "/nix/store/3wwch7bp7n7xsl8apgy2a4b16yzyij1z-nix-2.0.4"; aarch64-linux = "/nix/store/yi18azn4nwrcwvaiag04jnxc1qs38fy5-nix-2.1.2";
x86_64-darwin = "/nix/store/771l8i0mz4c8kry8cz3sz8rr3alalckg-nix-2.0.4"; x86_64-darwin = "/nix/store/fpivmcck2qpw5plrp599iraw2x9jp18k-nix-2.1.2";
} }

View File

@ -1,8 +1,72 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, baseModules, ... }:
with lib; with lib;
let cfg = config.documentation; in let
cfg = config.documentation;
/* For the purpose of generating docs, evaluate options with each derivation
in `pkgs` (recursively) replaced by a fake with path "\${pkgs.attribute.path}".
It isn't perfect, but it seems to cover a vast majority of use cases.
Caveat: even if the package is reached by a different means,
the path above will be shown and not e.g. `${config.services.foo.package}`. */
manual = import ../../doc/manual rec {
inherit pkgs config;
version = config.system.nixos.release;
revision = "release-${version}";
options =
let
scrubbedEval = evalModules {
modules = [ { nixpkgs.localSystem = config.nixpkgs.localSystem; } ] ++ baseModules;
args = (config._module.args) // { modules = [ ]; };
specialArgs = { pkgs = scrubDerivations "pkgs" pkgs; };
};
scrubDerivations = namePrefix: pkgSet: mapAttrs
(name: value:
let wholeName = "${namePrefix}.${name}"; in
if isAttrs value then
scrubDerivations wholeName value
// (optionalAttrs (isDerivation value) { outPath = "\${${wholeName}}"; })
else value
)
pkgSet;
in scrubbedEval.options;
};
helpScript = pkgs.writeScriptBin "nixos-help"
''
#! ${pkgs.runtimeShell} -e
# Finds first executable browser in a colon-separated list.
# (see how xdg-open defines BROWSER)
browser="$(
IFS=: ; for b in $BROWSER; do
[ -n "$(type -P "$b" || true)" ] && echo "$b" && break
done
)"
if [ -z "$browser" ]; then
browser="$(type -P xdg-open || true)"
if [ -z "$browser" ]; then
browser="$(type -P w3m || true)"
if [ -z "$browser" ]; then
echo "$0: unable to start a web browser; please set \$BROWSER"
exit 1
fi
fi
fi
exec "$browser" ${manual.manualHTMLIndex}
'';
desktopItem = pkgs.makeDesktopItem {
name = "nixos-manual";
desktopName = "NixOS Manual";
genericName = "View NixOS documentation in a web browser";
icon = "nix-snowflake";
exec = "${helpScript}/bin/nixos-help";
categories = "System";
};
in
{ {
@ -66,6 +130,22 @@ let cfg = config.documentation; in
''; '';
}; };
nixos.enable = mkOption {
type = types.bool;
default = true;
description = ''
Whether to install NixOS's own documentation.
<itemizedlist>
<listitem><para>This includes man pages like
<citerefentry><refentrytitle>configuration.nix</refentrytitle>
<manvolnum>5</manvolnum></citerefentry> if <option>man.enable</option> is
set.</para></listitem>
<listitem><para>This includes the HTML manual and the <command>nixos-help</command> command if
<option>doc.enable</option> is set.</para></listitem>
</itemizedlist>
'';
};
}; };
}; };
@ -99,6 +179,21 @@ let cfg = config.documentation; in
environment.extraOutputsToInstall = [ "doc" ] ++ optional cfg.dev.enable "devdoc"; environment.extraOutputsToInstall = [ "doc" ] ++ optional cfg.dev.enable "devdoc";
}) })
(mkIf cfg.nixos.enable {
system.build.manual = manual;
environment.systemPackages = []
++ optional cfg.man.enable manual.manpages
++ optionals cfg.doc.enable ([ manual.manualHTML helpScript ]
++ optionals config.services.xserver.enable [ desktopItem pkgs.nixos-icons ]);
services.mingetty.helpLine = mkIf cfg.doc.enable (
"\nRun `nixos-help` "
+ optionalString config.services.nixosManual.showManual "or press <Alt-F${toString config.services.nixosManual.ttyNumber}> "
+ "for the NixOS manual."
);
})
]); ]);
} }
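The manual build now lives here and is gated by documentation.nixos.enable alongside the existing man/doc switches, so trimming it on a server is a one-line change:

{
  # Headless machine: skip the NixOS manual, configuration.nix(5) and nixos-help.
  documentation.nixos.enable = false;

  # Leaving the default `true` (plus documentation.doc.enable and services.xserver.enable)
  # installs the HTML manual, the nixos-help script and its desktop item instead.
}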

View File

@ -53,7 +53,7 @@
tomcat = 16; tomcat = 16;
#audio = 17; # unused #audio = 17; # unused
#floppy = 18; # unused #floppy = 18; # unused
#uucp = 19; # unused uucp = 19;
#lp = 20; # unused #lp = 20; # unused
#proc = 21; # unused #proc = 21; # unused
pulseaudio = 22; # must match `pulseaudio' GID pulseaudio = 22; # must match `pulseaudio' GID
@ -329,6 +329,7 @@
# kvm = 302; # unused # kvm = 302; # unused
# render = 303; # unused # render = 303; # unused
zeronet = 304; zeronet = 304;
lirc = 305;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399! # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -618,6 +619,7 @@
kvm = 302; # default udev rules from systemd requires these kvm = 302; # default udev rules from systemd requires these
render = 303; # default udev rules from systemd requires these render = 303; # default udev rules from systemd requires these
zeronet = 304; zeronet = 304;
lirc = 305;
# When adding a gid, make sure it doesn't match an existing # When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal # uid. Users and groups with the same name should have equal

View File

@ -84,7 +84,7 @@ in
versionSuffix = mkIf (pathIsDirectory gitRepo) (mkDefault (".git." + gitCommitId)); versionSuffix = mkIf (pathIsDirectory gitRepo) (mkDefault (".git." + gitCommitId));
# Note: the first letter is bumped on every release. It's an animal. # Note: the first letter is bumped on every release. It's an animal.
codeName = "Jellyfish"; codeName = "Koi";
}; };
# Generate /etc/os-release. See # Generate /etc/os-release. See

View File

@ -245,6 +245,7 @@
./services/desktops/gnome3/gnome-user-share.nix ./services/desktops/gnome3/gnome-user-share.nix
./services/desktops/gnome3/gpaste.nix ./services/desktops/gnome3/gpaste.nix
./services/desktops/gnome3/gvfs.nix ./services/desktops/gnome3/gvfs.nix
./services/desktops/gnome3/rygel.nix
./services/desktops/gnome3/seahorse.nix ./services/desktops/gnome3/seahorse.nix
./services/desktops/gnome3/sushi.nix ./services/desktops/gnome3/sushi.nix
./services/desktops/gnome3/tracker.nix ./services/desktops/gnome3/tracker.nix
@ -271,9 +272,11 @@
./services/hardware/interception-tools.nix ./services/hardware/interception-tools.nix
./services/hardware/irqbalance.nix ./services/hardware/irqbalance.nix
./services/hardware/lcd.nix ./services/hardware/lcd.nix
./services/hardware/lirc.nix
./services/hardware/nvidia-optimus.nix ./services/hardware/nvidia-optimus.nix
./services/hardware/pcscd.nix ./services/hardware/pcscd.nix
./services/hardware/pommed.nix ./services/hardware/pommed.nix
./services/hardware/ratbagd.nix
./services/hardware/sane.nix ./services/hardware/sane.nix
./services/hardware/sane_extra_backends/brscan4.nix ./services/hardware/sane_extra_backends/brscan4.nix
./services/hardware/tcsd.nix ./services/hardware/tcsd.nix
@ -406,6 +409,7 @@
./services/misc/taskserver ./services/misc/taskserver
./services/misc/tzupdate.nix ./services/misc/tzupdate.nix
./services/misc/uhub.nix ./services/misc/uhub.nix
./services/misc/weechat.nix
./services/misc/xmr-stak.nix ./services/misc/xmr-stak.nix
./services/misc/zookeeper.nix ./services/misc/zookeeper.nix
./services/monitoring/apcupsd.nix ./services/monitoring/apcupsd.nix
@ -494,6 +498,7 @@
./services/networking/dnsdist.nix ./services/networking/dnsdist.nix
./services/networking/dnsmasq.nix ./services/networking/dnsmasq.nix
./services/networking/ejabberd.nix ./services/networking/ejabberd.nix
./services/networking/epmd.nix
./services/networking/fakeroute.nix ./services/networking/fakeroute.nix
./services/networking/ferm.nix ./services/networking/ferm.nix
./services/networking/firefox/sync-server.nix ./services/networking/firefox/sync-server.nix
@ -515,9 +520,11 @@
./services/networking/heyefi.nix ./services/networking/heyefi.nix
./services/networking/hostapd.nix ./services/networking/hostapd.nix
./services/networking/htpdate.nix ./services/networking/htpdate.nix
./services/networking/hylafax/default.nix
./services/networking/i2pd.nix ./services/networking/i2pd.nix
./services/networking/i2p.nix ./services/networking/i2p.nix
./services/networking/iodine.nix ./services/networking/iodine.nix
./services/networking/iperf3.nix
./services/networking/ircd-hybrid/default.nix ./services/networking/ircd-hybrid/default.nix
./services/networking/iwd.nix ./services/networking/iwd.nix
./services/networking/keepalived/default.nix ./services/networking/keepalived/default.nix
@ -552,6 +559,7 @@
./services/networking/nsd.nix ./services/networking/nsd.nix
./services/networking/ntopng.nix ./services/networking/ntopng.nix
./services/networking/ntpd.nix ./services/networking/ntpd.nix
./services/networking/nullidentdmod.nix
./services/networking/nylon.nix ./services/networking/nylon.nix
./services/networking/ocserv.nix ./services/networking/ocserv.nix
./services/networking/oidentd.nix ./services/networking/oidentd.nix
@ -676,6 +684,7 @@
./services/web-apps/atlassian/confluence.nix ./services/web-apps/atlassian/confluence.nix
./services/web-apps/atlassian/crowd.nix ./services/web-apps/atlassian/crowd.nix
./services/web-apps/atlassian/jira.nix ./services/web-apps/atlassian/jira.nix
./services/web-apps/codimd.nix
./services/web-apps/frab.nix ./services/web-apps/frab.nix
./services/web-apps/mattermost.nix ./services/web-apps/mattermost.nix
./services/web-apps/nexus.nix ./services/web-apps/nexus.nix

View File

@ -7,9 +7,12 @@
services.xserver = { services.xserver = {
enable = true; enable = true;
displayManager.sddm.enable = true; displayManager.sddm.enable = true;
desktopManager.plasma5.enable = true; desktopManager.plasma5 = {
enable = true;
enableQt4Support = false;
};
libinput.enable = true; # for touchpad support on many laptops libinput.enable = true; # for touchpad support on many laptops
}; };
environment.systemPackages = [ pkgs.glxinfo ]; environment.systemPackages = [ pkgs.glxinfo pkgs.firefox ];
} }

View File

@ -22,7 +22,7 @@ with lib;
config = { config = {
# Enable in installer, even if the minimal profile disables it. # Enable in installer, even if the minimal profile disables it.
services.nixosManual.enable = mkForce true; documentation.nixos.enable = mkForce true;
# Show the manual. # Show the manual.
services.nixosManual.showManual = true; services.nixosManual.showManual = true;

View File

@ -12,7 +12,7 @@ with lib;
i18n.supportedLocales = [ (config.i18n.defaultLocale + "/UTF-8") ]; i18n.supportedLocales = [ (config.i18n.defaultLocale + "/UTF-8") ];
documentation.enable = mkDefault false; documentation.enable = mkDefault false;
services.nixosManual.enable = mkDefault false; documentation.nixos.enable = mkDefault false;
sound.enable = mkDefault false; sound.enable = mkDefault false;
} }

View File

@ -126,7 +126,9 @@ in
programs.bash = { programs.bash = {
shellInit = '' shellInit = ''
${config.system.build.setEnvironment.text} if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]; then
. ${config.system.build.setEnvironment}
fi
${cfge.shellInit} ${cfge.shellInit}
''; '';
@ -166,11 +168,11 @@ in
# Read system-wide modifications. # Read system-wide modifications.
if test -f /etc/profile.local; then if test -f /etc/profile.local; then
. /etc/profile.local . /etc/profile.local
fi fi
if [ -n "''${BASH_VERSION:-}" ]; then if [ -n "''${BASH_VERSION:-}" ]; then
. /etc/bashrc . /etc/bashrc
fi fi
''; '';
@ -191,12 +193,12 @@ in
# We are not always an interactive shell. # We are not always an interactive shell.
if [ -n "$PS1" ]; then if [ -n "$PS1" ]; then
${cfg.interactiveShellInit} ${cfg.interactiveShellInit}
fi fi
# Read system-wide modifications. # Read system-wide modifications.
if test -f /etc/bashrc.local; then if test -f /etc/bashrc.local; then
. /etc/bashrc.local . /etc/bashrc.local
fi fi
''; '';

View File

@ -32,6 +32,8 @@ in
environment.etc = optionals (cfg.profiles != {}) environment.etc = optionals (cfg.profiles != {})
(mapAttrsToList mkDconfProfile cfg.profiles); (mapAttrsToList mkDconfProfile cfg.profiles);
services.dbus.packages = [ pkgs.gnome3.dconf ];
environment.variables.GIO_EXTRA_MODULES = optional cfg.enable environment.variables.GIO_EXTRA_MODULES = optional cfg.enable
"${pkgs.gnome3.dconf.lib}/lib/gio/modules"; "${pkgs.gnome3.dconf.lib}/lib/gio/modules";
# https://github.com/NixOS/nixpkgs/pull/31891 # https://github.com/NixOS/nixpkgs/pull/31891

View File

@ -109,7 +109,9 @@ in
set fish_function_path ${pkgs.fish-foreign-env}/share/fish-foreign-env/functions $__fish_datadir/functions set fish_function_path ${pkgs.fish-foreign-env}/share/fish-foreign-env/functions $__fish_datadir/functions
# source the NixOS environment config # source the NixOS environment config
fenv source ${config.system.build.setEnvironment} if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]
fenv source ${config.system.build.setEnvironment}
end
# clear fish_function_path so that it will be correctly set when we return to $__fish_datadir/config.fish # clear fish_function_path so that it will be correctly set when we return to $__fish_datadir/config.fish
set -e fish_function_path set -e fish_function_path
@ -150,7 +152,6 @@ in
and begin and begin
${fishAliases} ${fishAliases}
set fish_function_path ${pkgs.fish-foreign-env}/share/fish-foreign-env/functions $fish_function_path set fish_function_path ${pkgs.fish-foreign-env}/share/fish-foreign-env/functions $fish_function_path
fenv source /etc/fish/foreign-env/interactiveShellInit > /dev/null fenv source /etc/fish/foreign-env/interactiveShellInit > /dev/null
set -e fish_function_path[1] set -e fish_function_path[1]

View File

@ -44,10 +44,23 @@ in
enable = mkEnableOption "yabar"; enable = mkEnableOption "yabar";
package = mkOption { package = mkOption {
default = pkgs.yabar; default = pkgs.yabar-unstable;
example = literalExample "pkgs.yabar-unstable"; example = literalExample "pkgs.yabar";
type = types.package; type = types.package;
# `yabar-stable` segfaults under certain conditions.
apply = x: if x == pkgs.yabar-unstable then x else flip warn x ''
It's not recommended to use `yabar' with `programs.yabar': the (old) stable release
tends to segfault under certain circumstances:
* https://github.com/geommer/yabar/issues/86
* https://github.com/geommer/yabar/issues/68
* https://github.com/geommer/yabar/issues/143
Most of them no longer occur on master; until a new release is published, it's recommended
to use `yabar-unstable'.
'';
description = '' description = ''
The package which contains the `yabar` binary. The package which contains the `yabar` binary.
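A configuration sketch matching the new default; the package is spelled out only for clarity, and no bar definition is shown here:

{ pkgs, ... }:
{
  programs.yabar = {
    enable = true;
    package = pkgs.yabar-unstable;  # now the default; the old `pkgs.yabar` only warns
  };
}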

View File

@ -70,7 +70,7 @@ in
promptInit = mkOption { promptInit = mkOption {
default = '' default = ''
if [ "$TERM" != dumb ]; then if [ "$TERM" != dumb ]; then
autoload -U promptinit && promptinit && prompt walters autoload -U promptinit && promptinit && prompt walters
fi fi
''; '';
description = '' description = ''
@ -116,7 +116,9 @@ in
if [ -n "$__ETC_ZSHENV_SOURCED" ]; then return; fi if [ -n "$__ETC_ZSHENV_SOURCED" ]; then return; fi
export __ETC_ZSHENV_SOURCED=1 export __ETC_ZSHENV_SOURCED=1
${config.system.build.setEnvironment.text} if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]; then
. ${config.system.build.setEnvironment}
fi
${cfge.shellInit} ${cfge.shellInit}
@ -124,7 +126,7 @@ in
# Read system-wide modifications. # Read system-wide modifications.
if test -f /etc/zshenv.local; then if test -f /etc/zshenv.local; then
. /etc/zshenv.local . /etc/zshenv.local
fi fi
''; '';
@ -143,7 +145,7 @@ in
# Read system-wide modifications. # Read system-wide modifications.
if test -f /etc/zprofile.local; then if test -f /etc/zprofile.local; then
. /etc/zprofile.local . /etc/zprofile.local
fi fi
''; '';
@ -169,7 +171,7 @@ in
# Tell zsh how to find installed completions # Tell zsh how to find installed completions
for p in ''${(z)NIX_PROFILES}; do for p in ''${(z)NIX_PROFILES}; do
fpath+=($p/share/zsh/site-functions $p/share/zsh/$ZSH_VERSION/functions $p/share/zsh/vendor-completions) fpath+=($p/share/zsh/site-functions $p/share/zsh/$ZSH_VERSION/functions $p/share/zsh/vendor-completions)
done done
${optionalString cfg.enableGlobalCompInit "autoload -U compinit && compinit"} ${optionalString cfg.enableGlobalCompInit "autoload -U compinit && compinit"}
@ -184,7 +186,7 @@ in
# Read system-wide modifications. # Read system-wide modifications.
if test -f /etc/zshrc.local; then if test -f /etc/zshrc.local; then
. /etc/zshrc.local . /etc/zshrc.local
fi fi
''; '';

View File

@ -276,6 +276,7 @@ with lib;
(mkRenamedOptionModule [ "programs" "info" "enable" ] [ "documentation" "info" "enable" ]) (mkRenamedOptionModule [ "programs" "info" "enable" ] [ "documentation" "info" "enable" ])
(mkRenamedOptionModule [ "programs" "man" "enable" ] [ "documentation" "man" "enable" ]) (mkRenamedOptionModule [ "programs" "man" "enable" ] [ "documentation" "man" "enable" ])
(mkRenamedOptionModule [ "services" "nixosManual" "enable" ] [ "documentation" "nixos" "enable" ])
] ++ (flip map [ "blackboxExporter" "collectdExporter" "fritzboxExporter" ] ++ (flip map [ "blackboxExporter" "collectdExporter" "fritzboxExporter"
"jsonExporter" "minioExporter" "nginxExporter" "nodeExporter" "jsonExporter" "minioExporter" "nginxExporter" "nodeExporter"

View File

@ -302,15 +302,15 @@ in
workdir="$(mktemp -d)" workdir="$(mktemp -d)"
# Create CA # Create CA
openssl genrsa -des3 -passout pass:x -out $workdir/ca.pass.key 2048 openssl genrsa -des3 -passout pass:xxxx -out $workdir/ca.pass.key 2048
openssl rsa -passin pass:x -in $workdir/ca.pass.key -out $workdir/ca.key openssl rsa -passin pass:xxxx -in $workdir/ca.pass.key -out $workdir/ca.key
openssl req -new -key $workdir/ca.key -out $workdir/ca.csr \ openssl req -new -key $workdir/ca.key -out $workdir/ca.csr \
-subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=Security Department/CN=example.com" -subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=Security Department/CN=example.com"
openssl x509 -req -days 1 -in $workdir/ca.csr -signkey $workdir/ca.key -out $workdir/ca.crt openssl x509 -req -days 1 -in $workdir/ca.csr -signkey $workdir/ca.key -out $workdir/ca.crt
# Create key # Create key
openssl genrsa -des3 -passout pass:x -out $workdir/server.pass.key 2048 openssl genrsa -des3 -passout pass:xxxx -out $workdir/server.pass.key 2048
openssl rsa -passin pass:x -in $workdir/server.pass.key -out $workdir/server.key openssl rsa -passin pass:xxxx -in $workdir/server.pass.key -out $workdir/server.key
openssl req -new -key $workdir/server.key -out $workdir/server.csr \ openssl req -new -key $workdir/server.key -out $workdir/server.csr \
-subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=IT Department/CN=example.com" -subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=IT Department/CN=example.com"
openssl x509 -req -days 1 -in $workdir/server.csr -CA $workdir/ca.crt \ openssl x509 -req -days 1 -in $workdir/server.csr -CA $workdir/ca.crt \

View File

@ -8,6 +8,7 @@ let
# configuration file can be generated by http://slurm.schedmd.com/configurator.html # configuration file can be generated by http://slurm.schedmd.com/configurator.html
configFile = pkgs.writeTextDir "slurm.conf" configFile = pkgs.writeTextDir "slurm.conf"
'' ''
ClusterName=${cfg.clusterName}
${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''} ${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''} ${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''} ${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
@ -105,6 +106,15 @@ in
''; '';
}; };
clusterName = mkOption {
type = types.str;
default = "default";
example = "myCluster";
description = ''
Necessary to distinguish accounting records in a multi-cluster environment.
'';
};
nodeName = mkOption { nodeName = mkOption {
type = types.nullOr types.str; type = types.nullOr types.str;
default = null; default = null;
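Sketch of the new option in context; the cluster and control machine names are illustrative:

{
  services.slurm = {
    clusterName = "lab";           # keeps accounting records apart per cluster
    controlMachine = "slurm-ctl";  # illustrative
  };
}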

View File

@ -0,0 +1,30 @@
# rygel service.
{ config, lib, pkgs, ... }:
with lib;
{
###### interface
options = {
services.gnome3.rygel = {
enable = mkOption {
default = false;
description = ''
Whether to enable the Rygel UPnP media server.
You will also need to allow UPnP connections in the firewall; see the following <link xlink:href="https://github.com/NixOS/nixpkgs/pull/45045#issuecomment-416030795">comment</link>.
'';
type = types.bool;
};
};
};
###### implementation
config = mkIf config.services.gnome3.rygel.enable {
environment.systemPackages = [ pkgs.gnome3.rygel ];
services.dbus.packages = [ pkgs.gnome3.rygel ];
systemd.packages = [ pkgs.gnome3.rygel ];
};
}
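Enabling the new service is a single option; opening the firewall is intentionally left to the user, as the linked comment explains:

{
  services.gnome3.rygel.enable = true;
  # UPnP ports still have to be opened manually in networking.firewall,
  # depending on how Rygel is configured.
}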

View File

@ -0,0 +1,85 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.lirc;
in {
###### interface
options = {
services.lirc = {
enable = mkEnableOption "LIRC daemon";
options = mkOption {
type = types.lines;
example = ''
[lircd]
nodaemon = False
'';
description = "LIRC default options descriped in man:lircd(8) (<filename>lirc_options.conf</filename>)";
};
configs = mkOption {
type = types.listOf types.lines;
description = "Configurations for lircd to load, see man:lircd.conf(5) for details (<filename>lircd.conf</filename>)";
};
extraArguments = mkOption {
type = types.listOf types.str;
default = [];
description = "Extra arguments to lircd.";
};
};
};
###### implementation
config = mkIf cfg.enable {
# Note: LIRC executables raise a warning if lirc_options.conf does not exist
environment.etc."lirc/lirc_options.conf".text = cfg.options;
environment.systemPackages = [ pkgs.lirc ];
systemd.sockets.lircd = {
description = "LIRC daemon socket";
wantedBy = [ "sockets.target" ];
socketConfig = {
ListenStream = "/run/lirc/lircd";
SocketUser = "lirc";
SocketMode = "0660";
};
};
systemd.services.lircd = let
configFile = pkgs.writeText "lircd.conf" (builtins.concatStringsSep "\n" cfg.configs);
in {
description = "LIRC daemon service";
after = [ "network.target" ];
unitConfig.Documentation = [ "man:lircd(8)" ];
serviceConfig = {
RuntimeDirectory = "lirc";
ExecStart = ''
${pkgs.lirc}/bin/lircd --nodaemon \
${escapeShellArgs cfg.extraArguments} \
${configFile}
'';
User = "lirc";
};
};
users.users.lirc = {
uid = config.ids.uids.lirc;
group = "lirc";
description = "LIRC user for lircd";
};
users.groups.lirc.gid = config.ids.gids.lirc;
};
}
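A hedged configuration sketch for the new module; the remote definition is only a placeholder, a real stanza would come from the LIRC remotes database:

{
  services.lirc = {
    enable = true;
    options = ''
      [lircd]
      nodaemon = False
    '';
    configs = [
      ''
        # placeholder -- substitute a real lircd.conf remote definition
        begin remote
          name  my_remote
        end remote
      ''
    ];
  };
}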

View File

@ -0,0 +1,32 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.ratbagd;
in
{
###### interface
options = {
services.ratbagd = {
enable = mkOption {
default = false;
description = ''
Whether to enable ratbagd for configuring gaming mice.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
# Give users access to the "ratbagctl" tool
environment.systemPackages = [ pkgs.libratbag ];
services.dbus.packages = [ pkgs.libratbag ];
systemd.packages = [ pkgs.libratbag ];
};
}
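The matching configuration is a one-liner; ratbagctl from pkgs.libratbag then talks to the daemon over D-Bus:

{
  services.ratbagd.enable = true;
}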

View File

@ -26,15 +26,14 @@ in {
name = "trezord-udev-rules"; name = "trezord-udev-rules";
destination = "/etc/udev/rules.d/51-trezor.rules"; destination = "/etc/udev/rules.d/51-trezor.rules";
text = '' text = ''
# Trezor 1 # TREZOR v1 (One)
SUBSYSTEM=="usb", ATTR{idVendor}=="534c", ATTR{idProduct}=="0001", MODE="0666", GROUP="dialout", SYMLINK+="trezor%n" SUBSYSTEM=="usb", ATTR{idVendor}=="534c", ATTR{idProduct}=="0001", MODE="0666", GROUP="dialout", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
KERNEL=="hidraw*", ATTRS{idVendor}=="534c", ATTRS{idProduct}=="0001", MODE="0666", GROUP="dialout" KERNEL=="hidraw*", ATTRS{idVendor}=="534c", ATTRS{idProduct}=="0001", MODE="0666", GROUP="dialout", TAG+="uaccess", TAG+="udev-acl"
# Trezor 2 (Model-T) # TREZOR v2 (T)
SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c0", MODE="0661", GROUP="dialout", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n" SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c0", MODE="0661", GROUP="dialout", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c1", MODE="0660", GROUP="dialout", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n" SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c1", MODE="0666", GROUP="dialout", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
KERNEL=="hidraw*", ATTRS{idVendor}=="1209", ATTRS{idProduct}=="53c1", MODE="0660", GROUP="dialout", TAG+="uaccess", TAG+="udev-acl" KERNEL=="hidraw*", ATTRS{idVendor}=="1209", ATTRS{idProduct}=="53c1", MODE="0666", GROUP="dialout", TAG+="uaccess", TAG+="udev-acl"
];
''; '';
}); });

View File

@ -2,7 +2,7 @@
let let
inherit (lib) mkIf mkOption singleton types; inherit (lib) mkIf mkOption singleton types;
inherit (pkgs) coreutils exim; inherit (pkgs) coreutils;
cfg = config.services.exim; cfg = config.services.exim;
in in
@ -57,6 +57,16 @@ in
''; '';
}; };
package = mkOption {
type = types.package;
default = pkgs.exim;
defaultText = "pkgs.exim";
description = ''
The Exim derivation to use.
This can be used to enable features such as LDAP or PAM support.
'';
};
}; };
}; };
@ -74,7 +84,7 @@ in
spool_directory = ${cfg.spoolDir} spool_directory = ${cfg.spoolDir}
${cfg.config} ${cfg.config}
''; '';
systemPackages = [ exim ]; systemPackages = [ cfg.package ];
}; };
users.users = singleton { users.users = singleton {
@ -89,14 +99,14 @@ in
gid = config.ids.gids.exim; gid = config.ids.gids.exim;
}; };
security.wrappers.exim.source = "${exim}/bin/exim"; security.wrappers.exim.source = "${cfg.package}/bin/exim";
systemd.services.exim = { systemd.services.exim = {
description = "Exim Mail Daemon"; description = "Exim Mail Daemon";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
restartTriggers = [ config.environment.etc."exim.conf".source ]; restartTriggers = [ config.environment.etc."exim.conf".source ];
serviceConfig = { serviceConfig = {
ExecStart = "${exim}/bin/exim -bdf -q30m"; ExecStart = "${cfg.package}/bin/exim -bdf -q30m";
ExecReload = "${coreutils}/bin/kill -HUP $MAINPID"; ExecReload = "${coreutils}/bin/kill -HUP $MAINPID";
}; };
preStart = '' preStart = ''
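The new package option lets the module run a customised Exim build. A hedged sketch -- the override flag name is an assumption; check the exim derivation for its real arguments:

{ pkgs, ... }:
{
  services.exim = {
    enable = true;
    # hypothetical flag name; adjust to the exim derivation's actual arguments
    package = pkgs.exim.override { enableLDAP = true; };
  };
}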

View File

@ -89,7 +89,7 @@ in
bindSocket.path = mkOption { bindSocket.path = mkOption {
type = types.str; type = types.str;
default = "/run/rmilter/rmilter.sock"; default = "/run/rmilter.sock";
description = '' description = ''
Path to Unix domain socket to listen on. Path to Unix domain socket to listen on.
''; '';
@ -193,6 +193,9 @@ in
config = mkMerge [ config = mkMerge [
(mkIf cfg.enable { (mkIf cfg.enable {
warnings = [
''`config.services.rmilter' is deprecated: `rmilter' is deprecated and unsupported upstream, and will be removed in a future release. Use the built-in rspamd milter instead.''
];
users.users = singleton { users.users = singleton {
name = cfg.user; name = cfg.user;

View File

@ -73,6 +73,24 @@ in {
${cfg.home}/transcoders. ${cfg.home}/transcoders.
''; '';
}; };
jvmOptions = mkOption {
description = ''
Extra command line options for the JVM running AirSonic.
Useful for sending jukebox output to non-default alsa
devices.
'';
default = [
];
type = types.listOf types.str;
example = [
"-Djavax.sound.sampled.Clip='#CODEC [plughw:1,0]'"
"-Djavax.sound.sampled.Port='#Port CODEC [hw:1]'"
"-Djavax.sound.sampled.SourceDataLine='#CODEC [plughw:1,0]'"
"-Djavax.sound.sampled.TargetDataLine='#CODEC [plughw:1,0]'"
];
};
}; };
}; };
@ -98,6 +116,7 @@ in {
-Dserver.port=${toString cfg.port} \ -Dserver.port=${toString cfg.port} \
-Dairsonic.contextPath=${cfg.contextPath} \ -Dairsonic.contextPath=${cfg.contextPath} \
-Djava.awt.headless=true \ -Djava.awt.headless=true \
${toString cfg.jvmOptions} \
-verbose:gc \ -verbose:gc \
-jar ${pkgs.airsonic}/webapps/airsonic.war -jar ${pkgs.airsonic}/webapps/airsonic.war
''; '';

View File

@ -36,11 +36,18 @@ in
after = [ "network.target" ]; after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
preStart = '' preStart = ''
test -d ${cfg.dataDir} || { if [ -d ${cfg.dataDir} ]
echo "Creating initial Emby data directory in ${cfg.dataDir}" then
mkdir -p ${cfg.dataDir} for plugin in ${cfg.dataDir}/plugins/*
chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir} do
} echo "Correcting permissions of plugin: $plugin"
chmod u+w $plugin
done
else
echo "Creating initial Emby data directory in ${cfg.dataDir}"
mkdir -p ${cfg.dataDir}
chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
fi
''; '';
serviceConfig = { serviceConfig = {

View File

@ -1,91 +1,18 @@
# This module includes the NixOS man-pages in the system environment, # This module optionally starts a browser that shows the NixOS manual
# and optionally starts a browser that shows the NixOS manual on one # on one of the virtual consoles which is useful for the installation
# of the virtual consoles. The latter is useful for the installation
# CD. # CD.
{ config, lib, pkgs, baseModules, ... }: { config, lib, pkgs, ... }:
with lib; with lib;
let let cfg = config.services.nixosManual; in
cfg = config.services.nixosManual;
/* For the purpose of generating docs, evaluate options with each derivation
in `pkgs` (recursively) replaced by a fake with path "\${pkgs.attribute.path}".
It isn't perfect, but it seems to cover a vast majority of use cases.
Caveat: even if the package is reached by a different means,
the path above will be shown and not e.g. `${config.services.foo.package}`. */
manual = import ../../../doc/manual rec {
inherit pkgs config;
version = config.system.nixos.release;
revision = "release-${version}";
options =
let
scrubbedEval = evalModules {
modules = [ { nixpkgs.localSystem = config.nixpkgs.localSystem; } ] ++ baseModules;
args = (config._module.args) // { modules = [ ]; };
specialArgs = { pkgs = scrubDerivations "pkgs" pkgs; };
};
scrubDerivations = namePrefix: pkgSet: mapAttrs
(name: value:
let wholeName = "${namePrefix}.${name}"; in
if isAttrs value then
scrubDerivations wholeName value
// (optionalAttrs (isDerivation value) { outPath = "\${${wholeName}}"; })
else value
)
pkgSet;
in scrubbedEval.options;
};
entry = "${manual.manual}/share/doc/nixos/index.html";
helpScript = pkgs.writeScriptBin "nixos-help"
''
#! ${pkgs.runtimeShell} -e
# Finds first executable browser in a colon-separated list.
# (see how xdg-open defines BROWSER)
browser="$(
IFS=: ; for b in $BROWSER; do
[ -n "$(type -P "$b" || true)" ] && echo "$b" && break
done
)"
if [ -z "$browser" ]; then
browser="$(type -P xdg-open || true)"
if [ -z "$browser" ]; then
browser="$(type -P w3m || true)"
if [ -z "$browser" ]; then
echo "$0: unable to start a web browser; please set \$BROWSER"
exit 1
fi
fi
fi
exec "$browser" ${entry}
'';
desktopItem = pkgs.makeDesktopItem {
name = "nixos-manual";
desktopName = "NixOS Manual";
genericName = "View NixOS documentation in a web browser";
icon = "nix-snowflake";
exec = "${helpScript}/bin/nixos-help";
categories = "System";
};
in
{ {
options = { options = {
services.nixosManual.enable = mkOption { # TODO(@oxij): rename this to `.enable` eventually.
type = types.bool;
default = true;
description = ''
Whether to build the NixOS manual pages.
'';
};
services.nixosManual.showManual = mkOption { services.nixosManual.showManual = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
@ -114,36 +41,28 @@ in
}; };
config = mkIf cfg.enable { config = mkIf cfg.showManual {
system.build.manual = manual; assertions = [{
assertion = config.documentation.nixos.enable;
message = "Can't enable `service.nixosManual.showManual` without `documentation.nixos.enable`";
}];
environment.systemPackages = [] boot.extraTTYs = [ "tty${toString cfg.ttyNumber}" ];
++ optionals config.services.xserver.enable [ desktopItem pkgs.nixos-icons ]
++ optional config.documentation.man.enable manual.manpages
++ optionals config.documentation.doc.enable [ manual.manual helpScript ];
boot.extraTTYs = mkIf cfg.showManual ["tty${toString cfg.ttyNumber}"]; systemd.services."nixos-manual" = {
description = "NixOS Manual";
systemd.services = optionalAttrs cfg.showManual wantedBy = [ "multi-user.target" ];
{ "nixos-manual" = serviceConfig = {
{ description = "NixOS Manual"; ExecStart = "${cfg.browser} ${config.system.build.manual.manualHTMLIndex}";
wantedBy = [ "multi-user.target" ]; StandardInput = "tty";
serviceConfig = StandardOutput = "tty";
{ ExecStart = "${cfg.browser} ${entry}"; TTYPath = "/dev/tty${toString cfg.ttyNumber}";
StandardInput = "tty"; TTYReset = true;
StandardOutput = "tty"; TTYVTDisallocate = true;
TTYPath = "/dev/tty${toString cfg.ttyNumber}"; Restart = "always";
TTYReset = true;
TTYVTDisallocate = true;
Restart = "always";
};
};
}; };
};
services.mingetty.helpLine = "\nRun `nixos-help` "
+ lib.optionalString cfg.showManual "or press <Alt-F${toString cfg.ttyNumber}> "
+ "for the NixOS manual.";
}; };
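After this rewrite the module only provides the manual-on-a-TTY feature and asserts that the documentation module is still enabled; an installer-style sketch:

{
  documentation.nixos.enable = true;       # required by the assertion above
  services.nixosManual.showManual = true;  # starts the browser on its own virtual console
}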

View File

@ -0,0 +1,56 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.weechat;
in
{
options.services.weechat = {
enable = mkEnableOption "weechat";
root = mkOption {
description = "Weechat state directory.";
type = types.str;
default = "/var/lib/weechat";
};
sessionName = mkOption {
description = "Name of the `screen' session for weechat.";
default = "weechat-screen";
type = types.str;
};
binary = mkOption {
description = "Binary to execute (by default \${weechat}/bin/weechat).";
example = literalExample ''
''${pkgs.weechat}/bin/weechat-headless
'';
default = "${pkgs.weechat}/bin/weechat";
};
};
config = mkIf cfg.enable {
users = {
groups.weechat = {};
users.weechat = {
createHome = true;
group = "weechat";
home = cfg.root;
isSystemUser = true;
};
};
systemd.services.weechat = {
environment.WEECHAT_HOME = cfg.root;
serviceConfig = {
User = "weechat";
Group = "weechat";
RemainAfterExit = "yes";
};
script = "exec ${pkgs.screen}/bin/screen -Dm -S ${cfg.sessionName} ${cfg.binary}";
wantedBy = [ "multi-user.target" ];
wants = [ "network.target" ];
};
};
meta.doc = ./weechat.xml;
}

View File

@ -0,0 +1,61 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-services-weechat">
<title>WeeChat</title>
<para><link xlink:href="https://weechat.org/">WeeChat</link> is a fast and extensible IRC client.</para>
<section><title>Basic Usage</title>
<para>
By default, the module creates a
<literal><link xlink:href="https://www.freedesktop.org/wiki/Software/systemd/">systemd</link></literal> unit
which runs the chat client in a detached
<literal><link xlink:href="https://www.gnu.org/software/screen/">screen</link></literal> session.
</para>
<para>
This can be done by enabling the <literal>weechat</literal> service:
<programlisting>
{ ... }:
{
<link linkend="opt-services.weechat.enable">services.weechat.enable</link> = true;
}
</programlisting>
</para>
<para>
The service is managed by a dedicated user
named <literal>weechat</literal> in the state directory
<literal>/var/lib/weechat</literal>.
</para>
</section>
<section><title>Re-attaching to WeeChat</title>
<para>
WeeChat runs in a screen session owned by a dedicated user. To explicitly
allow another user to attach to this session, the <literal>screenrc</literal> needs to be tweaked
by adding <link xlink:href="https://www.gnu.org/software/screen/manual/html_node/Multiuser.html#Multiuser">multiuser</link> support:
<programlisting>
{
<link linkend="opt-programs.screen.screenrc">programs.screen.screenrc</link> = ''
multiuser on
acladd normal_user
'';
}
</programlisting>
Now, the session can be re-attached like this:
<programlisting>
screen -r weechat-screen
</programlisting>
</para>
<para>
<emphasis>The session name can be changed using <link linkend="opt-services.weechat.sessionName">services.weechat.sessionName.</link></emphasis>
</para>
</section>
</chapter>

View File

@ -8,7 +8,6 @@ let
ddConf = { ddConf = {
dd_url = "https://app.datadoghq.com"; dd_url = "https://app.datadoghq.com";
skip_ssl_validation = "no"; skip_ssl_validation = "no";
api_key = "";
confd_path = "/etc/datadog-agent/conf.d"; confd_path = "/etc/datadog-agent/conf.d";
additional_checksd = "/etc/datadog-agent/checks.d"; additional_checksd = "/etc/datadog-agent/checks.d";
use_dogstatsd = true; use_dogstatsd = true;
@ -16,6 +15,7 @@ let
// optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; } // optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; }
// optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; } // optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; }
// optionalAttrs (cfg.tags != null ) { tags = concatStringsSep ", " cfg.tags; } // optionalAttrs (cfg.tags != null ) { tags = concatStringsSep ", " cfg.tags; }
// optionalAttrs (cfg.enableLiveProcessCollection) { process_config = { enabled = "true"; }; }
// cfg.extraConfig; // cfg.extraConfig;
# Generate Datadog configuration files for each configured checks. # Generate Datadog configuration files for each configured checks.
@ -125,6 +125,13 @@ in {
''; '';
}; };
enableLiveProcessCollection = mkOption {
description = ''
Whether to enable the live process collection agent.
'';
default = false;
type = types.bool;
};
checks = mkOption { checks = mkOption {
description = '' description = ''
Configuration for all Datadog checks. Keys of this attribute Configuration for all Datadog checks. Keys of this attribute
@ -206,7 +213,6 @@ in {
Group = "datadog"; Group = "datadog";
Restart = "always"; Restart = "always";
RestartSec = 2; RestartSec = 2;
PrivateTmp = true;
}; };
restartTriggers = [ datadogPkg ] ++ map (etc: etc.source) etcfiles; restartTriggers = [ datadogPkg ] ++ map (etc: etc.source) etcfiles;
} attrs; } attrs;
@ -229,6 +235,15 @@ in {
path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ]; path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ];
serviceConfig.ExecStart = "${datadogPkg}/bin/dd-jmxfetch"; serviceConfig.ExecStart = "${datadogPkg}/bin/dd-jmxfetch";
}); });
datadog-process-agent = lib.mkIf cfg.enableLiveProcessCollection (makeService {
description = "Datadog Live Process Agent";
path = [ ];
script = ''
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
${pkgs.datadog-process-agent}/bin/agent --config /etc/datadog-agent/datadog.yaml
'';
});
}; };
environment.etc = etcfiles; environment.etc = etcfiles;
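Live process collection is enabled with the new flag; the module then writes process_config into datadog.yaml and starts the datadog-process-agent unit shown above. Sketch, assuming the module's usual services.datadog-agent namespace and an illustrative key path:

{
  services.datadog-agent = {
    enable = true;
    apiKeyFile = "/run/keys/datadog-api-key";  # illustrative
    enableLiveProcessCollection = true;
  };
}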

View File

@ -235,7 +235,7 @@ in {
but without GF_ prefix but without GF_ prefix
''; '';
default = {}; default = {};
type = types.attrsOf types.str; type = with types; attrsOf (either str path);
}; };
}; };

View File

@ -17,9 +17,9 @@ let
launcher = writeScriptBin "riemann" '' launcher = writeScriptBin "riemann" ''
#!/bin/sh #!/bin/sh
exec ${jdk}/bin/java ${concatStringsSep "\n" cfg.extraJavaOpts} \ exec ${jdk}/bin/java ${concatStringsSep " " cfg.extraJavaOpts} \
-cp ${classpath} \ -cp ${classpath} \
riemann.bin ${writeText "riemann-config.clj" riemannConfig} riemann.bin ${cfg.configFile}
''; '';
in { in {
@ -37,7 +37,8 @@ in {
config = mkOption { config = mkOption {
type = types.lines; type = types.lines;
description = '' description = ''
Contents of the Riemann configuration file. Contents of the Riemann configuration file. For more complicated
configurations, use configFile instead.
''; '';
}; };
configFiles = mkOption { configFiles = mkOption {
@ -47,7 +48,15 @@ in {
Extra files containing Riemann configuration. These files will be Extra files containing Riemann configuration. These files will be
loaded at runtime by Riemann (with Clojure's loaded at runtime by Riemann (with Clojure's
<literal>load-file</literal> function) at the end of the <literal>load-file</literal> function) at the end of the
configuration. configuration when the config option is used; this is ignored if you
use configFile instead.
'';
};
configFile = mkOption {
type = types.str;
description = ''
A Riemann config file. Any files in the same directory as this file
will be added to the classpath by Riemann.
''; '';
}; };
extraClasspathEntries = mkOption { extraClasspathEntries = mkOption {
@ -77,6 +86,10 @@ in {
group = "riemann"; group = "riemann";
}; };
services.riemann.configFile = mkDefault (
writeText "riemann-config.clj" riemannConfig
);
systemd.services.riemann = { systemd.services.riemann = {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
path = [ inetutils ]; path = [ inetutils ];
@ -84,6 +97,7 @@ in {
User = "riemann"; User = "riemann";
ExecStart = "${launcher}/bin/riemann"; ExecStart = "${launcher}/bin/riemann";
}; };
serviceConfig.LimitNOFILE = 65536;
}; };
}; };
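Pointing Riemann at a hand-written file now bypasses the inline config option entirely, since configFile only defaults to the generated file. Sketch with an illustrative path (services.riemann.enable is assumed to exist as in other modules):

{
  services.riemann = {
    enable = true;
    # files next to this one are added to Riemann's classpath
    configFile = "/etc/riemann/riemann.config";
  };
}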

View File

@ -3,12 +3,10 @@
with lib; with lib;
let let
cfg = config.services.chrony;
stateDir = "/var/lib/chrony"; stateDir = "/var/lib/chrony";
keyFile = "${stateDir}/chrony.keys";
keyFile = "/etc/chrony.keys";
cfg = config.services.chrony;
configFile = pkgs.writeText "chrony.conf" '' configFile = pkgs.writeText "chrony.conf" ''
${concatMapStringsSep "\n" (server: "server " + server) cfg.servers} ${concatMapStringsSep "\n" (server: "server " + server) cfg.servers}
@ -19,7 +17,6 @@ let
} }
driftfile ${stateDir}/chrony.drift driftfile ${stateDir}/chrony.drift
keyfile ${keyFile} keyfile ${keyFile}
${optionalString (!config.time.hardwareClockInLocalTime) "rtconutc"} ${optionalString (!config.time.hardwareClockInLocalTime) "rtconutc"}
@ -27,18 +24,11 @@ let
${cfg.extraConfig} ${cfg.extraConfig}
''; '';
chronyFlags = "-n -m -u chrony -f ${configFile} ${toString cfg.extraFlags}"; chronyFlags = "-m -u chrony -f ${configFile} ${toString cfg.extraFlags}";
in in
{ {
###### interface
options = { options = {
services.chrony = { services.chrony = {
enable = mkOption { enable = mkOption {
default = false; default = false;
description = '' description = ''
@ -83,15 +73,9 @@ in
description = "Extra flags passed to the chronyd command."; description = "Extra flags passed to the chronyd command.";
}; };
}; };
}; };
###### implementation
config = mkIf cfg.enable { config = mkIf cfg.enable {
# Make chronyc available in the system path
environment.systemPackages = [ pkgs.chrony ]; environment.systemPackages = [ pkgs.chrony ];
users.groups = singleton users.groups = singleton
@ -113,26 +97,30 @@ in
{ description = "chrony NTP daemon"; { description = "chrony NTP daemon";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = [ "time-sync.target" ]; wants = [ "time-sync.target" ];
before = [ "time-sync.target" ]; before = [ "time-sync.target" ];
after = [ "network.target" ]; after = [ "network.target" ];
conflicts = [ "ntpd.service" "systemd-timesyncd.service" ]; conflicts = [ "ntpd.service" "systemd-timesyncd.service" ];
path = [ pkgs.chrony ]; path = [ pkgs.chrony ];
preStart = preStart = ''
'' mkdir -m 0755 -p ${stateDir}
mkdir -m 0755 -p ${stateDir} touch ${keyFile}
touch ${keyFile} chmod 0640 ${keyFile}
chmod 0640 ${keyFile} chown chrony:chrony ${stateDir} ${keyFile}
chown chrony:chrony ${stateDir} ${keyFile} '';
'';
serviceConfig = serviceConfig =
{ ExecStart = "${pkgs.chrony}/bin/chronyd ${chronyFlags}"; { Type = "forking";
ExecStart = "${pkgs.chrony}/bin/chronyd ${chronyFlags}";
ProtectHome = "yes";
ProtectSystem = "full";
PrivateTmp = "yes";
ConditionCapability = "CAP_SYS_TIME";
}; };
}; };
}; };
} }

View File

@ -0,0 +1,56 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.epmd;
in
{
###### interface
options.services.epmd = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable socket activation for Erlang Port Mapper Daemon (epmd),
which acts as a name server on all hosts involved in distributed
Erlang computations.
'';
};
package = mkOption {
type = types.package;
default = pkgs.erlang;
description = ''
The Erlang package providing the epmd binary. That way you can reuse
an Erlang runtime that is already installed for other purposes.
'';
};
};
###### implementation
config = mkIf cfg.enable {
systemd.sockets.epmd = rec {
description = "Erlang Port Mapper Daemon Activation Socket";
wantedBy = [ "sockets.target" ];
before = wantedBy;
socketConfig = {
ListenStream = "4369";
Accept = "false";
};
};
systemd.services.epmd = {
description = "Erlang Port Mapper Daemon";
after = [ "network.target" ];
requires = [ "epmd.socket" ];
serviceConfig = {
DynamicUser = true;
ExecStart = "${cfg.package}/bin/epmd -systemd";
Type = "notify";
};
};
};
}
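A sketch of the new socket-activated epmd; swapping in a different Erlang runtime is the main reason the package option exists:

{ pkgs, ... }:
{
  services.epmd = {
    enable = true;
    package = pkgs.erlang;  # replace with an already-used Erlang runtime to avoid a second one
  };
}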

View File

@ -0,0 +1,29 @@
{ config, lib, pkgs, ... }:
{
imports = [
./options.nix
./systemd.nix
];
config = lib.modules.mkIf config.services.hylafax.enable {
environment.systemPackages = [ pkgs.hylafaxplus ];
users.users.uucp = {
uid = config.ids.uids.uucp;
group = "uucp";
description = "Unix-to-Unix CoPy system";
isSystemUser = true;
inherit (config.users.users.nobody) home;
};
assertions = [{
assertion = config.services.hylafax.modems != {};
message = ''
HylaFAX cannot be used without modems.
Please define at least one modem with
<option>config.services.hylafax.modems</option>.
'';
}];
};
}
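A hedged end-to-end sketch pulling together the options defined further below; phone numbers, device name and modem type are placeholders:

{
  services.hylafax = {
    enable = true;
    countryCode = "49";
    areaCode = "30";
    modems.ttyS1 = {
      type = "cirrus";  # must match a config/<type> file in the spool area
      config = {
        FAXNumber = "123456";           # placeholder
        LocalIdentifier = "ExampleFax"; # placeholder
      };
    };
  };
  # hfaxd only accepts clients listed here; ".*" permits all users,
  # as in the userAccessFile documentation below.
  environment.etc."hosts.hfaxd" = {
    mode = "0600";
    user = "uucp";
    text = ".*";
  };
}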

View File

@ -0,0 +1,12 @@
{ ... }:
# see man:hylafax-config(5)
{
ModemGroup = [ ''"any:.*"'' ];
ServerTracing = "0x78701";
SessionTracing = "0x78701";
UUCPLockDir = "/var/lock";
}

View File

@ -0,0 +1,29 @@
#! @shell@ -e
# skip this if there are no modems at all
if ! stat -t "@spoolAreaPath@"/etc/config.* >/dev/null 2>&1
then
exit 0
fi
echo "faxq started, waiting for modem(s) to initialize..."
for i in `seq @timeoutSec@0 -1 0` # gracefully timeout
do
sleep 0.1
# done if status files exist, but don't mention initialization
if \
stat -t "@spoolAreaPath@"/status/* >/dev/null 2>&1 \
&& \
! grep --silent --ignore-case 'initializing server' \
"@spoolAreaPath@"/status/*
then
echo "modem(s) apparently ready"
exit 0
fi
# if i reached 0, modems probably failed to initialize
if test $i -eq 0
then
echo "warning: modem initialization timed out"
fi
done

View File

@ -0,0 +1,10 @@
{ ... }:
# see man:hfaxd(8)
{
ServerTracing = "0x91";
XferLogFile = "/clientlog";
}

View File

@ -0,0 +1,22 @@
{ pkgs, ... }:
# see man:hylafax-config(5)
{
TagLineFont = "etc/LiberationSans-25.pcf";
TagLineLocale = ''en_US.UTF-8'';
AdminGroup = "root"; # groups that can change server config
AnswerRotary = "fax"; # don't accept anything else but faxes
LogFileMode = "0640";
PriorityScheduling = true;
RecvFileMode = "0640";
ServerTracing = "0x78701";
SessionTracing = "0x78701";
UUCPLockDir = "/var/lock";
SendPageCmd = ''${pkgs.coreutils}/bin/false''; # prevent pager transmit
SendUUCPCmd = ''${pkgs.coreutils}/bin/false''; # prevent UUCP transmit
}

View File

@ -0,0 +1,375 @@
{ config, lib, pkgs, ... }:
let
inherit (lib.options) literalExample mkEnableOption mkOption;
inherit (lib.types) bool enum int lines loaOf nullOr path str submodule;
inherit (lib.modules) mkDefault mkIf mkMerge;
commonDescr = ''
Values can be either strings or integers
(which will be added to the config file verbatim)
or lists thereof
(which will be translated to multiple
lines with the same configuration key).
Boolean values are translated to "Yes" or "No".
The default contains some reasonable
configuration to yield an operational system.
'';
str1 = lib.types.addCheck str (s: s!=""); # non-empty string
int1 = lib.types.addCheck int (i: i>0); # positive integer
configAttrType =
# Options in HylaFAX configuration files can be
# booleans, strings, integers, or list thereof
# representing multiple config directives with the same key.
# This type definition resolves all
# those types into a list of strings.
let
inherit (lib.types) attrsOf coercedTo listOf;
innerType = coercedTo bool (x: if x then "Yes" else "No")
(coercedTo int (toString) str);
in
attrsOf (coercedTo innerType lib.singleton (listOf innerType));
cfg = config.services.hylafax;
modemConfigOptions = { name, config, ... }: {
options = {
name = mkOption {
type = str1;
example = "ttyS1";
description = ''
Name of modem device,
will be searched for in <filename>/dev</filename>.
'';
};
type = mkOption {
type = str1;
example = "cirrus";
description = ''
Name of modem configuration file,
will be searched for in <filename>config</filename>
in the spooling area directory.
'';
};
config = mkOption {
type = configAttrType;
example = {
AreaCode = "49";
LocalCode = "30";
FAXNumber = "123456";
LocalIdentifier = "LostInBerlin";
};
description = ''
Attribute set of values for the given modem.
${commonDescr}
Options defined here override options in
<option>commonModemConfig</option> for this modem.
'';
};
};
config.name = mkDefault name;
config.config.Include = [ "config/${config.type}" ];
};
defaultConfig =
let
inherit (config.security) wrapperDir;
inherit (config.services.mail.sendmailSetuidWrapper) program;
mkIfDefault = cond: value: mkIf cond (mkDefault value);
noWrapper = config.services.mail.sendmailSetuidWrapper==null;
# If a sendmail setuid wrapper exists,
# we add the path to the default configuration file.
# Otherwise, we use `false` to provoke
# an error if hylafax tries to use it.
c.sendmailPath = mkMerge [
(mkIfDefault noWrapper ''${pkgs.coreutils}/bin/false'')
(mkIfDefault (!noWrapper) ''${wrapperDir}/${program}'')
];
importDefaultConfig = file:
lib.attrsets.mapAttrs
(lib.trivial.const mkDefault)
(import file { inherit pkgs; });
c.commonModemConfig = importDefaultConfig ./modem-default.nix;
c.faxqConfig = importDefaultConfig ./faxq-default.nix;
c.hfaxdConfig = importDefaultConfig ./hfaxd-default.nix;
in
c;
localConfig =
let
c.hfaxdConfig.UserAccessFile = cfg.userAccessFile;
c.faxqConfig = lib.attrsets.mapAttrs
(lib.trivial.const (v: mkIf (v!=null) v))
{
AreaCode = cfg.areaCode;
CountryCode = cfg.countryCode;
LongDistancePrefix = cfg.longDistancePrefix;
InternationalPrefix = cfg.internationalPrefix;
};
c.commonModemConfig = c.faxqConfig;
in
c;
in
{
options.services.hylafax = {
enable = mkEnableOption ''HylaFAX server'';
autostart = mkOption {
type = bool;
default = true;
example = false;
description = ''
Autostart the HylaFAX queue manager at system start.
If this is <literal>false</literal>, the queue manager
will still be started if there are pending
jobs or if a user tries to connect to it.
'';
};
countryCode = mkOption {
type = nullOr str1;
default = null;
example = "49";
description = ''Country code for server and all modems.'';
};
areaCode = mkOption {
type = nullOr str1;
default = null;
example = "30";
description = ''Area code for server and all modems.'';
};
longDistancePrefix = mkOption {
type = nullOr str;
default = null;
example = "0";
description = ''Long distance prefix for server and all modems.'';
};
internationalPrefix = mkOption {
type = nullOr str;
default = null;
example = "00";
description = ''International prefix for server and all modems.'';
};
spoolAreaPath = mkOption {
type = path;
default = "/var/spool/fax";
description = ''
The spooling area will be created/maintained
at the location given here.
'';
};
userAccessFile = mkOption {
type = path;
default = "/etc/hosts.hfaxd";
description = ''
The <filename>hosts.hfaxd</filename>
file entry in the spooling area
will be symlinked to the location given here.
This file must exist and be
readable only by the <literal>uucp</literal> user.
See hosts.hfaxd(5) for details.
This configuration permits access for all users:
<literal>
environment.etc."hosts.hfaxd" = {
mode = "0600";
user = "uucp";
text = ".*";
};
</literal>
Note that host-based access can be controlled with
<option>config.systemd.sockets.hylafax-hfaxd.listenStreams</option>;
by default, only 127.0.0.1 is permitted to connect.
'';
};
sendmailPath = mkOption {
type = path;
example = literalExample "''${pkgs.postfix}/bin/sendmail";
# '' ; # fix vim
description = ''
Path to <filename>sendmail</filename> program.
The default uses the local sendmail wrapper
(see <option>config.services.mail.sendmailSetuidWrapper</option>)
if it exists, and otherwise the <filename>false</filename>
binary, which causes an error if HylaFAX tries to send mail.
'';
};
hfaxdConfig = mkOption {
type = configAttrType;
example.RecvqProtection = "0400";
description = ''
Attribute set of lines for the global
hfaxd config file <filename>etc/hfaxd.conf</filename>.
${commonDescr}
'';
};
faxqConfig = mkOption {
type = configAttrType;
example = {
InternationalPrefix = "00";
LongDistancePrefix = "0";
};
description = ''
Attribute set of lines for the global
faxq config file <filename>etc/config</filename>.
${commonDescr}
'';
};
commonModemConfig = mkOption {
type = configAttrType;
example = {
InternationalPrefix = "00";
LongDistancePrefix = "0";
};
description = ''
Attribute set of default values for
modem config files <filename>etc/config.*</filename>.
${commonDescr}
Think twice before changing
paths of fax-processing scripts.
'';
};
modems = mkOption {
type = loaOf (submodule [ modemConfigOptions ]);
default = {};
example.ttyS1 = {
type = "cirrus";
config = {
FAXNumber = "123456";
LocalIdentifier = "Smith";
};
};
description = ''
Description of installed modems.
At least one modem must be defined
to enable the HylaFAX server.
'';
};
spoolExtraInit = mkOption {
type = lines;
default = "";
example = ''chmod 0755 . # everyone may read my faxes'';
description = ''
Additional shell code that is executed within the
spooling area directory right after its setup.
'';
};
faxcron.enable.spoolInit = mkEnableOption ''
Purge old files from the spooling area with
<filename>faxcron</filename>
each time the spooling area is initialized.
'';
faxcron.enable.frequency = mkOption {
type = nullOr str1;
default = null;
example = "daily";
description = ''
Purge old files from the spooling area with
<filename>faxcron</filename> with the given frequency
(see systemd.time(7)).
'';
};
faxcron.infoDays = mkOption {
type = int1;
default = 30;
description = ''
Set the expiration time for data in the
remote machine information directory in days.
'';
};
faxcron.logDays = mkOption {
type = int1;
default = 30;
description = ''
Set the expiration time for
session trace log files in days.
'';
};
faxcron.rcvDays = mkOption {
type = int1;
default = 7;
description = ''
Set the expiration time for files in
the received facsimile queue in days.
'';
};
faxqclean.enable.spoolInit = mkEnableOption ''
Purge old files from the spooling area with
<filename>faxqclean</filename>
each time the spooling area is initialized.
'';
faxqclean.enable.frequency = mkOption {
type = nullOr str1;
default = null;
example = "daily";
description = ''
Purge old files from the spooling area with
<filename>faxqclean</filename> with the given frequency
(see systemd.time(7)).
'';
};
faxqclean.archiving = mkOption {
type = enum [ "never" "as-flagged" "always" ];
default = "as-flagged";
example = "always";
description = ''
Enable or suppress job archiving:
<literal>never</literal> disables job archiving,
<literal>as-flagged</literal> archives jobs that
have been flagged for archiving by sendfax,
<literal>always</literal> forces archiving of all jobs.
See also sendfax(1) and faxqclean(8).
'';
};
faxqclean.doneqMinutes = mkOption {
type = int1;
default = 15;
example = literalExample ''24*60'';
description = ''
Set the job
age threshold (in minutes) that controls how long
jobs may reside in the doneq directory.
'';
};
faxqclean.docqMinutes = mkOption {
type = int1;
default = 60;
example = literalExample ''24*60'';
description = ''
Set the document
age threshold (in minutes) that controls how long
unreferenced files may reside in the docq directory.
'';
};
};
config.services.hylafax =
mkIf
(config.services.hylafax.enable)
(mkMerge [ defaultConfig localConfig ])
;
}

View File

@ -0,0 +1,111 @@
#! @shell@ -e
# The following lines create/update the HylaFAX spool directory:
# Subdirectories/files with persistent data are kept,
# other directories/files are removed/recreated,
# mostly from the template spool
# directory in the HylaFAX package.
# This block explains how the spool area is
# derived from the spool template in the HylaFAX package:
#
# + capital letter: directory; file otherwise
# + P/p: persistent directory
# + F/f: directory with symlinks per entry
# + T/t: temporary data
# + S/s: single symlink into package
# |
# | + u: change ownership to uucp:uucp
# | + U: ..also change access mode to user-only
# | |
# archive P U
# bin S
# client T u (client connection info)
# config S
# COPYRIGHT s
# dev T u (maybe some FIFOs)
# docq P U
# doneq P U
# etc F contains customized config files!
# etc/hosts.hfaxd f
# etc/xferfaxlog f
# info P u (database of called devices)
# log P u (communication logs)
# pollq P U
# recvq P u
# sendq P U
# status T u (modem status info files)
# tmp T U
shopt -s dotglob # if bash sees "*", it also includes dot files
lnsym () { ln --symbol "$@" ; }  # create a symbolic link
lnsymfrc () { ln --symbolic --force "$@" ; }  # create a symbolic link, replacing an existing target
cprd () { cp --remove-destination "$@" ; }  # copy, replacing an existing destination
update () { install --owner=@faxuser@ --group=@faxgroup@ "$@" ; }  # install owned by the fax user/group
## create/update spooling area
update --mode=0750 -d "@spoolAreaPath@"
cd "@spoolAreaPath@"
persist=(archive docq doneq info log pollq recvq sendq)
# remove entries that don't belong here
touch dummy # ensure "*" resolves to something
for k in *
do
keep=0
for j in "${persist[@]}" xferfaxlog clientlog faxcron.lastrun
do
if test "$k" == "$j"
then
keep=1
break
fi
done
if test "$keep" == "0"
then
rm --recursive "$k"
fi
done
# create persistent data directories (unless they exist already)
update --mode=0700 -d "${persist[@]}"
chmod 0755 info log recvq
# create ``xferfaxlog``, ``faxcron.lastrun``, ``clientlog``
touch clientlog faxcron.lastrun xferfaxlog
chown @faxuser@:@faxgroup@ clientlog faxcron.lastrun xferfaxlog
# create symlinks for frozen directories/files
lnsym --target-directory=. "@hylafax@"/spool/{COPYRIGHT,bin,config}
# create empty temporary directories
update --mode=0700 -d client dev status
update -d tmp
## create and fill etc
install -d "@spoolAreaPath@/etc"
cd "@spoolAreaPath@/etc"
# create symlinks to all files in template's etc
lnsym --target-directory=. "@hylafax@/spool/etc"/*
# set LOCKDIR in setup.cache
sed --regexp-extended 's|^(UUCP_LOCKDIR=).*$|\1'"'@lockPath@'|g" --in-place setup.cache
# etc/{xferfaxlog,lastrun} are stored in the spool root
lnsymfrc --target-directory=. ../xferfaxlog
lnsymfrc --no-target-directory ../faxcron.lastrun lastrun
# etc/hosts.hfaxd is provided by the NixOS configuration
lnsymfrc --no-target-directory "@userAccessFile@" hosts.hfaxd
# etc/config and etc/config.${DEVID} must be copied:
# hfaxd reads these files after locking itself up in a chroot
cprd --no-target-directory "@globalConfigPath@" config
cprd --target-directory=. "@modemConfigPath@"/*

View File

@ -0,0 +1,249 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) mkIf mkMerge;
inherit (lib) concatStringsSep optionalString;
cfg = config.services.hylafax;
mapModems = lib.flip map (lib.attrValues cfg.modems);
mkConfigFile = name: conf:
# creates hylafax config file,
# makes sure "Include" is listed *first*
let
mkLines = conf:
(lib.concatLists
(lib.flip lib.mapAttrsToList conf
(k: map (v: ''${k}: ${v}'')
)));
include = mkLines { Include = conf.Include or []; };
other = mkLines ( conf // { Include = []; } );
in
pkgs.writeText ''hylafax-config${name}''
(concatStringsSep "\n" (include ++ other));
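  # Illustrative sketch (values are already lists of strings, as produced by the
  # option types in options.nix):
  #   mkConfigFile "" { Include = [ "config/cirrus" ]; SessionTracing = [ "0x78701" ]; }
  # yields a store file containing
  #   Include: config/cirrus
  #   SessionTracing: 0x78701
  # with all Include directives placed first.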
globalConfigPath = mkConfigFile "" cfg.faxqConfig;
modemConfigPath =
let
mkModemConfigFile = { config, name, ... }:
mkConfigFile ''.${name}''
(cfg.commonModemConfig // config);
mkLine = { name, type, ... }@modem: ''
# check if modem config file exists:
test -f "${pkgs.hylafaxplus}/spool/config/${type}"
ln \
--symbolic \
--no-target-directory \
"${mkModemConfigFile modem}" \
"$out/config.${name}"
'';
in
pkgs.runCommand "hylafax-config-modems" {}
''mkdir --parents "$out/" ${concatStringsSep "\n" (mapModems mkLine)}'';
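  # Illustrative sketch: with the ttyS1/"cirrus" example from the options, the
  # resulting derivation contains a symlink config.ttyS1 pointing at the
  # generated per-modem file, and the build fails early if
  # spool/config/cirrus is missing from the hylafaxplus package.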
setupSpoolScript = pkgs.substituteAll {
name = "hylafax-setup-spool.sh";
src = ./spool.sh;
isExecutable = true;
inherit (pkgs.stdenv) shell;
hylafax = pkgs.hylafaxplus;
faxuser = "uucp";
faxgroup = "uucp";
lockPath = "/var/lock";
inherit globalConfigPath modemConfigPath;
inherit (cfg) sendmailPath spoolAreaPath userAccessFile;
};
waitFaxqScript = pkgs.substituteAll {
# This script checks the modems' status files
# and waits until all modems report readiness.
name = "hylafax-faxq-wait-start.sh";
src = ./faxq-wait.sh;
isExecutable = true;
timeoutSec = toString 10;
inherit (pkgs.stdenv) shell;
inherit (cfg) spoolAreaPath;
};
sockets."hylafax-hfaxd" = {
description = "HylaFAX server socket";
documentation = [ "man:hfaxd(8)" ];
wantedBy = [ "multi-user.target" ];
listenStreams = [ "127.0.0.1:4559" ];
socketConfig.FreeBind = true;
socketConfig.Accept = true;
};
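  # With Accept=true, systemd spawns one "hylafax-hfaxd@<connection>" instance
  # per incoming connection and hands it the socket via stdin/stdout
  # (see the StandardInput/StandardOutput settings of the hfaxd@ service below).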
paths."hylafax-faxq" = {
description = "HylaFAX queue manager sendq watch";
documentation = [ "man:faxq(8)" "man:sendq(5)" ];
wantedBy = [ "multi-user.target" ];
pathConfig.PathExistsGlob = [ ''${cfg.spoolAreaPath}/sendq/q*'' ];
};
timers = mkMerge [
(
mkIf (cfg.faxcron.enable.frequency!=null)
{ "hylafax-faxcron".timerConfig.Persistent = true; }
)
(
mkIf (cfg.faxqclean.enable.frequency!=null)
{ "hylafax-faxqclean".timerConfig.Persistent = true; }
)
];
hardenService =
# Add some common systemd service hardening settings,
# but allow each service (here) to override
# settings by explicitly setting those to `null`.
# More hardening would be nice, but it makes
# customizing hylafax setups very difficult.
# If more hardening is added at all, it should come
# along with options to customize it.
let
hardening = {
PrivateDevices = true; # breaks /dev/tty...
PrivateNetwork = true;
PrivateTmp = true;
ProtectControlGroups = true;
#ProtectHome = true; # breaks custom spool dirs
ProtectKernelModules = true;
ProtectKernelTunables = true;
#ProtectSystem = "strict"; # breaks custom spool dirs
RestrictNamespaces = true;
RestrictRealtime = true;
};
filter = key: value: (value != null) || ! (lib.hasAttr key hardening);
apply = service: lib.filterAttrs filter (hardening // (service.serviceConfig or {}));
in
service: service // { serviceConfig = apply service; };
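  # Illustrative sketch: a service below that sets, e.g.,
  #   serviceConfig.PrivateDevices = null;
  # gets that hardening entry removed entirely (rather than overridden), while
  # the remaining defaults from `hardening` still apply to it.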
services."hylafax-spool" = {
description = "HylaFAX spool area preparation";
documentation = [ "man:hylafax-server(4)" ];
script = ''
${setupSpoolScript}
cd "${cfg.spoolAreaPath}"
${cfg.spoolExtraInit}
if ! test -f "${cfg.spoolAreaPath}/etc/hosts.hfaxd"
then
echo hosts.hfaxd is missing
exit 1
fi
'';
serviceConfig.ExecStop = ''${setupSpoolScript}'';
serviceConfig.RemainAfterExit = true;
serviceConfig.Type = "oneshot";
unitConfig.RequiresMountsFor = [ cfg.spoolAreaPath ];
};
services."hylafax-faxq" = {
description = "HylaFAX queue manager";
documentation = [ "man:faxq(8)" ];
requires = [ "hylafax-spool.service" ];
after = [ "hylafax-spool.service" ];
wants = mapModems ( { name, ... }: ''hylafax-faxgetty@${name}.service'' );
wantedBy = mkIf cfg.autostart [ "multi-user.target" ];
serviceConfig.Type = "forking";
serviceConfig.ExecStart = ''${pkgs.hylafaxplus}/spool/bin/faxq -q "${cfg.spoolAreaPath}"'';
# This delays the "readiness" of this service until
# all modems are initialized (or a timeout is reached).
# Otherwise, sending a fax with the fax service
# stopped will always yield a failed send attempt:
# The fax service is started when the job is created with
# `sendfax`, but modems need some time to initialize.
serviceConfig.ExecStartPost = [ ''${waitFaxqScript}'' ];
# faxquit fails if the pipe is already gone
# (e.g. the service is already stopping)
serviceConfig.ExecStop = ''-${pkgs.hylafaxplus}/spool/bin/faxquit -q "${cfg.spoolAreaPath}"'';
# disable some systemd hardening settings
serviceConfig.PrivateDevices = null;
serviceConfig.RestrictRealtime = null;
};
services."hylafax-hfaxd@" = {
description = "HylaFAX server";
documentation = [ "man:hfaxd(8)" ];
after = [ "hylafax-faxq.service" ];
requires = [ "hylafax-faxq.service" ];
serviceConfig.StandardInput = "socket";
serviceConfig.StandardOutput = "socket";
serviceConfig.ExecStart = ''${pkgs.hylafaxplus}/spool/bin/hfaxd -q "${cfg.spoolAreaPath}" -d -I'';
unitConfig.RequiresMountsFor = [ cfg.userAccessFile ];
# disable some systemd hardening settings
serviceConfig.PrivateDevices = null;
serviceConfig.PrivateNetwork = null;
};
services."hylafax-faxcron" = rec {
description = "HylaFAX spool area maintenance";
documentation = [ "man:faxcron(8)" ];
after = [ "hylafax-spool.service" ];
requires = [ "hylafax-spool.service" ];
wantedBy = mkIf cfg.faxcron.enable.spoolInit requires;
startAt = mkIf (cfg.faxcron.enable.frequency!=null) cfg.faxcron.enable.frequency;
serviceConfig.ExecStart = concatStringsSep " " [
''${pkgs.hylafaxplus}/spool/bin/faxcron''
''-q "${cfg.spoolAreaPath}"''
''-info ${toString cfg.faxcron.infoDays}''
''-log ${toString cfg.faxcron.logDays}''
''-rcv ${toString cfg.faxcron.rcvDays}''
];
};
services."hylafax-faxqclean" = rec {
description = "HylaFAX spool area queue cleaner";
documentation = [ "man:faxqclean(8)" ];
after = [ "hylafax-spool.service" ];
requires = [ "hylafax-spool.service" ];
wantedBy = mkIf cfg.faxqclean.enable.spoolInit requires;
startAt = mkIf (cfg.faxqclean.enable.frequency!=null) cfg.faxqclean.enable.frequency;
serviceConfig.ExecStart = concatStringsSep " " [
''${pkgs.hylafaxplus}/spool/bin/faxqclean''
''-q "${cfg.spoolAreaPath}"''
''-v''
(optionalString (cfg.faxqclean.archiving!="never") ''-a'')
(optionalString (cfg.faxqclean.archiving=="always") ''-A'')
''-j ${toString (cfg.faxqclean.doneqMinutes*60)}''
''-d ${toString (cfg.faxqclean.docqMinutes*60)}''
];
};
mkFaxgettyService = { name, ... }:
lib.nameValuePair ''hylafax-faxgetty@${name}'' rec {
description = "HylaFAX faxgetty for %I";
documentation = [ "man:faxgetty(8)" ];
bindsTo = [ "dev-%i.device" ];
requires = [ "hylafax-spool.service" ];
after = bindsTo ++ requires;
before = [ "hylafax-faxq.service" "getty.target" ];
unitConfig.StopWhenUnneeded = true;
unitConfig.AssertFileNotEmpty = ''${cfg.spoolAreaPath}/etc/config.%I'';
serviceConfig.UtmpIdentifier = "%I";
serviceConfig.TTYPath = "/dev/%I";
serviceConfig.Restart = "always";
serviceConfig.KillMode = "process";
serviceConfig.IgnoreSIGPIPE = false;
serviceConfig.ExecStart = ''-${pkgs.hylafaxplus}/spool/bin/faxgetty -q "${cfg.spoolAreaPath}" /dev/%I'';
# faxquit fails if the pipe is already gone
# (e.g. the service is already stopping)
serviceConfig.ExecStop = ''-${pkgs.hylafaxplus}/spool/bin/faxquit -q "${cfg.spoolAreaPath}" %I'';
# disable some systemd hardening settings
serviceConfig.PrivateDevices = null;
serviceConfig.RestrictRealtime = null;
};
modemServices =
lib.listToAttrs (mapModems mkFaxgettyService);
in
{
config.systemd = mkIf cfg.enable {
inherit sockets timers paths;
services = lib.mapAttrs (lib.const hardenService) (services // modemServices);
};
}

View File

@ -8,6 +8,17 @@ let
homeDir = "/var/lib/i2pd"; homeDir = "/var/lib/i2pd";
strOpt = k: v: k + " = " + v;
boolOpt = k: v: k + " = " + boolToString v;
intOpt = k: v: k + " = " + toString v;
lstOpt = k: xs: k + " = " + concatStringsSep "," xs;
optionalNullString = o: s: optional (! isNull s) (strOpt o s);
optionalNullBool = o: b: optional (! isNull b) (boolOpt o b);
optionalNullInt = o: i: optional (! isNull i) (intOpt o i);
optionalEmptyList = o: l: optional ([] != l) (lstOpt o l);
mkEnableTrueOption = name: mkEnableOption name // { default = true; };
mkEndpointOpt = name: addr: port: { mkEndpointOpt = name: addr: port: {
enable = mkEnableOption name; enable = mkEnableOption name;
name = mkOption { name = mkOption {
@ -18,42 +29,54 @@ let
address = mkOption { address = mkOption {
type = types.str; type = types.str;
default = addr; default = addr;
description = "Bind address for ${name} endpoint. Default: " + addr; description = "Bind address for ${name} endpoint.";
}; };
port = mkOption { port = mkOption {
type = types.int; type = types.int;
default = port; default = port;
description = "Bind port for ${name} endoint. Default: " + toString port; description = "Bind port for ${name} endoint.";
}; };
}; };
mkKeyedEndpointOpt = name: addr: port: keyFile: i2cpOpts = name: {
length = mkOption {
type = types.int;
description = "Guaranteed minimum hops for ${name} tunnels.";
default = 3;
};
quantity = mkOption {
type = types.int;
description = "Number of simultaneous ${name} tunnels.";
default = 5;
};
};
mkKeyedEndpointOpt = name: addr: port: keyloc:
(mkEndpointOpt name addr port) // { (mkEndpointOpt name addr port) // {
keys = mkOption { keys = mkOption {
type = types.str; type = with types; nullOr str;
default = ""; default = keyloc;
description = '' description = ''
File to persist ${lib.toUpper name} keys. File to persist ${lib.toUpper name} keys.
''; '';
}; };
inbound = i2cpOpts name;
outbound = i2cpOpts name;
latency.min = mkOption {
type = with types; nullOr int;
description = "Min latency for tunnels.";
default = null;
};
latency.max = mkOption {
type = with types; nullOr int;
description = "Max latency for tunnels.";
default = null;
};
}; };
commonTunOpts = let commonTunOpts = name: {
i2cpOpts = { outbound = i2cpOpts name;
length = mkOption { inbound = i2cpOpts name;
type = types.int;
description = "Guaranteed minimum hops.";
default = 3;
};
quantity = mkOption {
type = types.int;
description = "Number of simultaneous tunnels.";
default = 5;
};
};
in name: {
outbound = i2cpOpts;
inbound = i2cpOpts;
crypto.tagsToSend = mkOption { crypto.tagsToSend = mkOption {
type = types.int; type = types.int;
description = "Number of ElGamal/AES tags to send."; description = "Number of ElGamal/AES tags to send.";
@ -70,94 +93,142 @@ let
}; };
} // mkEndpointOpt name "127.0.0.1" 0; } // mkEndpointOpt name "127.0.0.1" 0;
i2pdConf = pkgs.writeText "i2pd.conf" '' sec = name: "\n[" + name + "]";
# DO NOT EDIT -- this file has been generated automatically. notice = "# DO NOT EDIT -- this file has been generated automatically.";
loglevel = ${cfg.logLevel} i2pdConf = let
opts = [
ipv4 = ${boolToString cfg.enableIPv4} notice
ipv6 = ${boolToString cfg.enableIPv6} (strOpt "loglevel" cfg.logLevel)
notransit = ${boolToString cfg.notransit} (boolOpt "logclftime" cfg.logCLFTime)
floodfill = ${boolToString cfg.floodfill} (boolOpt "ipv4" cfg.enableIPv4)
netid = ${toString cfg.netid} (boolOpt "ipv6" cfg.enableIPv6)
${if isNull cfg.bandwidth then "" else "bandwidth = ${toString cfg.bandwidth}" } (boolOpt "notransit" cfg.notransit)
${if isNull cfg.port then "" else "port = ${toString cfg.port}"} (boolOpt "floodfill" cfg.floodfill)
(intOpt "netid" cfg.netid)
[limits] ] ++ (optionalNullInt "bandwidth" cfg.bandwidth)
transittunnels = ${toString cfg.limits.transittunnels} ++ (optionalNullInt "port" cfg.port)
++ (optionalNullString "family" cfg.family)
[upnp] ++ (optionalNullString "datadir" cfg.dataDir)
enabled = ${boolToString cfg.upnp.enable} ++ (optionalNullInt "share" cfg.share)
name = ${cfg.upnp.name} ++ (optionalNullBool "ssu" cfg.ssu)
++ (optionalNullBool "ntcp" cfg.ntcp)
[precomputation] ++ (optionalNullString "ntcpproxy" cfg.ntcpProxy)
elgamal = ${boolToString cfg.precomputation.elgamal} ++ (optionalNullString "ifname" cfg.ifname)
++ (optionalNullString "ifname4" cfg.ifname4)
[reseed] ++ (optionalNullString "ifname6" cfg.ifname6)
verify = ${boolToString cfg.reseed.verify} ++ [
file = ${cfg.reseed.file} (sec "limits")
urls = ${builtins.concatStringsSep "," cfg.reseed.urls} (intOpt "transittunnels" cfg.limits.transittunnels)
(intOpt "coresize" cfg.limits.coreSize)
[addressbook] (intOpt "openfiles" cfg.limits.openFiles)
defaulturl = ${cfg.addressbook.defaulturl} (intOpt "ntcphard" cfg.limits.ntcpHard)
subscriptions = ${builtins.concatStringsSep "," cfg.addressbook.subscriptions} (intOpt "ntcpsoft" cfg.limits.ntcpSoft)
(intOpt "ntcpthreads" cfg.limits.ntcpThreads)
${flip concatMapStrings (sec "upnp")
(boolOpt "enabled" cfg.upnp.enable)
(sec "precomputation")
(boolOpt "elgamal" cfg.precomputation.elgamal)
(sec "reseed")
(boolOpt "verify" cfg.reseed.verify)
] ++ (optionalNullString "file" cfg.reseed.file)
++ (optionalEmptyList "urls" cfg.reseed.urls)
++ (optionalNullString "floodfill" cfg.reseed.floodfill)
++ (optionalNullString "zipfile" cfg.reseed.zipfile)
++ (optionalNullString "proxy" cfg.reseed.proxy)
++ [
(sec "trust")
(boolOpt "enabled" cfg.trust.enable)
(boolOpt "hidden" cfg.trust.hidden)
] ++ (optionalEmptyList "routers" cfg.trust.routers)
++ (optionalNullString "family" cfg.trust.family)
++ [
(sec "websockets")
(boolOpt "enabled" cfg.websocket.enable)
(strOpt "address" cfg.websocket.address)
(intOpt "port" cfg.websocket.port)
(sec "exploratory")
(intOpt "inbound.length" cfg.exploratory.inbound.length)
(intOpt "inbound.quantity" cfg.exploratory.inbound.quantity)
(intOpt "outbound.length" cfg.exploratory.outbound.length)
(intOpt "outbound.quantity" cfg.exploratory.outbound.quantity)
(sec "ntcp2")
(boolOpt "enabled" cfg.ntcp2.enable)
(boolOpt "published" cfg.ntcp2.published)
(intOpt "port" cfg.ntcp2.port)
(sec "addressbook")
(strOpt "defaulturl" cfg.addressbook.defaulturl)
] ++ (optionalEmptyList "subscriptions" cfg.addressbook.subscriptions)
++ (flip map
(collect (proto: proto ? port && proto ? address && proto ? name) cfg.proto) (collect (proto: proto ? port && proto ? address && proto ? name) cfg.proto)
(proto: '' (proto: let protoOpts = [
[${proto.name}] (sec proto.name)
enabled = ${boolToString proto.enable} (boolOpt "enabled" proto.enable)
address = ${proto.address} (strOpt "address" proto.address)
port = ${toString proto.port} (intOpt "port" proto.port)
${if proto ? keys then "keys = ${proto.keys}" else ""} ] ++ (if proto ? keys then optionalNullString "keys" proto.keys else [])
${if proto ? auth then "auth = ${boolToString proto.auth}" else ""} ++ (if proto ? auth then optionalNullBool "auth" proto.auth else [])
${if proto ? user then "user = ${proto.user}" else ""} ++ (if proto ? user then optionalNullString "user" proto.user else [])
${if proto ? pass then "pass = ${proto.pass}" else ""} ++ (if proto ? pass then optionalNullString "pass" proto.pass else [])
${if proto ? outproxy then "outproxy = ${proto.outproxy}" else ""} ++ (if proto ? strictHeaders then optionalNullBool "strictheaders" proto.strictHeaders else [])
${if proto ? outproxyPort then "outproxyport = ${toString proto.outproxyPort}" else ""} ++ (if proto ? hostname then optionalNullString "hostname" proto.hostname else [])
'') ++ (if proto ? outproxy then optionalNullString "outproxy" proto.outproxy else [])
} ++ (if proto ? outproxyPort then optionalNullInt "outproxyport" proto.outproxyPort else [])
''; ++ (if proto ? outproxyEnable then optionalNullBool "outproxy.enabled" proto.outproxyEnable else []);
in (concatStringsSep "\n" protoOpts)
));
in
pkgs.writeText "i2pd.conf" (concatStringsSep "\n" opts);
i2pdTunnelConf = pkgs.writeText "i2pd-tunnels.conf" '' tunnelConf = let opts = [
# DO NOT EDIT -- this file has been generated automatically. notice
${flip concatMapStrings (flip map
(collect (tun: tun ? port && tun ? destination) cfg.outTunnels) (collect (tun: tun ? port && tun ? destination) cfg.outTunnels)
(tun: '' (tun: let outTunOpts = [
[${tun.name}] (sec tun.name)
type = client "type = client"
destination = ${tun.destination} (intOpt "port" tun.port)
destinationport = ${toString tun.destinationPort} (strOpt "destination" tun.destination)
keys = ${tun.keys} ] ++ (if tun ? destinationPort then optionalNullInt "destinationport" tun.destinationPort else [])
address = ${tun.address} ++ (if tun ? keys then
port = ${toString tun.port} optionalNullString "keys" tun.keys else [])
inbound.length = ${toString tun.inbound.length} ++ (if tun ? address then
outbound.length = ${toString tun.outbound.length} optionalNullString "address" tun.address else [])
inbound.quantity = ${toString tun.inbound.quantity} ++ (if tun ? inbound.length then
outbound.quantity = ${toString tun.outbound.quantity} optionalNullInt "inbound.length" tun.inbound.length else [])
crypto.tagsToSend = ${toString tun.crypto.tagsToSend} ++ (if tun ? inbound.quantity then
'') optionalNullInt "inbound.quantity" tun.inbound.quantity else [])
} ++ (if tun ? outbound.length then
${flip concatMapStrings optionalNullInt "outbound.length" tun.outbound.length else [])
++ (if tun ? outbound.quantity then
optionalNullInt "outbound.quantity" tun.outbound.quantity else [])
++ (if tun ? crypto.tagsToSend then
optionalNullInt "crypto.tagstosend" tun.crypto.tagsToSend else []);
in concatStringsSep "\n" outTunOpts))
(flip map
(collect (tun: tun ? port && tun ? address) cfg.inTunnels) (collect (tun: tun ? port && tun ? address) cfg.inTunnels)
(tun: '' (tun: let inTunOpts = [
[${tun.name}] (sec tun.name)
type = server "type = server"
destination = ${tun.destination} (intOpt "port" tun.port)
keys = ${tun.keys} (strOpt "host" tun.address)
host = ${tun.address} ] ++ (if tun ? destination then
port = ${toString tun.port} optionalNullString "destination" tun.destination else [])
inport = ${toString tun.inPort} ++ (if tun ? keys then
accesslist = ${builtins.concatStringsSep "," tun.accessList} optionalNullString "keys" tun.keys else [])
'') ++ (if tun ? inPort then
} optionalNullInt "inport" tun.inPort else [])
''; ++ (if tun ? accessList then
optionalEmptyList "accesslist" tun.accessList else []);
in concatStringsSep "\n" inTunOpts))];
in pkgs.writeText "i2pd-tunnels.conf" opts;
i2pdSh = pkgs.writeScriptBin "i2pd" '' i2pdSh = pkgs.writeScriptBin "i2pd" ''
#!/bin/sh #!/bin/sh
exec ${pkgs.i2pd}/bin/i2pd \ exec ${pkgs.i2pd}/bin/i2pd \
${if isNull cfg.address then "" else "--host="+cfg.address} \ ${if isNull cfg.address then "" else "--host="+cfg.address} \
--service \
--conf=${i2pdConf} \ --conf=${i2pdConf} \
--tunconf=${i2pdTunnelConf} --tunconf=${tunnelConf}
''; '';
in in
@ -170,9 +241,7 @@ in
services.i2pd = { services.i2pd = {
enable = mkOption { enable = mkEnableOption "I2Pd daemon" // {
type = types.bool;
default = false;
description = '' description = ''
Enables I2Pd as a running service upon activation. Enables I2Pd as a running service upon activation.
Please read http://i2pd.readthedocs.io/en/latest/ for further Please read http://i2pd.readthedocs.io/en/latest/ for further
@ -192,6 +261,8 @@ in
''; '';
}; };
logCLFTime = mkEnableOption "Full CLF-formatted date and time to log";
address = mkOption { address = mkOption {
type = with types; nullOr str; type = with types; nullOr str;
default = null; default = null;
@ -200,17 +271,72 @@ in
''; '';
}; };
notransit = mkOption { family = mkOption {
type = types.bool; type = with types; nullOr str;
default = false; default = null;
description = ''
Specify a family the router belongs to.
'';
};
dataDir = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Alternative path to storage of i2pd data (RI, keys, peer profiles, ...)
'';
};
share = mkOption {
type = types.int;
default = 100;
description = ''
Limit of transit traffic from max bandwidth in percents.
'';
};
ifname = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Network interface to bind to.
'';
};
ifname4 = mkOption {
type = with types; nullOr str;
default = null;
description = ''
IPv4 interface to bind to.
'';
};
ifname6 = mkOption {
type = with types; nullOr str;
default = null;
description = ''
IPv6 interface to bind to.
'';
};
ntcpProxy = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Proxy URL for NTCP transport.
'';
};
ntcp = mkEnableTrueOption "ntcp";
ssu = mkEnableTrueOption "ssu";
notransit = mkEnableOption "notransit" // {
description = '' description = ''
Tells the router to not accept transit tunnels during startup. Tells the router to not accept transit tunnels during startup.
''; '';
}; };
floodfill = mkOption { floodfill = mkEnableOption "floodfill" // {
type = types.bool;
default = false;
description = '' description = ''
If the router is declared to be unreachable and needs introduction nodes. If the router is declared to be unreachable and needs introduction nodes.
''; '';
@ -241,51 +367,20 @@ in
''; '';
}; };
enableIPv4 = mkOption { enableIPv4 = mkEnableTrueOption "IPv4 connectivity";
type = types.bool; enableIPv6 = mkEnableOption "IPv6 connectivity";
default = true; nat = mkEnableTrueOption "NAT bypass";
upnp.enable = mkEnableOption "UPnP service discovery";
upnp.name = mkOption {
type = types.str;
default = "I2Pd";
description = '' description = ''
Enables IPv4 connectivity. Enabled by default. Name i2pd appears in UPnP forwardings list.
''; '';
}; };
enableIPv6 = mkOption { precomputation.elgamal = mkEnableTrueOption "Precomputed ElGamal tables" // {
type = types.bool;
default = false;
description = ''
Enables IPv6 connectivity. Disabled by default.
'';
};
nat = mkOption {
type = types.bool;
default = true;
description = ''
Assume router is NATed. Enabled by default.
'';
};
upnp = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Enables UPnP.
'';
};
name = mkOption {
type = types.str;
default = "I2Pd";
description = ''
Name i2pd appears in UPnP forwardings list.
'';
};
};
precomputation.elgamal = mkOption {
type = types.bool;
default = true;
description = '' description = ''
Whenever to use precomputated tables for ElGamal. Whenever to use precomputated tables for ElGamal.
<command>i2pd</command> defaults to <literal>false</literal> <command>i2pd</command> defaults to <literal>false</literal>
@ -296,76 +391,154 @@ in
''; '';
}; };
reseed = { reseed.verify = mkEnableOption "SU3 signature verification";
verify = mkOption {
type = types.bool;
default = false;
description = ''
Request SU3 signature verification
'';
};
file = mkOption { reseed.file = mkOption {
type = types.str; type = with types; nullOr str;
default = ""; default = null;
description = '' description = ''
Full path to SU3 file to reseed from Full path to SU3 file to reseed from.
''; '';
};
urls = mkOption {
type = with types; listOf str;
default = [
"https://reseed.i2p-project.de/"
"https://i2p.mooo.com/netDb/"
"https://netdb.i2p2.no/"
"https://us.reseed.i2p2.no:444/"
"https://uk.reseed.i2p2.no:444/"
"https://i2p.manas.ca:8443/"
];
description = ''
Reseed URLs
'';
};
}; };
addressbook = { reseed.urls = mkOption {
defaulturl = mkOption { type = with types; listOf str;
type = types.str; default = [];
default = "http://joajgazyztfssty4w2on5oaqksz6tqoxbduy553y34mf4byv6gpq.b32.i2p/export/alive-hosts.txt"; description = ''
description = '' Reseed URLs.
AddressBook subscription URL for initial setup '';
''; };
};
subscriptions = mkOption { reseed.floodfill = mkOption {
type = with types; listOf str; type = with types; nullOr str;
default = [ default = null;
"http://inr.i2p/export/alive-hosts.txt" description = ''
"http://i2p-projekt.i2p/hosts.txt" Path to router info of floodfill to reseed from.
"http://stats.i2p/cgi-bin/newhosts.txt" '';
]; };
description = ''
AddressBook subscription URLs reseed.zipfile = mkOption {
''; type = with types; nullOr str;
}; default = null;
description = ''
Path to local .zip file to reseed from.
'';
};
reseed.proxy = mkOption {
type = with types; nullOr str;
default = null;
description = ''
URL for reseed proxy, supports http/socks.
'';
};
addressbook.defaulturl = mkOption {
type = types.str;
default = "http://joajgazyztfssty4w2on5oaqksz6tqoxbduy553y34mf4byv6gpq.b32.i2p/export/alive-hosts.txt";
description = ''
AddressBook subscription URL for initial setup
'';
};
addressbook.subscriptions = mkOption {
type = with types; listOf str;
default = [
"http://inr.i2p/export/alive-hosts.txt"
"http://i2p-projekt.i2p/hosts.txt"
"http://stats.i2p/cgi-bin/newhosts.txt"
];
description = ''
AddressBook subscription URLs
'';
};
trust.enable = mkEnableOption "Explicit trust options";
trust.family = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Router family to trust for first hops.
'';
};
trust.routers = mkOption {
type = with types; listOf str;
default = [];
description = ''
Only connect to the listed routers.
'';
};
trust.hidden = mkEnableOption "Router concealment.";
websocket = mkEndpointOpt "websockets" "127.0.0.1" 7666;
exploratory.inbound = i2cpOpts "exploratory";
exploratory.outbound = i2cpOpts "exploratory";
ntcp2.enable = mkEnableTrueOption "NTCP2.";
ntcp2.published = mkEnableOption "NTCP2 publication.";
ntcp2.port = mkOption {
type = types.int;
default = 0;
description = ''
Port to listen for incoming NTCP2 connections (0=auto).
'';
}; };
limits.transittunnels = mkOption { limits.transittunnels = mkOption {
type = types.int; type = types.int;
default = 2500; default = 2500;
description = '' description = ''
Maximum number of active transit sessions Maximum number of active transit sessions.
'';
};
limits.coreSize = mkOption {
type = types.int;
default = 0;
description = ''
Maximum size of corefile in Kb (0 - use system limit).
'';
};
limits.openFiles = mkOption {
type = types.int;
default = 0;
description = ''
Maximum number of open files (0 - use system default).
'';
};
limits.ntcpHard = mkOption {
type = types.int;
default = 0;
description = ''
Maximum number of active transit sessions.
'';
};
limits.ntcpSoft = mkOption {
type = types.int;
default = 0;
description = ''
Threshold to start probabilistic backoff with ntcp sessions (default: use system limit).
'';
};
limits.ntcpThreads = mkOption {
type = types.int;
default = 1;
description = ''
Maximum number of threads used by NTCP DH worker.
''; '';
}; };
proto.http = (mkEndpointOpt "http" "127.0.0.1" 7070) // { proto.http = (mkEndpointOpt "http" "127.0.0.1" 7070) // {
auth = mkOption {
type = types.bool; auth = mkEnableOption "Webconsole authentication";
default = false;
description = ''
Enable authentication for webconsole.
'';
};
user = mkOption { user = mkOption {
type = types.str; type = types.str;
default = "i2pd"; default = "i2pd";
@ -373,6 +546,7 @@ in
Username for webconsole access Username for webconsole access
''; '';
}; };
pass = mkOption { pass = mkOption {
type = types.str; type = types.str;
default = "i2pd"; default = "i2pd";
@ -380,11 +554,35 @@ in
Password for webconsole access. Password for webconsole access.
''; '';
}; };
strictHeaders = mkOption {
type = with types; nullOr bool;
default = null;
description = ''
Enable strict host checking on WebUI.
'';
};
hostname = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Expected hostname for WebUI.
'';
};
}; };
proto.httpProxy = mkKeyedEndpointOpt "httpproxy" "127.0.0.1" 4444 ""; proto.httpProxy = (mkKeyedEndpointOpt "httpproxy" "127.0.0.1" 4444 "httpproxy-keys.dat")
proto.socksProxy = (mkKeyedEndpointOpt "socksproxy" "127.0.0.1" 4447 "")
// { // {
outproxy = mkOption {
type = with types; nullOr str;
default = null;
description = "Upstream outproxy bind address.";
};
};
proto.socksProxy = (mkKeyedEndpointOpt "socksproxy" "127.0.0.1" 4447 "socksproxy-keys.dat")
// {
outproxyEnable = mkEnableOption "SOCKS outproxy";
outproxy = mkOption { outproxy = mkOption {
type = types.str; type = types.str;
default = "127.0.0.1"; default = "127.0.0.1";
@ -408,8 +606,8 @@ in
{ name, ... }: { { name, ... }: {
options = { options = {
destinationPort = mkOption { destinationPort = mkOption {
type = types.int; type = with types; nullOr int;
default = 0; default = null;
description = "Connect to particular port at destination."; description = "Connect to particular port at destination.";
}; };
} // commonTunOpts name; } // commonTunOpts name;

View File

@ -0,0 +1,87 @@
{ config, lib, pkgs, ... }: with lib;
let
cfg = config.services.iperf3;
api = {
enable = mkEnableOption "iperf3 network throughput testing server";
port = mkOption {
type = types.ints.u16;
default = 5201;
description = "Server port to listen on for iperf3 client requsts.";
};
affinity = mkOption {
type = types.nullOr types.ints.unsigned;
default = null;
description = "CPU affinity for the process.";
};
bind = mkOption {
type = types.nullOr types.str;
default = null;
description = "Bind to the specific interface associated with the given address.";
};
verbose = mkOption {
type = types.bool;
default = false;
description = "Give more detailed output.";
};
forceFlush = mkOption {
type = types.bool;
default = false;
description = "Force flushing output at every interval.";
};
debug = mkOption {
type = types.bool;
default = false;
description = "Emit debugging output.";
};
rsaPrivateKey = mkOption {
type = types.nullOr types.path;
default = null;
description = "Path to the RSA private key (not password-protected) used to decrypt authentication credentials from the client.";
};
authorizedUsersFile = mkOption {
type = types.nullOr types.path;
default = null;
description = "Path to the configuration file containing authorized users credentials to run iperf tests.";
};
extraFlags = mkOption {
type = types.listOf types.str;
default = [ ];
description = "Extra flags to pass to iperf3(1).";
};
};
imp = {
systemd.services.iperf3 = {
description = "iperf3 daemon";
unitConfig.Documentation = "man:iperf3(1) https://iperf.fr/iperf-doc.php";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
Restart = "on-failure";
RestartSec = 2;
DynamicUser = true;
PrivateDevices = true;
CapabilityBoundingSet = "";
NoNewPrivileges = true;
ExecStart = ''
${pkgs.iperf3}/bin/iperf \
--server \
--port ${toString cfg.port} \
${optionalString (cfg.affinity != null) "--affinity ${toString cfg.affinity}"} \
${optionalString (cfg.bind != null) "--bind ${cfg.bind}"} \
${optionalString (cfg.rsaPrivateKey != null) "--rsa-private-key-path ${cfg.rsaPrivateKey}"} \
${optionalString (cfg.authorizedUsersFile != null) "--authorized-users-path ${cfg.authorizedUsersFile}"} \
${optionalString cfg.verbose "--verbose"} \
${optionalString cfg.debug "--debug"} \
${optionalString cfg.forceFlush "--forceflush"} \
${escapeShellArgs cfg.extraFlags}
'';
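        # Illustrative sketch: with only the defaults above (port 5201, all
        # optional settings null/false), the optionalString pieces expand to
        # empty strings and the effective command is roughly
        #   iperf --server --port 5201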
};
};
};
in {
options.services.iperf3 = api;
config = mkIf cfg.enable imp;
}

View File

@ -22,6 +22,8 @@ in {
systemd.packages = [ pkgs.iwd ]; systemd.packages = [ pkgs.iwd ];
systemd.services.iwd.wantedBy = [ "multi-user.target" ];
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [
"d /var/lib/iwd 0700 root root -" "d /var/lib/iwd 0700 root root -"
]; ];

View File

@ -406,25 +406,25 @@ in {
{ source = configFile; { source = configFile;
target = "NetworkManager/NetworkManager.conf"; target = "NetworkManager/NetworkManager.conf";
} }
{ source = "${networkmanager-openvpn}/etc/NetworkManager/VPN/nm-openvpn-service.name"; { source = "${networkmanager-openvpn}/lib/NetworkManager/VPN/nm-openvpn-service.name";
target = "NetworkManager/VPN/nm-openvpn-service.name"; target = "NetworkManager/VPN/nm-openvpn-service.name";
} }
{ source = "${networkmanager-vpnc}/etc/NetworkManager/VPN/nm-vpnc-service.name"; { source = "${networkmanager-vpnc}/lib/NetworkManager/VPN/nm-vpnc-service.name";
target = "NetworkManager/VPN/nm-vpnc-service.name"; target = "NetworkManager/VPN/nm-vpnc-service.name";
} }
{ source = "${networkmanager-openconnect}/etc/NetworkManager/VPN/nm-openconnect-service.name"; { source = "${networkmanager-openconnect}/lib/NetworkManager/VPN/nm-openconnect-service.name";
target = "NetworkManager/VPN/nm-openconnect-service.name"; target = "NetworkManager/VPN/nm-openconnect-service.name";
} }
{ source = "${networkmanager-fortisslvpn}/etc/NetworkManager/VPN/nm-fortisslvpn-service.name"; { source = "${networkmanager-fortisslvpn}/lib/NetworkManager/VPN/nm-fortisslvpn-service.name";
target = "NetworkManager/VPN/nm-fortisslvpn-service.name"; target = "NetworkManager/VPN/nm-fortisslvpn-service.name";
} }
{ source = "${networkmanager-l2tp}/etc/NetworkManager/VPN/nm-l2tp-service.name"; { source = "${networkmanager-l2tp}/lib/NetworkManager/VPN/nm-l2tp-service.name";
target = "NetworkManager/VPN/nm-l2tp-service.name"; target = "NetworkManager/VPN/nm-l2tp-service.name";
} }
{ source = "${networkmanager_strongswan}/etc/NetworkManager/VPN/nm-strongswan-service.name"; { source = "${networkmanager_strongswan}/lib/NetworkManager/VPN/nm-strongswan-service.name";
target = "NetworkManager/VPN/nm-strongswan-service.name"; target = "NetworkManager/VPN/nm-strongswan-service.name";
} }
{ source = "${networkmanager-iodine}/etc/NetworkManager/VPN/nm-iodine-service.name"; { source = "${networkmanager-iodine}/lib/NetworkManager/VPN/nm-iodine-service.name";
target = "NetworkManager/VPN/nm-iodine-service.name"; target = "NetworkManager/VPN/nm-iodine-service.name";
} }
] ++ optional (cfg.appendNameservers == [] || cfg.insertNameservers == []) ] ++ optional (cfg.appendNameservers == [] || cfg.insertNameservers == [])

View File

@ -0,0 +1,34 @@
{ config, lib, pkgs, ... }: with lib; let
cfg = config.services.nullidentdmod;
in {
options.services.nullidentdmod = with types; {
enable = mkEnableOption "Enable the nullidentdmod identd daemon";
userid = mkOption {
type = nullOr str;
description = "User ID to return. Set to null to return a random string each time.";
default = null;
example = "alice";
};
};
config = mkIf cfg.enable {
systemd.sockets.nullidentdmod = {
description = "Socket for identd (NullidentdMod)";
listenStreams = [ "113" ];
socketConfig.Accept = true;
wantedBy = [ "sockets.target" ];
};
systemd.services."nullidentdmod@" = {
description = "NullidentdMod service";
serviceConfig = {
DynamicUser = true;
ExecStart = "${pkgs.nullidentdmod}/bin/nullidentdmod${optionalString (cfg.userid != null) " ${cfg.userid}"}";
StandardInput = "socket";
StandardOutput = "socket";
};
};
};
}

View File

@ -124,7 +124,7 @@ in
dbsqlpath=${ts3}/lib/teamspeak/sql/ logpath=${cfg.logPath} \ dbsqlpath=${ts3}/lib/teamspeak/sql/ logpath=${cfg.logPath} \
voice_ip=${cfg.voiceIP} default_voice_port=${toString cfg.defaultVoicePort} \ voice_ip=${cfg.voiceIP} default_voice_port=${toString cfg.defaultVoicePort} \
filetransfer_ip=${cfg.fileTransferIP} filetransfer_port=${toString cfg.fileTransferPort} \ filetransfer_ip=${cfg.fileTransferIP} filetransfer_port=${toString cfg.fileTransferPort} \
query_ip=${cfg.queryIP} query_port=${toString cfg.queryPort} query_ip=${cfg.queryIP} query_port=${toString cfg.queryPort} license_accepted=1
''; '';
WorkingDirectory = cfg.dataDir; WorkingDirectory = cfg.dataDir;
User = user; User = user;

View File

@ -12,6 +12,8 @@ let
log_dir = ${cfg.logDir} log_dir = ${cfg.logDir}
'' + lib.optionalString (cfg.port != null) '' '' + lib.optionalString (cfg.port != null) ''
ui_port = ${toString cfg.port} ui_port = ${toString cfg.port}
'' + lib.optionalString (cfg.torAlways) ''
tor = always
'' + cfg.extraConfig; '' + cfg.extraConfig;
}; };
in with lib; { in with lib; {
@ -35,11 +37,17 @@ in with lib; {
port = mkOption { port = mkOption {
type = types.nullOr types.int; type = types.nullOr types.int;
default = null; default = null;
example = 15441; example = 43110;
description = "Optional zeronet port."; description = "Optional zeronet web UI port.";
}; };
tor = mkOption { tor = mkOption {
type = types.bool;
default = false;
description = "Use TOR for zeronet traffic where possible.";
};
torAlways = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
description = "Use TOR for all zeronet traffic."; description = "Use TOR for all zeronet traffic.";
@ -60,7 +68,11 @@ in with lib; {
services.tor = mkIf cfg.tor { services.tor = mkIf cfg.tor {
enable = true; enable = true;
controlPort = 9051; controlPort = 9051;
extraConfig = "CookieAuthentication 1"; extraConfig = ''
CacheDirectoryGroupReadable 1
CookieAuthentication 1
CookieAuthFileGroupReadable 1
'';
}; };
systemd.services.zeronet = { systemd.services.zeronet = {

View File

@ -3,78 +3,112 @@
with lib; with lib;
let let
cfg = config.services.sks; cfg = config.services.sks;
sksPkg = cfg.package; sksPkg = cfg.package;
in in {
meta.maintainers = with maintainers; [ primeos calbrecht jcumming ];
{
options = { options = {
services.sks = { services.sks = {
enable = mkEnableOption "sks"; enable = mkEnableOption ''
SKS (synchronizing key server for OpenPGP) and start the database
server. You need to create "''${dataDir}/dump/*.gpg" for the initial
import'';
package = mkOption { package = mkOption {
default = pkgs.sks; default = pkgs.sks;
defaultText = "pkgs.sks"; defaultText = "pkgs.sks";
type = types.package; type = types.package;
description = " description = "Which SKS derivation to use.";
Which sks derivation to use. };
";
dataDir = mkOption {
type = types.path;
default = "/var/db/sks";
example = "/var/lib/sks";
# TODO: The default might change to "/var/lib/sks" as this is more
# common. There's also https://github.com/NixOS/nixpkgs/issues/26256
# and "/var/db" is not FHS compliant (seems to come from BSD).
description = ''
Data directory (-basedir) for SKS, where the database and all
configuration files are located (e.g. KDB, PTree, membership and
sksconf).
'';
}; };
hkpAddress = mkOption { hkpAddress = mkOption {
default = [ "127.0.0.1" "::1" ]; default = [ "127.0.0.1" "::1" ];
type = types.listOf types.str; type = types.listOf types.str;
description = " description = ''
Wich ip addresses the sks-keyserver is listening on. Domain names, IPv4 and/or IPv6 addresses to listen on for HKP
"; requests.
'';
}; };
hkpPort = mkOption { hkpPort = mkOption {
default = 11371; default = 11371;
type = types.int; type = types.ints.u16;
description = " description = "HKP port to listen on.";
Which port the sks-keyserver is listening on. };
";
webroot = mkOption {
type = types.nullOr types.path;
default = "${sksPkg.webSamples}/OpenPKG";
defaultText = "\${pkgs.sks.webSamples}/OpenPKG";
description = ''
Source directory (will be symlinked, if not null) for the files the
built-in webserver should serve. SKS (''${pkgs.sks.webSamples})
provides the following examples: "HTML5", "OpenPKG", and "XHTML+ES".
The index file can be named index.html, index.htm, index.xhtm, or
index.xhtml. Files with the extensions .css, .es, .js, .jpg, .jpeg,
.png, or .gif are supported. Subdirectories and filenames with
anything other than alphanumeric characters and the '.' character
will be ignored.
'';
}; };
}; };
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
environment.systemPackages = [ sksPkg ]; users = {
users.sks = {
users.users.sks = { isSystemUser = true;
createHome = true; description = "SKS user";
home = "/var/db/sks"; home = cfg.dataDir;
isSystemUser = true; createHome = true;
shell = "${pkgs.coreutils}/bin/true"; group = "sks";
useDefaultShell = true;
packages = [ sksPkg pkgs.db ];
};
groups.sks = { };
}; };
systemd.services = let systemd.services = let
hkpAddress = "'" + (builtins.concatStringsSep " " cfg.hkpAddress) + "'" ; hkpAddress = "'" + (builtins.concatStringsSep " " cfg.hkpAddress) + "'" ;
hkpPort = builtins.toString cfg.hkpPort; hkpPort = builtins.toString cfg.hkpPort;
home = config.users.users.sks.home;
user = config.users.users.sks.name;
in { in {
sks-keyserver = { "sks-db" = {
description = "SKS database server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
preStart = '' preStart = ''
mkdir -p ${home}/dump ${lib.optionalString (cfg.webroot != null)
${pkgs.sks}/bin/sks build ${home}/dump/*.gpg -n 10 -cache 100 || true #*/ "ln -sfT \"${cfg.webroot}\" web"}
${pkgs.sks}/bin/sks cleandb || true mkdir -p dump
${pkgs.sks}/bin/sks pbuild -cache 20 -ptree_cache 70 || true ${sksPkg}/bin/sks build dump/*.gpg -n 10 -cache 100 || true #*/
${sksPkg}/bin/sks cleandb || true
${sksPkg}/bin/sks pbuild -cache 20 -ptree_cache 70 || true
''; '';
serviceConfig = { serviceConfig = {
WorkingDirectory = home; WorkingDirectory = "~";
User = user; User = "sks";
Group = "sks";
Restart = "always"; Restart = "always";
ExecStart = "${pkgs.sks}/bin/sks db -hkp_address ${hkpAddress} -hkp_port ${hkpPort}"; ExecStart = "${sksPkg}/bin/sks db -hkp_address ${hkpAddress} -hkp_port ${hkpPort}";
}; };
}; };
}; };

View File

@ -208,7 +208,7 @@ in
enable = mkOption { enable = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
description = "Whether to enable tor transaprent proxy"; description = "Whether to enable tor transparent proxy";
}; };
listenAddress = mkOption { listenAddress = mkOption {

View File

@ -42,7 +42,7 @@ in
protocol = "tcp"; protocol = "tcp";
user = "root"; user = "root";
server = "${pkgs.tcp_wrappers}/bin/tcpd"; server = "${pkgs.tcp_wrappers}/bin/tcpd";
serverArgs = "${pkgs.heimdalFull}/bin/kadmind"; serverArgs = "${pkgs.heimdalFull}/libexec/heimdal/kadmind";
}; };
systemd.services.kdc = { systemd.services.kdc = {
@ -51,13 +51,13 @@ in
preStart = '' preStart = ''
mkdir -m 0755 -p ${stateDir} mkdir -m 0755 -p ${stateDir}
''; '';
script = "${heimdalFull}/bin/kdc"; script = "${heimdalFull}/libexec/heimdal/kdc";
}; };
systemd.services.kpasswdd = { systemd.services.kpasswdd = {
description = "Kerberos Password Changing daemon"; description = "Kerberos Password Changing daemon";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
script = "${heimdalFull}/bin/kpasswdd"; script = "${heimdalFull}/libexec/heimdal/kpasswdd";
}; };
}; };

View File

@ -0,0 +1,920 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.codimd;
prettyJSON = conf:
pkgs.runCommand "codimd-config.json" { } ''
echo '${builtins.toJSON conf}' | ${pkgs.jq}/bin/jq \
'{production:del(.[]|nulls)|del(.[][]?|nulls)}' > $out
'';
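  # Illustrative sketch: for conf = { port = 3000; domain = null; } the jq filter
  # above yields {"production":{"port":3000}}; null-valued options are dropped
  # and the whole configuration is nested under the "production" key.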
in
{
options.services.codimd = {
enable = mkEnableOption "the CodiMD Markdown Editor";
groups = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Groups to which the codimd user should be added.
'';
};
workDir = mkOption {
type = types.path;
default = "/var/lib/codimd";
description = ''
Working directory for the CodiMD service.
'';
};
configuration = {
debug = mkEnableOption "debug mode";
domain = mkOption {
type = types.nullOr types.str;
default = null;
example = "codimd.org";
description = ''
Domain name for the CodiMD instance.
'';
};
urlPath = mkOption {
type = types.nullOr types.str;
default = null;
example = "/url/path/to/codimd";
description = ''
Path under which CodiMD is accessible.
'';
};
host = mkOption {
type = types.str;
default = "localhost";
description = ''
Address to listen on.
'';
};
port = mkOption {
type = types.int;
default = 3000;
example = "80";
description = ''
Port to listen on.
'';
};
path = mkOption {
type = types.nullOr types.str;
default = null;
example = "/var/run/codimd.sock";
description = ''
Specify where a UNIX domain socket should be placed.
'';
};
allowOrigin = mkOption {
type = types.listOf types.str;
default = [];
example = [ "localhost" "codimd.org" ];
description = ''
List of domains to whitelist.
'';
};
useSSL = mkOption {
type = types.bool;
default = false;
description = ''
Enable to use SSL server. This will also enable
<option>protocolUseSSL</option>.
'';
};
hsts = {
enable = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable HSTS if HTTPS is also enabled.
'';
};
maxAgeSeconds = mkOption {
type = types.int;
default = 31536000;
description = ''
Max duration for clients to keep the HSTS status.
'';
};
includeSubdomains = mkOption {
type = types.bool;
default = true;
description = ''
Whether to include subdomains in HSTS.
'';
};
preload = mkOption {
type = types.bool;
default = true;
description = ''
Whether to allow preloading of the site's HSTS status.
'';
};
};
csp = mkOption {
type = types.nullOr types.attrs;
default = null;
example = literalExample ''
{
enable = true;
directives = {
scriptSrc = "trustworthy.scripts.example.com";
};
upgradeInsecureRequest = "auto";
addDefaults = true;
}
'';
description = ''
Specify the Content Security Policy which is passed to Helmet.
For configuration details see <link xlink:href="https://helmetjs.github.io/docs/csp/"
>https://helmetjs.github.io/docs/csp/</link>.
'';
};
protocolUseSSL = mkOption {
type = types.bool;
default = false;
description = ''
Enable to use TLS for resource paths.
This only applies when <option>domain</option> is set.
'';
};
urlAddPort = mkOption {
type = types.bool;
default = false;
description = ''
Enable to add the port to callback URLs.
This only applies when <option>domain</option> is set
and only for ports other than 80 and 443.
'';
};
useCDN = mkOption {
type = types.bool;
default = true;
description = ''
Whether to use CDN resources or not.
'';
};
allowAnonymous = mkOption {
type = types.bool;
default = true;
description = ''
Whether to allow anonymous usage.
'';
};
allowAnonymousEdits = mkOption {
type = types.bool;
default = false;
description = ''
Whether to allow guests to edit existing notes with the `freely' permission,
when <option>allowAnonymous</option> is enabled.
'';
};
allowFreeURL = mkOption {
type = types.bool;
default = false;
description = ''
Whether to allow note creation by accessing a nonexistent note URL.
'';
};
defaultPermission = mkOption {
type = types.enum [ "freely" "editable" "limited" "locked" "private" ];
default = "editable";
description = ''
Default permissions for notes.
This only applies for signed-in users.
'';
};
dbURL = mkOption {
type = types.nullOr types.str;
default = null;
example = ''
postgres://user:pass@host:5432/dbname
'';
description = ''
Specify which database to use.
CodiMD supports mysql, postgres, sqlite and mssql.
See <link xlink:href="https://sequelize.readthedocs.io/en/v3/">
https://sequelize.readthedocs.io/en/v3/</link> for more information.
Note: This option overrides <option>db</option>.
'';
};
db = mkOption {
type = types.attrs;
default = {};
example = literalExample ''
{
dialect = "sqlite";
storage = "/var/lib/codimd/db.codimd.sqlite";
}
'';
description = ''
Specify the configuration for sequelize.
CodiMD supports mysql, postgres, sqlite and mssql.
See <link xlink:href="https://sequelize.readthedocs.io/en/v3/">
https://sequelize.readthedocs.io/en/v3/</link> for more information.
Note: This option is overridden by <option>dbURL</option>.
'';
};
sslKeyPath= mkOption {
type = types.nullOr types.str;
default = null;
example = "/var/lib/codimd/codimd.key";
description = ''
Path to the SSL key. Needed when <option>useSSL</option> is enabled.
'';
};
sslCertPath = mkOption {
type = types.nullOr types.str;
default = null;
example = "/var/lib/codimd/codimd.crt";
description = ''
Path to the SSL cert. Needed when <option>useSSL</option> is enabled.
'';
};
sslCAPath = mkOption {
type = types.listOf types.str;
default = [];
example = [ "/var/lib/codimd/ca.crt" ];
description = ''
SSL ca chain. Needed when <option>useSSL</option> is enabled.
'';
};
dhParamPath = mkOption {
type = types.nullOr types.str;
default = null;
example = "/var/lib/codimd/dhparam.pem";
description = ''
Path to the SSL dh params. Needed when <option>useSSL</option> is enabled.
'';
};
tmpPath = mkOption {
type = types.str;
default = "/tmp";
description = ''
Path to the temp directory CodiMD should use.
Note that <option>serviceConfig.PrivateTmp</option> is enabled for
the CodiMD systemd service by default.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
defaultNotePath = mkOption {
type = types.nullOr types.str;
default = "./public/default.md";
description = ''
Path to the default Note file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
docsPath = mkOption {
type = types.nullOr types.str;
default = "./public/docs";
description = ''
Path to the docs directory.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
indexPath = mkOption {
type = types.nullOr types.str;
default = "./public/views/index.ejs";
description = ''
Path to the index template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
hackmdPath = mkOption {
type = types.nullOr types.str;
default = "./public/views/hackmd.ejs";
description = ''
Path to the hackmd template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
errorPath = mkOption {
type = types.nullOr types.str;
default = null;
defaultText = "./public/views/error.ejs";
description = ''
Path to the error template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
prettyPath = mkOption {
type = types.nullOr types.str;
default = null;
defaultText = "./public/views/pretty.ejs";
description = ''
Path to the pretty template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
slidePath = mkOption {
type = types.nullOr types.str;
default = null;
defaultText = "./public/views/slide.hbs";
description = ''
Path to the slide template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
uploadsPath = mkOption {
type = types.str;
default = "${cfg.workDir}/uploads";
defaultText = "/var/lib/codimd/uploads";
description = ''
Path under which uploaded files are saved.
'';
};
sessionName = mkOption {
type = types.str;
default = "connect.sid";
description = ''
Specify the name of the session cookie.
'';
};
sessionSecret = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Specify the secret used to sign the session cookie.
If unset, one will be generated on startup.
'';
};
sessionLife = mkOption {
type = types.int;
default = 1209600000;
description = ''
Session lifetime in milliseconds.
'';
};
heartbeatInterval = mkOption {
type = types.int;
default = 5000;
description = ''
Specify the socket.io heartbeat interval.
'';
};
heartbeatTimeout = mkOption {
type = types.int;
default = 10000;
description = ''
Specify the socket.io heartbeat timeout.
'';
};
documentMaxLength = mkOption {
type = types.int;
default = 100000;
description = ''
Specify the maximum document length.
'';
};
email = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable email sign-in.
'';
};
allowEmailRegister = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable email registration.
'';
};
allowGravatar = mkOption {
type = types.bool;
default = true;
description = ''
Whether to use Gravatar as the profile picture source.
'';
};
imageUploadType = mkOption {
type = types.enum [ "imgur" "s3" "minio" "filesystem" ];
default = "filesystem";
description = ''
Specify where to upload images.
'';
};
minio = mkOption {
type = types.nullOr (types.submodule {
options = {
accessKey = mkOption {
type = types.str;
description = ''
Minio access key.
'';
};
secretKey = mkOption {
type = types.str;
description = ''
Minio secret key.
'';
};
endpoint = mkOption {
type = types.str;
description = ''
Minio endpoint.
'';
};
port = mkOption {
type = types.int;
default = 9000;
description = ''
Minio listen port.
'';
};
secure = mkOption {
type = types.bool;
default = true;
description = ''
Whether to use HTTPS for Minio.
'';
};
};
});
default = null;
description = "Configure the minio third-party integration.";
};
s3 = mkOption {
type = types.nullOr (types.submodule {
options = {
accessKeyId = mkOption {
type = types.str;
description = ''
AWS access key id.
'';
};
secretAccessKey = mkOption {
type = types.str;
description = ''
AWS access key.
'';
};
region = mkOption {
type = types.str;
description = ''
AWS S3 region.
'';
};
};
});
default = null;
description = "Configure the s3 third-party integration.";
};
s3bucket = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Specify the bucket name for upload types <literal>s3</literal> and <literal>minio</literal>.
'';
};
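# Hypothetical S3-backed upload store (credentials, region and bucket name are
# placeholders, not defaults):
#   services.codimd.configuration = {
#     imageUploadType = "s3";
#     s3 = {
#       accessKeyId = "AKIA...";
#       secretAccessKey = "...";
#       region = "eu-central-1";
#     };
#     s3bucket = "my-codimd-uploads";
#   };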
allowPDFExport = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable PDF exports.
'';
};
imgur.clientId = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Imgur API client ID.
'';
};
azure = mkOption {
type = types.nullOr (types.submodule {
options = {
connectionString = mkOption {
type = types.str;
description = ''
Azure Blob Storage connection string.
'';
};
container = mkOption {
type = types.str;
description = ''
Azure Blob Storage container name.
It will be created if non-existent.
'';
};
};
});
default = null;
description = "Configure the azure third-party integration.";
};
oauth2 = mkOption {
type = types.nullOr (types.submodule {
options = {
authorizationURL = mkOption {
type = types.str;
description = ''
Specify the OAuth authorization URL.
'';
};
tokenURL = mkOption {
type = types.str;
description = ''
Specify the OAuth token URL.
'';
};
clientID = mkOption {
type = types.str;
description = ''
Specify the OAuth client ID.
'';
};
clientSecret = mkOption {
type = types.str;
description = ''
Specify the OAuth client secret.
'';
};
};
});
default = null;
description = "Configure the OAuth integration.";
};
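# Sketch of a generic OAuth2 login (endpoint URLs and credentials are
# placeholders); note that values written here end up in the world-readable
# Nix store:
#   services.codimd.configuration.oauth2 = {
#     authorizationURL = "https://auth.example.com/oauth2/authorize";
#     tokenURL = "https://auth.example.com/oauth2/token";
#     clientID = "codimd";
#     clientSecret = "not-a-real-secret";
#   };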
facebook = mkOption {
type = types.nullOr (types.submodule {
options = {
clientID = mkOption {
type = types.str;
description = ''
Facebook API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
description = ''
Facebook API client secret.
'';
};
};
});
default = null;
description = "Configure the facebook third-party integration";
};
twitter = mkOption {
type = types.nullOr (types.submodule {
options = {
consumerKey = mkOption {
type = types.str;
description = ''
Twitter API consumer key.
'';
};
consumerSecret = mkOption {
type = types.str;
description = ''
Twitter API consumer secret.
'';
};
};
});
default = null;
description = "Configure the Twitter third-party integration.";
};
github = mkOption {
type = types.nullOr (types.submodule {
options = {
clientID = mkOption {
type = types.str;
description = ''
GitHub API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
description = ''
GitHub API client secret.
'';
};
};
});
default = null;
description = "Configure the GitHub third-party integration.";
};
gitlab = mkOption {
type = types.nullOr (types.submodule {
options = {
baseURL = mkOption {
type = types.str;
default = "";
description = ''
GitLab API authentication endpoint.
Only needed for endpoints other than gitlab.com.
'';
};
clientID = mkOption {
type = types.str;
description = ''
GitLab API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
description = ''
GitLab API client secret.
'';
};
scope = mkOption {
type = types.enum [ "api" "read_user" ];
default = "api";
description = ''
GitLab API requested scope.
GitLab snippet import/export requires api scope.
'';
};
};
});
default = null;
description = "Configure the GitLab third-party integration.";
};
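# Illustrative GitLab integration (client ID and secret are placeholders);
# baseURL can stay at its empty default for gitlab.com:
#   services.codimd.configuration.gitlab = {
#     clientID = "placeholder-id";
#     clientSecret = "placeholder-secret";
#     scope = "api";   # required for snippet import/export
#   };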
mattermost = mkOption {
type = types.nullOr (types.submodule {
options = {
baseURL = mkOption {
type = types.str;
description = ''
Mattermost authentication endpoint.
'';
};
clientID = mkOption {
type = types.str;
description = ''
Mattermost API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
description = ''
Mattermost API client secret.
'';
};
};
});
default = null;
description = "Configure the Mattermost third-party integration.";
};
dropbox = mkOption {
type = types.nullOr (types.submodule {
options = {
clientID = mkOption {
type = types.str;
description = ''
Dropbox API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
description = ''
Dropbox API client secret.
'';
};
appKey = mkOption {
type = types.str;
description = ''
Dropbox app key.
'';
};
};
});
default = null;
description = "Configure the Dropbox third-party integration.";
};
google = mkOption {
type = types.nullOr (types.submodule {
options = {
clientID = mkOption {
type = types.str;
description = ''
Google API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
description = ''
Google API client secret.
'';
};
};
});
default = null;
description = "Configure the Google third-party integration.";
};
ldap = mkOption {
type = types.nullOr (types.submodule {
options = {
providerName = mkOption {
type = types.str;
default = "";
description = ''
Optional name to be displayed at login form, indicating the LDAP provider.
'';
};
url = mkOption {
type = types.str;
example = "ldap://localhost";
description = ''
URL of LDAP server.
'';
};
bindDn = mkOption {
type = types.str;
description = ''
Bind DN for LDAP access.
'';
};
bindCredentials = mkOption {
type = types.str;
description = ''
Bind credentials for LDAP access.
'';
};
searchBase = mkOption {
type = types.str;
example = "o=users,dc=example,dc=com";
description = ''
LDAP directory to begin search from.
'';
};
searchFilter = mkOption {
type = types.str;
example = "(uid={{username}})";
description = ''
LDAP filter to search with.
'';
};
searchAttributes = mkOption {
type = types.listOf types.str;
example = [ "displayName" "mail" ];
description = ''
LDAP attributes to search with.
'';
};
userNameField = mkOption {
type = types.str;
default = "";
description = ''
LDAP field which is used as the username on CodiMD.
By default <option>useridField</option> is used.
'';
};
useridField = mkOption {
type = types.str;
example = "uid";
description = ''
LDAP field which is a unique identifier for users on CodiMD.
'';
};
tlsca = mkOption {
type = types.str;
example = "server-cert.pem,root.pem";
description = ''
Root CA for LDAP TLS in PEM format.
'';
};
};
});
default = null;
description = "Configure the LDAP integration.";
};
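# Sketch of an LDAP setup assembled from the option examples above (the bind DN
# and credentials are placeholders):
#   services.codimd.configuration.ldap = {
#     url = "ldap://localhost";
#     bindDn = "cn=admin,dc=example,dc=com";
#     bindCredentials = "placeholder-password";
#     searchBase = "o=users,dc=example,dc=com";
#     searchFilter = "(uid={{username}})";
#     useridField = "uid";
#   };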
saml = mkOption {
type = types.nullOr (types.submodule {
options = {
idpSsoUrl = mkOption {
type = types.str;
example = "https://idp.example.com/sso";
description = ''
IdP authentication endpoint.
'';
};
idpCert = mkOption {
type = types.path;
example = "/path/to/cert.pem";
description = ''
Path to IdP certificate file in PEM format.
'';
};
issuer = mkOption {
type = types.str;
default = "";
description = ''
Optional identity of the service provider.
This defaults to the server URL.
'';
};
identifierFormat = mkOption {
type = types.str;
default = "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress";
description = ''
Optional name identifier format.
'';
};
groupAttribute = mkOption {
type = types.str;
default = "";
example = "memberOf";
description = ''
Optional attribute name for group list.
'';
};
externalGroups = mkOption {
type = types.listOf types.str;
default = [];
example = [ "Temporary-staff" "External-users" ];
description = ''
Excluded group names.
'';
};
requiredGroups = mkOption {
type = types.listOf types.str;
default = [];
example = [ "Hackmd-users" "Codimd-users" ];
description = ''
Required group names.
'';
};
attribute = {
id = mkOption {
type = types.str;
default = "";
description = ''
Attribute map for `id'.
Defaults to `NameID' of SAML response.
'';
};
username = mkOption {
type = types.str;
default = "";
description = ''
Attribute map for `username'.
Defaults to `NameID' of SAML response.
'';
};
email = mkOption {
type = types.str;
default = "";
description = ''
Attribute map for `email'.
Defaults to `NameID' of SAML response if
<option>identifierFormat</option> has
the default value.
'';
};
};
};
});
default = null;
description = "Configure the SAML integration.";
};
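# Illustrative SAML setup; the IdP URL and certificate path come from the option
# examples above, the group attribute and group name are hypothetical:
#   services.codimd.configuration.saml = {
#     idpSsoUrl = "https://idp.example.com/sso";
#     idpCert = "/path/to/cert.pem";
#     groupAttribute = "memberOf";
#     requiredGroups = [ "Codimd-users" ];
#   };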
};
};
config = mkIf cfg.enable {
assertions = [
{ assertion = cfg.configuration.db == {} -> (
cfg.configuration.dbURL != "" && cfg.configuration.dbURL != null
);
message = "Database configuration for CodiMD missing."; }
];
users.groups.codimd = {};
users.users.codimd = {
description = "CodiMD service user";
group = "codimd";
extraGroups = cfg.groups;
home = cfg.workDir;
createHome = true;
};
systemd.services.codimd = {
description = "CodiMD Service";
wantedBy = [ "multi-user.target" ];
after = [ "networking.target" ];
preStart = ''
mkdir -p ${cfg.workDir}
chown -R codimd: ${cfg.workDir}
'';
serviceConfig = {
WorkingDirectory = cfg.workDir;
ExecStart = "${pkgs.codimd}/bin/codimd";
Environment = [
"CMD_CONFIG_FILE=${prettyJSON cfg.configuration}"
"NODE_ENV=production"
];
Restart = "always";
User = "codimd";
PermissionsStartOnly = true;
PrivateTmp = true;
};
};
};
}
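Putting the pieces together, a minimal sketch of enabling this service could look
like the following; it relies on the enable switch and the services.codimd
namespace used by the config section above, and reuses the SQLite example from
the db option (values are illustrative, not defaults):

  services.codimd = {
    enable = true;
    configuration.db = {
      dialect = "sqlite";
      storage = "/var/lib/codimd/db.codimd.sqlite";
    };
  };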

View File

@ -66,7 +66,7 @@ in
'';
}];
-security.wrappers = (import (builtins.toPath "${e.enlightenment}/e-wrappers.nix")).security.wrappers;
+security.wrappers = (import "${e.enlightenment}/e-wrappers.nix").security.wrappers;
environment.etc = singleton
{ source = xcfg.xkbDir;

View File

@ -110,6 +110,7 @@ in {
services.gnome3.gnome-terminal-server.enable = mkDefault true;
services.gnome3.gnome-user-share.enable = mkDefault true;
services.gnome3.gvfs.enable = true;
+services.gnome3.rygel.enable = mkDefault true;
services.gnome3.seahorse.enable = mkDefault true;
services.gnome3.sushi.enable = mkDefault true;
services.gnome3.tracker.enable = mkDefault true;

View File

@ -81,6 +81,7 @@ in
kconfig
kconfigwidgets
kcoreaddons
+kdoctools
kdbusaddons
kdeclarative
kded

View File

@ -266,7 +266,7 @@ in
session. Each session script can set the
<varname>waitPID</varname> shell variable to make this script
wait until the end of the user session. Each script is used
-to define either a windows manager or a desktop manager. These
+to define either a window manager or a desktop manager. These
can be differentiated by setting the attribute
<varname>manage</varname> either to <literal>"window"</literal>
or <literal>"desktop"</literal>.

View File

@ -197,7 +197,7 @@ in
# lightdm relaunches itself via just `lightdm`, so needs to be on the PATH
execCmd = ''
export PATH=${lightdm}/sbin:$PATH
-exec ${lightdm}/sbin/lightdm --log-dir=/var/log --run-dir=/run
+exec ${lightdm}/sbin/lightdm
'';
};
@ -246,12 +246,19 @@ in
'';
users.users.lightdm = {
-createHome = true;
-home = "/var/lib/lightdm-data";
+home = "/var/lib/lightdm";
group = "lightdm";
uid = config.ids.uids.lightdm;
};
+systemd.tmpfiles.rules = [
+  "d /run/lightdm 0711 lightdm lightdm 0"
+  "d /var/cache/lightdm 0711 root lightdm -"
+  "d /var/lib/lightdm 1770 lightdm lightdm -"
+  "d /var/lib/lightdm-data 1775 lightdm lightdm -"
+  "d /var/log/lightdm 0711 root lightdm -"
+];
users.groups.lightdm.gid = config.ids.gids.lightdm;
services.xserver.tty = null; # We might start multiple X servers so let the tty increment themselves..
services.xserver.display = null; # We specify our own display (and logfile) in xserver-wrapper up there

View File

@ -263,7 +263,9 @@ in
};
environment.etc."sddm.conf".source = cfgFile;
-environment.pathsToLink = [ "/share/sddm/themes" ];
+environment.pathsToLink = [
+  "/share/sddm"
+];
users.groups.sddm.gid = config.ids.gids.sddm;

View File

@ -205,7 +205,7 @@ in {
})
];
-services.udev.packages = [ pkgs.libinput ];
+services.udev.packages = [ pkgs.libinput.out ];
services.xserver.config =
''

View File

@ -419,7 +419,7 @@ while (my $f = <$listActiveUsers>) {
my ($uid, $name) = ($+{uid}, $+{user});
print STDERR "reloading user units for $name...\n";
-system("su", "-l", $name, "-c", "XDG_RUNTIME_DIR=/run/user/$uid @systemd@/bin/systemctl --user daemon-reload");
+system("su", "-s", "@shell@", "-l", $name, "-c", "XDG_RUNTIME_DIR=/run/user/$uid @systemd@/bin/systemctl --user daemon-reload");
}
close $listActiveUsers;

View File

@ -115,6 +115,7 @@ let
inherit (pkgs) utillinux coreutils;
systemd = config.systemd.package;
+shell = "${pkgs.bash}/bin/sh";
inherit children;
kernelParams = config.boot.kernelParams;

View File

@ -11,19 +11,30 @@ let
exit 1
}
+dev_exist() {
+  local target="$1"
+  if [ -e $target ]; then
+    return 0
+  else
+    local uuid=$(echo -n $target | sed -e 's,UUID=\(.*\),\1,g')
+    local dev=$(blkid --uuid $uuid)
+    return $?
+  fi
+}
wait_target() {
local name="$1"
local target="$2"
local secs="''${3:-10}"
local desc="''${4:-$name $target to appear}"
-if [ ! -e $target ]; then
+if ! dev_exist $target; then
echo -n "Waiting $secs seconds for $desc..."
local success=false;
for try in $(seq $secs); do
echo -n "."
sleep 1
-if [ -e $target ]; then
+if dev_exist $target; then
success=true
break
fi

Some files were not shown because too many files have changed in this diff.