Merge rename.nix changes.
commit 6de931a0f8
@@ -612,15 +612,45 @@ sed -i '/ = data_files/d' setup.py</programlisting>

<section xml:id="ssec-language-ruby"><title>Ruby</title>

<para>For example, to package yajl-ruby package, use gem-nix:</para>

<para>There is currently support for bundling applications that are packaged as Ruby gems. The utility "bundix" allows you to write a <filename>Gemfile</filename>, let Bundler create a <filename>Gemfile.lock</filename>, and then convert this into a Nix expression that contains all gem dependencies automatically.</para>

<para>For example, to package sensu, we did:</para>

<screen>
$ nix-env -i gem-nix
$ gem-nix --no-user-install --nix-file=pkgs/development/interpreters/ruby/generated.nix yajl-ruby
$ nix-build -A rubyPackages.yajl-ruby
</screen>
</section>

<![CDATA[$ cd pkgs/servers/monitoring
$ mkdir sensu
$ cat > Gemfile
source 'https://rubygems.org'
gem 'sensu'
$ bundler package --path /tmp/vendor/bundle
$ $(nix-build '<nixpkgs>' -A bundix)/bin/bundix
$ cat > default.nix
{ lib, bundlerEnv, ruby }:

bundlerEnv {
  name = "sensu-0.17.1";

  inherit ruby;
  gemfile = ./Gemfile;
  lockfile = ./Gemfile.lock;
  gemset = ./gemset.nix;

  meta = with lib; {
    description = "A monitoring framework that aims to be simple, malleable, and scalable.";
    homepage = http://sensuapp.org/;
    license = with licenses; mit;
    maintainers = with maintainers; [ theuni ];
    platforms = platforms.unix;
  };
}]]>
</screen>

<para>Please check in the <filename>Gemfile</filename>, <filename>Gemfile.lock</filename> and the <filename>gemset.nix</filename> so future updates can be run easily.</para>

</section>
<section xml:id="ssec-language-go"><title>Go</title>
@ -29,8 +29,8 @@ rec {
|
|||
|
||||
For another application, see build-support/vm, where this
|
||||
function is used to build arbitrary derivations inside a QEMU
|
||||
virtual machine. */
|
||||
|
||||
virtual machine.
|
||||
*/
|
||||
overrideDerivation = drv: f:
|
||||
let
|
||||
newDrv = derivation (drv.drvAttrs // (f drv));
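
As a usage illustration (not part of this diff), overrideDerivation takes a derivation and a function from its original attributes to replacement attributes; a minimal sketch, where the patch file is a placeholder:

let
  pkgs = import <nixpkgs> {};
in
  pkgs.lib.overrideDerivation pkgs.hello (oldAttrs: {
    # Append a hypothetical local patch; ./my-local.patch is a placeholder.
    patches = (oldAttrs.patches or []) ++ [ ./my-local.patch ];
  })
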
|
||||
|
@ -56,18 +56,17 @@ rec {
|
|||
makeOverridable = f: origArgs:
|
||||
let
|
||||
ff = f origArgs;
|
||||
overrideWith = newArgs: origArgs // (if builtins.isFunction newArgs then newArgs origArgs else newArgs);
|
||||
in
|
||||
if builtins.isAttrs ff then (ff //
|
||||
{ override = newArgs:
|
||||
makeOverridable f (origArgs // (if builtins.isFunction newArgs then newArgs origArgs else newArgs));
|
||||
{ override = newArgs: makeOverridable f (overrideWith newArgs);
|
||||
deepOverride = newArgs:
|
||||
makeOverridable f (lib.overrideExisting (lib.mapAttrs (deepOverrider newArgs) origArgs) newArgs);
|
||||
overrideDerivation = fdrv:
|
||||
makeOverridable (args: overrideDerivation (f args) fdrv) origArgs;
|
||||
})
|
||||
else if builtins.isFunction ff then
|
||||
{ override = newArgs:
|
||||
makeOverridable f (origArgs // (if builtins.isFunction newArgs then newArgs origArgs else newArgs));
|
||||
{ override = newArgs: makeOverridable f (overrideWith newArgs);
|
||||
__functor = self: ff;
|
||||
deepOverride = throw "deepOverride not yet supported for functors";
|
||||
overrideDerivation = throw "overrideDerivation not yet supported for functors";
|
||||
|
@ -102,8 +101,11 @@ rec {
|
|||
};
|
||||
*/
|
||||
callPackageWith = autoArgs: fn: args:
|
||||
let f = if builtins.isFunction fn then fn else import fn; in
|
||||
makeOverridable f ((builtins.intersectAttrs (builtins.functionArgs f) autoArgs) // args);
|
||||
let
|
||||
f = if builtins.isFunction fn then fn else import fn;
|
||||
auto = builtins.intersectAttrs (builtins.functionArgs f) autoArgs;
|
||||
in makeOverridable f (auto // args);
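
A small sketch of how the reworked callPackageWith behaves (the argument names are made up for illustration): arguments the function asks for are taken from autoArgs, and explicitly passed args still win.

let
  lib = (import <nixpkgs> {}).lib;
  # autoArgs, analogous to the package set normally fed to callPackage.
  callPackage = lib.callPackageWith { greeting = "hello"; audience = "world"; };
in
  # greeting and audience are filled in from autoArgs; the explicit
  # argument overrides the automatic one, so this evaluates to "hello nix".
  callPackage ({ greeting, audience }: "${greeting} ${audience}") { audience = "nix"; }
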
|
||||
|
||||
|
||||
/* Add attributes to each output of a derivation without changing the derivation itself */
|
||||
addPassthru = drv: passthru:
|
||||
|
@ -122,4 +124,38 @@ rec {
|
|||
|
||||
outputsList = map outputToAttrListElement outputs;
|
||||
in commonAttrs.${drv.outputName};
|
||||
|
||||
|
||||
/* Strip a derivation of all non-essential attributes, returning
|
||||
only those needed by hydra-eval-jobs. Also strictly evaluate the
|
||||
result to ensure that there are no thunks kept alive to prevent
|
||||
garbage collection. */
|
||||
hydraJob = drv:
|
||||
let
|
||||
outputs = drv.outputs or ["out"];
|
||||
|
||||
commonAttrs =
|
||||
{ inherit (drv) name system meta; inherit outputs; }
|
||||
// lib.optionalAttrs (drv._hydraAggregate or false) {
|
||||
_hydraAggregate = true;
|
||||
constituents = map hydraJob (lib.flatten drv.constituents);
|
||||
}
|
||||
// (lib.listToAttrs outputsList);
|
||||
|
||||
makeOutput = outputName:
|
||||
let output = drv.${outputName}; in
|
||||
{ name = outputName;
|
||||
value = commonAttrs // {
|
||||
outPath = output.outPath;
|
||||
drvPath = output.drvPath;
|
||||
type = "derivation";
|
||||
inherit outputName;
|
||||
};
|
||||
};
|
||||
|
||||
outputsList = map makeOutput outputs;
|
||||
|
||||
drv' = (lib.head outputsList).value;
|
||||
in lib.deepSeq drv' drv';
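
A sketch of how the new hydraJob helper is typically used in a release expression (assuming it is exported through lib like the other helpers in this file; the job attribute is illustrative):

let
  pkgs = import <nixpkgs> {};
in {
  # Strictly-evaluated, stripped-down job attribute for hydra-eval-jobs.
  hello = pkgs.lib.hydraJob pkgs.hello;
}
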
|
||||
|
||||
}
|
||||
|
|
|
@ -13,10 +13,11 @@ rec {
|
|||
|
||||
addErrorContextToAttrs = lib.mapAttrs (a: v: lib.addErrorContext "while evaluating ${a}" v);
|
||||
|
||||
traceIf = p: msg: x: if p then trace msg x else x;
|
||||
|
||||
traceVal = x: builtins.trace x x;
|
||||
traceXMLVal = x: builtins.trace (builtins.toXML x) x;
|
||||
traceXMLValMarked = str: x: builtins.trace (str + builtins.toXML x) x;
|
||||
traceVal = x: trace x x;
|
||||
traceXMLVal = x: trace (builtins.toXML x) x;
|
||||
traceXMLValMarked = str: x: trace (str + builtins.toXML x) x;
|
||||
|
||||
# this can help debug your code as well - designed to not produce thousands of lines
|
||||
traceShowVal = x : trace (showVal x) x;
|
||||
|
@ -42,6 +43,7 @@ rec {
|
|||
traceCall2 = n : f : a : b : let t = n2 : x : traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b));
|
||||
traceCall3 = n : f : a : b : c : let t = n2 : x : traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b) (t "arg 3" c));
|
||||
|
||||
# FIXME: rename this?
|
||||
traceValIfNot = c: x:
|
||||
if c x then true else trace (showVal x) false;
|
||||
|
||||
|
@ -106,6 +108,6 @@ rec {
|
|||
)
|
||||
else
|
||||
let r = strict expr;
|
||||
in builtins.trace "${str}\n result:\n${builtins.toXML r}" r
|
||||
in trace "${str}\n result:\n${builtins.toXML r}" r
|
||||
);
|
||||
}
|
||||
|
|
|
@ -100,6 +100,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
|||
fullName = "Creative Commons Attribution 4.0";
|
||||
};
|
||||
|
||||
cc-by-sa-40 = spdx {
|
||||
spdxId = "CC-BY-SA-4.0";
|
||||
fullName = "Creative Commons Attribution Share Alike 4.0";
|
||||
};
|
||||
|
||||
cddl = spdx {
|
||||
spdxId = "CDDL-1.0";
|
||||
fullName = "Common Development and Distribution License 1.0";
|
||||
|
@ -125,6 +130,16 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
|||
fullName = "Common Public License 1.0";
|
||||
};
|
||||
|
||||
efl10 = spdx {
|
||||
spdxId = "EFL-1.0";
|
||||
fullName = "Eiffel Forum License v1.0";
|
||||
};
|
||||
|
||||
efl20 = spdx {
|
||||
spdxId = "EFL-2.0";
|
||||
fullName = "Eiffel Forum License v2.0";
|
||||
};
|
||||
|
||||
epl10 = spdx {
|
||||
spdxId = "EPL-1.0";
|
||||
fullName = "Eclipse Public License 1.0";
|
||||
|
@ -282,6 +297,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
|||
fullName = "MIT License";
|
||||
};
|
||||
|
||||
mpl10 = spdx {
|
||||
spdxId = "MPL-1.0";
|
||||
fullName = "Mozilla Public License 1.0";
|
||||
};
|
||||
|
||||
mpl11 = spdx {
|
||||
spdxId = "MPL-1.1";
|
||||
fullName = "Mozilla Public License 1.1";
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
alphabetically sorted. */
|
||||
|
||||
_1126 = "Christian Lask <mail@elfsechsundzwanzig.de>";
|
||||
abaldeau = "Andreas Baldeau <andreas@baldeau.net>";
|
||||
abbradar = "Nikolay Amiantov <ab@fmap.me>";
|
||||
aforemny = "Alexander Foremny <alexanderforemny@googlemail.com>";
|
||||
aherrmann = "Andreas Herrmann <andreash87@gmx.ch>";
|
||||
|
@ -81,6 +82,7 @@
|
|||
garrison = "Jim Garrison <jim@garrison.cc>";
|
||||
gavin = "Gavin Rogers <gavin@praxeology.co.uk>";
|
||||
gebner = "Gabriel Ebner <gebner@gebner.org>";
|
||||
giogadi = "Luis G. Torres <lgtorres42@gmail.com>";
|
||||
globin = "Robin Gloster <robin@glob.in>";
|
||||
goibhniu = "Cillian de Róiste <cillian.deroiste@gmail.com>";
|
||||
gridaphobe = "Eric Seidel <eric@seidel.io>";
|
||||
|
@ -94,11 +96,13 @@
|
|||
iyzsong = "Song Wenwu <iyzsong@gmail.com>";
|
||||
j-keck = "Jürgen Keck <jhyphenkeck@gmail.com>";
|
||||
jagajaga = "Arseniy Seroka <ars.seroka@gmail.com>";
|
||||
jb55 = "William Casarin <bill@casarin.me>";
|
||||
jcumming = "Jack Cummings <jack@mudshark.org>";
|
||||
jgeerds = "Jascha Geerds <jg@ekby.de>";
|
||||
jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>";
|
||||
joachifm = "Joachim Fasting <joachifm@fastmail.fm>";
|
||||
joamaki = "Jussi Maki <joamaki@gmail.com>";
|
||||
joelmo = "Joel Moberg <joel.moberg@gmail.com>";
|
||||
joelteon = "Joel Taylor <me@joelt.io>";
|
||||
jpbernardy = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
|
||||
jwiegley = "John Wiegley <johnw@newartisans.com>";
|
||||
|
@ -141,6 +145,7 @@
|
|||
orbitz = "Malcolm Matalka <mmatalka@gmail.com>";
|
||||
page = "Carles Pagès <page@cubata.homelinux.net>";
|
||||
paholg = "Paho Lurie-Gregg <paho@paholg.com>";
|
||||
pakhfn = "Fedor Pakhomov <pakhfn@gmail.com>";
|
||||
pashev = "Igor Pashev <pashev.igor@gmail.com>";
|
||||
phausmann = "Philipp Hausmann <nix@314.ch>";
|
||||
phreedom = "Evgeny Egorochkin <phreedom@yandex.ru>";
|
||||
|
@ -185,9 +190,11 @@
|
|||
tailhook = "Paul Colomiets <paul@colomiets.name>";
|
||||
thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
|
||||
the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>";
|
||||
theuni = "Christian Theune <ct@flyingcircus.io>";
|
||||
thoughtpolice = "Austin Seipp <aseipp@pobox.com>";
|
||||
titanous = "Jonathan Rudenberg <jonathan@titanous.com>";
|
||||
tomberek = "Thomas Bereknyei <tomberek@gmail.com>";
|
||||
trino = "Hubert Mühlhans <muehlhans.hubert@ekodia.de>";
|
||||
tstrobel = "Thomas Strobel <ts468@cam.ac.uk>";
|
||||
ttuegel = "Thomas Tuegel <ttuegel@gmail.com>";
|
||||
tv = "Tomislav Viljetić <tv@shackspace.de>";
|
||||
|
|
|
@ -1,8 +1,3 @@
|
|||
with {
|
||||
inherit (import ./lists.nix) deepSeqList;
|
||||
inherit (import ./attrsets.nix) deepSeqAttrs;
|
||||
};
|
||||
|
||||
rec {
|
||||
|
||||
# Identity function.
|
||||
|
@ -23,23 +18,11 @@ rec {
|
|||
# Flip the order of the arguments of a binary function.
|
||||
flip = f: a: b: f b a;
|
||||
|
||||
# `seq x y' evaluates x, then returns y. That is, it forces strict
|
||||
# evaluation of its first argument.
|
||||
seq = x: y: if x == null then y else y;
|
||||
|
||||
# Like `seq', but recurses into lists and attribute sets to force evaluation
|
||||
# of all list elements/attributes.
|
||||
deepSeq = x: y:
|
||||
if builtins.isList x
|
||||
then deepSeqList x y
|
||||
else if builtins.isAttrs x
|
||||
then deepSeqAttrs x y
|
||||
else seq x y;
|
||||
|
||||
# Pull in some builtins not included elsewhere.
|
||||
inherit (builtins)
|
||||
pathExists readFile isBool isFunction
|
||||
isInt add sub lessThan;
|
||||
isInt add sub lessThan
|
||||
seq deepSeq;
|
||||
|
||||
# Return the Nixpkgs version number.
|
||||
nixpkgsVersion =
|
||||
|
|
|
@ -25,6 +25,22 @@
|
|||
<arg choice='plain'><option>--root</option></arg>
|
||||
<replaceable>root</replaceable>
|
||||
</arg>
|
||||
<arg>
|
||||
<group choice='req'>
|
||||
<arg choice='plain'><option>--max-jobs</option></arg>
|
||||
<arg choice='plain'><option>-j</option></arg>
|
||||
</group>
|
||||
<replaceable>number</replaceable>
|
||||
</arg>
|
||||
<arg>
|
||||
<option>--cores</option>
|
||||
<replaceable>number</replaceable>
|
||||
</arg>
|
||||
<arg>
|
||||
<option>--option</option>
|
||||
<replaceable>name</replaceable>
|
||||
<replaceable>value</replaceable>
|
||||
</arg>
|
||||
<arg>
|
||||
<arg choice='plain'><option>--show-trace</option></arg>
|
||||
</arg>
|
||||
|
@ -96,6 +112,37 @@ it.</para>
|
|||
</listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry><term><option>--max-jobs</option></term>
|
||||
<term><option>-j</option></term>
|
||||
|
||||
<listitem><para>Sets the maximum number of build jobs that Nix will
|
||||
perform in parallel to the specified number. The default is <literal>1</literal>.
|
||||
A higher value is useful on SMP systems or to exploit I/O latency.</para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
|
||||
<varlistentry><term><option>--cores</option></term>
|
||||
|
||||
<listitem><para>Sets the value of the <envar>NIX_BUILD_CORES</envar>
|
||||
environment variable in the invocation of builders. Builders can
|
||||
use this variable at their discretion to control the maximum amount
|
||||
of parallelism. For instance, in Nixpkgs, if the derivation
|
||||
attribute <varname>enableParallelBuilding</varname> is set to
|
||||
<literal>true</literal>, the builder passes the
|
||||
<option>-j<replaceable>N</replaceable></option> flag to GNU Make.
|
||||
The value <literal>0</literal> means that the builder should use all
|
||||
available CPU cores in the system.</para></listitem>
|
||||
|
||||
</varlistentry>
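
On the packaging side, the interaction described above can be sketched like this (an illustrative derivation, not part of this change):

{ stdenv }:

stdenv.mkDerivation {
  name = "example-parallel-build";   # illustrative name
  src = ./.;                         # placeholder source
  # With this set, the generic builder passes -j$NIX_BUILD_CORES to make,
  # which is exactly what --cores controls at build time.
  enableParallelBuilding = true;
}
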
|
||||
|
||||
<varlistentry><term><option>--option</option> <replaceable>name</replaceable> <replaceable>value</replaceable></term>
|
||||
|
||||
<listitem><para>Set the Nix configuration option
|
||||
<replaceable>name</replaceable> to <replaceable>value</replaceable>.</para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry>
|
||||
<term><option>--show-trace</option></term>
|
||||
<listitem>
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
{ stdenv, perl, cdrkit, pathsFromGraph
|
||||
{ stdenv, perl, pathsFromGraph, xorriso, syslinux
|
||||
|
||||
, # The file name of the resulting ISO image.
|
||||
isoName ? "cd.iso"
|
||||
|
@ -22,12 +22,18 @@
|
|||
, # Whether this should be an efi-bootable El-Torito CD.
|
||||
efiBootable ? false
|
||||
|
||||
, # Whether this should be a hybrid CD (bootable from USB as well as CD).
|
||||
usbBootable ? false
|
||||
|
||||
, # The path (in the ISO file system) of the boot image.
|
||||
bootImage ? ""
|
||||
|
||||
, # The path (in the ISO file system) of the efi boot image.
|
||||
efiBootImage ? ""
|
||||
|
||||
, # The path (outside the ISO file system) of the isohybrid-mbr image.
|
||||
isohybridMbrImage ? ""
|
||||
|
||||
, # Whether to compress the resulting ISO image with bzip2.
|
||||
compressImage ? false
|
||||
|
||||
|
@ -38,13 +44,14 @@
|
|||
|
||||
assert bootable -> bootImage != "";
|
||||
assert efiBootable -> efiBootImage != "";
|
||||
assert usbBootable -> isohybridMbrImage != "";
|
||||
|
||||
stdenv.mkDerivation {
|
||||
name = "iso9660-image";
|
||||
builder = ./make-iso9660-image.sh;
|
||||
buildInputs = [perl cdrkit];
|
||||
buildInputs = [perl xorriso syslinux];
|
||||
|
||||
inherit isoName bootable bootImage compressImage volumeID pathsFromGraph efiBootImage efiBootable;
|
||||
inherit isoName bootable bootImage compressImage volumeID pathsFromGraph efiBootImage efiBootable isohybridMbrImage usbBootable;
|
||||
|
||||
# !!! should use XML.
|
||||
sources = map (x: x.source) contents;
|
||||
|
|
|
@ -13,6 +13,20 @@ stripSlash() {
|
|||
if test "${res:0:1}" = /; then res=${res:1}; fi
|
||||
}
|
||||
|
||||
# Escape potential equal signs (=) with backslash (\=)
|
||||
escapeEquals() {
|
||||
echo "$1" | sed -e 's/\\/\\\\/g' -e 's/=/\\=/g'
|
||||
}
|
||||
|
||||
# Queues a file/directory to be placed on the ISO.
# An entry consists of a local source path ($2) and
# a destination path on the ISO ($1).
|
||||
addPath() {
|
||||
target="$1"
|
||||
source="$2"
|
||||
echo "$(escapeEquals "$target")=$(escapeEquals "$source")" >> pathlist
|
||||
}
|
||||
|
||||
stripSlash "$bootImage"; bootImage="$res"
|
||||
|
||||
|
||||
|
@ -31,11 +45,20 @@ if test -n "$bootable"; then
|
|||
fi
|
||||
done
|
||||
|
||||
bootFlags="-b $bootImage -c .boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table"
|
||||
isoBootFlags="-eltorito-boot ${bootImage}
|
||||
-eltorito-catalog .boot.cat
|
||||
-no-emul-boot -boot-load-size 4 -boot-info-table"
|
||||
fi
|
||||
|
||||
if test -n "$usbBootable"; then
|
||||
usbBootFlags="-isohybrid-mbr ${isohybridMbrImage}"
|
||||
fi
|
||||
|
||||
if test -n "$efiBootable"; then
|
||||
bootFlags="$bootFlags -eltorito-alt-boot -e $efiBootImage -no-emul-boot"
|
||||
efiBootFlags="-eltorito-alt-boot
|
||||
-e $efiBootImage
|
||||
-no-emul-boot
|
||||
-isohybrid-gpt-basdat"
|
||||
fi
|
||||
|
||||
touch pathlist
|
||||
|
@ -44,14 +67,14 @@ touch pathlist
|
|||
# Add the individual files.
|
||||
for ((i = 0; i < ${#targets_[@]}; i++)); do
|
||||
stripSlash "${targets_[$i]}"
|
||||
echo "$res=${sources_[$i]}" >> pathlist
|
||||
addPath "$res" "${sources_[$i]}"
|
||||
done
|
||||
|
||||
|
||||
# Add the closures of the top-level store objects.
|
||||
storePaths=$(perl $pathsFromGraph closure-*)
|
||||
for i in $storePaths; do
|
||||
echo "${i:1}=$i" >> pathlist
|
||||
addPath "${i:1}" "$i"
|
||||
done
|
||||
|
||||
|
||||
|
@ -59,7 +82,7 @@ done
|
|||
# nix-store --load-db.
|
||||
if [ -n "$object" ]; then
|
||||
printRegistration=1 perl $pathsFromGraph closure-* > nix-path-registration
|
||||
echo "nix-path-registration=nix-path-registration" >> pathlist
|
||||
addPath "nix-path-registration" "nix-path-registration"
|
||||
fi
|
||||
|
||||
|
||||
|
@ -70,22 +93,39 @@ for ((n = 0; n < ${#objects[*]}; n++)); do
|
|||
if test "$symlink" != "none"; then
|
||||
mkdir -p $(dirname ./$symlink)
|
||||
ln -s $object ./$symlink
|
||||
echo "$symlink=./$symlink" >> pathlist
|
||||
addPath "$symlink" "./$symlink"
|
||||
fi
|
||||
done
|
||||
|
||||
# !!! what does this do?
|
||||
cat pathlist | sed -e 's/=\(.*\)=\(.*\)=/\\=\1=\2\\=/' | tee pathlist.safer
|
||||
|
||||
|
||||
mkdir -p $out/iso
|
||||
genCommand="genisoimage -iso-level 4 -r -J $bootFlags -hide-rr-moved -graft-points -path-list pathlist.safer ${volumeID:+-V $volumeID}"
|
||||
if test -z "$compressImage"; then
|
||||
$genCommand -o $out/iso/$isoName
|
||||
else
|
||||
$genCommand | bzip2 > $out/iso/$isoName.bz2
|
||||
|
||||
xorriso="xorriso
|
||||
-as mkisofs
|
||||
-iso-level 3
|
||||
-volid ${volumeID}
|
||||
-appid nixos
|
||||
-publisher nixos
|
||||
-graft-points
|
||||
-full-iso9660-filenames
|
||||
${isoBootFlags}
|
||||
${usbBootFlags}
|
||||
${efiBootFlags}
|
||||
-r
|
||||
-path-list pathlist
|
||||
--sort-weight 0 /
|
||||
--sort-weight 1 /isolinux" # Make sure isolinux is near the beginning of the ISO
|
||||
|
||||
$xorriso -output $out/iso/$isoName
|
||||
|
||||
if test -n "$usbBootable"; then
|
||||
echo "Making image hybrid..."
|
||||
isohybrid --uefi $out/iso/$isoName
|
||||
fi
|
||||
|
||||
if test -n "$compressImage"; then
|
||||
echo "Compressing image..."
|
||||
bzip2 $out/iso/$isoName
|
||||
fi
|
||||
|
||||
mkdir -p $out/nix-support
|
||||
echo $system > $out/nix-support/system
|
||||
|
|
|
@ -37,6 +37,10 @@ sub new {
|
|||
if defined $args->{hda};
|
||||
$startCommand .= "-cdrom $args->{cdrom} "
|
||||
if defined $args->{cdrom};
|
||||
$startCommand .= "-device piix3-usb-uhci -drive id=usbdisk,file=$args->{usb},if=none,readonly -device usb-storage,drive=usbdisk "
|
||||
if defined $args->{usb};
|
||||
$startCommand .= "-bios $args->{bios} "
|
||||
if defined $args->{bios};
|
||||
$startCommand .= $args->{qemuFlags} || "";
|
||||
} else {
|
||||
$startCommand = Cwd::abs_path $startCommand;
|
||||
|
|
|
@ -23,9 +23,9 @@ in
|
|||
boot.kernelParams = [ "console=ttyS0" ];
|
||||
|
||||
boot.initrd.extraUtilsCommands = ''
|
||||
cp -v ${pkgs.gawk}/bin/gawk $out/bin/gawk
|
||||
cp -v ${pkgs.gnused}/bin/sed $out/bin/gnused
|
||||
cp -v ${pkgs.utillinux}/sbin/sfdisk $out/bin/sfdisk
|
||||
copy_bin_and_libs ${pkgs.gawk}/bin/gawk
|
||||
copy_bin_and_libs ${pkgs.gnused}/bin/sed
|
||||
copy_bin_and_libs ${pkgs.utillinux}/sbin/sfdisk
|
||||
cp -v ${growpart} $out/bin/growpart
|
||||
'';
|
||||
boot.initrd.postDeviceCommands = ''
|
||||
|
|
|
@ -27,6 +27,6 @@ with lib;
|
|||
fonts.fontconfig.enable = false;
|
||||
|
||||
nixpkgs.config.packageOverrides = pkgs:
|
||||
{ dbus = pkgs.dbus.override { useX11 = false; }; };
|
||||
{ dbus = pkgs.dbus.override { x11Support = false; }; };
|
||||
};
|
||||
}
|
||||
|
|
|
@ -124,9 +124,7 @@ in {
|
|||
}
|
||||
|
||||
(mkIf cfg.enable {
|
||||
environment.systemPackages = [
|
||||
cfg.package
|
||||
] ++ lib.optionals enable32BitAlsaPlugins [ pkgs_i686.pulseaudio ];
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
environment.etc = singleton {
|
||||
target = "asound.conf";
|
||||
|
|
|
@ -64,6 +64,6 @@ in
|
|||
#
|
||||
# Removed under grsecurity.
|
||||
boot.kernel.sysctl."kernel.kptr_restrict" =
|
||||
if config.security.grsecurity.enable then null else 1;
|
||||
if (config.boot.kernelPackages.kernel.features.grsecurity or false) then null else 1;
|
||||
};
|
||||
}
|
||||
|
|
|
@ -110,7 +110,7 @@ let
|
|||
|
||||
shell = mkOption {
|
||||
type = types.str;
|
||||
default = "/run/current-system/sw/sbin/nologin";
|
||||
default = "/run/current-system/sw/bin/nologin";
|
||||
description = "The path to the user's shell.";
|
||||
};
|
||||
|
||||
|
|
|
@ -22,8 +22,7 @@ with lib;
|
|||
###### implementation
|
||||
|
||||
config = mkIf config.hardware.cpu.amd.updateMicrocode {
|
||||
hardware.firmware = [ "${pkgs.amdUcode}/lib/firmware" ];
|
||||
boot.kernelModules = [ "microcode" ];
|
||||
boot.initrd.prepend = [ "${pkgs.microcodeAmd}/amd-ucode.img" ];
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -22,8 +22,7 @@ with lib;
|
|||
###### implementation
|
||||
|
||||
config = mkIf config.hardware.cpu.intel.updateMicrocode {
|
||||
hardware.firmware = [ "${pkgs.microcodeIntel}/lib/firmware" ];
|
||||
boot.kernelModules = [ "microcode" ];
|
||||
boot.initrd.prepend = [ "${pkgs.microcodeIntel}/intel-ucode.img" ];
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
{ config, lib, ... }:
|
||||
|
||||
{
|
||||
options.hardware.enableKSM = lib.mkEnableOption "Kernel Same-Page Merging";
|
||||
|
||||
config = lib.mkIf config.hardware.enableKSM {
|
||||
systemd.services.enable-ksm = {
|
||||
description = "Enable Kernel Same-Page Merging";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "systemd-udev-settle.service" ];
|
||||
script = ''
|
||||
if [ -e /sys/kernel/mm/ksm ]; then
|
||||
echo 1 > /sys/kernel/mm/ksm/run
|
||||
fi
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
|
@ -36,6 +36,9 @@ with lib;
|
|||
# EFI booting
|
||||
isoImage.makeEfiBootable = true;
|
||||
|
||||
# USB booting
|
||||
isoImage.makeUsbBootable = true;
|
||||
|
||||
# Add Memtest86+ to the CD.
|
||||
boot.loader.grub.memtest86.enable = true;
|
||||
|
||||
|
|
|
@ -7,66 +7,89 @@
|
|||
with lib;
|
||||
|
||||
let
|
||||
# Timeout in syslinux is in units of 1/10 of a second.
|
||||
# 0 is used to disable timeouts.
|
||||
syslinuxTimeout = if config.boot.loader.timeout == null then
|
||||
0
|
||||
else
|
||||
max (config.boot.loader.timeout * 10) 1;
|
||||
|
||||
# The Grub image.
|
||||
grubImage = pkgs.runCommand "grub_eltorito" {}
|
||||
|
||||
max = x: y: if x > y then x else y;
|
||||
|
||||
# The configuration file for syslinux.
|
||||
|
||||
# Notes on syslinux configuration and UNetbootin compatibility:
|
||||
# * Do not use '/syslinux/syslinux.cfg' as the path for this
|
||||
# configuration. UNetbootin will not parse the file and use it as-is.
|
||||
# This results in a broken configuration if the partition label does
|
||||
# not match the specified config.isoImage.volumeID. For this reason
|
||||
# we're using '/isolinux/isolinux.cfg'.
|
||||
# * Use APPEND instead of adding command-line arguments directly after
|
||||
# the LINUX entries.
|
||||
# * COM32 entries (chainload, reboot, poweroff) are not recognized. They
|
||||
# result in incorrect boot entries.
|
||||
|
||||
baseIsolinuxCfg =
|
||||
''
|
||||
${pkgs.grub2}/bin/grub-mkimage -p /boot/grub -O i386-pc -o tmp biosdisk iso9660 help linux linux16 chain png jpeg echo gfxmenu reboot
|
||||
cat ${pkgs.grub2}/lib/grub/*/cdboot.img tmp > $out
|
||||
''; # */
|
||||
SERIAL 0 38400
|
||||
TIMEOUT ${builtins.toString syslinuxTimeout}
|
||||
UI vesamenu.c32
|
||||
MENU TITLE NixOS
|
||||
MENU BACKGROUND /isolinux/background.png
|
||||
DEFAULT boot
|
||||
|
||||
|
||||
# The configuration file for Grub.
|
||||
grubCfg =
|
||||
''
|
||||
set default=${builtins.toString config.boot.loader.grub.default}
|
||||
set timeout=${builtins.toString config.boot.loader.grub.timeout}
|
||||
|
||||
if loadfont /boot/grub/unicode.pf2; then
|
||||
set gfxmode=640x480
|
||||
insmod gfxterm
|
||||
insmod vbe
|
||||
terminal_output gfxterm
|
||||
|
||||
insmod png
|
||||
if background_image /boot/grub/splash.png; then
|
||||
set color_normal=white/black
|
||||
set color_highlight=black/white
|
||||
else
|
||||
set menu_color_normal=cyan/blue
|
||||
set menu_color_highlight=white/blue
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
${config.boot.loader.grub.extraEntries}
|
||||
LABEL boot
|
||||
MENU LABEL NixOS ${config.system.nixosVersion} Installer
|
||||
LINUX /boot/bzImage
|
||||
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
|
||||
INITRD /boot/initrd
|
||||
'';
|
||||
|
||||
isolinuxMemtest86Entry = ''
|
||||
LABEL memtest
|
||||
MENU LABEL Memtest86+
|
||||
LINUX /boot/memtest.bin
|
||||
APPEND ${toString config.boot.loader.grub.memtest86.params}
|
||||
'';
|
||||
|
||||
isolinuxCfg = baseIsolinuxCfg + (optionalString config.boot.loader.grub.memtest86.enable isolinuxMemtest86Entry);
|
||||
|
||||
# The efi boot image
|
||||
efiDir = pkgs.runCommand "efi-directory" {} ''
|
||||
mkdir -p $out/efi/boot
|
||||
cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/efi/boot/boot${targetArch}.efi
|
||||
mkdir -p $out/EFI/boot
|
||||
cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi
|
||||
mkdir -p $out/loader/entries
|
||||
echo "title NixOS LiveCD" > $out/loader/entries/nixos-livecd.conf
|
||||
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf
|
||||
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf
|
||||
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf
|
||||
echo "default nixos-livecd" > $out/loader/loader.conf
|
||||
echo "timeout 5" >> $out/loader/loader.conf
|
||||
echo "timeout ${builtins.toString config.boot.loader.gummiboot.timeout}" >> $out/loader/loader.conf
|
||||
'';
|
||||
|
||||
efiImg = pkgs.runCommand "efi-image_eltorito" { buildInputs = [ pkgs.mtools ]; }
|
||||
efiImg = pkgs.runCommand "efi-image_eltorito" { buildInputs = [ pkgs.mtools pkgs.libfaketime ]; }
|
||||
# Be careful about determinism: du --apparent-size,
|
||||
# dates (cp -p, touch, mcopy -m, faketime for label), IDs (mkfs.vfat -i)
|
||||
''
|
||||
#Let's hope 15M is enough
|
||||
dd bs=2048 count=7680 if=/dev/zero of="$out"
|
||||
${pkgs.dosfstools}/sbin/mkfs.vfat "$out"
|
||||
mcopy -svi "$out" ${efiDir}/* ::
|
||||
mmd -i "$out" boot
|
||||
mcopy -v -i "$out" \
|
||||
${config.boot.kernelPackages.kernel}/bzImage ::boot/bzImage
|
||||
mcopy -v -i "$out" \
|
||||
${config.system.build.initialRamdisk}/initrd ::boot/initrd
|
||||
mkdir ./contents && cd ./contents
|
||||
cp -rp "${efiDir}"/* .
|
||||
mkdir ./boot
|
||||
cp -p "${config.boot.kernelPackages.kernel}/bzImage" \
|
||||
"${config.system.build.initialRamdisk}/initrd" ./boot/
|
||||
touch --date=@0 ./*
|
||||
|
||||
usage_size=$(du -sb --apparent-size . | tr -cd '[:digit:]')
|
||||
# Make the image 110% as big as the files need to make up for FAT overhead
|
||||
image_size=$(( ($usage_size * 110) / 100 ))
|
||||
# Make the image fit blocks of 1M
|
||||
block_size=$((1024*1024))
|
||||
image_size=$(( ($image_size / $block_size + 1) * $block_size ))
|
||||
echo "Usage size: $usage_size"
|
||||
echo "Image size: $image_size"
|
||||
truncate --size=$image_size "$out"
|
||||
${pkgs.libfaketime}/bin/faketime "2000-01-01 00:00:00" ${pkgs.dosfstools}/sbin/mkfs.vfat -i 12345678 -n EFIBOOT "$out"
|
||||
mcopy -bpsvm -i "$out" ./* ::
|
||||
''; # */
|
||||
|
||||
targetArch = if pkgs.stdenv.isi686 then
|
||||
|
@ -152,10 +175,25 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
isoImage.makeUsbBootable = mkOption {
|
||||
default = false;
|
||||
description = ''
|
||||
Whether the ISO image should be bootable from CD as well as USB.
|
||||
'';
|
||||
};
|
||||
|
||||
isoImage.splashImage = mkOption {
|
||||
default = pkgs.fetchurl {
|
||||
url = https://raw.githubusercontent.com/NixOS/nixos-artwork/5729ab16c6a5793c10a2913b5a1b3f59b91c36ee/ideas/grub-splash/grub-nixos-1.png;
|
||||
sha256 = "43fd8ad5decf6c23c87e9026170a13588c2eba249d9013cb9f888da5e2002217";
|
||||
};
|
||||
description = ''
|
||||
The splash image to use in the bootloader.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
config = {
|
||||
|
||||
boot.loader.grub.version = 2;
|
||||
|
@ -166,7 +204,7 @@ in
|
|||
|
||||
# !!! Hack - attributes expected by other modules.
|
||||
system.boot.loader.kernelFile = "bzImage";
|
||||
environment.systemPackages = [ pkgs.grub2 ];
|
||||
environment.systemPackages = [ pkgs.grub2 pkgs.syslinux ];
|
||||
|
||||
# In stage 1 of the boot, mount the CD as the root FS by label so
|
||||
# that we don't need to know its device. We pass the label of the
|
||||
|
@ -216,7 +254,7 @@ in
|
|||
options = "allow_other,cow,nonempty,chroot=/mnt-root,max_files=32768,hide_meta_files,dirs=/nix/.rw-store=rw:/nix/.ro-store=ro";
|
||||
};
|
||||
|
||||
boot.initrd.availableKernelModules = [ "squashfs" "iso9660" ];
|
||||
boot.initrd.availableKernelModules = [ "squashfs" "iso9660" "usb-storage" ];
|
||||
|
||||
boot.initrd.kernelModules = [ "loop" ];
|
||||
|
||||
|
@ -236,15 +274,12 @@ in
|
|||
# Individual files to be included on the CD, outside of the Nix
|
||||
# store on the CD.
|
||||
isoImage.contents =
|
||||
[ { source = grubImage;
|
||||
target = "/boot/grub/grub_eltorito";
|
||||
}
|
||||
{ source = pkgs.substituteAll {
|
||||
name = "grub.cfg";
|
||||
src = pkgs.writeText "grub.cfg-in" grubCfg;
|
||||
[ { source = pkgs.substituteAll {
|
||||
name = "isolinux.cfg";
|
||||
src = pkgs.writeText "isolinux.cfg-in" isolinuxCfg;
|
||||
bootRoot = "/boot";
|
||||
};
|
||||
target = "/boot/grub/grub.cfg";
|
||||
target = "/isolinux/isolinux.cfg";
|
||||
}
|
||||
{ source = config.boot.kernelPackages.kernel + "/bzImage";
|
||||
target = "/boot/bzImage";
|
||||
|
@ -252,51 +287,44 @@ in
|
|||
{ source = config.system.build.initialRamdisk + "/initrd";
|
||||
target = "/boot/initrd";
|
||||
}
|
||||
{ source = "${pkgs.grub2}/share/grub/unicode.pf2";
|
||||
target = "/boot/grub/unicode.pf2";
|
||||
}
|
||||
{ source = config.boot.loader.grub.splashImage;
|
||||
target = "/boot/grub/splash.png";
|
||||
}
|
||||
{ source = config.system.build.squashfsStore;
|
||||
target = "/nix-store.squashfs";
|
||||
}
|
||||
{ source = "${pkgs.syslinux}/share/syslinux";
|
||||
target = "/isolinux";
|
||||
}
|
||||
{ source = config.isoImage.splashImage;
|
||||
target = "/isolinux/background.png";
|
||||
}
|
||||
] ++ optionals config.isoImage.makeEfiBootable [
|
||||
{ source = efiImg;
|
||||
target = "/boot/efi.img";
|
||||
}
|
||||
{ source = "${efiDir}/efi";
|
||||
target = "/efi";
|
||||
{ source = "${efiDir}/EFI";
|
||||
target = "/EFI";
|
||||
}
|
||||
{ source = "${efiDir}/loader";
|
||||
target = "/loader";
|
||||
}
|
||||
] ++ mapAttrsToList (n: v: { source = v; target = "/boot/${n}"; }) config.boot.loader.grub.extraFiles;
|
||||
|
||||
# The Grub menu.
|
||||
boot.loader.grub.extraEntries =
|
||||
''
|
||||
menuentry "NixOS ${config.system.nixosVersion} Installer" {
|
||||
linux /boot/bzImage init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
|
||||
initrd /boot/initrd
|
||||
] ++ optionals config.boot.loader.grub.memtest86.enable [
|
||||
{ source = "${pkgs.memtest86plus}/memtest.bin";
|
||||
target = "/boot/memtest.bin";
|
||||
}
|
||||
];
|
||||
|
||||
menuentry "Boot from hard disk" {
|
||||
set root=(hd0)
|
||||
chainloader +1
|
||||
}
|
||||
'';
|
||||
|
||||
boot.loader.grub.timeout = 10;
|
||||
boot.loader.timeout = 10;
|
||||
|
||||
# Create the ISO image.
|
||||
system.build.isoImage = import ../../../lib/make-iso9660-image.nix ({
|
||||
inherit (pkgs) stdenv perl cdrkit pathsFromGraph;
|
||||
inherit (pkgs) stdenv perl pathsFromGraph xorriso syslinux;
|
||||
|
||||
inherit (config.isoImage) isoName compressImage volumeID contents;
|
||||
|
||||
bootable = true;
|
||||
bootImage = "/boot/grub/grub_eltorito";
|
||||
bootImage = "/isolinux/isolinux.bin";
|
||||
} // optionalAttrs config.isoImage.makeUsbBootable {
|
||||
usbBootable = true;
|
||||
isohybridMbrImage = "${pkgs.syslinux}/share/syslinux/isohdpfx.bin";
|
||||
} // optionalAttrs config.isoImage.makeEfiBootable {
|
||||
efiBootable = true;
|
||||
efiBootImage = "boot/efi.img";
|
||||
|
|
|
@ -98,7 +98,7 @@ in
|
|||
|
||||
boot.initrd.extraUtilsCommands =
|
||||
''
|
||||
cp ${pkgs.utillinux}/sbin/hwclock $out/bin
|
||||
copy_bin_and_libs ${pkgs.utillinux}/sbin/hwclock
|
||||
'';
|
||||
|
||||
boot.initrd.postDeviceCommands =
|
||||
|
|
|
@ -28,9 +28,14 @@ chrootCommand=(/run/current-system/sw/bin/bash)
|
|||
while [ "$#" -gt 0 ]; do
|
||||
i="$1"; shift 1
|
||||
case "$i" in
|
||||
-I)
|
||||
given_path="$1"; shift 1
|
||||
extraBuildFlags+=("$i" "$given_path")
|
||||
--max-jobs|-j|--cores|-I)
|
||||
j="$1"; shift 1
|
||||
extraBuildFlags+=("$i" "$j")
|
||||
;;
|
||||
--option)
|
||||
j="$1"; shift 1
|
||||
k="$1"; shift 1
|
||||
extraBuildFlags+=("$i" "$j" "$k")
|
||||
;;
|
||||
--root)
|
||||
mountPoint="$1"; shift 1
|
||||
|
@ -128,7 +133,7 @@ mkdir -m 0755 -p \
|
|||
$mountPoint/nix/var/nix/db \
|
||||
$mountPoint/nix/var/log/nix/drvs
|
||||
|
||||
mkdir -m 1775 -p $mountPoint/nix/store
|
||||
mkdir -m 1735 -p $mountPoint/nix/store
|
||||
chown root:nixbld $mountPoint/nix/store
|
||||
|
||||
|
||||
|
|
|
@ -27,28 +27,38 @@
|
|||
|
||||
ids.uids = {
|
||||
root = 0;
|
||||
nscd = 1;
|
||||
sshd = 2;
|
||||
ntp = 3;
|
||||
#wheel = 1; # unused
|
||||
#kmem = 2; # unused
|
||||
#tty = 3; # unused
|
||||
messagebus = 4; # D-Bus
|
||||
haldaemon = 5;
|
||||
nagios = 6;
|
||||
#disk = 6; # unused
|
||||
vsftpd = 7;
|
||||
ftp = 8;
|
||||
bitlbee = 9;
|
||||
avahi = 10;
|
||||
nagios = 11;
|
||||
atd = 12;
|
||||
zabbix = 13;
|
||||
postfix = 14;
|
||||
postfix = 13;
|
||||
#postdrop = 14; # unused
|
||||
dovecot = 15;
|
||||
tomcat = 16;
|
||||
#audio = 17; # unused
|
||||
#floppy = 18; # unused
|
||||
#uucp = 19; # unused
|
||||
#lp = 20; # unused
|
||||
pulseaudio = 22; # must match `pulseaudio' GID
|
||||
gpsd = 23;
|
||||
#cdrom = 24; # unused
|
||||
#tape = 25; # unused
|
||||
#video = 26; # unused
|
||||
#dialout = 27; # unused
|
||||
polkituser = 28;
|
||||
uptimed = 29;
|
||||
#utmp = 29; # unused
|
||||
ddclient = 30;
|
||||
davfs2 = 31;
|
||||
privoxy = 32;
|
||||
#disnix = 33; # unused
|
||||
osgi = 34;
|
||||
tor = 35;
|
||||
cups = 36;
|
||||
|
@ -70,18 +80,25 @@
|
|||
fprot = 52;
|
||||
bind = 53;
|
||||
wwwrun = 54;
|
||||
#adm = 55; # unused
|
||||
spamd = 56;
|
||||
#networkmanager = 57; # unused
|
||||
nslcd = 58;
|
||||
#scanner = 59; # unused
|
||||
nginx = 60;
|
||||
chrony = 61;
|
||||
#systemd-journal = 62; # unused
|
||||
smtpd = 63;
|
||||
smtpq = 64;
|
||||
supybot = 65;
|
||||
iodined = 66;
|
||||
#libvirtd = 67; # unused
|
||||
graphite = 68;
|
||||
statsd = 69;
|
||||
transmission = 70;
|
||||
postgres = 71;
|
||||
#vboxusers = 72; # unused
|
||||
#vboxsf = 73; # unused
|
||||
smbguest = 74; # unused
|
||||
varnish = 75;
|
||||
datadog = 76;
|
||||
|
@ -102,13 +119,13 @@
|
|||
minidlna = 91;
|
||||
elasticsearch = 92;
|
||||
tcpcryptd = 93; # tcpcryptd uses a hard-coded uid. We patch it in Nixpkgs to match this choice.
|
||||
zope2 = 94;
|
||||
#connman = 94; # unused
|
||||
firebird = 95;
|
||||
redis = 96;
|
||||
#keys = 96; # unused
|
||||
haproxy = 97;
|
||||
mongodb = 98;
|
||||
openldap = 99;
|
||||
memcached = 100;
|
||||
#users = 100; # unused
|
||||
cgminer = 101;
|
||||
munin = 102;
|
||||
logcheck = 103;
|
||||
|
@ -129,6 +146,7 @@
|
|||
foundationdb = 118;
|
||||
newrelic = 119;
|
||||
starbound = 120;
|
||||
#grsecurity = 121; # unused
|
||||
hydra = 122;
|
||||
spiped = 123;
|
||||
teamspeak = 124;
|
||||
|
@ -138,7 +156,7 @@
|
|||
znc = 128;
|
||||
polipo = 129;
|
||||
mopidy = 130;
|
||||
unifi = 131;
|
||||
#docker = 131; # unused
|
||||
gdm = 132;
|
||||
dhcpd = 133;
|
||||
siproxd = 134;
|
||||
|
@ -180,7 +198,21 @@
|
|||
panamax = 170;
|
||||
marathon = 171;
|
||||
exim = 172;
|
||||
#fleet = 173; # unused
|
||||
#input = 174; # unused
|
||||
sddm = 175;
|
||||
tss = 176;
|
||||
memcached = 177;
|
||||
nscd = 178;
|
||||
ntp = 179;
|
||||
zabbix = 180;
|
||||
redis = 181;
|
||||
sshd = 182;
|
||||
unifi = 183;
|
||||
uptimed = 184;
|
||||
zope2 = 185;
|
||||
ripple-data-api = 186;
|
||||
mediatomb = 187;
|
||||
|
||||
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
|
||||
|
||||
|
@ -200,15 +232,16 @@
|
|||
ftp = 8;
|
||||
bitlbee = 9;
|
||||
avahi = 10;
|
||||
#nagios = 11; # unused
|
||||
atd = 12;
|
||||
postfix = 13;
|
||||
postdrop = 14;
|
||||
dovecot = 15;
|
||||
tomcat = 16;
|
||||
audio = 17;
|
||||
floppy = 18;
|
||||
uucp = 19;
|
||||
lp = 20;
|
||||
tomcat = 21;
|
||||
pulseaudio = 22; # must match `pulseaudio' UID
|
||||
gpsd = 23;
|
||||
cdrom = 24;
|
||||
|
@ -217,21 +250,30 @@
|
|||
dialout = 27;
|
||||
#polkituser = 28; # currently unused, polkitd doesn't need a group
|
||||
utmp = 29;
|
||||
#ddclient = 30; # unused
|
||||
davfs2 = 31;
|
||||
privoxy = 32;
|
||||
disnix = 33;
|
||||
osgi = 34;
|
||||
tor = 35;
|
||||
ghostOne = 40;
|
||||
#cups = 36; # unused
|
||||
#foldingathome = 37; # unused
|
||||
#sabnzd = 38; # unused
|
||||
#kdm = 39; # unused
|
||||
ghostone = 40;
|
||||
git = 41;
|
||||
fourstore = 42;
|
||||
fourstorehttpd = 43;
|
||||
fourstorehttp = 43;
|
||||
virtuoso = 44;
|
||||
#rtkit = 45; # unused
|
||||
dovecot2 = 46;
|
||||
#dovenull = 47; # unused
|
||||
#unbound = 48; # unused
|
||||
prayer = 49;
|
||||
mpd = 50;
|
||||
clamav = 51;
|
||||
fprot = 52;
|
||||
#bind = 53; # unused
|
||||
wwwrun = 54;
|
||||
adm = 55;
|
||||
spamd = 56;
|
||||
|
@ -239,6 +281,7 @@
|
|||
nslcd = 58;
|
||||
scanner = 59;
|
||||
nginx = 60;
|
||||
#chrony = 61; # unused
|
||||
systemd-journal = 62;
|
||||
smtpd = 63;
|
||||
smtpq = 64;
|
||||
|
@ -246,6 +289,7 @@
|
|||
iodined = 66;
|
||||
libvirtd = 67;
|
||||
graphite = 68;
|
||||
#statsd = 69; # unused
|
||||
transmission = 70;
|
||||
postgres = 71;
|
||||
vboxusers = 72;
|
||||
|
@ -268,11 +312,17 @@
|
|||
quassel = 89;
|
||||
amule = 90;
|
||||
minidlna = 91;
|
||||
haproxy = 92;
|
||||
openldap = 93;
|
||||
#elasticsearch = 92; # unused
|
||||
#tcpcryptd = 93; # unused
|
||||
connman = 94;
|
||||
munin = 95;
|
||||
firebird = 95;
|
||||
keys = 96;
|
||||
haproxy = 97;
|
||||
#mongodb = 98; # unused
|
||||
openldap = 99;
|
||||
munin = 102;
|
||||
#logcheck = 103; # unused
|
||||
#nix-ssh = 104; # unused
|
||||
dictd = 105;
|
||||
couchdb = 106;
|
||||
searx = 107;
|
||||
|
@ -280,8 +330,12 @@
|
|||
jenkins = 109;
|
||||
systemd-journal-gateway = 110;
|
||||
notbit = 111;
|
||||
#ngircd = 112; # unused
|
||||
btsync = 113;
|
||||
#minecraft = 114; # unused
|
||||
monetdb = 115;
|
||||
#ripped = 116; # unused
|
||||
#murmur = 117; # unused
|
||||
foundationdb = 118;
|
||||
newrelic = 119;
|
||||
starbound = 120;
|
||||
|
@ -291,39 +345,64 @@
|
|||
teamspeak = 124;
|
||||
influxdb = 125;
|
||||
nsd = 126;
|
||||
firebird = 127;
|
||||
#gitolite = 127; # unused
|
||||
znc = 128;
|
||||
polipo = 129;
|
||||
mopidy = 130;
|
||||
docker = 131;
|
||||
gdm = 132;
|
||||
tss = 133;
|
||||
#dhcpcd = 133; # unused
|
||||
siproxd = 134;
|
||||
mlmmj = 135;
|
||||
#neo4j = 136; # unused
|
||||
riemann = 137;
|
||||
riemanndash = 138;
|
||||
#radvd = 139; # unused
|
||||
#zookeeper = 140; # unused
|
||||
#dnsmasq = 141; # unused
|
||||
uhub = 142;
|
||||
#yandexdisk = 143; # unused
|
||||
#collectd = 144; # unused
|
||||
#consul = 145; # unused
|
||||
mailpile = 146;
|
||||
redmine = 147;
|
||||
seeks = 148;
|
||||
prosody = 149;
|
||||
i2pd = 150;
|
||||
#dnscrypt-proxy = 151; # unused
|
||||
systemd-network = 152;
|
||||
systemd-resolve = 153;
|
||||
systemd-timesync = 154;
|
||||
liquidsoap = 155;
|
||||
#etcd = 156; # unused
|
||||
#docker-registry = 157; # unused
|
||||
hbase = 158;
|
||||
opentsdb = 159;
|
||||
scollector = 160;
|
||||
bosun = 161;
|
||||
kubernetes = 162;
|
||||
#peerflix = 163; # unused
|
||||
#chronos = 164; # unused
|
||||
gitlab = 165;
|
||||
nylon = 168;
|
||||
panamax = 170;
|
||||
#marathon = 171; # unused
|
||||
exim = 172;
|
||||
fleet = 173;
|
||||
input = 174;
|
||||
sddm = 175;
|
||||
tss = 176;
|
||||
#memcached = 177; # unused
|
||||
#nscd = 178; # unused
|
||||
#ntp = 179; # unused
|
||||
#zabbix = 180; # unused
|
||||
#redis = 181; # unused
|
||||
#sshd = 182; # unused
|
||||
#unifi = 183; # unused
|
||||
#uptimed = 184; # unused
|
||||
#zope2 = 185; # unused
|
||||
#ripple-data-api = 186; #unused
|
||||
mediatomb = 187;
|
||||
|
||||
# When adding a gid, make sure it doesn't match an existing
|
||||
# uid. Users and groups with the same name should have equal
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
./hardware/all-firmware.nix
|
||||
./hardware/cpu/amd-microcode.nix
|
||||
./hardware/cpu/intel-microcode.nix
|
||||
./hardware/ksm.nix
|
||||
./hardware/network/b43.nix
|
||||
./hardware/network/intel-2100bg.nix
|
||||
./hardware/network/intel-2200bg.nix
|
||||
|
@ -91,8 +92,9 @@
|
|||
./services/amqp/activemq/default.nix
|
||||
./services/amqp/rabbitmq.nix
|
||||
./services/audio/alsa.nix
|
||||
# Disabled as fuppes it does no longer builds.
|
||||
# Disabled as fuppes no longer builds.
|
||||
# ./services/audio/fuppes.nix
|
||||
./services/audio/icecast.nix
|
||||
./services/audio/liquidsoap.nix
|
||||
./services/audio/mpd.nix
|
||||
./services/audio/mopidy.nix
|
||||
|
@ -109,6 +111,7 @@
|
|||
./services/cluster/panamax.nix
|
||||
./services/computing/torque/server.nix
|
||||
./services/computing/torque/mom.nix
|
||||
./services/computing/slurm/slurm.nix
|
||||
./services/continuous-integration/jenkins/default.nix
|
||||
./services/continuous-integration/jenkins/slave.nix
|
||||
./services/databases/4store-endpoint.nix
|
||||
|
@ -159,6 +162,7 @@
|
|||
./services/hardware/udisks2.nix
|
||||
./services/hardware/upower.nix
|
||||
./services/hardware/thermald.nix
|
||||
./services/logging/fluentd.nix
|
||||
./services/logging/klogd.nix
|
||||
./services/logging/logcheck.nix
|
||||
./services/logging/logrotate.nix
|
||||
|
@ -188,6 +192,7 @@
|
|||
./services/misc/gitlab.nix
|
||||
./services/misc/gitolite.nix
|
||||
./services/misc/gpsd.nix
|
||||
./services/misc/mediatomb.nix
|
||||
./services/misc/mesos-master.nix
|
||||
./services/misc/mesos-slave.nix
|
||||
./services/misc/nix-daemon.nix
|
||||
|
@ -198,6 +203,7 @@
|
|||
./services/misc/phd.nix
|
||||
./services/misc/redmine.nix
|
||||
./services/misc/rippled.nix
|
||||
./services/misc/ripple-data-api.nix
|
||||
./services/misc/rogue.nix
|
||||
./services/misc/siproxd.nix
|
||||
./services/misc/svnserve.nix
|
||||
|
@ -231,6 +237,7 @@
|
|||
./services/network-filesystems/diod.nix
|
||||
./services/network-filesystems/u9fs.nix
|
||||
./services/network-filesystems/yandex-disk.nix
|
||||
./services/networking/aiccu.nix
|
||||
./services/networking/amuled.nix
|
||||
./services/networking/atftpd.nix
|
||||
./services/networking/avahi-daemon.nix
|
||||
|
@ -327,6 +334,7 @@
|
|||
./services/security/fprot.nix
|
||||
./services/security/frandom.nix
|
||||
./services/security/haveged.nix
|
||||
./services/security/munge.nix
|
||||
./services/security/torify.nix
|
||||
./services/security/tor.nix
|
||||
./services/security/torsocks.nix
|
||||
|
|
|
@ -100,7 +100,7 @@ in
|
|||
chgpasswd = { rootOK = true; };
|
||||
};
|
||||
|
||||
security.setuidPrograms = [ "passwd" "chfn" "su" "newgrp"
|
||||
security.setuidPrograms = [ "passwd" "chfn" "su" "sg" "newgrp"
|
||||
"newuidmap" "newgidmap" # new in shadow 4.2.x
|
||||
];
|
||||
|
||||
|
|
|
@ -107,7 +107,6 @@ in zipModules ([]
|
|||
++ obsolete [ "services" "sshd" "permitRootLogin" ] [ "services" "openssh" "permitRootLogin" ]
|
||||
++ obsolete [ "services" "xserver" "startSSHAgent" ] [ "services" "xserver" "startOpenSSHAgent" ]
|
||||
++ obsolete [ "services" "xserver" "startOpenSSHAgent" ] [ "programs" "ssh" "startAgent" ]
|
||||
++ obsolete [ "services" "xserver" "windowManager" "xbmc" ] [ "services" "xserver" "desktopManager" "xbmc" ]
|
||||
|
||||
# VirtualBox
|
||||
++ obsolete [ "services" "virtualbox" "enable" ] [ "services" "virtualboxGuest" "enable" ]
|
||||
|
@ -138,6 +137,10 @@ in zipModules ([]
|
|||
|
||||
++ obsolete [ "environment" "checkConfigurationOptions" ] [ "_module" "check" ]
|
||||
|
||||
# XBMC
|
||||
++ obsolete [ "services" "xserver" "windowManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ]
|
||||
++ obsolete [ "services" "xserver" "desktopManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ]
|
||||
|
||||
# Options that are obsolete and have no replacement.
|
||||
++ obsolete' [ "boot" "loader" "grub" "bootDevice" ]
|
||||
++ obsolete' [ "boot" "initrd" "luks" "enable" ]
|
||||
|
|
|
@ -1,43 +1,49 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
inherit (lib) mkIf mkOption types concatMapStrings;
|
||||
cfg = config.security.apparmor;
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
security.apparmor = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Enable the AppArmor Mandatory Access Control system.";
|
||||
};
|
||||
options = {
|
||||
security.apparmor = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Enable the AppArmor Mandatory Access Control system.";
|
||||
};
|
||||
profiles = mkOption {
|
||||
type = types.listOf types.path;
|
||||
default = [];
|
||||
description = "List of files containing AppArmor profiles.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
profiles = mkOption {
|
||||
type = types.listOf types.path;
|
||||
default = [];
|
||||
description = "List of files containing AppArmor profiles.";
|
||||
};
|
||||
};
|
||||
};
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ pkgs.apparmor-utils ];
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ pkgs.apparmor ];
|
||||
systemd.services.apparmor = {
|
||||
wantedBy = [ "local-fs.target" ];
|
||||
path = [ pkgs.apparmor ];
|
||||
systemd.services.apparmor = {
|
||||
wantedBy = [ "local-fs.target" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = "yes";
|
||||
ExecStart = concatMapStrings (p:
|
||||
''${pkgs.apparmor-parser}/bin/apparmor_parser -rKv -I ${pkgs.apparmor-profiles}/etc/apparmor.d "${p}" ; ''
|
||||
) cfg.profiles;
|
||||
ExecStop = concatMapStrings (p:
|
||||
''${pkgs.apparmor-parser}/bin/apparmor_parser -Rv "${p}" ; ''
|
||||
) cfg.profiles;
|
||||
};
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = "yes";
|
||||
ExecStart = concatMapStrings (profile:
|
||||
''${pkgs.apparmor}/sbin/apparmor_parser -rKv -I ${pkgs.apparmor}/etc/apparmor.d/ "${profile}" ; ''
|
||||
) cfg.profiles;
|
||||
ExecStop = concatMapStrings (profile:
|
||||
''${pkgs.apparmor}/sbin/apparmor_parser -Rv -I ${pkgs.apparmor}/etc/apparmor.d/ "${profile}" ; ''
|
||||
) cfg.profiles;
|
||||
};
|
||||
};
|
||||
};
|
||||
security.pam.services.apparmor.text = ''
|
||||
## AppArmor changes hats according to `order`: first try user, then
|
||||
## group, and finally fall back to a hat called "DEFAULT"
|
||||
##
|
||||
## For now, enable debugging as this is an experimental feature.
|
||||
session optional ${pkgs.apparmor-pam}/lib/security/pam_apparmor.so order=user,group,default debug
|
||||
'';
|
||||
};
|
||||
}
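
For reference, a minimal sketch of using the rewritten module from a machine configuration (the profile path is a placeholder):

{
  security.apparmor = {
    enable = true;
    # Files containing AppArmor profiles; this path is a placeholder.
    profiles = [ ./apparmor.d/usr.bin.example ];
  };
}
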
|
||||
|
|
|
@ -38,59 +38,47 @@ in
|
|||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Enable the testing grsecurity patch, based on Linux 3.18.
|
||||
Enable the testing grsecurity patch, based on Linux 3.19.
|
||||
'';
|
||||
};
|
||||
|
||||
config = {
|
||||
mode = mkOption {
|
||||
type = types.str;
|
||||
type = types.enum [ "auto" "custom" ];
|
||||
default = "auto";
|
||||
example = "custom";
|
||||
description = ''
|
||||
grsecurity configuration mode. This specifies whether
|
||||
grsecurity is auto-configured or otherwise completely
|
||||
manually configured. Can either be
|
||||
<literal>custom</literal> or <literal>auto</literal>.
|
||||
|
||||
<literal>auto</literal> is recommended.
|
||||
manually configured.
|
||||
'';
|
||||
};
|
||||
|
||||
priority = mkOption {
|
||||
type = types.str;
|
||||
type = types.enum [ "security" "performance" ];
|
||||
default = "security";
|
||||
example = "performance";
|
||||
description = ''
|
||||
grsecurity configuration priority. This specifies whether
|
||||
the kernel configuration should emphasize speed or
|
||||
security. Can either be <literal>security</literal> or
|
||||
<literal>performance</literal>.
|
||||
security.
|
||||
'';
|
||||
};
|
||||
|
||||
system = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
example = "desktop";
|
||||
type = types.enum [ "desktop" "server" ];
|
||||
default = "desktop";
|
||||
description = ''
|
||||
grsecurity system configuration. This specifies whether
|
||||
the kernel configuration should be suitable for a Desktop
|
||||
or a Server. Can either be <literal>server</literal> or
|
||||
<literal>desktop</literal>.
|
||||
grsecurity system configuration.
|
||||
'';
|
||||
};
|
||||
|
||||
virtualisationConfig = mkOption {
|
||||
type = types.str;
|
||||
default = "none";
|
||||
example = "host";
|
||||
type = types.nullOr (types.enum [ "host" "guest" ]);
|
||||
default = null;
|
||||
description = ''
|
||||
grsecurity virtualisation configuration. This specifies
|
||||
the virtualisation role of the machine - that is, whether
|
||||
it will be a virtual machine guest, a virtual machine
|
||||
host, or neither. Can be one of <literal>none</literal>,
|
||||
<literal>host</literal>, or <literal>guest</literal>.
|
||||
host, or neither.
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -106,17 +94,10 @@ in
|
|||
};
|
||||
|
||||
virtualisationSoftware = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
example = "kvm";
|
||||
type = types.nullOr (types.enum [ "kvm" "xen" "vmware" "virtualbox" ]);
|
||||
default = null;
|
||||
description = ''
|
||||
grsecurity virtualisation software. Set this to the
|
||||
specified virtual machine technology if the machine is
|
||||
running as a guest, or a host.
|
||||
|
||||
Can be one of <literal>kvm</literal>,
|
||||
<literal>xen</literal>, <literal>vmware</literal> or
|
||||
<literal>virtualbox</literal>.
|
||||
Configure grsecurity for use with this virtualisation software.
|
||||
'';
|
||||
};
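
Taken together, the enum-typed options above would be set roughly like this (a sketch only; the enable/stable/testing switches are defined elsewhere in this module and are assumed here):

{
  security.grsecurity = {
    enable = true;      # assumed top-level switch
    testing = true;     # grsecurity patch based on Linux 3.19
    config = {
      mode = "auto";
      priority = "security";
      system = "desktop";
      # Only relevant on virtualisation hosts/guests:
      # virtualisationConfig = "guest";
      # virtualisationSoftware = "kvm";
      # hardwareVirtualisation = true;
    };
  };
}
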
|
||||
|
||||
|
@ -245,7 +226,7 @@ in
|
|||
message = ''
|
||||
If grsecurity is enabled, you must select either the
|
||||
stable patch (with kernel 3.14), or the testing patch (with
|
||||
kernel 3.18) to continue.
|
||||
kernel 3.19) to continue.
|
||||
'';
|
||||
}
|
||||
{ assertion = (cfg.stable -> !cfg.testing) || (cfg.testing -> !cfg.stable);
|
||||
|
@ -262,25 +243,13 @@ in
|
|||
&& config.boot.kernelPackages.kernel.features.grsecurity;
|
||||
message = "grsecurity enabled, but kernel doesn't have grsec support";
|
||||
}
|
||||
{ assertion = elem cfg.config.mode [ "auto" "custom" ];
|
||||
message = "grsecurity mode must either be 'auto' or 'custom'.";
|
||||
}
|
||||
{ assertion = cfg.config.mode == "auto" -> elem cfg.config.system [ "desktop" "server" ];
|
||||
message = "when using auto grsec mode, system must be either 'desktop' or 'server'";
|
||||
}
|
||||
{ assertion = cfg.config.mode == "auto" -> elem cfg.config.priority [ "performance" "security" ];
|
||||
message = "when using auto grsec mode, priority must be 'performance' or 'security'.";
|
||||
}
|
||||
{ assertion = cfg.config.mode == "auto" -> elem cfg.config.virtualisationConfig [ "host" "guest" "none" ];
|
||||
message = "when using auto grsec mode, 'virt' must be 'host', 'guest' or 'none'.";
|
||||
}
|
||||
{ assertion = (cfg.config.mode == "auto" && (elem cfg.config.virtualisationConfig [ "host" "guest" ])) ->
|
||||
{ assertion = (cfg.config.mode == "auto" && (cfg.config.virtualisationConfig != null)) ->
|
||||
cfg.config.hardwareVirtualisation != null;
|
||||
message = "when using auto grsec mode with virtualisation, you must specify if your hardware has virtualisation extensions";
|
||||
}
|
||||
{ assertion = (cfg.config.mode == "auto" && (elem cfg.config.virtualisationConfig [ "host" "guest" ])) ->
|
||||
elem cfg.config.virtualisationSoftware [ "kvm" "xen" "virtualbox" "vmware" ];
|
||||
message = "virtualisation software must be 'kvm', 'xen', 'vmware' or 'virtualbox'";
|
||||
{ assertion = (cfg.config.mode == "auto" && (cfg.config.virtualisationConfig != null)) ->
|
||||
cfg.config.virtualisationSoftware != null;
|
||||
message = "grsecurity configured for virtualisation but no virtualisation software specified";
|
||||
}
|
||||
];
|
||||
|
||||
|
|
|
@ -77,7 +77,7 @@ in
|
|||
root ALL=(ALL) SETENV: ALL
|
||||
|
||||
# Users in the "wheel" group can do anything.
|
||||
%wheel ALL=(ALL) ${if cfg.wheelNeedsPassword then "" else "NOPASSWD: ALL, "}SETENV: ALL
|
||||
%wheel ALL=(ALL:ALL) ${if cfg.wheelNeedsPassword then "" else "NOPASSWD: ALL, "}SETENV: ALL
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
|
||||
|
|
|
@ -0,0 +1,130 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.icecast;
|
||||
configFile = pkgs.writeText "icecast.xml" ''
|
||||
<icecast>
|
||||
<hostname>${cfg.hostname}</hostname>
|
||||
|
||||
<authentication>
|
||||
<admin-user>${cfg.admin.user}</admin-user>
|
||||
<admin-password>${cfg.admin.password}</admin-password>
|
||||
</authentication>
|
||||
|
||||
<paths>
|
||||
<logdir>${cfg.logDir}</logdir>
|
||||
<adminroot>${pkgs.icecast}/share/icecast/admin</adminroot>
|
||||
<webroot>${pkgs.icecast}/share/icecast/web</webroot>
|
||||
<alias source="/" dest="/status.xsl"/>
|
||||
</paths>
|
||||
|
||||
<listen-socket>
|
||||
<port>${toString cfg.listen.port}</port>
|
||||
<bind-address>${cfg.listen.address}</bind-address>
|
||||
</listen-socket>
|
||||
|
||||
<security>
|
||||
<chroot>0</chroot>
|
||||
<changeowner>
|
||||
<user>${cfg.user}</user>
|
||||
<group>${cfg.group}</group>
|
||||
</changeowner>
|
||||
</security>
|
||||
|
||||
${cfg.extraConf}
|
||||
</icecast>
|
||||
'';
|
||||
in {
|
||||
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
|
||||
services.icecast = {
|
||||
|
||||
enable = mkEnableOption "Icecast server";
|
||||
|
||||
hostname = mkOption {
|
||||
type = types.str;
|
||||
description = "DNS name or IP address that will be used for the stream directory lookups or possibily the playlist generation if a Host header is not provided.";
|
||||
default = config.networking.domain;
|
||||
};
|
||||
|
||||
admin = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
description = "Username used for all administration functions.";
|
||||
default = "admin";
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
type = types.str;
|
||||
description = "Password used for all administration functions.";
|
||||
};
|
||||
};
|
||||
|
||||
logDir = mkOption {
|
||||
type = types.path;
|
||||
description = "Base directory used for logging.";
|
||||
default = "/var/log/icecast";
|
||||
};
|
||||
|
||||
listen = {
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
description = "TCP port that will be used to accept client connections.";
|
||||
default = 8000;
|
||||
};
|
||||
|
||||
address = mkOption {
|
||||
type = types.str;
|
||||
description = "Address Icecast will listen on.";
|
||||
default = "::";
|
||||
};
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
description = "User privileges for the server.";
|
||||
default = "nobody";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
description = "Group privileges for the server.";
|
||||
default = "nogroup";
|
||||
};
|
||||
|
||||
extraConf = mkOption {
|
||||
type = types.lines;
|
||||
description = "icecast.xml content.";
|
||||
default = "";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
systemd.services.icecast = {
|
||||
after = [ "network.target" ];
|
||||
description = "Icecast Network Audio Streaming Server";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
preStart = "mkdir -p ${cfg.logDir} && chown ${cfg.user}:${cfg.group} ${cfg.logDir}";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = "${pkgs.icecast}/bin/icecast -c ${configFile}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
|
@ -17,10 +17,10 @@ let
|
|||
log_file "syslog"
|
||||
user "${cfg.user}"
|
||||
group "${cfg.group}"
|
||||
${if cfg.network.host != "any" then
|
||||
"bind_to_address ${cfg.network.host}" else ""}
|
||||
${if cfg.network.port != 6600 then
|
||||
"port ${toString cfg.network.port}" else ""}
|
||||
|
||||
${optionalString (cfg.network.host != "any") ''bind_to_address "${cfg.network.host}"''}
|
||||
${optionalString (cfg.network.port != 6600) ''port "${toString cfg.network.port}"''}
|
||||
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
|
||||
|
@ -125,6 +125,7 @@ in {
|
|||
});
|
||||
|
||||
users.extraGroups = optionalAttrs (cfg.group == "mpd") (singleton {
|
||||
name = "mpd";
|
||||
gid = gid;
|
||||
});
|
||||
};
|
||||
|
|
|
@ -44,6 +44,12 @@ in {
|
|||
type = types.path;
|
||||
};
|
||||
|
||||
dockerCfg = mkOption {
|
||||
description = "Kubernetes contents of dockercfg file.";
|
||||
default = "";
|
||||
type = types.lines;
|
||||
};
|
||||
|
||||
apiserver = {
|
||||
enable = mkOption {
|
||||
description = "Whether to enable kubernetes apiserver.";
|
||||
|
@ -217,13 +223,13 @@ in {
|
|||
};
|
||||
|
||||
machines = mkOption {
|
||||
description = "Kubernetes apiserver list of machines to schedule to schedule onto";
|
||||
description = "Kubernetes controller list of machines to schedule to schedule onto";
|
||||
default = [];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes scheduler extra command line options.";
|
||||
description = "Kubernetes controller extra command line options.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
|
@ -260,6 +266,30 @@ in {
|
|||
type = types.bool;
|
||||
};
|
||||
|
||||
apiServers = mkOption {
|
||||
description = "Kubernetes kubelet list of Kubernetes API servers for publishing events, and reading pods and services.";
|
||||
default = ["${cfg.apiserver.address}:${toString cfg.apiserver.port}"];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
cadvisorPort = mkOption {
|
||||
description = "Kubernetes kubelet local cadvisor port.";
|
||||
default = config.services.cadvisor.port;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
clusterDns = mkOption {
|
||||
description = "Use alternative dns.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
clusterDomain = mkOption {
|
||||
description = "Use alternative domain.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes kubelet extra command line options.";
|
||||
default = "";
|
||||
|
@ -295,6 +325,7 @@ in {
|
|||
systemd.services.kubernetes-apiserver = {
|
||||
description = "Kubernetes Api Server";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = ["kubernetes-setup.service"];
|
||||
after = [ "network-interfaces.target" "etcd.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = let
|
||||
|
@ -306,26 +337,25 @@ in {
|
|||
(concatImapStringsSep "\n" (i: v: v + "," + (toString i))
|
||||
(mapAttrsToList (name: token: token + "," + name) cfg.apiserver.tokenAuth));
|
||||
in ''${cfg.package}/bin/kube-apiserver \
|
||||
-etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
|
||||
-address=${cfg.apiserver.address} \
|
||||
-port=${toString cfg.apiserver.port} \
|
||||
-read_only_port=${toString cfg.apiserver.readOnlyPort} \
|
||||
-public_address_override=${cfg.apiserver.publicAddress} \
|
||||
-allow_privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \
|
||||
--etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
|
||||
--address=${cfg.apiserver.address} \
|
||||
--port=${toString cfg.apiserver.port} \
|
||||
--read_only_port=${toString cfg.apiserver.readOnlyPort} \
|
||||
--public_address_override=${cfg.apiserver.publicAddress} \
|
||||
--allow_privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \
|
||||
${optionalString (cfg.apiserver.tlsCertFile!="")
|
||||
"-tls_cert_file=${cfg.apiserver.tlsCertFile}"} \
|
||||
"--tls_cert_file=${cfg.apiserver.tlsCertFile}"} \
|
||||
${optionalString (cfg.apiserver.tlsPrivateKeyFile!="")
|
||||
"-tls_private_key_file=${cfg.apiserver.tlsPrivateKeyFile}"} \
|
||||
"--tls_private_key_file=${cfg.apiserver.tlsPrivateKeyFile}"} \
|
||||
${optionalString (cfg.apiserver.tokenAuth!=[])
|
||||
"-token_auth_file=${tokenAuthFile}"} \
|
||||
-authorization_mode=${cfg.apiserver.authorizationMode} \
|
||||
"--token_auth_file=${tokenAuthFile}"} \
|
||||
--authorization_mode=${cfg.apiserver.authorizationMode} \
|
||||
${optionalString (cfg.apiserver.authorizationMode == "ABAC")
|
||||
"-authorization_policy_file=${authorizationPolicyFile}"} \
|
||||
${optionalString (cfg.apiserver.tlsCertFile!="" && cfg.apiserver.tlsCertFile!="")
|
||||
"-secure_port=${toString cfg.apiserver.securePort}"} \
|
||||
-portal_net=${cfg.apiserver.portalNet} \
|
||||
-logtostderr=true \
|
||||
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
|
||||
"--authorization_policy_file=${authorizationPolicyFile}"} \
|
||||
--secure_port=${toString cfg.apiserver.securePort} \
|
||||
--portal_net=${cfg.apiserver.portalNet} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
|
||||
${cfg.apiserver.extraOpts}
|
||||
'';
|
||||
User = "kubernetes";
|
||||
|
@ -345,11 +375,11 @@ in {
|
|||
after = [ "network-interfaces.target" "kubernetes-apiserver.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''${cfg.package}/bin/kube-scheduler \
|
||||
-address=${cfg.scheduler.address} \
|
||||
-port=${toString cfg.scheduler.port} \
|
||||
-master=${cfg.scheduler.master} \
|
||||
-logtostderr=true \
|
||||
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
|
||||
--address=${cfg.scheduler.address} \
|
||||
--port=${toString cfg.scheduler.port} \
|
||||
--master=${cfg.scheduler.master} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
|
||||
${cfg.scheduler.extraOpts}
|
||||
'';
|
||||
User = "kubernetes";
|
||||
|
@ -364,13 +394,12 @@ in {
|
|||
after = [ "network-interfaces.target" "kubernetes-apiserver.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''${cfg.package}/bin/kube-controller-manager \
|
||||
-address=${cfg.controllerManager.address} \
|
||||
-port=${toString cfg.controllerManager.port} \
|
||||
-master=${cfg.controllerManager.master} \
|
||||
${optionalString (cfg.controllerManager.machines != [])
|
||||
"-machines=${concatStringsSep "," cfg.controllerManager.machines}"} \
|
||||
-logtostderr=true \
|
||||
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
|
||||
--address=${cfg.controllerManager.address} \
|
||||
--port=${toString cfg.controllerManager.port} \
|
||||
--master=${cfg.controllerManager.master} \
|
||||
--machines=${concatStringsSep "," cfg.controllerManager.machines} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
|
||||
${cfg.controllerManager.extraOpts}
|
||||
'';
|
||||
User = "kubernetes";
|
||||
|
@ -382,23 +411,28 @@ in {
|
|||
systemd.services.kubernetes-kubelet = {
|
||||
description = "Kubernetes Kubelet Service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = ["kubernetes-setup.service"];
|
||||
after = [ "network-interfaces.target" "etcd.service" "docker.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''${cfg.package}/bin/kubelet \
|
||||
-etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
|
||||
-address=${cfg.kubelet.address} \
|
||||
-port=${toString cfg.kubelet.port} \
|
||||
-hostname_override=${cfg.kubelet.hostname} \
|
||||
-allow_privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
|
||||
-root_dir=${cfg.dataDir} \
|
||||
-logtostderr=true \
|
||||
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
|
||||
script = ''
|
||||
export PATH="/bin:/sbin:/usr/bin:/usr/sbin:$PATH"
|
||||
exec ${cfg.package}/bin/kubelet \
|
||||
--etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
|
||||
--api_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.kubelet.apiServers} \
|
||||
--address=${cfg.kubelet.address} \
|
||||
--port=${toString cfg.kubelet.port} \
|
||||
--hostname_override=${cfg.kubelet.hostname} \
|
||||
--allow_privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
|
||||
--root_dir=${cfg.dataDir} \
|
||||
--cadvisor_port=${toString cfg.kubelet.cadvisorPort} \
|
||||
${optionalString (cfg.kubelet.clusterDns != "")
|
||||
''--cluster_dns=${cfg.kubelet.clusterDns}''} \
|
||||
${optionalString (cfg.kubelet.clusterDomain != "")
|
||||
''--cluster_domain=${cfg.kubelet.clusterDomain}''} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
|
||||
${cfg.kubelet.extraOpts}
|
||||
'';
|
||||
User = "kubernetes";
|
||||
PermissionsStartOnly = true;
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
};
|
||||
serviceConfig.WorkingDirectory = cfg.dataDir;
|
||||
};
|
||||
})
|
||||
|
||||
|
@ -409,10 +443,10 @@ in {
|
|||
after = [ "network-interfaces.target" "etcd.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''${cfg.package}/bin/kube-proxy \
|
||||
-etcd_servers=${concatMapStringsSep "," (s: "http://${s}") cfg.etcdServers} \
|
||||
-bind_address=${cfg.proxy.address} \
|
||||
-logtostderr=true \
|
||||
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
|
||||
--etcd_servers=${concatMapStringsSep "," (s: "http://${s}") cfg.etcdServers} \
|
||||
--bind_address=${cfg.proxy.address} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
|
||||
${cfg.proxy.extraOpts}
|
||||
'';
|
||||
};
|
||||
|
@ -427,6 +461,8 @@ in {
|
|||
|
||||
(mkIf (any (el: el == "node") cfg.roles) {
|
||||
virtualisation.docker.enable = mkDefault true;
|
||||
services.cadvisor.enable = mkDefault true;
|
||||
services.cadvisor.port = mkDefault 4194;
|
||||
services.kubernetes.kubelet.enable = mkDefault true;
|
||||
services.kubernetes.proxy.enable = mkDefault true;
|
||||
})
|
||||
|
@ -442,6 +478,16 @@ in {
|
|||
cfg.kubelet.enable ||
|
||||
cfg.proxy.enable
|
||||
) {
|
||||
systemd.services.kubernetes-setup = {
|
||||
description = "Kubernetes setup.";
|
||||
serviceConfig.Type = "oneshot";
|
||||
script = ''
|
||||
mkdir -p /var/run/kubernetes
|
||||
chown kubernetes /var/run/kubernetes
|
||||
ln -fs ${pkgs.writeText "kubernetes-dockercfg" cfg.dockerCfg} /var/run/kubernetes/.dockercfg
|
||||
'';
|
||||
};
|
||||
|
||||
services.kubernetes.package = mkDefault pkgs.kubernetes;
|
||||
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
|
|
@ -124,14 +124,15 @@ in {
|
|||
};
|
||||
|
||||
preStart = ''
|
||||
rm -rf ${cfg.dataDir}/state/tmp
|
||||
mkdir -p ${cfg.dataDir}/ui/state/{log,tmp}
|
||||
chown -R panamax:panamax ${cfg.dataDir}
|
||||
'';
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${panamax_ui}/bin/bundle exec rails server --binding 127.0.0.1 --port ${toString cfg.UIPort}";
|
||||
User = "panamax";
|
||||
Group = "panamax";
|
||||
PermissionsStartOnly = true;
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -145,6 +146,8 @@ in {
|
|||
|
||||
services.journald.enableHttpGateway = mkDefault true;
|
||||
services.fleet.enable = mkDefault true;
|
||||
services.cadvisor.enable = mkDefault true;
|
||||
services.cadvisor.port = mkDefault 3002;
|
||||
virtualisation.docker.enable = mkDefault true;
|
||||
|
||||
environment.systemPackages = [ panamax_api panamax_ui ];
|
||||
|
|
|
@ -0,0 +1,130 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.slurm;
|
||||
# configuration file can be generated by http://slurm.schedmd.com/configurator.html
|
||||
configFile = pkgs.writeText "slurm.conf"
|
||||
''
|
||||
${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
|
||||
${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
|
||||
${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
|
||||
${optionalString (cfg.partitionName != null) ''partitionName=${cfg.partitionName}''}
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
in
|
||||
|
||||
{
|
||||
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
|
||||
services.slurm = {
|
||||
|
||||
server = {
|
||||
enable = mkEnableOption "slurm control daemon";
|
||||
|
||||
};
|
||||
|
||||
client = {
|
||||
enable = mkEnableOption "slurm rlient daemon";
|
||||
|
||||
};
|
||||
|
||||
controlMachine = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = null;
|
||||
description = ''
|
||||
The short hostname of the machine where SLURM control functions are
|
||||
executed (i.e. the name returned by the command "hostname -s", use "tux001"
|
||||
rather than "tux001.my.com").
|
||||
'';
|
||||
};
|
||||
|
||||
controlAddr = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = cfg.controlMachine;
|
||||
example = null;
|
||||
description = ''
|
||||
Name that ControlMachine should be referred to in establishing a
|
||||
communications path.
|
||||
'';
|
||||
};
|
||||
|
||||
nodeName = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "linux[1-32] CPUs=1 State=UNKNOWN";
|
||||
description = ''
|
||||
Name that SLURM uses to refer to a node (or base partition for BlueGene
|
||||
systems). Typically this would be the string that "/bin/hostname -s"
|
||||
returns. Note that now you have to write node's parameters after the name.
|
||||
'';
|
||||
};
|
||||
|
||||
partitionName = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP";
|
||||
description = ''
|
||||
Name by which the partition may be referenced. Note that now you have
|
||||
to write patrition's parameters after the name.
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
default = "";
|
||||
type = types.lines;
|
||||
description = ''
|
||||
Extra configuration options that will be added verbatim at
|
||||
the end of the slurm configuration file.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf (cfg.client.enable || cfg.server.enable) {
|
||||
|
||||
environment.systemPackages = [ pkgs.slurm-llnl ];
|
||||
|
||||
systemd.services.slurmd = mkIf (cfg.client.enable) {
|
||||
path = with pkgs; [ slurm-llnl coreutils ];
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "systemd-tmpfiles-clean.service" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "forking";
|
||||
ExecStart = "${pkgs.slurm-llnl}/bin/slurmd -f ${configFile}";
|
||||
PIDFile = "/run/slurmd.pid";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.slurmctld = mkIf (cfg.server.enable) {
|
||||
path = with pkgs; [ slurm-llnl munge coreutils ];
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "auditd.service" "munged.service" "slurmdbd.service" ];
|
||||
requires = [ "munged.service" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "forking";
|
||||
ExecStart = "${pkgs.slurm-llnl}/bin/slurmctld";
|
||||
PIDFile = "/run/slurmctld.pid";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
environment = { SLURM_CONF = "${configFile}"; };
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
|
@ -8,9 +8,7 @@ let
|
|||
|
||||
mysql = cfg.package;
|
||||
|
||||
is55 = mysql.mysqlVersion == "5.5";
|
||||
|
||||
mysqldDir = if is55 then "${mysql}/bin" else "${mysql}/libexec";
|
||||
atLeast55 = versionAtLeast mysql.mysqlVersion "5.5";
|
||||
|
||||
pidFile = "${cfg.pidDir}/mysqld.pid";
|
||||
|
||||
|
@ -24,7 +22,7 @@ let
|
|||
port = ${toString cfg.port}
|
||||
${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "log-bin=mysql-bin"}
|
||||
${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "server-id = ${toString cfg.replication.serverId}"}
|
||||
${optionalString (cfg.replication.role == "slave" && !is55)
|
||||
${optionalString (cfg.replication.role == "slave" && !atLeast55)
|
||||
''
|
||||
master-host = ${cfg.replication.masterHost}
|
||||
master-user = ${cfg.replication.masterUser}
|
||||
|
@ -75,7 +73,7 @@ in
|
|||
};
|
||||
|
||||
pidDir = mkOption {
|
||||
default = "/var/run/mysql";
|
||||
default = "/run/mysqld";
|
||||
description = "Location of the file which stores the PID of the MySQL server";
|
||||
};
|
||||
|
||||
|
@ -180,15 +178,19 @@ in
|
|||
|
||||
mkdir -m 0700 -p ${cfg.pidDir}
|
||||
chown -R ${cfg.user} ${cfg.pidDir}
|
||||
|
||||
# Make the socket directory
|
||||
mkdir -m 0700 -p /run/mysqld
|
||||
chown -R ${cfg.user} /run/mysqld
|
||||
'';
|
||||
|
||||
serviceConfig.ExecStart = "${mysqldDir}/mysqld --defaults-extra-file=${myCnf} ${mysqldOptions}";
|
||||
serviceConfig.ExecStart = "${mysql}/bin/mysqld --defaults-extra-file=${myCnf} ${mysqldOptions}";
|
||||
|
||||
postStart =
|
||||
''
|
||||
# Wait until the MySQL server is available for use
|
||||
count=0
|
||||
while [ ! -e /tmp/mysql.sock ]
|
||||
while [ ! -e /run/mysqld/mysqld.sock ]
|
||||
do
|
||||
if [ $count -eq 30 ]
|
||||
then
|
||||
|
@ -222,7 +224,7 @@ in
|
|||
fi
|
||||
'') cfg.initialDatabases}
|
||||
|
||||
${optionalString (cfg.replication.role == "slave" && is55)
|
||||
${optionalString (cfg.replication.role == "slave" && atLeast55)
|
||||
''
|
||||
# Set up the replication master
|
||||
|
||||
|
|
|
@ -128,12 +128,12 @@ in
|
|||
users.extraUsers = optionalAttrs (cfg.user == "tss") (singleton
|
||||
{ name = "tss";
|
||||
group = "tss";
|
||||
uid = config.ids.uids.nginx;
|
||||
uid = config.ids.uids.tss;
|
||||
});
|
||||
|
||||
users.extraGroups = optionalAttrs (cfg.group == "tss") (singleton
|
||||
{ name = "tss";
|
||||
gid = config.ids.gids.nginx;
|
||||
gid = config.ids.gids.tss;
|
||||
});
|
||||
};
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ let
|
|||
# Perform substitutions in all udev rules files.
|
||||
udevRules = stdenv.mkDerivation {
|
||||
name = "udev-rules";
|
||||
preferLocalBuild = true;
|
||||
buildCommand = ''
|
||||
mkdir -p $out
|
||||
shopt -s nullglob
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.fluentd;
|
||||
in {
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
|
||||
services.fluentd = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Whether to enable fluentd.";
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
description = "Fluentd config.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.fluentd = with pkgs; {
|
||||
description = "Fluentd Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.fluentd}/bin/fluentd -c ${pkgs.writeText "fluentd.conf" cfg.config}";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,282 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
uid = config.ids.uids.mediatomb;
|
||||
gid = config.ids.gids.mediatomb;
|
||||
cfg = config.services.mediatomb;
|
||||
|
||||
mtConf = pkgs.writeText "config.xml" ''
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<config version="2" xmlns="http://mediatomb.cc/config/2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://mediatomb.cc/config/2 http://mediatomb.cc/config/2.xsd">
|
||||
<server>
|
||||
<ui enabled="yes" show-tooltips="yes">
|
||||
<accounts enabled="no" session-timeout="30">
|
||||
<account user="mediatomb" password="mediatomb"/>
|
||||
</accounts>
|
||||
</ui>
|
||||
<name>${cfg.serverName}</name>
|
||||
<udn>uuid:${cfg.uuid}</udn>
|
||||
<home>${cfg.dataDir}</home>
|
||||
<webroot>${pkgs.mediatomb}/share/mediatomb/web</webroot>
|
||||
<storage>
|
||||
<sqlite3 enabled="yes">
|
||||
<database-file>mediatomb.db</database-file>
|
||||
</sqlite3>
|
||||
</storage>
|
||||
<protocolInfo extend="${if cfg.ps3Support then "yes" else "no"}"/>
|
||||
${if cfg.dsmSupport then ''
|
||||
<custom-http-headers>
|
||||
<add header="X-User-Agent: redsonic"/>
|
||||
</custom-http-headers>
|
||||
|
||||
<manufacturerURL>redsonic.com</manufacturerURL>
|
||||
<modelNumber>105</modelNumber>
|
||||
'' else ""}
|
||||
${if cfg.tg100Support then ''
|
||||
<upnp-string-limit>101</upnp-string-limit>
|
||||
'' else ""}
|
||||
<extended-runtime-options>
|
||||
<mark-played-items enabled="yes" suppress-cds-updates="yes">
|
||||
<string mode="prepend">*</string>
|
||||
<mark>
|
||||
<content>video</content>
|
||||
</mark>
|
||||
</mark-played-items>
|
||||
</extended-runtime-options>
|
||||
</server>
|
||||
<import hidden-files="no">
|
||||
<scripting script-charset="UTF-8">
|
||||
<common-script>/nix/store/cngbzn39vidd6jm4wgzxfafqll74ybfa-mediatomb-0.12.1/share/mediatomb/js/common.js</common-script>
|
||||
<playlist-script>/nix/store/cngbzn39vidd6jm4wgzxfafqll74ybfa-mediatomb-0.12.1/share/mediatomb/js/playlists.js</playlist-script>
|
||||
<virtual-layout type="builtin">
|
||||
<import-script>/nix/store/cngbzn39vidd6jm4wgzxfafqll74ybfa-mediatomb-0.12.1/share/mediatomb/js/import.js</import-script>
|
||||
</virtual-layout>
|
||||
</scripting>
|
||||
<mappings>
|
||||
<extension-mimetype ignore-unknown="no">
|
||||
<map from="mp3" to="audio/mpeg"/>
|
||||
<map from="ogx" to="application/ogg"/>
|
||||
<map from="ogv" to="video/ogg"/>
|
||||
<map from="oga" to="audio/ogg"/>
|
||||
<map from="ogg" to="audio/ogg"/>
|
||||
<map from="ogm" to="video/ogg"/>
|
||||
<map from="asf" to="video/x-ms-asf"/>
|
||||
<map from="asx" to="video/x-ms-asf"/>
|
||||
<map from="wma" to="audio/x-ms-wma"/>
|
||||
<map from="wax" to="audio/x-ms-wax"/>
|
||||
<map from="wmv" to="video/x-ms-wmv"/>
|
||||
<map from="wvx" to="video/x-ms-wvx"/>
|
||||
<map from="wm" to="video/x-ms-wm"/>
|
||||
<map from="wmx" to="video/x-ms-wmx"/>
|
||||
<map from="m3u" to="audio/x-mpegurl"/>
|
||||
<map from="pls" to="audio/x-scpls"/>
|
||||
<map from="flv" to="video/x-flv"/>
|
||||
<map from="mkv" to="video/x-matroska"/>
|
||||
<map from="mka" to="audio/x-matroska"/>
|
||||
${if cfg.ps3Support then ''
|
||||
<map from="avi" to="video/divx"/>
|
||||
'' else ""}
|
||||
${if cfg.dsmSupport then ''
|
||||
<map from="avi" to="video/avi"/>
|
||||
'' else ""}
|
||||
</extension-mimetype>
|
||||
<mimetype-upnpclass>
|
||||
<map from="audio/*" to="object.item.audioItem.musicTrack"/>
|
||||
<map from="video/*" to="object.item.videoItem"/>
|
||||
<map from="image/*" to="object.item.imageItem"/>
|
||||
</mimetype-upnpclass>
|
||||
<mimetype-contenttype>
|
||||
<treat mimetype="audio/mpeg" as="mp3"/>
|
||||
<treat mimetype="application/ogg" as="ogg"/>
|
||||
<treat mimetype="audio/ogg" as="ogg"/>
|
||||
<treat mimetype="audio/x-flac" as="flac"/>
|
||||
<treat mimetype="audio/x-ms-wma" as="wma"/>
|
||||
<treat mimetype="audio/x-wavpack" as="wv"/>
|
||||
<treat mimetype="image/jpeg" as="jpg"/>
|
||||
<treat mimetype="audio/x-mpegurl" as="playlist"/>
|
||||
<treat mimetype="audio/x-scpls" as="playlist"/>
|
||||
<treat mimetype="audio/x-wav" as="pcm"/>
|
||||
<treat mimetype="audio/L16" as="pcm"/>
|
||||
<treat mimetype="video/x-msvideo" as="avi"/>
|
||||
<treat mimetype="video/mp4" as="mp4"/>
|
||||
<treat mimetype="audio/mp4" as="mp4"/>
|
||||
<treat mimetype="application/x-iso9660" as="dvd"/>
|
||||
<treat mimetype="application/x-iso9660-image" as="dvd"/>
|
||||
</mimetype-contenttype>
|
||||
</mappings>
|
||||
<online-content>
|
||||
<YouTube enabled="no" refresh="28800" update-at-start="no" purge-after="604800" racy-content="exclude" format="mp4" hd="no">
|
||||
<favorites user="mediatomb"/>
|
||||
<standardfeed feed="most_viewed" time-range="today"/>
|
||||
<playlists user="mediatomb"/>
|
||||
<uploads user="mediatomb"/>
|
||||
<standardfeed feed="recently_featured" time-range="today"/>
|
||||
</YouTube>
|
||||
</online-content>
|
||||
</import>
|
||||
<transcoding enabled="${if cfg.transcoding then "yes" else "no"}">
|
||||
<mimetype-profile-mappings>
|
||||
<transcode mimetype="video/x-flv" using="vlcmpeg"/>
|
||||
<transcode mimetype="application/ogg" using="vlcmpeg"/>
|
||||
<transcode mimetype="application/ogg" using="oggflac2raw"/>
|
||||
<transcode mimetype="audio/x-flac" using="oggflac2raw"/>
|
||||
</mimetype-profile-mappings>
|
||||
<profiles>
|
||||
<profile name="oggflac2raw" enabled="no" type="external">
|
||||
<mimetype>audio/L16</mimetype>
|
||||
<accept-url>no</accept-url>
|
||||
<first-resource>yes</first-resource>
|
||||
<accept-ogg-theora>no</accept-ogg-theora>
|
||||
<agent command="ogg123" arguments="-d raw -o byteorder:big -f %out %in"/>
|
||||
<buffer size="1048576" chunk-size="131072" fill-size="262144"/>
|
||||
</profile>
|
||||
<profile name="vlcmpeg" enabled="no" type="external">
|
||||
<mimetype>video/mpeg</mimetype>
|
||||
<accept-url>yes</accept-url>
|
||||
<first-resource>yes</first-resource>
|
||||
<accept-ogg-theora>yes</accept-ogg-theora>
|
||||
<agent command="vlc" arguments="-I dummy %in --sout #transcode{venc=ffmpeg,vcodec=mp2v,vb=4096,fps=25,aenc=ffmpeg,acodec=mpga,ab=192,samplerate=44100,channels=2}:standard{access=file,mux=ps,dst=%out} vlc:quit"/>
|
||||
<buffer size="14400000" chunk-size="512000" fill-size="120000"/>
|
||||
</profile>
|
||||
</profiles>
|
||||
</transcoding>
|
||||
</config>
|
||||
'';
|
||||
|
||||
in {
|
||||
|
||||
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
|
||||
services.mediatomb = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable the mediatomb DLNA server.
|
||||
'';
|
||||
};
|
||||
|
||||
serverName = mkOption {
|
||||
type = types.string;
|
||||
default = "mediatomb";
|
||||
description = ''
|
||||
How to identify the server on the network.
|
||||
'';
|
||||
};
|
||||
|
||||
ps3Support = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable ps3 specific tweaks.
|
||||
WARNING: incompatible with DSM 320 support.
|
||||
'';
|
||||
};
|
||||
|
||||
dsmSupport = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable D-Link DSM 320 specific tweaks.
|
||||
WARNING: incompatible with ps3 support.
|
||||
'';
|
||||
};
|
||||
|
||||
tg100Support = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable Telegent TG100 specific tweaks.
|
||||
'';
|
||||
};
|
||||
|
||||
transcoding = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable transcoding.
|
||||
'';
|
||||
};
|
||||
|
||||
dataDir = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/lib/mediatomb";
|
||||
description = ''
|
||||
The directory where mediatomb stores its state, data, etc.
|
||||
'';
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
default = "mediatomb";
|
||||
description = "User account under which mediatomb runs.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
default = "mediatomb";
|
||||
description = "Group account under which mediatomb runs.";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
default = 49152;
|
||||
description = ''
|
||||
The network port to listen on.
|
||||
'';
|
||||
};
|
||||
|
||||
uuid = mkOption {
|
||||
default = "fdfc8a4e-a3ad-4c1d-b43d-a2eedb03a687";
|
||||
description = ''
|
||||
A unique (on your network) to identify the server by.
|
||||
'';
|
||||
};
|
||||
|
||||
customCfg = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Allow mediatomb to create and use its own config file inside ${cfg.dataDir}.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.mediatomb = {
|
||||
description = "MediaTomb media Server";
|
||||
after = [ "local-fs.target" "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ pkgs.mediatomb ];
|
||||
serviceConfig.ExecStart = "${pkgs.mediatomb}/bin/mediatomb -p ${toString cfg.port} ${if cfg.customCfg then "" else "-c ${mtConf}"} -m ${cfg.dataDir}";
|
||||
serviceConfig.User = "${cfg.user}";
|
||||
};
|
||||
|
||||
users.extraGroups = optionalAttrs (cfg.group == "mediatomb") (singleton {
|
||||
name = "mediatomb";
|
||||
gid = gid;
|
||||
});
|
||||
|
||||
users.extraUsers = optionalAttrs (cfg.user == "mediatomb") (singleton {
|
||||
name = "mediatomb";
|
||||
isSystemUser = true;
|
||||
group = cfg.group;
|
||||
home = "${cfg.dataDir}";
|
||||
createHome = true;
|
||||
description = "Mediatomb DLNA Server User";
|
||||
});
|
||||
|
||||
networking.firewall = {
|
||||
allowedUDPPorts = [ 1900 cfg.port ];
|
||||
allowedTCPPorts = [ cfg.port ];
|
||||
};
|
||||
};
|
||||
}
|
|
@ -379,9 +379,6 @@ in
|
|||
/nix/var/nix/gcroots/per-user \
|
||||
/nix/var/nix/profiles/per-user \
|
||||
/nix/var/nix/gcroots/tmp
|
||||
|
||||
ln -sf /nix/var/nix/profiles /nix/var/nix/gcroots/
|
||||
ln -sf /nix/var/nix/manifests /nix/var/nix/gcroots/
|
||||
'';
|
||||
|
||||
};
|
||||
|
|
|
@ -0,0 +1,168 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.rippleDataApi;
|
||||
|
||||
deployment_env_config = builtins.toJSON {
|
||||
production = {
|
||||
port = toString cfg.port;
|
||||
maxSockets = 150;
|
||||
batchSize = 100;
|
||||
startIndex = 32570;
|
||||
rippleds = cfg.rippleds;
|
||||
redis = {
|
||||
enable = cfg.redis.enable;
|
||||
host = cfg.redis.host;
|
||||
port = cfg.redis.port;
|
||||
options.auth_pass = null;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
db_config = builtins.toJSON {
|
||||
production = {
|
||||
username = optional (cfg.couchdb.pass != "") cfg.couchdb.user;
|
||||
password = optional (cfg.couchdb.pass != "") cfg.couchdb.pass;
|
||||
host = cfg.couchdb.host;
|
||||
port = cfg.couchdb.port;
|
||||
database = cfg.couchdb.db;
|
||||
protocol = "http";
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
options = {
|
||||
services.rippleDataApi = {
|
||||
enable = mkEnableOption "Whether to enable ripple data api.";
|
||||
|
||||
port = mkOption {
|
||||
description = "Ripple data api port";
|
||||
default = 5993;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
redis = {
|
||||
enable = mkOption {
|
||||
description = "Whether to enable caching of ripple data to redis.";
|
||||
default = true;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
host = mkOption {
|
||||
description = "Ripple data api redis host.";
|
||||
default = "localhost";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Ripple data api redis port.";
|
||||
default = 5984;
|
||||
type = types.int;
|
||||
};
|
||||
};
|
||||
|
||||
couchdb = {
|
||||
host = mkOption {
|
||||
description = "Ripple data api couchdb host.";
|
||||
default = "localhost";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Ripple data api couchdb port.";
|
||||
default = 5984;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
db = mkOption {
|
||||
description = "Ripple data api couchdb database.";
|
||||
default = "rippled";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
description = "Ripple data api couchdb username.";
|
||||
default = "rippled";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
pass = mkOption {
|
||||
description = "Ripple data api couchdb password.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
create = mkOption {
|
||||
description = "Whether to create couchdb database needed by ripple data api.";
|
||||
type = types.bool;
|
||||
default = true;
|
||||
};
|
||||
};
|
||||
|
||||
rippleds = mkOption {
|
||||
description = "List of rippleds to be used by ripple data api.";
|
||||
default = [
|
||||
"http://s_east.ripple.com:51234"
|
||||
"http://s_west.ripple.com:51234"
|
||||
];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf (cfg.enable) {
|
||||
services.couchdb.enable = mkDefault true;
|
||||
services.couchdb.bindAddress = mkDefault "0.0.0.0";
|
||||
services.redis.enable = mkDefault true;
|
||||
|
||||
systemd.services.ripple-data-api = {
|
||||
after = [ "couchdb.service" "redis.service" "ripple-data-api-importer.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
environment = {
|
||||
NODE_ENV = "production";
|
||||
DEPLOYMENT_ENVS_CONFIG = pkgs.writeText "deployment.environment.json" deployment_env_config;
|
||||
DB_CONFIG = pkgs.writeText "db.config.json" db_config;
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.ripple-data-api}/bin/api";
|
||||
User = "ripple-data-api";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.ripple-data-importer = {
|
||||
after = [ "couchdb.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ pkgs.curl ];
|
||||
|
||||
environment = {
|
||||
NODE_ENV = "production";
|
||||
DEPLOYMENT_ENVS_CONFIG = pkgs.writeText "deployment.environment.json" deployment_env_config;
|
||||
DB_CONFIG = pkgs.writeText "db.config.json" db_config;
|
||||
LOG_FILE = "/dev/null";
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.ripple-data-api}/bin/importer live debug2";
|
||||
User = "ripple-data-api";
|
||||
};
|
||||
|
||||
preStart = mkMerge [
|
||||
(mkIf (cfg.couchdb.create) ''
|
||||
HOST="http://${optionalString (cfg.couchdb.pass != "") "${cfg.couchdb.user}:${cfg.couchdb.pass}@"}${cfg.couchdb.host}:${toString cfg.couchdb.port}"
|
||||
curl -X PUT $HOST/${cfg.couchdb.db} || true
|
||||
'')
|
||||
"${pkgs.ripple-data-api}/bin/update-views"
|
||||
];
|
||||
};
|
||||
|
||||
users.extraUsers = singleton
|
||||
{ name = "ripple-data-api";
|
||||
description = "Ripple data api user";
|
||||
uid = config.ids.uids.ripple-data-api;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,5 +1,3 @@
|
|||
# configuration building is commented out until better tested.
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
@ -7,29 +5,189 @@ with lib;
|
|||
let
|
||||
cfg = config.services.rippled;
|
||||
|
||||
rippledStateCfgFile = "/var/lib/rippled/rippled.cfg";
|
||||
b2i = val: if val then "1" else "0";
|
||||
|
||||
dbCfg = db: ''
|
||||
type=${db.type}
|
||||
path=${db.path}
|
||||
${optionalString (db.compression != null) ("compression=${b2i db.compression}") }
|
||||
${optionalString (db.onlineDelete != null) ("online_delete=${toString db.onlineDelete}")}
|
||||
${optionalString (db.advisoryDelete != null) ("advisory_delete=${toString db.advisoryDelete}")}
|
||||
${db.extraOpts}
|
||||
'';
|
||||
|
||||
rippledCfg = ''
|
||||
[server]
|
||||
${concatMapStringsSep "\n" (n: "port_${n}") (attrNames cfg.ports)}
|
||||
|
||||
${concatMapStrings (p: ''
|
||||
[port_${p.name}]
|
||||
ip=${p.ip}
|
||||
port=${toString p.port}
|
||||
protocol=${concatStringsSep "," p.protocol}
|
||||
${optionalString (p.user != "") "user=${p.user}"}
|
||||
${optionalString (p.password != "") "user=${p.password}"}
|
||||
admin=${if p.admin then "allow" else "no"}
|
||||
${optionalString (p.ssl.key != null) "ssl_key=${p.ssl.key}"}
|
||||
${optionalString (p.ssl.cert != null) "ssl_cert=${p.ssl.cert}"}
|
||||
${optionalString (p.ssl.chain != null) "ssl_chain=${p.ssl.chain}"}
|
||||
'') (attrValues cfg.ports)}
|
||||
|
||||
[database_path]
|
||||
${cfg.databasePath}
|
||||
|
||||
[node_db]
|
||||
type=HyperLevelDB
|
||||
path=/var/lib/rippled/db/hyperldb
|
||||
${dbCfg cfg.nodeDb}
|
||||
|
||||
[debug_logfile]
|
||||
/var/log/rippled/debug.log
|
||||
${optionalString (cfg.tempDb != null) ''
|
||||
[temp_db]
|
||||
${dbCfg cfg.tempDb}''}
|
||||
|
||||
''
|
||||
+ optionalString (cfg.peerIp != null) ''
|
||||
[peer_ip]
|
||||
${cfg.peerIp}
|
||||
${optionalString (cfg.importDb != null) ''
|
||||
[import_db]
|
||||
${dbCfg cfg.importDb}''}
|
||||
|
||||
[peer_port]
|
||||
${toString cfg.peerPort}
|
||||
[ips]
|
||||
${concatStringsSep "\n" cfg.ips}
|
||||
|
||||
''
|
||||
+ cfg.extraConfig;
|
||||
[ips_fixed]
|
||||
${concatStringsSep "\n" cfg.ipsFixed}
|
||||
|
||||
[validators]
|
||||
${concatStringsSep "\n" cfg.validators}
|
||||
|
||||
[node_size]
|
||||
${cfg.nodeSize}
|
||||
|
||||
[ledger_history]
|
||||
${toString cfg.ledgerHistory}
|
||||
|
||||
[fetch_depth]
|
||||
${toString cfg.fetchDepth}
|
||||
|
||||
[validation_quorum]
|
||||
${toString cfg.validationQuorum}
|
||||
|
||||
[sntp_servers]
|
||||
${concatStringsSep "\n" cfg.sntpServers}
|
||||
|
||||
[rpc_startup]
|
||||
{ "command": "log_level", "severity": "${cfg.logLevel}" }
|
||||
'' + cfg.extraConfig;
|
||||
|
||||
portOptions = { name, ...}: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
internal = true;
|
||||
default = name;
|
||||
};
|
||||
|
||||
ip = mkOption {
|
||||
default = "127.0.0.1";
|
||||
description = "Ip where rippled listens.";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Port where rippled listens.";
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
protocol = mkOption {
|
||||
description = "Protocols expose by rippled.";
|
||||
type = types.listOf (types.enum ["http" "https" "ws" "wss" "peer"]);
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
description = "When set, these credentials will be required on HTTP/S requests.";
|
||||
type = types.str;
|
||||
default = "";
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
description = "When set, these credentials will be required on HTTP/S requests.";
|
||||
type = types.str;
|
||||
default = "";
|
||||
};
|
||||
|
||||
admin = mkOption {
|
||||
description = "Controls whether or not administrative commands are allowed.";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
ssl = {
|
||||
key = mkOption {
|
||||
description = ''
|
||||
Specifies the filename holding the SSL key in PEM format.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
cert = mkOption {
|
||||
description = ''
|
||||
Specifies the path to the SSL certificate file in PEM format.
|
||||
This is not needed if the chain includes it.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
chain = mkOption {
|
||||
description = ''
|
||||
If you need a certificate chain, specify the path to the
|
||||
certificate chain here. The chain may include the end certificate.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dbOptions = {
|
||||
type = mkOption {
|
||||
description = "Rippled database type.";
|
||||
type = types.enum ["rocksdb" "nudb" "sqlite"];
|
||||
default = "rocksdb";
|
||||
};
|
||||
|
||||
path = mkOption {
|
||||
description = "Location to store the database.";
|
||||
type = types.path;
|
||||
default = cfg.databasePath;
|
||||
};
|
||||
|
||||
compression = mkOption {
|
||||
description = "Whether to enable snappy compression.";
|
||||
type = types.nullOr types.bool;
|
||||
default = null;
|
||||
};
|
||||
|
||||
onlineDelete = mkOption {
|
||||
description = "Enable automatic purging of older ledger information.";
|
||||
type = types.addCheck (types.nullOr types.int) (v: v > 256);
|
||||
default = cfg.ledgerHistory;
|
||||
};
|
||||
|
||||
advisoryDelete = mkOption {
|
||||
description = ''
|
||||
If set, then require administrative RPC call "can_delete"
|
||||
to enable online deletion of ledger records.
|
||||
'';
|
||||
type = types.nullOr types.bool;
|
||||
default = null;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Extra database options.";
|
||||
type = types.lines;
|
||||
default = "";
|
||||
};
|
||||
};
|
||||
|
||||
rippledCfgFile = pkgs.writeText "rippled.cfg" rippledCfg;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
|
@ -37,236 +195,176 @@ in
|
|||
###### interface
|
||||
|
||||
options = {
|
||||
|
||||
services.rippled = {
|
||||
enable = mkEnableOption "Whether to enable rippled";
|
||||
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
description = "Whether to enable rippled";
|
||||
package = mkOption {
|
||||
description = "Which rippled package to use.";
|
||||
type = types.package;
|
||||
default = pkgs.rippled;
|
||||
};
|
||||
|
||||
#
|
||||
# Rippled has a simple configuration file layout that is easy to
|
||||
# build with nix. Many of the options are defined here but are
|
||||
# commented out until the code to append them to the config above
|
||||
# is written and they are tested.
|
||||
#
|
||||
# If you find a yourself implementing more options, please submit a
|
||||
# pull request.
|
||||
#
|
||||
ports = mkOption {
|
||||
description = "Ports exposed by rippled";
|
||||
type = types.attrsOf types.optionSet;
|
||||
options = [portOptions];
|
||||
default = {
|
||||
rpc = {
|
||||
port = 5005;
|
||||
admin = true;
|
||||
protocol = ["http"];
|
||||
};
|
||||
|
||||
peer = {
|
||||
port = 51235;
|
||||
ip = "0.0.0.0";
|
||||
protocol = ["peer"];
|
||||
};
|
||||
|
||||
ws_public = {
|
||||
port = 5006;
|
||||
ip = "0.0.0.0";
|
||||
protocol = ["ws" "wss"];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
nodeDb = mkOption {
|
||||
description = "Rippled main database options.";
|
||||
type = types.nullOr types.optionSet;
|
||||
options = [dbOptions];
|
||||
default = {
|
||||
type = "rocksdb";
|
||||
extraOpts = ''
|
||||
open_files=2000
|
||||
filter_bits=12
|
||||
cache_mb=256
|
||||
file_size_pb=8
|
||||
file_size_mult=2;
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
tempDb = mkOption {
|
||||
description = "Rippled temporary database options.";
|
||||
type = types.nullOr types.optionSet;
|
||||
options = [dbOptions];
|
||||
default = null;
|
||||
};
|
||||
|
||||
importDb = mkOption {
|
||||
description = "Settings for performing a one-time import.";
|
||||
type = types.nullOr types.optionSet;
|
||||
options = [dbOptions];
|
||||
default = null;
|
||||
};
|
||||
|
||||
nodeSize = mkOption {
|
||||
description = ''
|
||||
Rippled size of the node you are running.
|
||||
"tiny", "small", "medium", "large", and "huge"
|
||||
'';
|
||||
type = types.enum ["tiny" "small" "medium" "large" "huge"];
|
||||
default = "small";
|
||||
};
|
||||
|
||||
/*
|
||||
ips = mkOption {
|
||||
default = [ "r.ripple.com 51235" ];
|
||||
example = [ "192.168.0.1" "192.168.0.1 3939" "r.ripple.com 51235" ];
|
||||
description = ''
|
||||
List of hostnames or ips where the Ripple protocol is served.
|
||||
For a starter list, you can either copy entries from:
|
||||
For a starter list, you can either copy entries from:
|
||||
https://ripple.com/ripple.txt or if you prefer you can let it
|
||||
default to r.ripple.com 51235
|
||||
|
||||
A port may optionally be specified after adding a space to the
|
||||
address. By convention, if known, IPs are listed in from most
|
||||
A port may optionally be specified after adding a space to the
|
||||
address. By convention, if known, IPs are listed in from most
|
||||
to least trusted.
|
||||
'';
|
||||
type = types.listOf types.str;
|
||||
default = ["r.ripple.com 51235"];
|
||||
};
|
||||
|
||||
ipsFixed = mkOption {
|
||||
default = null;
|
||||
example = [ "192.168.0.1" "192.168.0.1 3939" "r.ripple.com 51235" ];
|
||||
description = ''
|
||||
List of IP addresses or hostnames to which rippled should always
|
||||
attempt to maintain peer connections with. This is useful for
|
||||
manually forming private networks, for example to configure a
|
||||
validation server that connects to the Ripple network through a
|
||||
List of IP addresses or hostnames to which rippled should always
|
||||
attempt to maintain peer connections with. This is useful for
|
||||
manually forming private networks, for example to configure a
|
||||
validation server that connects to the Ripple network through a
|
||||
public-facing server, or for building a set of cluster peers.
|
||||
|
||||
A port may optionally be specified after adding a space to the address
|
||||
'';
|
||||
};
|
||||
*/
|
||||
|
||||
peerIp = mkOption {
|
||||
default = null;
|
||||
example = "0.0.0.0";
|
||||
description = ''
|
||||
IP address or domain to bind to allow external connections from peers.
|
||||
Defaults to not binding, which disallows external connections from peers.
|
||||
'';
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
};
|
||||
|
||||
peerPort = mkOption {
|
||||
default = 51235;
|
||||
validators = mkOption {
|
||||
description = ''
|
||||
If peerIp is supplied, corresponding port to bind to for peer connections.
|
||||
List of nodes to always accept as validators. Nodes are specified by domain
|
||||
or public key.
|
||||
'';
|
||||
type = types.listOf types.str;
|
||||
default = [
|
||||
"n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7 RL1"
|
||||
"n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj RL2"
|
||||
"n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C RL3"
|
||||
"n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS RL4"
|
||||
"n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA RL5"
|
||||
];
|
||||
};
|
||||
|
||||
/*
|
||||
peerPortProxy = mkOption {
|
||||
type = types.int;
|
||||
example = 51236;
|
||||
databasePath = mkOption {
|
||||
description = ''
|
||||
An optional, additional listening port number for peers. Incoming
|
||||
connections on this port will be required to provide a PROXY Protocol
|
||||
handshake, described in this document (external link):
|
||||
|
||||
http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
|
||||
|
||||
The PROXY Protocol is a popular method used by elastic load balancing
|
||||
service providers such as Amazon, to identify the true IP address and
|
||||
port number of external incoming connections.
|
||||
|
||||
In addition to enabling this setting, it will also be required to
|
||||
use your provider-specific control panel or administrative web page
|
||||
to configure your server instance to receive PROXY Protocol handshakes,
|
||||
and also to restrict access to your instance to the Elastic Load Balancer.
|
||||
Path to the ripple database.
|
||||
'';
|
||||
type = types.path;
|
||||
default = "/var/lib/rippled/db";
|
||||
};
|
||||
|
||||
peerPrivate = mkOption {
|
||||
default = null;
|
||||
example = 0;
|
||||
validationQuorum = mkOption {
|
||||
description = ''
|
||||
0: Request peers to broadcast your address. Normal outbound peer connections [default]
|
||||
1: Request peers not broadcast your address. Only connect to configured peers.
|
||||
'';
|
||||
};
|
||||
|
||||
peerSslCipherList = mkOption {
|
||||
default = null;
|
||||
example = "ALL:!LOW:!EXP:!MD5:@STRENGTH";
|
||||
description = ''
|
||||
A colon delimited string with the allowed SSL cipher modes for peer. The
|
||||
choices for for ciphers are defined by the OpenSSL API function
|
||||
SSL_CTX_set_cipher_list, documented here (external link):
|
||||
|
||||
http://pic.dhe.ibm.com/infocenter/tpfhelp/current/index.jsp?topic=%2Fcom.ibm.ztpf-ztpfdf.doc_put.cur%2Fgtpc2%2Fcpp_ssl_ctx_set_cipher_list.html
|
||||
|
||||
The default setting of "ALL:!LOW:!EXP:!MD5:@STRENGTH", which allows
|
||||
non-authenticated peer connections (they are, however, secure).
|
||||
'';
|
||||
};
|
||||
|
||||
nodeSeed = mkOption {
|
||||
default = null;
|
||||
example = "RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE";
|
||||
description = ''
|
||||
This is used for clustering. To force a particular node seed or key, the
|
||||
key can be set here. The format is the same as the validation_seed field.
|
||||
To obtain a validation seed, use the rippled validation_create command.
|
||||
'';
|
||||
};
|
||||
|
||||
clusterNodes = mkOption {
|
||||
default = null;
|
||||
example = [ "n9KorY8QtTdRx7TVDpwnG9NvyxsDwHUKUEeDLY3AkiGncVaSXZi5" ];
|
||||
description = ''
|
||||
To extend full trust to other nodes, place their node public keys here.
|
||||
Generally, you should only do this for nodes under common administration.
|
||||
Node public keys start with an 'n'. To give a node a name for identification
|
||||
place a space after the public key and then the name.
|
||||
'';
|
||||
};
|
||||
|
||||
sntpServers = mkOption {
|
||||
default = null;
|
||||
example = [ "time.nist.gov" "pool.ntp.org" ];
|
||||
description = ''
|
||||
IP address or domain of NTP servers to use for time synchronization.
|
||||
'';
|
||||
};
|
||||
|
||||
# TODO: websocket options
|
||||
|
||||
rpcAllowRemote = mkOption {
|
||||
default = false;
|
||||
description = ''
|
||||
false: Allow RPC connections only from 127.0.0.1. [default]
|
||||
true: Allow RPC connections from any IP.
|
||||
'';
|
||||
};
|
||||
|
||||
rpcAdminAllow = mkOption {
|
||||
example = [ "10.0.0.4" ];
|
||||
description = ''
|
||||
List of IP addresses allowed to have admin access.
|
||||
'';
|
||||
};
|
||||
|
||||
rpcAdminUser = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
As a server, require this as the admin user to be specified. Also, require
|
||||
rpc_admin_user and rpc_admin_password to be checked for RPC admin functions.
|
||||
The request must specify these as the admin_user and admin_password in the
|
||||
request object.
|
||||
'';
|
||||
};
|
||||
|
||||
rpcAdminPassword = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
As a server, require this as the admin pasword to be specified. Also,
|
||||
require rpc_admin_user and rpc_admin_password to be checked for RPC admin
|
||||
functions. The request must specify these as the admin_user and
|
||||
admin_password in the request object.
|
||||
'';
|
||||
};
|
||||
|
||||
rpcIp = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
IP address or domain to bind to allow insecure RPC connections.
|
||||
Defaults to not binding, which disallows RPC connections.
|
||||
The minimum number of trusted validations a ledger must have before
|
||||
the server considers it fully validated.
|
||||
'';
|
||||
type = types.int;
|
||||
default = 3;
|
||||
};
|
||||
|
||||
rpcPort = mkOption {
|
||||
type = types.int;
|
||||
description = ''
|
||||
If rpcIp is supplied, corresponding port to bind to for peer connections.
|
||||
'';
|
||||
};
|
||||
|
||||
rpcUser = mkOption {
|
||||
type = types.str;
|
||||
ledgerHistory = mkOption {
|
||||
description = ''
|
||||
Require a this user to specified and require rpcPassword to
|
||||
be checked for RPC access via the rpcIp and rpcPort. The user and password
|
||||
must be specified via HTTP's basic authentication method.
|
||||
As a client, supply this to the server via HTTP's basic authentication
|
||||
method.
|
||||
The number of past ledgers to acquire on server startup and the minimum
|
||||
to maintain while running.
|
||||
'';
|
||||
type = types.either types.int (types.enum ["full"]);
|
||||
default = 1296000; # 1 month
|
||||
};
|
||||
|
||||
rpcPassword = mkOption {
|
||||
type = types.str;
|
||||
fetchDepth = mkOption {
|
||||
description = ''
|
||||
Require a this password to specified and require rpc_user to
|
||||
be checked for RPC access via the rpcIp and rpcPort. The user and password
|
||||
must be specified via HTTP's basic authentication method.
|
||||
As a client, supply this to the server via HTTP's basic authentication
|
||||
method.
|
||||
The number of past ledgers to serve to other peers that request historical
|
||||
ledger data (or "full" for no limit).
|
||||
'';
|
||||
type = types.either types.int (types.enum ["full"]);
|
||||
default = "full";
|
||||
};
|
||||
|
||||
rpcStartup = mkOption {
|
||||
example = [ ''"command" : "log_level"'' ''"partition" : "ripplecalc"'' ''"severity" : "trace"'' ];
|
||||
description = "List of RPC commands to run at startup.";
|
||||
};
|
||||
|
||||
rpcSecure = mkOption {
|
||||
default = false;
|
||||
sntpServers = mkOption {
|
||||
description = ''
|
||||
false: Server certificates are not provided for RPC clients using SSL [default]
|
||||
true: Client RPC connections wil be provided with SSL certificates.
|
||||
|
||||
Note that if rpc_secure is enabled, it will also be necessasry to configure the
|
||||
certificate file settings located in rpcSslCert, rpcSslChain, and rpcSslKey
|
||||
IP address or domain of NTP servers to use for time synchronization.;
|
||||
'';
|
||||
type = types.listOf types.str;
|
||||
default = [
|
||||
"time.windows.com"
|
||||
"time.apple.com"
|
||||
"time.nist.gov"
|
||||
"pool.ntp.org"
|
||||
];
|
||||
};
|
||||
|
||||
logLevel = mkOption {
|
||||
description = "Logging verbosity.";
|
||||
type = types.enum ["debug" "error" "info"];
|
||||
default = "error";
|
||||
};
|
||||
*/
|
||||
|
||||
extraConfig = mkOption {
|
||||
default = "";
|
||||
|
@ -275,8 +373,11 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
internal = true;
|
||||
default = pkgs.writeText "rippled.conf" rippledCfg;
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
@ -288,27 +389,21 @@ in
|
|||
{ name = "rippled";
|
||||
description = "Ripple server user";
|
||||
uid = config.ids.uids.rippled;
|
||||
home = "/var/lib/rippled";
|
||||
home = cfg.databasePath;
|
||||
createHome = true;
|
||||
};
|
||||
|
||||
systemd.services.rippled = {
|
||||
path = [ pkgs.rippled ];
|
||||
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.rippled}/bin/rippled --fg -q --conf ${rippledStateCfgFile}";
|
||||
WorkingDirectory = "/var/lib/rippled";
|
||||
ExecStart = "${cfg.package}/bin/rippled --fg --conf ${cfg.config}";
|
||||
User = "rippled";
|
||||
};
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = mkIf (cfg.peerIp != null) [ cfg.peerPort ];
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
system.activationScripts.rippled = ''
|
||||
mkdir -p /var/{lib,log}/rippled
|
||||
chown -R rippled /var/{lib,log}/rippled
|
||||
ln -sf ${rippledCfgFile} ${rippledStateCfgFile}
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
|
|
@ -90,17 +90,9 @@ in {
|
|||
${optionalString cfg.storageDriverSecure "-storage_driver_secure"}
|
||||
''}
|
||||
'';
|
||||
User = "cadvisor";
|
||||
};
|
||||
};
|
||||
|
||||
virtualisation.docker.enable = true;
|
||||
|
||||
users.extraUsers = singleton {
|
||||
name = "cadvisor";
|
||||
uid = config.ids.uids.cadvisor;
|
||||
description = "Cadvisor user";
|
||||
extraGroups = [ "docker" ];
|
||||
};
|
||||
virtualisation.docker.enable = mkDefault true;
|
||||
};
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ let
|
|||
cap=$(sed -nr 's/.*#%#\s+capabilities\s*=\s*(.+)/\1/p' $file)
|
||||
|
||||
wrapProgram $file \
|
||||
--set PATH "/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/sbin" \
|
||||
--set PATH "/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/bin" \
|
||||
--set MUNIN_LIBDIR "${pkgs.munin}/lib" \
|
||||
--set MUNIN_PLUGSTATE "/var/run/munin"
|
||||
|
||||
|
@ -194,7 +194,7 @@ in
|
|||
|
||||
mkdir -p /etc/munin/plugins
|
||||
rm -rf /etc/munin/plugins/*
|
||||
PATH="/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/sbin" ${pkgs.munin}/sbin/munin-node-configure --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${muninPlugins} --servicedir=/etc/munin/plugins 2>/dev/null | ${pkgs.bash}/bin/bash
|
||||
PATH="/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/bin" ${pkgs.munin}/sbin/munin-node-configure --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${muninPlugins} --servicedir=/etc/munin/plugins 2>/dev/null | ${pkgs.bash}/bin/bash
|
||||
'';
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/";
|
||||
|
|
|
@ -20,6 +20,10 @@ let
|
|||
cfg.collectors)}
|
||||
'';
|
||||
|
||||
cmdLineOpts = concatStringsSep " " (
|
||||
[ "-h=${cfg.bosunHost}" "-c=${collectors}" ] ++ cfg.extraOpts
|
||||
);
|
||||
|
||||
in {
|
||||
|
||||
options = {
|
||||
|
@ -79,6 +83,15 @@ in {
|
|||
'';
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
type = with types; listOf str;
|
||||
default = [];
|
||||
example = [ "-d" ];
|
||||
description = ''
|
||||
Extra scollector command line options
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
@ -95,9 +108,7 @@ in {
|
|||
PermissionsStartOnly = true;
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
ExecStart = ''
|
||||
${cfg.package}/bin/scollector -h=${cfg.bosunHost} -c=${collectors}
|
||||
'';
|
||||
ExecStart = "${cfg.package}/bin/scollector ${cmdLineOpts}";
|
||||
};
|
||||
};
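
With the new extraOpts list, any additional flags are appended to cmdLineOpts and end up on the scollector command line built above. A hedged sketch, reusing the option's own example value:

  services.scollector.extraOpts = [ "-d" ];
  # roughly yields: scollector -h=<bosunHost> -c=<collectors> -d
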
|
||||
|
||||
|
|
|
@ -44,7 +44,7 @@ let cfg = config.services.drbd; in
|
|||
|
||||
boot.extraModprobeConfig =
|
||||
''
|
||||
options drbd usermode_helper=/run/current-system/sw/sbin/drbdadm
|
||||
options drbd usermode_helper=/run/current-system/sw/bin/drbdadm
|
||||
'';
|
||||
|
||||
environment.etc = singleton
|
||||
|
|
|
@ -0,0 +1,195 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.aiccu;
|
||||
showBool = b: if b then "true" else "false";
|
||||
notNull = a: ! isNull a;
|
||||
configFile = pkgs.writeText "aiccu.conf" ''
|
||||
${if notNull cfg.username then "username " + cfg.username else ""}
|
||||
${if notNull cfg.password then "password " + cfg.password else ""}
|
||||
protocol ${cfg.protocol}
|
||||
server ${cfg.server}
|
||||
ipv6_interface ${cfg.interfaceName}
|
||||
verbose ${showBool cfg.verbose}
|
||||
daemonize true
|
||||
automatic ${showBool cfg.automatic}
|
||||
requiretls ${showBool cfg.requireTLS}
|
||||
pidfile ${cfg.pidFile}
|
||||
defaultroute ${showBool cfg.defaultRoute}
|
||||
${if notNull cfg.setupScript then cfg.setupScript else ""}
|
||||
makebeats ${showBool cfg.makeHeartBeats}
|
||||
noconfigure ${showBool cfg.noConfigure}
|
||||
behindnat ${showBool cfg.behindNAT}
|
||||
${if cfg.localIPv4Override then "local_ipv4_override" else ""}
|
||||
'';
|
||||
|
||||
in {
|
||||
|
||||
options = {
|
||||
|
||||
services.aiccu = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = "Enable aiccu IPv6 over IPv4 SiXXs tunnel";
|
||||
};
|
||||
|
||||
username = mkOption {
|
||||
type = with types; nullOr str;
|
||||
default = null;
|
||||
example = "FAB5-SIXXS";
|
||||
description = "Login credential";
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
type = with types; nullOr str;
|
||||
default = null;
|
||||
example = "TmAkRbBEr0";
|
||||
description = "Login credential";
|
||||
};
|
||||
|
||||
protocol = mkOption {
|
||||
type = types.str;
|
||||
default = "tic";
|
||||
example = "tic|tsp|l2tp";
|
||||
description = "Protocol to use for setting up the tunnel";
|
||||
};
|
||||
|
||||
server = mkOption {
|
||||
type = types.str;
|
||||
default = "tic.sixxs.net";
|
||||
example = "enabled.ipv6server.net";
|
||||
description = "Server to use for setting up the tunnel";
|
||||
};
|
||||
|
||||
interfaceName = mkOption {
|
||||
type = types.str;
|
||||
default = "aiccu";
|
||||
example = "sixxs";
|
||||
description = ''
|
||||
The name of the interface that will be used as a tunnel interface.
|
||||
On *BSD the ipv6_interface should be set to gifX (e.g. gif0) for proto-41 tunnels
|
||||
or tunX (e.g. tun0) for AYIYA tunnels.
|
||||
'';
|
||||
};
|
||||
|
||||
tunnelID = mkOption {
|
||||
type = with types; nullOr str;
|
||||
default = null;
|
||||
example = "T12345";
|
||||
description = "The tunnel id to use, only required when there are multiple tunnels in the list";
|
||||
};
|
||||
|
||||
verbose = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = "Be verbose?";
|
||||
};
|
||||
|
||||
automatic = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
example = false;
|
||||
description = "Automatic Login and Tunnel activation";
|
||||
};
|
||||
|
||||
requireTLS = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = ''
|
||||
When set to true, if TLS is not supported on the server
|
||||
the TIC transaction will fail.
|
||||
When set to false, it will try a starttls, when that is
|
||||
not supported it will continue.
|
||||
In any case, if AICCU is built with TLS support it will
|
||||
try to do a 'starttls' to the TIC server to see if that
|
||||
is supported.
|
||||
'';
|
||||
};
|
||||
|
||||
pidFile = mkOption {
|
||||
type = types.path;
|
||||
default = "/run/aiccu.pid";
|
||||
example = "/var/lib/aiccu/aiccu.pid";
|
||||
description = "Location of PID File";
|
||||
};
|
||||
|
||||
defaultRoute = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
example = false;
|
||||
description = "Add a default route";
|
||||
};
|
||||
|
||||
setupScript = mkOption {
|
||||
type = with types; nullOr path;
|
||||
default = null;
|
||||
example = "/var/lib/aiccu/fix-subnets.sh";
|
||||
description = "Script to run after setting up the interfaces";
|
||||
};
|
||||
|
||||
makeHeartBeats = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
example = false;
|
||||
description = ''
|
||||
In general you don't want to turn this off.
|
||||
Of course this only applies to AYIYA and heartbeat tunnels, not to static ones.
|
||||
'';
|
||||
};
|
||||
|
||||
noConfigure = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = "Don't configure anything";
|
||||
};
|
||||
|
||||
behindNAT = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = "Notify the user that a NAT-kind network is detected";
|
||||
};
|
||||
|
||||
localIPv4Override = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = ''
|
||||
Overrides the IPv4 parameter received from TIC.
|
||||
This allows one to configure a NAT into "DMZ" mode and then
|
||||
forward the proto-41 packets to an internal host.
|
||||
|
||||
This is only needed for static proto-41 tunnels!
|
||||
AYIYA and heartbeat tunnels don't require this.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
systemd.services.aiccu = {
|
||||
description = "Automatic IPv6 Connectivity Client Utility";
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.aiccu}/bin/aiccu start ${configFile}";
|
||||
ExecStop = "${pkgs.aiccu}/bin/aiccu stop";
|
||||
Type = "forking";
|
||||
PIDFile = cfg.pidFile;
|
||||
Restart = "no"; # aiccu startup errors are serious, do not pound the tic server or be banned.
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
}
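
To show how the options above fit together, here is a sketch of a configuration.nix snippet; it only uses options defined in this file, and the credentials and tunnel ID are the placeholder values from the option examples:

  services.aiccu = {
    enable = true;
    protocol = "tic";                # default; tic|tsp|l2tp
    username = "FAB5-SIXXS";         # placeholder SixXS credentials
    password = "TmAkRbBEr0";
    tunnelID = "T12345";
    interfaceName = "sixxs";
  };
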
|
|
@ -3,24 +3,22 @@
|
|||
let
|
||||
|
||||
inherit (lib) mkOption mkIf singleton;
|
||||
|
||||
inherit (pkgs) ddclient;
|
||||
|
||||
stateDir = "/var/spool/ddclient";
|
||||
|
||||
ddclientUser = "ddclient";
|
||||
|
||||
ddclientFlags = "-foreground -file ${ddclientCfg}";
|
||||
|
||||
ddclientFlags = "-foreground -verbose -noquiet -file ${ddclientCfg}";
|
||||
ddclientPIDFile = "${stateDir}/ddclient.pid";
|
||||
ddclientCfg = pkgs.writeText "ddclient.conf" ''
|
||||
daemon=600
|
||||
cache=${stateDir}/ddclient.cache
|
||||
pid=${stateDir}/ddclient.pid
|
||||
use=${config.services.ddclient.web}
|
||||
pid=${ddclientPIDFile}
|
||||
use=${config.services.ddclient.use}
|
||||
login=${config.services.ddclient.username}
|
||||
password=${config.services.ddclient.password}
|
||||
protocol=${config.services.ddclient.protocol}
|
||||
server=${config.services.ddclient.server}
|
||||
ssl=${if config.services.ddclient.ssl then "yes" else "no"}
|
||||
wildcard=YES
|
||||
${config.services.ddclient.domain}
|
||||
${config.services.ddclient.extraConfig}
|
||||
|
@ -34,10 +32,11 @@ in
|
|||
|
||||
options = {
|
||||
|
||||
services.ddclient = {
|
||||
services.ddclient = with lib.types; {
|
||||
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
type = bool;
|
||||
description = ''
|
||||
Whether to synchronise your machine's IP address with a dynamic DNS provider (e.g. dyndns.org).
|
||||
'';
|
||||
|
@ -45,6 +44,7 @@ in
|
|||
|
||||
domain = mkOption {
|
||||
default = "";
|
||||
type = str;
|
||||
description = ''
|
||||
Domain name to synchronize.
|
||||
'';
|
||||
|
@ -52,76 +52,93 @@ in
|
|||
|
||||
username = mkOption {
|
||||
default = "";
|
||||
type = str;
|
||||
description = ''
|
||||
Username.
|
||||
'';
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
default = "" ;
|
||||
default = "";
|
||||
type = str;
|
||||
description = ''
|
||||
Password.
|
||||
'';
|
||||
};
|
||||
|
||||
protocol = mkOption {
|
||||
default = "dyndns2" ;
|
||||
default = "dyndns2";
|
||||
type = str;
|
||||
description = ''
|
||||
Protocol to use with dynamic DNS provider. (see also, http://sourceforge.net/apps/trac/ddclient/wiki/Protocols)
|
||||
Protocol to use with dynamic DNS provider (see http://sourceforge.net/apps/trac/ddclient/wiki/Protocols).
|
||||
'';
|
||||
};
|
||||
|
||||
server = mkOption {
|
||||
default = "members.dyndns.org" ;
|
||||
default = "";
|
||||
type = str;
|
||||
description = ''
|
||||
Server
|
||||
Server address.
|
||||
'';
|
||||
};
|
||||
|
||||
ssl = mkOption {
|
||||
default = true;
|
||||
type = bool;
|
||||
description = ''
|
||||
Whether to use SSL/TLS to connect to the dynamic DNS provider.
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
default = "" ;
|
||||
default = "";
|
||||
type = str;
|
||||
description = ''
|
||||
Extra configuration. Contents will be added verbatim to the configuration file.
|
||||
'';
|
||||
};
|
||||
|
||||
web = mkOption {
|
||||
default = "web, web=checkip.dyndns.com/, web-skip='Current IP Address: '" ;
|
||||
description = "";
|
||||
use = mkOption {
|
||||
default = "web, web=checkip.dyndns.com/, web-skip='Current IP Address: '";
|
||||
type = str;
|
||||
description = ''
|
||||
Method to determine the IP address to send to the dynamic DNS provider.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf config.services.ddclient.enable {
|
||||
|
||||
|
||||
environment.systemPackages = [ ddclient ];
|
||||
|
||||
users.extraUsers = singleton
|
||||
{ name = ddclientUser;
|
||||
uid = config.ids.uids.ddclient;
|
||||
description = "ddclient daemon user";
|
||||
home = stateDir;
|
||||
users.extraUsers = singleton {
|
||||
name = ddclientUser;
|
||||
uid = config.ids.uids.ddclient;
|
||||
description = "ddclient daemon user";
|
||||
home = stateDir;
|
||||
};
|
||||
|
||||
systemd.services.ddclient = {
|
||||
description = "Dynamic DNS Client";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
# This may change back to forking if too many problems occur:
|
||||
type = "simple";
|
||||
User = ddclientUser;
|
||||
Group = "nogroup"; #TODO get this to work
|
||||
PermissionsStartOnly = "true";
|
||||
PIDFile = ddclientPIDFile;
|
||||
ExecStartPre = ''
|
||||
${pkgs.stdenv.shell} -c "${pkgs.coreutils}/bin/mkdir -m 0755 -p ${stateDir} && ${pkgs.coreutils}/bin/chown ${ddclientUser} ${stateDir}"
|
||||
'';
|
||||
ExecStart = "${ddclient}/bin/ddclient ${ddclientFlags}";
|
||||
#ExecStartPost = "${pkgs.coreutils}/bin/rm -r ${stateDir}"; # Should we have this?
|
||||
};
|
||||
|
||||
jobs.ddclient =
|
||||
{ name = "ddclient";
|
||||
|
||||
startOn = "startup";
|
||||
|
||||
preStart =
|
||||
''
|
||||
mkdir -m 0755 -p ${stateDir}
|
||||
chown ${ddclientUser} ${stateDir}
|
||||
'';
|
||||
|
||||
exec = "${ddclient}/bin/ddclient ${ddclientFlags}";
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
}
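
As a usage sketch for the renamed use option and the systemd unit above, a configuration.nix fragment with placeholder values (provider, domain and credentials are illustrative only):

  services.ddclient = {
    enable = true;
    protocol = "dyndns2";
    server = "members.dyndns.org";
    domain = "example.dyndns.org";
    username = "alice";
    password = "correct-horse-battery-staple";
    # keep the default web-based IP detection
    use = "web, web=checkip.dyndns.com/, web-skip='Current IP Address: '";
  };
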
|
||||
|
|
|
@ -183,6 +183,9 @@ in {
|
|||
{ source = "${networkmanager_pptp}/etc/NetworkManager/VPN/nm-pptp-service.name";
|
||||
target = "NetworkManager/VPN/nm-pptp-service.name";
|
||||
}
|
||||
{ source = "${networkmanager_l2tp}/etc/NetworkManager/VPN/nm-l2tp-service.name";
|
||||
target = "NetworkManager/VPN/nm-l2tp-service.name";
|
||||
}
|
||||
] ++ optional (cfg.appendNameservers == [] || cfg.insertNameservers == [])
|
||||
{ source = overrideNameserversScript;
|
||||
target = "NetworkManager/dispatcher.d/02overridedns";
|
||||
|
@ -197,6 +200,7 @@ in {
|
|||
networkmanager_vpnc
|
||||
networkmanager_openconnect
|
||||
networkmanager_pptp
|
||||
networkmanager_l2tp
|
||||
modemmanager
|
||||
];
|
||||
|
||||
|
@ -240,6 +244,7 @@ in {
|
|||
networkmanager_vpnc
|
||||
networkmanager_openconnect
|
||||
networkmanager_pptp
|
||||
networkmanager_l2tp
|
||||
modemmanager
|
||||
];
|
||||
|
||||
|
|
|
@ -9,6 +9,14 @@ let
|
|||
stateDir = "/var/lib/nsd";
|
||||
pidFile = stateDir + "/var/nsd.pid";
|
||||
|
||||
nsdPkg = pkgs.nsd.override {
|
||||
bind8Stats = cfg.bind8Stats;
|
||||
ipv6 = cfg.ipv6;
|
||||
ratelimit = cfg.ratelimit.enable;
|
||||
rootServer = cfg.rootServer;
|
||||
zoneStats = length (collect (x: (x.zoneStats or null) != null) cfg.zones) > 0;
|
||||
};
|
||||
|
||||
zoneFiles = pkgs.stdenv.mkDerivation {
|
||||
preferLocalBuild = true;
|
||||
name = "nsd-env";
|
||||
|
@ -107,6 +115,7 @@ let
|
|||
zone:
|
||||
name: "${name}"
|
||||
zonefile: "${stateDir}/zones/${name}"
|
||||
${maybeString "zonestats: " zone.zoneStats}
|
||||
${maybeString "outgoing-interface: " zone.outgoingInterface}
|
||||
${forEach " rrl-whitelist: " zone.rrlWhitelist}
|
||||
|
||||
|
@ -270,6 +279,19 @@ let
|
|||
Use imports or pkgs.lib.readFile if you don't want this data in your config file.
|
||||
'';
|
||||
};
|
||||
|
||||
zoneStats = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "%s";
|
||||
description = ''
|
||||
When set to something other than null, NSD is able to collect
|
||||
statistics per zone. All statistics of this zone(s) will be added
|
||||
to the group specified by this name. Use "%s" to use the zone's
|
||||
name as the group. The groups are output from nsd-control stats
|
||||
and stats_noreset.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -286,6 +308,15 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
bind8Stats = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = ''
|
||||
Whether to enable BIND8-like statistics.
|
||||
'';
|
||||
};
|
||||
|
||||
rootServer = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
|
@ -659,13 +690,6 @@ in
|
|||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
# this is not working :(
|
||||
nixpkgs.config.nsd = {
|
||||
ipv6 = cfg.ipv6;
|
||||
ratelimit = cfg.ratelimit.enable;
|
||||
rootServer = cfg.rootServer;
|
||||
};
|
||||
|
||||
users.extraGroups = singleton {
|
||||
name = username;
|
||||
gid = config.ids.gids.nsd;
|
||||
|
@ -688,7 +712,7 @@ in
|
|||
serviceConfig = {
|
||||
PIDFile = pidFile;
|
||||
Restart = "always";
|
||||
ExecStart = "${pkgs.nsd}/sbin/nsd -d -c ${configFile}";
|
||||
ExecStart = "${nsdPkg}/sbin/nsd -d -c ${configFile}";
|
||||
};
|
||||
|
||||
preStart = ''
|
||||
|
|
|
@ -9,12 +9,6 @@ let
|
|||
|
||||
nssModulesPath = config.system.nssModules.path;
|
||||
|
||||
permitRootLoginCheck = v:
|
||||
v == "yes" ||
|
||||
v == "without-password" ||
|
||||
v == "forced-commands-only" ||
|
||||
v == "no";
|
||||
|
||||
knownHosts = map (h: getAttr h cfg.knownHosts) (attrNames cfg.knownHosts);
|
||||
|
||||
knownHostsText = flip (concatMapStringsSep "\n") knownHosts
|
||||
|
@ -116,12 +110,9 @@ in
|
|||
|
||||
permitRootLogin = mkOption {
|
||||
default = "without-password";
|
||||
type = types.addCheck types.str permitRootLoginCheck;
|
||||
type = types.enum ["yes" "without-password" "forced-commands-only" "no"];
|
||||
description = ''
|
||||
Whether the root user can login using ssh. Valid values are
|
||||
<literal>yes</literal>, <literal>without-password</literal>,
|
||||
<literal>forced-commands-only</literal> or
|
||||
<literal>no</literal>.
|
||||
Whether the root user can login using ssh.
|
||||
'';
|
||||
};
|
||||
|
||||
|
|
|
@ -0,0 +1,61 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.munge;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
|
||||
services.munge = {
|
||||
enable = mkEnableOption "munge service";
|
||||
|
||||
password = mkOption {
|
||||
default = "/etc/munge/munge.key";
|
||||
type = types.string;
|
||||
description = ''
|
||||
The path to a daemon's secret key.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
environment.systemPackages = [ pkgs.munge ];
|
||||
|
||||
systemd.services.munged = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
|
||||
path = [ pkgs.munge pkgs.coreutils ];
|
||||
|
||||
preStart = ''
|
||||
chmod 0700 ${cfg.password}
|
||||
mkdir -p /var/lib/munge -m 0711
|
||||
mkdir -p /var/log/munge -m 0700
|
||||
mkdir -p /run/munge -m 0755
|
||||
'';
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.munge}/bin/munged --syslog --key-file ${cfg.password}";
|
||||
PIDFile = "/run/munge/munged.pid";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
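
A minimal usage sketch for the new module; the key path is the module's default, and the key itself has to be provisioned out of band (for example with dd if=/dev/urandom of=/etc/munge/munge.key bs=1 count=1024, which is one common way and not mandated by the module):

  services.munge = {
    enable = true;
    password = "/etc/munge/munge.key";   # module default, shown for clarity
  };
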
|
|
@ -130,6 +130,9 @@ in
|
|||
config.system.path
|
||||
];
|
||||
|
||||
# Don't restart dbus-daemon. Bad things tend to happen if we do.
|
||||
systemd.services.dbus.reloadIfChanged = true;
|
||||
|
||||
environment.pathsToLink = [ "/etc/dbus-1" "/share/dbus-1" ];
|
||||
|
||||
};
|
||||
|
|
|
@ -384,8 +384,7 @@ rec {
|
|||
};
|
||||
|
||||
adminPassword = mkOption {
|
||||
description = "The admin password for accessing owncloud.
|
||||
Warning: this is stored in cleartext in the Nix store!";
|
||||
description = "The admin password for accessing owncloud.";
|
||||
};
|
||||
|
||||
dbType = mkOption {
|
||||
|
@ -571,7 +570,7 @@ rec {
|
|||
|
||||
chown wwwrun:wwwrun ${config.dataDir}/owncloud.log || true
|
||||
|
||||
QUERY="INSERT INTO groups (gid) values('admin'); INSERT INTO users (uid,password) values('${config.adminUser}','`echo -n "${config.adminPassword}" | ${pkgs.openssl}/bin/openssl dgst -sha1 | ${pkgs.gawk}/bin/awk '{print $2}'`'); INSERT INTO group_user (gid,uid) values('admin','${config.adminUser}');"
|
||||
QUERY="INSERT INTO groups (gid) values('admin'); INSERT INTO users (uid,password) values('${config.adminUser}','${builtins.hashString "sha1" config.adminPassword}'); INSERT INTO group_user (gid,uid) values('admin','${config.adminUser}');"
|
||||
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/psql -h "/tmp" -U postgres -d ${config.dbName} -Atw -c "$QUERY" || true
|
||||
'';
|
||||
}
|
||||
|
|
|
@ -102,6 +102,9 @@ in
|
|||
'';
|
||||
serviceConfig = {
|
||||
ExecStart = "${nginx}/bin/nginx -c ${configFile} -p ${cfg.stateDir}";
|
||||
Restart = "on-failure";
|
||||
RestartSec = "10s";
|
||||
StartLimitInterval = "1min";
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ in
|
|||
# E.g., if KDE is enabled, it supersedes xterm.
|
||||
imports = [
|
||||
./none.nix ./xterm.nix ./xfce.nix ./kde4.nix ./kde5.nix
|
||||
./e19.nix ./gnome3.nix ./xbmc.nix ./kodi.nix
|
||||
./e19.nix ./gnome3.nix ./kodi.nix
|
||||
];
|
||||
|
||||
options = {
|
||||
|
|
|
@ -1,31 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.xserver.desktopManager.xbmc;
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
services.xserver.desktopManager.xbmc = {
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
example = true;
|
||||
description = "Enable the xbmc multimedia center.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.xserver.desktopManager.session = [{
|
||||
name = "xbmc";
|
||||
start = ''
|
||||
${pkgs.xbmc}/bin/xbmc --lircdev /var/run/lirc/lircd --standalone &
|
||||
waitPID=$!
|
||||
'';
|
||||
}];
|
||||
|
||||
environment.systemPackages = [ pkgs.xbmc ];
|
||||
};
|
||||
}
|
|
@ -55,7 +55,7 @@ let
|
|||
[UserList]
|
||||
minimum-uid=500
|
||||
hidden-users=${concatStringsSep " " dmcfg.hiddenUsers}
|
||||
hidden-shells=/run/current-system/sw/sbin/nologin
|
||||
hidden-shells=/run/current-system/sw/bin/nologin
|
||||
'';
|
||||
|
||||
lightdmConf = writeText "lightdm.conf"
|
||||
|
|
|
@ -26,7 +26,7 @@ let
|
|||
[Users]
|
||||
MaximumUid=${toString config.ids.uids.nixbld}
|
||||
HideUsers=${concatStringsSep "," dmcfg.hiddenUsers}
|
||||
HideShells=/run/current-system/sw/sbin/nologin
|
||||
HideShells=/run/current-system/sw/bin/nologin
|
||||
|
||||
[XDisplay]
|
||||
MinimumVT=${toString xcfg.tty}
|
||||
|
|
|
@ -18,6 +18,7 @@ in
|
|||
./openbox.nix
|
||||
./sawfish.nix
|
||||
./stumpwm.nix
|
||||
./spectrwm.nix
|
||||
./twm.nix
|
||||
./windowmaker.nix
|
||||
./wmii.nix
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.xserver.windowManager.spectrwm;
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
services.xserver.windowManager.spectrwm = {
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
example = true;
|
||||
description = "Enable the spectrwm window manager.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.xserver.windowManager = {
|
||||
session = [{
|
||||
name = "spectrwm";
|
||||
start = ''
|
||||
${pkgs.spectrwm}/bin/spectrwm &
|
||||
waitPID=$!
|
||||
'';
|
||||
}];
|
||||
};
|
||||
environment.systemPackages = [ pkgs.spectrwm ];
|
||||
};
|
||||
}
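
A short sketch of selecting the new window manager from configuration.nix; windowManager.default is an assumption about the surrounding xserver module and merely picks the session name registered above:

  services.xserver = {
    enable = true;
    windowManager.spectrwm.enable = true;
    windowManager.default = "spectrwm";   # assumed option of the parent module
  };
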
|
|
@ -384,9 +384,13 @@ system("@systemd@/bin/systemctl", "reset-failed");
|
|||
# Make systemd reload its units.
|
||||
system("@systemd@/bin/systemctl", "daemon-reload") == 0 or $res = 3;
|
||||
|
||||
# Signal dbus to reload its configuration before starting other units.
|
||||
# Other units may rely on newly installed policy files under /etc/dbus-1
|
||||
system("@systemd@/bin/systemctl", "reload-or-restart", "dbus.service");
|
||||
# Reload units that need it. This includes remounting changed mount
|
||||
# units.
|
||||
if (scalar(keys %unitsToReload) > 0) {
|
||||
print STDERR "reloading the following units: ", join(", ", sort(keys %unitsToReload)), "\n";
|
||||
system("@systemd@/bin/systemctl", "reload", "--", sort(keys %unitsToReload)) == 0 or $res = 4;
|
||||
unlink($reloadListFile);
|
||||
}
|
||||
|
||||
# Restart changed services (those that have to be restarted rather
|
||||
# than stopped and started).
|
||||
|
@ -407,14 +411,6 @@ print STDERR "starting the following units: ", join(", ", @unitsToStartFiltered)
|
|||
system("@systemd@/bin/systemctl", "start", "--", sort(keys %unitsToStart)) == 0 or $res = 4;
|
||||
unlink($startListFile);
|
||||
|
||||
# Reload units that need it. This includes remounting changed mount
|
||||
# units.
|
||||
if (scalar(keys %unitsToReload) > 0) {
|
||||
print STDERR "reloading the following units: ", join(", ", sort(keys %unitsToReload)), "\n";
|
||||
system("@systemd@/bin/systemctl", "reload", "--", sort(keys %unitsToReload)) == 0 or $res = 4;
|
||||
unlink($reloadListFile);
|
||||
}
|
||||
|
||||
|
||||
# Print failed and new units.
|
||||
my (@failed, @new, @restarting);
|
||||
|
|
|
@ -405,29 +405,19 @@ in
|
|||
|
||||
# copy the cryptsetup binary and its dependencies
|
||||
boot.initrd.extraUtilsCommands = ''
|
||||
cp -pdv ${pkgs.cryptsetup}/sbin/cryptsetup $out/bin
|
||||
|
||||
cp -pdv ${pkgs.libgcrypt}/lib/libgcrypt*.so.* $out/lib
|
||||
cp -pdv ${pkgs.libgpgerror}/lib/libgpg-error*.so.* $out/lib
|
||||
cp -pdv ${pkgs.cryptsetup}/lib/libcryptsetup*.so.* $out/lib
|
||||
cp -pdv ${pkgs.popt}/lib/libpopt*.so.* $out/lib
|
||||
copy_bin_and_libs ${pkgs.cryptsetup}/bin/cryptsetup
|
||||
|
||||
${optionalString luks.yubikeySupport ''
|
||||
cp -pdv ${pkgs.ykpers}/bin/ykchalresp $out/bin
|
||||
cp -pdv ${pkgs.ykpers}/bin/ykinfo $out/bin
|
||||
cp -pdv ${pkgs.openssl}/bin/openssl $out/bin
|
||||
copy_bin_and_libs ${pkgs.ykpers}/bin/ykchalresp
|
||||
copy_bin_and_libs ${pkgs.ykpers}/bin/ykinfo
|
||||
copy_bin_and_libs ${pkgs.openssl}/bin/openssl
|
||||
|
||||
cc -O3 -I${pkgs.openssl}/include -L${pkgs.openssl}/lib ${./pbkdf2-sha512.c} -o $out/bin/pbkdf2-sha512 -lcrypto
|
||||
strip -s $out/bin/pbkdf2-sha512
|
||||
cc -O3 -I${pkgs.openssl}/include -L${pkgs.openssl}/lib ${./pbkdf2-sha512.c} -o pbkdf2-sha512 -lcrypto
|
||||
strip -s pbkdf2-sha512
|
||||
copy_bin_and_libs pbkdf2-sha512
|
||||
|
||||
cp -pdv ${pkgs.libusb1}/lib/libusb*.so.* $out/lib
|
||||
cp -pdv ${pkgs.ykpers}/lib/libykpers*.so.* $out/lib
|
||||
cp -pdv ${pkgs.libyubikey}/lib/libyubikey*.so.* $out/lib
|
||||
cp -pdv ${pkgs.openssl}/lib/libssl*.so.* $out/lib
|
||||
cp -pdv ${pkgs.openssl}/lib/libcrypto*.so.* $out/lib
|
||||
|
||||
mkdir -p $out/etc/ssl
|
||||
cp -pdv ${pkgs.openssl}/etc/ssl/openssl.cnf $out/etc/ssl
|
||||
mkdir -p $out/etc/ssl
|
||||
cp -pdv ${pkgs.openssl}/etc/ssl/openssl.cnf $out/etc/ssl
|
||||
|
||||
cat > $out/bin/openssl-wrap <<EOF
|
||||
#!$out/bin/sh
|
||||
|
|
|
@ -39,46 +39,60 @@ let
|
|||
mkdir -p $out/bin $out/lib
|
||||
ln -s $out/bin $out/sbin
|
||||
|
||||
# Copy what we need from Glibc.
|
||||
cp -pv ${pkgs.glibc}/lib/ld*.so.? $out/lib
|
||||
cp -pv ${pkgs.glibc}/lib/libc.so.* $out/lib
|
||||
cp -pv ${pkgs.glibc}/lib/libm.so.* $out/lib
|
||||
cp -pv ${pkgs.glibc}/lib/libpthread.so.* $out/lib
|
||||
cp -pv ${pkgs.glibc}/lib/librt.so.* $out/lib
|
||||
cp -pv ${pkgs.glibc}/lib/libdl.so.* $out/lib
|
||||
cp -pv ${pkgs.gcc.cc}/lib*/libgcc_s.so.* $out/lib
|
||||
copy_bin_and_libs () {
|
||||
[ -f "$out/bin/$(basename $1)" ] && rm "$out/bin/$(basename $1)"
|
||||
cp -pdv $1 $out/bin
|
||||
}
|
||||
|
||||
# Copy BusyBox.
|
||||
cp -pvd ${pkgs.busybox}/bin/* ${pkgs.busybox}/sbin/* $out/bin/
|
||||
for BIN in ${pkgs.busybox}/{s,}bin/*; do
|
||||
copy_bin_and_libs $BIN
|
||||
done
|
||||
|
||||
# Copy some utillinux stuff.
|
||||
cp -vf --remove-destination ${pkgs.utillinux}/sbin/blkid $out/bin
|
||||
cp -pdv ${pkgs.utillinux}/lib/libblkid*.so.* $out/lib
|
||||
cp -pdv ${pkgs.utillinux}/lib/libuuid*.so.* $out/lib
|
||||
copy_bin_and_libs ${pkgs.utillinux}/sbin/blkid
|
||||
|
||||
# Copy dmsetup and lvm.
|
||||
cp -v ${pkgs.lvm2}/sbin/dmsetup $out/bin/dmsetup
|
||||
cp -v ${pkgs.lvm2}/sbin/lvm $out/bin/lvm
|
||||
cp -v ${pkgs.lvm2}/lib/libdevmapper.so.*.* $out/lib
|
||||
cp -v ${pkgs.systemd}/lib/libsystemd.so.* $out/lib
|
||||
copy_bin_and_libs ${pkgs.lvm2}/sbin/dmsetup
|
||||
copy_bin_and_libs ${pkgs.lvm2}/sbin/lvm
|
||||
|
||||
# Add RAID mdadm tool.
|
||||
cp -v ${pkgs.mdadm}/sbin/mdadm $out/bin/mdadm
|
||||
copy_bin_and_libs ${pkgs.mdadm}/sbin/mdadm
|
||||
|
||||
# Copy udev.
|
||||
cp -v ${udev}/lib/systemd/systemd-udevd ${udev}/bin/udevadm $out/bin
|
||||
cp -v ${udev}/lib/udev/*_id $out/bin
|
||||
cp -pdv ${udev}/lib/libudev.so.* $out/lib
|
||||
cp -v ${pkgs.kmod}/lib/libkmod.so.* $out/lib
|
||||
cp -v ${pkgs.acl}/lib/libacl.so.* $out/lib
|
||||
cp -v ${pkgs.attr}/lib/libattr.so.* $out/lib
|
||||
copy_bin_and_libs ${udev}/lib/systemd/systemd-udevd
|
||||
copy_bin_and_libs ${udev}/bin/udevadm
|
||||
for BIN in ${udev}/lib/udev/*_id; do
|
||||
copy_bin_and_libs $BIN
|
||||
done
|
||||
|
||||
# Copy modprobe.
|
||||
cp -v ${pkgs.kmod}/bin/kmod $out/bin/
|
||||
copy_bin_and_libs ${pkgs.kmod}/bin/kmod
|
||||
ln -sf kmod $out/bin/modprobe
|
||||
|
||||
${config.boot.initrd.extraUtilsCommands}
|
||||
|
||||
# Copy ld manually since it isn't detected correctly
|
||||
cp -pv ${pkgs.glibc}/lib/ld*.so.? $out/lib
|
||||
|
||||
# Copy all of the needed libraries for the binaries
|
||||
for BIN in $(find $out/{bin,sbin} -type f); do
|
||||
echo "Copying libs for bin $BIN"
|
||||
LDD="$(ldd $BIN)" || continue
|
||||
LIBS="$(echo "$LDD" | awk '{print $3}' | sed '/^$/d')"
|
||||
for LIB in $LIBS; do
|
||||
[ ! -f "$out/lib/$(basename $LIB)" ] && cp -pdv $LIB $out/lib
|
||||
while [ "$(readlink $LIB)" != "" ]; do
|
||||
LINK="$(readlink $LIB)"
|
||||
if [ "${LINK:0:1}" != "/" ]; then
|
||||
LINK="$(dirname $LIB)/$LINK"
|
||||
fi
|
||||
LIB="$LINK"
|
||||
[ ! -f "$out/lib/$(basename $LIB)" ] && cp -pdv $LIB $out/lib
|
||||
done
|
||||
done
|
||||
done
|
||||
|
||||
# Strip binaries further than normal.
|
||||
chmod -R u+w $out
|
||||
stripDirs "lib bin" "-s"
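
Because boot.initrd.extraUtilsCommands is now spliced in after copy_bin_and_libs is defined, downstream configuration can use the helper instead of copying libraries by hand; the library-copying loop above still resolves the shared libraries. A hedged sketch (smartctl is only an illustration):

  boot.initrd.extraUtilsCommands = ''
    # copy the binary; its libraries are picked up by the ldd pass in the builder
    copy_bin_and_libs ${pkgs.smartmontools}/bin/smartctl
  '';
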
|
||||
|
@ -100,10 +114,11 @@ let
|
|||
echo "testing patched programs..."
|
||||
$out/bin/ash -c 'echo hello world' | grep "hello world"
|
||||
export LD_LIBRARY_PATH=$out/lib
|
||||
$out/bin/mount --help 2>&1 | grep "BusyBox"
|
||||
$out/bin/mount --help 2>&1 | grep -q "BusyBox"
|
||||
$out/bin/blkid --help 2>&1 | grep -q 'libblkid'
|
||||
$out/bin/udevadm --version
|
||||
$out/bin/dmsetup --version 2>&1 | tee -a log | grep "version:"
|
||||
LVM_SYSTEM_DIR=$out $out/bin/lvm version 2>&1 | tee -a log | grep "LVM"
|
||||
$out/bin/dmsetup --version 2>&1 | tee -a log | grep -q "version:"
|
||||
LVM_SYSTEM_DIR=$out $out/bin/lvm version 2>&1 | tee -a log | grep -q "LVM"
|
||||
$out/bin/mdadm --version
|
||||
|
||||
${config.boot.initrd.extraUtilsCommandsTest}
|
||||
|
@ -205,7 +220,7 @@ let
|
|||
# The closure of the init script of boot stage 1 is what we put in
|
||||
# the initial RAM disk.
|
||||
initialRamdisk = pkgs.makeInitrd {
|
||||
inherit (config.boot.initrd) compressor;
|
||||
inherit (config.boot.initrd) compressor prepend;
|
||||
|
||||
contents =
|
||||
[ { object = bootStage1;
|
||||
|
@ -247,6 +262,14 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
boot.initrd.prepend = mkOption {
|
||||
default = [ ];
|
||||
type = types.listOf types.str;
|
||||
description = ''
|
||||
Other initrd files to prepend to the final initrd we are building.
|
||||
'';
|
||||
};
|
||||
|
||||
boot.initrd.checkJournalingFS = mkOption {
|
||||
default = true;
|
||||
type = types.bool;
|
||||
|
|
|
@ -53,7 +53,7 @@ echo "booting system configuration $systemConfig" > /dev/kmsg
|
|||
# Silence chown/chmod to fail gracefully on a readonly filesystem
|
||||
# like squashfs.
|
||||
chown -f 0:30000 /nix/store
|
||||
chmod -f 1775 /nix/store
|
||||
chmod -f 1735 /nix/store
|
||||
if [ -n "@readOnlyStore@" ]; then
|
||||
if ! readonly-mountpoint /nix/store; then
|
||||
mount --bind /nix/store /nix/store
|
||||
|
|
|
@ -13,7 +13,7 @@ let
|
|||
|
||||
makeUnit = name: unit:
|
||||
let
|
||||
pathSafeName = lib.replaceChars ["@" "\\"] ["-" "-"] name;
|
||||
pathSafeName = lib.replaceChars ["@" ":" "\\"] ["-" "-" "-"] name;
|
||||
in
|
||||
if unit.enable then
|
||||
pkgs.runCommand "unit-${pathSafeName}" { preferLocalBuild = true; inherit (unit) text; }
|
||||
|
|
|
@ -17,13 +17,9 @@ in
|
|||
|
||||
boot.initrd.extraUtilsCommands = mkIf inInitrd
|
||||
''
|
||||
mkdir -p $out/bin
|
||||
cp -v ${pkgs.btrfsProgs}/bin/btrfs $out/bin
|
||||
copy_bin_and_libs ${pkgs.btrfsProgs}/bin/btrfs
|
||||
ln -sv btrfs $out/bin/btrfsck
|
||||
ln -sv btrfsck $out/bin/fsck.btrfs
|
||||
# !!! Increases uncompressed initrd by 240k
|
||||
cp -pv ${pkgs.zlib}/lib/libz.so* $out/lib
|
||||
cp -pv ${pkgs.lzo}/lib/liblzo2.so* $out/lib
|
||||
'';
|
||||
|
||||
boot.initrd.extraUtilsCommandsTest = mkIf inInitrd
|
||||
|
|
|
@ -18,7 +18,7 @@ in
|
|||
|
||||
boot.initrd.extraUtilsCommands = mkIf inInitrd
|
||||
''
|
||||
cp -v ${pkgs.cifs_utils}/sbin/mount.cifs $out/bin
|
||||
copy_bin_and_libs ${pkgs.cifs_utils}/sbin/mount.cifs
|
||||
'';
|
||||
|
||||
};
|
||||
|
|
|
@ -10,12 +10,11 @@
|
|||
boot.initrd.extraUtilsCommands =
|
||||
''
|
||||
# Copy e2fsck and friends.
|
||||
cp -v ${pkgs.e2fsprogs}/sbin/e2fsck $out/bin
|
||||
cp -v ${pkgs.e2fsprogs}/sbin/tune2fs $out/bin
|
||||
copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/e2fsck
|
||||
copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/tune2fs
|
||||
ln -sv e2fsck $out/bin/fsck.ext2
|
||||
ln -sv e2fsck $out/bin/fsck.ext3
|
||||
ln -sv e2fsck $out/bin/fsck.ext4
|
||||
cp -pdv ${pkgs.e2fsprogs}/lib/lib*.so.* $out/lib
|
||||
'';
|
||||
|
||||
};
|
||||
|
|
|
@ -13,9 +13,7 @@ in
|
|||
boot.initrd.availableKernelModules = mkIf inInitrd [ "f2fs" ];
|
||||
|
||||
boot.initrd.extraUtilsCommands = mkIf inInitrd ''
|
||||
mkdir -p $out/bin $out/lib
|
||||
cp -v ${pkgs.f2fs-tools}/sbin/fsck.f2fs $out/bin
|
||||
cp -pdv ${pkgs.f2fs-tools}/lib/lib*.so.* $out/lib
|
||||
copy_bin_and_libs ${pkgs.f2fs-tools}/sbin/fsck.f2fs
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
|
|
@ -13,7 +13,7 @@ in
|
|||
boot.initrd.kernelModules = mkIf inInitrd [ "jfs" ];
|
||||
|
||||
boot.initrd.extraUtilsCommands = mkIf inInitrd ''
|
||||
cp -v ${pkgs.jfsutils}/sbin/fsck.jfs "$out/bin/"
|
||||
copy_bin_and_libs ${pkgs.jfsutils}/sbin/fsck.jfs
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
|
|
@ -17,8 +17,8 @@ in
|
|||
|
||||
boot.initrd.extraUtilsCommands = mkIf inInitrd
|
||||
''
|
||||
cp -v ${pkgs.reiserfsprogs}/sbin/reiserfsck $out/bin
|
||||
ln -sv reiserfsck $out/bin/fsck.reiserfs
|
||||
copy_bin_and_libs ${pkgs.reiserfsprogs}/sbin/reiserfsck
|
||||
ln -s reiserfsck $out/bin/fsck.reiserfs
|
||||
'';
|
||||
|
||||
};
|
||||
|
|
|
@ -7,9 +7,8 @@
|
|||
boot.initrd.kernelModules = [ "fuse" ];
|
||||
|
||||
boot.initrd.extraUtilsCommands = ''
|
||||
cp -v ${pkgs.fuse}/lib/libfuse* $out/lib
|
||||
cp -v ${pkgs.fuse}/sbin/mount.fuse $out/bin
|
||||
cp -v ${pkgs.unionfs-fuse}/bin/unionfs $out/bin
|
||||
copy_bin_and_libs ${pkgs.fuse}/sbin/mount.fuse
|
||||
copy_bin_and_libs ${pkgs.unionfs-fuse}/bin/unionfs
|
||||
substitute ${pkgs.unionfs-fuse}/sbin/mount.unionfs-fuse $out/bin/mount.unionfs-fuse \
|
||||
--replace '${pkgs.bash}/bin/bash' /bin/sh \
|
||||
--replace '${pkgs.fuse}/sbin' /bin \
|
||||
|
|
|
@ -17,7 +17,7 @@ in
|
|||
|
||||
boot.initrd.extraUtilsCommands = mkIf inInitrd
|
||||
''
|
||||
cp -v ${pkgs.dosfstools}/sbin/dosfsck $out/bin
|
||||
copy_bin_and_libs ${pkgs.dosfstools}/sbin/dosfsck
|
||||
ln -sv dosfsck $out/bin/fsck.vfat
|
||||
'';
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ in
|
|||
|
||||
boot.initrd.extraUtilsCommands = mkIf inInitrd
|
||||
''
|
||||
cp -v ${pkgs.xfsprogs}/sbin/fsck.xfs $out/bin
|
||||
copy_bin_and_libs ${pkgs.xfsprogs}/sbin/fsck.xfs
|
||||
'';
|
||||
|
||||
# Trick just to set 'sh' after the extraUtils nuke-refs.
|
||||
|
|
|
@ -55,8 +55,7 @@ in
|
|||
boot.zfs = {
|
||||
useGit = mkOption {
|
||||
type = types.bool;
|
||||
# TODO(wkennington): Revert when 0.6.4 is out
|
||||
default = versionAtLeast config.boot.kernelPackages.kernel.version "3.19";
|
||||
default = false;
|
||||
example = true;
|
||||
description = ''
|
||||
Use the git version of the SPL and ZFS packages.
|
||||
|
@ -204,11 +203,14 @@ in
|
|||
kernelModules = [ "spl" "zfs" ];
|
||||
extraUtilsCommands =
|
||||
''
|
||||
cp -v ${zfsUserPkg}/sbin/zfs $out/bin
|
||||
cp -v ${zfsUserPkg}/sbin/zdb $out/bin
|
||||
cp -v ${zfsUserPkg}/sbin/zpool $out/bin
|
||||
cp -pdv ${zfsUserPkg}/lib/lib*.so* $out/lib
|
||||
cp -pdv ${pkgs.zlib}/lib/lib*.so* $out/lib
|
||||
copy_bin_and_libs ${zfsUserPkg}/sbin/zfs
|
||||
copy_bin_and_libs ${zfsUserPkg}/sbin/zdb
|
||||
copy_bin_and_libs ${zfsUserPkg}/sbin/zpool
|
||||
'';
|
||||
extraUtilsCommandsTest = mkIf inInitrd
|
||||
''
|
||||
$out/bin/zfs --help >/dev/null 2>&1
|
||||
$out/bin/zpool --help >/dev/null 2>&1
|
||||
'';
|
||||
postDeviceCommands = concatStringsSep "\n" ([''
|
||||
ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"
|
||||
|
|
|
@ -16,7 +16,7 @@ with lib;
|
|||
Enable sensitivity and speed configuration for trackpoints.
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
sensitivity = mkOption {
|
||||
default = 128;
|
||||
example = 255;
|
||||
|
@ -44,7 +44,7 @@ with lib;
|
|||
Enable scrolling while holding the middle mouse button.
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
@ -70,7 +70,7 @@ with lib;
|
|||
''
|
||||
Section "InputClass"
|
||||
Identifier "Trackpoint Wheel Emulation"
|
||||
MatchProduct "TPPS/2 IBM TrackPoint|DualPoint Stick|Synaptics Inc. Composite TouchPad / TrackPoint|ThinkPad USB Keyboard with TrackPoint|USB Trackpoint pointing device|Composite TouchPad / TrackPoint"
|
||||
MatchProduct "Elantech PS/2 TrackPoint|TPPS/2 IBM TrackPoint|DualPoint Stick|Synaptics Inc. Composite TouchPad / TrackPoint|ThinkPad USB Keyboard with TrackPoint|USB Trackpoint pointing device|Composite TouchPad / TrackPoint"
|
||||
MatchDevicePath "/dev/input/event*"
|
||||
Option "EmulateWheel" "true"
|
||||
Option "EmulateWheelButton" "2"
|
||||
|
|
|
@ -165,7 +165,7 @@ in
|
|||
boot.initrd.extraUtilsCommands =
|
||||
''
|
||||
# We need swapon in the initrd.
|
||||
cp --remove-destination ${pkgs.utillinux}/sbin/swapon $out/bin
|
||||
copy_bin_and_libs ${pkgs.utillinux}/sbin/swapon
|
||||
'';
|
||||
|
||||
# Don't put old configurations in the GRUB menu. The user has no
|
||||
|
|
|
@ -7,6 +7,9 @@ in
|
|||
{
|
||||
imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ];
|
||||
|
||||
# https://cloud.google.com/compute/docs/tutorials/building-images
|
||||
networking.firewall.enable = mkDefault false;
|
||||
|
||||
system.build.googleComputeImage =
|
||||
pkgs.vmTools.runInLinuxVM (
|
||||
pkgs.runCommand "google-compute-image"
|
||||
|
@ -95,6 +98,7 @@ in
|
|||
|
||||
boot.kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
|
||||
boot.initrd.kernelModules = [ "virtio_scsi" ];
|
||||
boot.kernelModules = [ "virtio_pci" "virtio_net" ];
|
||||
|
||||
# Generate a GRUB menu. Amazon's pv-grub uses this to boot our kernel/initrd.
|
||||
boot.loader.grub.device = "/dev/sda";
|
||||
|
@ -108,6 +112,7 @@ in
|
|||
# at instance creation time.
|
||||
services.openssh.enable = true;
|
||||
services.openssh.permitRootLogin = "without-password";
|
||||
services.openssh.passwordAuthentication = mkDefault false;
|
||||
|
||||
# Force getting the hostname from Google Compute.
|
||||
networking.hostName = mkDefault "";
|
||||
|
@ -178,5 +183,79 @@ in
|
|||
serviceConfig.RemainAfterExit = true;
|
||||
serviceConfig.StandardError = "journal+console";
|
||||
serviceConfig.StandardOutput = "journal+console";
|
||||
};
|
||||
};
|
||||
|
||||
# Settings taken from https://cloud.google.com/compute/docs/tutorials/building-images#providedkernel
|
||||
boot.kernel.sysctl = {
|
||||
# enables syn flood protection
|
||||
"net.ipv4.tcp_syncookies" = mkDefault "1";
|
||||
|
||||
# ignores source-routed packets
|
||||
"net.ipv4.conf.all.accept_source_route" = mkDefault "0";
|
||||
|
||||
# ignores source-routed packets
|
||||
"net.ipv4.conf.default.accept_source_route" = mkDefault "0";
|
||||
|
||||
# ignores ICMP redirects
|
||||
"net.ipv4.conf.all.accept_redirects" = mkDefault "0";
|
||||
|
||||
# ignores ICMP redirects
|
||||
"net.ipv4.conf.default.accept_redirects" = mkDefault "0";
|
||||
|
||||
# ignores ICMP redirects from non-GW hosts
|
||||
"net.ipv4.conf.all.secure_redirects" = mkDefault "1";
|
||||
|
||||
# ignores ICMP redirects from non-GW hosts
|
||||
"net.ipv4.conf.default.secure_redirects" = mkDefault "1";
|
||||
|
||||
# don't allow traffic between networks or act as a router
|
||||
"net.ipv4.ip_forward" = mkDefault "0";
|
||||
|
||||
# don't allow traffic between networks or act as a router
|
||||
"net.ipv4.conf.all.send_redirects" = mkDefault "0";
|
||||
|
||||
# don't allow traffic between networks or act as a router
|
||||
"net.ipv4.conf.default.send_redirects" = mkDefault "0";
|
||||
|
||||
# reverse path filtering - IP spoofing protection
|
||||
"net.ipv4.conf.all.rp_filter" = mkDefault "1";
|
||||
|
||||
# reverse path filtering - IP spoofing protection
|
||||
"net.ipv4.conf.default.rp_filter" = mkDefault "1";
|
||||
|
||||
# ignores ICMP broadcasts to avoid participating in Smurf attacks
|
||||
"net.ipv4.icmp_echo_ignore_broadcasts" = mkDefault "1";
|
||||
|
||||
# ignores bad ICMP errors
|
||||
"net.ipv4.icmp_ignore_bogus_error_responses" = mkDefault "1";
|
||||
|
||||
# logs spoofed, source-routed, and redirect packets
|
||||
"net.ipv4.conf.all.log_martians" = mkDefault "1";
|
||||
|
||||
# log spoofed, source-routed, and redirect packets
|
||||
"net.ipv4.conf.default.log_martians" = mkDefault "1";
|
||||
|
||||
# implements RFC 1337 fix
|
||||
"net.ipv4.tcp_rfc1337" = mkDefault "1";
|
||||
|
||||
# randomizes addresses of mmap base, heap, stack and VDSO page
|
||||
"kernel.randomize_va_space" = mkDefault "2";
|
||||
|
||||
# provides protection from ToCToU races
|
||||
"fs.protected_hardlinks" = mkDefault "1";
|
||||
|
||||
# provides protection from ToCToU races
|
||||
"fs.protected_symlinks" = mkDefault "1";
|
||||
|
||||
# makes locating kernel addresses more difficult
|
||||
"kernel.kptr_restrict" = mkDefault "1";
|
||||
|
||||
# set ptrace protections
|
||||
"kernel.yama.ptrace_scope" = mkDefault "1";
|
||||
|
||||
# set perf only available to root
|
||||
"kernel.perf_event_paranoid" = mkDefault "2";
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ Usage: nixos-container list
|
|||
nixos-container start <container-name>
|
||||
nixos-container stop <container-name>
|
||||
nixos-container status <container-name>
|
||||
nixos-container update <container-name> [--config <string>]
|
||||
nixos-container login <container-name>
|
||||
nixos-container root-login <container-name>
|
||||
nixos-container run <container-name> -- args...
|
||||
|
|
|
@ -346,7 +346,7 @@ in
|
|||
boot.initrd.extraUtilsCommands =
|
||||
''
|
||||
# We need mke2fs in the initrd.
|
||||
cp -vf --remove-destination ${pkgs.e2fsprogs}/sbin/mke2fs $out/bin
|
||||
copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/mke2fs
|
||||
'';
|
||||
|
||||
boot.initrd.postDeviceCommands =
|
||||
|
|
|
@ -31,7 +31,7 @@ in rec {
|
|||
nixpkgs = nixpkgsSrc;
|
||||
})) [ "unstable" ];
|
||||
|
||||
tested = pkgs.releaseTools.aggregate {
|
||||
tested = pkgs.lib.hydraJob (pkgs.releaseTools.aggregate {
|
||||
name = "nixos-${nixos.channel.version}";
|
||||
meta = {
|
||||
description = "Release-critical builds for the NixOS channel";
|
||||
|
@ -57,6 +57,7 @@ in rec {
|
|||
(all nixos.tests.installer.simple)
|
||||
(all nixos.tests.installer.simpleLabels)
|
||||
(all nixos.tests.installer.simpleProvided)
|
||||
(all nixos.tests.installer.swraid)
|
||||
(all nixos.tests.installer.btrfsSimple)
|
||||
(all nixos.tests.installer.btrfsSubvols)
|
||||
(all nixos.tests.installer.btrfsSubvolDefault)
|
||||
|
@ -85,6 +86,6 @@ in rec {
|
|||
nixpkgs.tarball
|
||||
(all nixpkgs.emacs)
|
||||
];
|
||||
};
|
||||
});
|
||||
|
||||
}
|
||||
|
|
|
@ -79,7 +79,7 @@ in rec {
|
|||
vim;
|
||||
};
|
||||
|
||||
tested = pkgs.releaseTools.aggregate {
|
||||
tested = lib.hydraJob (pkgs.releaseTools.aggregate {
|
||||
name = "nixos-${nixos.channel.version}";
|
||||
meta = {
|
||||
description = "Release-critical builds for the NixOS channel";
|
||||
|
@ -88,6 +88,6 @@ in rec {
|
|||
constituents =
|
||||
let all = x: map (system: x.${system}) supportedSystems; in
|
||||
[ nixpkgs.tarball ] ++ lib.collect lib.isDerivation nixos;
|
||||
};
|
||||
});
|
||||
|
||||
}
|
||||
|
|
|
@ -3,22 +3,20 @@
|
|||
, supportedSystems ? [ "x86_64-linux" "i686-linux" ]
|
||||
}:
|
||||
|
||||
with import ../lib;
|
||||
|
||||
let
|
||||
|
||||
version = builtins.readFile ../.version;
|
||||
versionSuffix =
|
||||
(if stableBranch then "." else "pre") + "${toString nixpkgs.revCount}.${nixpkgs.shortRev}";
|
||||
|
||||
forAllSystems = pkgs.lib.genAttrs supportedSystems;
|
||||
forAllSystems = genAttrs supportedSystems;
|
||||
|
||||
scrubDrv = drv: let res = { inherit (drv) drvPath outPath type name system meta; outputName = "out"; out = res; }; in res;
|
||||
|
||||
callTest = fn: args: forAllSystems (system: scrubDrv (import fn ({ inherit system; } // args)));
|
||||
callTest = fn: args: forAllSystems (system: hydraJob (import fn ({ inherit system; } // args)));
|
||||
|
||||
pkgs = import nixpkgs { system = "x86_64-linux"; };
|
||||
|
||||
lib = pkgs.lib;
|
||||
|
||||
|
||||
versionModule =
|
||||
{ system.nixosVersionSuffix = versionSuffix;
|
||||
|
@ -42,10 +40,10 @@ let
|
|||
|
||||
in
|
||||
# Declare the ISO as a build product so that it shows up in Hydra.
|
||||
scrubDrv (runCommand "nixos-iso-${config.system.nixosVersion}"
|
||||
hydraJob (runCommand "nixos-iso-${config.system.nixosVersion}"
|
||||
{ meta = {
|
||||
description = "NixOS installation CD (${description}) - ISO image for ${system}";
|
||||
maintainers = map (x: lib.getAttr x lib.maintainers) maintainers;
|
||||
maintainers = map (x: lib.maintainers.${x}) maintainers;
|
||||
};
|
||||
inherit iso;
|
||||
passthru = { inherit config; };
|
||||
|
@ -74,7 +72,7 @@ let
|
|||
tarball //
|
||||
{ meta = {
|
||||
description = "NixOS system tarball for ${system} - ${stdenv.platform.name}";
|
||||
maintainers = map (x: lib.getAttr x lib.maintainers) maintainers;
|
||||
maintainers = map (x: lib.maintainers.${x}) maintainers;
|
||||
};
|
||||
inherit config;
|
||||
};
|
||||
|
@ -83,12 +81,12 @@ let
|
|||
makeClosure = module: buildFromConfig module (config: config.system.build.toplevel);
|
||||
|
||||
|
||||
buildFromConfig = module: sel: forAllSystems (system: scrubDrv (sel (import ./lib/eval-config.nix {
|
||||
buildFromConfig = module: sel: forAllSystems (system: hydraJob (sel (import ./lib/eval-config.nix {
|
||||
inherit system;
|
||||
modules = [ module versionModule ] ++ lib.singleton
|
||||
modules = [ module versionModule ] ++ singleton
|
||||
({ config, lib, ... }:
|
||||
{ fileSystems."/".device = lib.mkDefault "/dev/sda1";
|
||||
boot.loader.grub.device = lib.mkDefault "/dev/sda";
|
||||
{ fileSystems."/".device = mkDefault "/dev/sda1";
|
||||
boot.loader.grub.device = mkDefault "/dev/sda";
|
||||
});
|
||||
}).config));
|
||||
|
||||
|
@ -175,10 +173,10 @@ in rec {
|
|||
|
||||
in
|
||||
# Declare the OVA as a build product so that it shows up in Hydra.
|
||||
scrubDrv (runCommand "nixos-ova-${config.system.nixosVersion}-${system}"
|
||||
hydraJob (runCommand "nixos-ova-${config.system.nixosVersion}-${system}"
|
||||
{ meta = {
|
||||
description = "NixOS VirtualBox appliance (${system})";
|
||||
maintainers = lib.maintainers.eelco;
|
||||
maintainers = maintainers.eelco;
|
||||
};
|
||||
ova = config.system.build.virtualBoxOVA;
|
||||
}
|
||||
|
@ -195,9 +193,9 @@ in rec {
|
|||
dummy = forAllSystems (system: pkgs.runCommand "dummy"
|
||||
{ toplevel = (import lib/eval-config.nix {
|
||||
inherit system;
|
||||
modules = lib.singleton ({ config, pkgs, ... }:
|
||||
{ fileSystems."/".device = lib.mkDefault "/dev/sda1";
|
||||
boot.loader.grub.device = lib.mkDefault "/dev/sda";
|
||||
modules = singleton ({ config, pkgs, ... }:
|
||||
{ fileSystems."/".device = mkDefault "/dev/sda1";
|
||||
boot.loader.grub.device = mkDefault "/dev/sda";
|
||||
});
|
||||
}).config.system.build.toplevel;
|
||||
}
|
||||
|
@ -242,34 +240,35 @@ in rec {
|
|||
tests.avahi = callTest tests/avahi.nix {};
|
||||
tests.bittorrent = callTest tests/bittorrent.nix {};
|
||||
tests.blivet = callTest tests/blivet.nix {};
|
||||
tests.cadvisor = scrubDrv (import tests/cadvisor.nix { system = "x86_64-linux"; });
|
||||
tests.cadvisor = hydraJob (import tests/cadvisor.nix { system = "x86_64-linux"; });
|
||||
tests.chromium = callTest tests/chromium.nix {};
|
||||
#tests.cjdns = callTest tests/cjdns.nix {};
|
||||
tests.cjdns = callTest tests/cjdns.nix {};
|
||||
tests.containers = callTest tests/containers.nix {};
|
||||
tests.docker = scrubDrv (import tests/docker.nix { system = "x86_64-linux"; });
|
||||
tests.dockerRegistry = scrubDrv (import tests/docker-registry.nix { system = "x86_64-linux"; });
|
||||
tests.etcd = scrubDrv (import tests/etcd.nix { system = "x86_64-linux"; });
|
||||
tests.docker = hydraJob (import tests/docker.nix { system = "x86_64-linux"; });
|
||||
tests.dockerRegistry = hydraJob (import tests/docker-registry.nix { system = "x86_64-linux"; });
|
||||
tests.etcd = hydraJob (import tests/etcd.nix { system = "x86_64-linux"; });
|
||||
tests.firefox = callTest tests/firefox.nix {};
|
||||
tests.firewall = callTest tests/firewall.nix {};
|
||||
tests.fleet = scrubDrv (import tests/fleet.nix { system = "x86_64-linux"; });
|
||||
tests.fleet = hydraJob (import tests/fleet.nix { system = "x86_64-linux"; });
|
||||
#tests.gitlab = callTest tests/gitlab.nix {};
|
||||
tests.gnome3 = callTest tests/gnome3.nix {};
|
||||
tests.i3wm = callTest tests/i3wm.nix {};
|
||||
tests.installer.grub1 = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).grub1.test);
|
||||
tests.installer.lvm = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).lvm.test);
|
||||
tests.installer.rebuildCD = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).rebuildCD.test);
|
||||
tests.installer.separateBoot = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).separateBoot.test);
|
||||
tests.installer.simple = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).simple.test);
|
||||
tests.installer.simpleLabels = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).simpleLabels.test);
|
||||
tests.installer.simpleProvided = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).simpleProvided.test);
|
||||
tests.installer.btrfsSimple = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).btrfsSimple.test);
|
||||
tests.installer.btrfsSubvols = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).btrfsSubvols.test);
|
||||
tests.installer.btrfsSubvolDefault = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).btrfsSubvolDefault.test);
|
||||
tests.installer.grub1 = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).grub1.test);
|
||||
tests.installer.lvm = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).lvm.test);
|
||||
tests.installer.rebuildCD = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).rebuildCD.test);
|
||||
tests.installer.separateBoot = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).separateBoot.test);
|
||||
tests.installer.simple = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).simple.test);
|
||||
tests.installer.simpleLabels = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).simpleLabels.test);
|
||||
tests.installer.simpleProvided = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).simpleProvided.test);
|
||||
tests.installer.swraid = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).swraid.test);
|
||||
tests.installer.btrfsSimple = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).btrfsSimple.test);
|
||||
tests.installer.btrfsSubvols = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).btrfsSubvols.test);
|
||||
tests.installer.btrfsSubvolDefault = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).btrfsSubvolDefault.test);
|
||||
tests.influxdb = callTest tests/influxdb.nix {};
|
||||
tests.ipv6 = callTest tests/ipv6.nix {};
|
||||
tests.jenkins = callTest tests/jenkins.nix {};
|
||||
tests.kde4 = callTest tests/kde4.nix {};
|
||||
tests.kubernetes = scrubDrv (import tests/kubernetes.nix { system = "x86_64-linux"; });
|
||||
tests.kubernetes = hydraJob (import tests/kubernetes.nix { system = "x86_64-linux"; });
|
||||
tests.latestKernel.login = callTest tests/login.nix { latestKernel = true; };
|
||||
tests.login = callTest tests/login.nix {};
|
||||
#tests.logstash = callTest tests/logstash.nix {};
|
||||
|
@ -299,9 +298,10 @@ in rec {
|
|||
# TODO: put in networking.nix after the test becomes more complete
|
||||
tests.networkingProxy = callTest tests/networking-proxy.nix {};
|
||||
tests.nfs3 = callTest tests/nfs.nix { version = 3; };
|
||||
tests.nfs4 = callTest tests/nfs.nix { version = 4; };
|
||||
tests.nsd = callTest tests/nsd.nix {};
|
||||
tests.openssh = callTest tests/openssh.nix {};
|
||||
tests.panamax = scrubDrv (import tests/panamax.nix { system = "x86_64-linux"; });
|
||||
tests.panamax = hydraJob (import tests/panamax.nix { system = "x86_64-linux"; });
|
||||
tests.peerflix = callTest tests/peerflix.nix {};
|
||||
tests.printing = callTest tests/printing.nix {};
|
||||
tests.proxy = callTest tests/proxy.nix {};
|
||||
|
@ -312,6 +312,10 @@ in rec {
|
|||
tests.udisks2 = callTest tests/udisks2.nix {};
|
||||
tests.virtualbox = callTest tests/virtualbox.nix {};
|
||||
tests.xfce = callTest tests/xfce.nix {};
|
||||
tests.bootBiosCdrom = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootBiosCdrom);
|
||||
tests.bootBiosUsb = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootBiosUsb);
|
||||
tests.bootUefiCdrom = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootUefiCdrom);
|
||||
tests.bootUefiUsb = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootUefiUsb);
|
||||
|
||||
|
||||
/* Build a bunch of typical closures so that Hydra can keep track of
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
{ system ? builtins.currentSystem }:
|
||||
|
||||
with import ../lib/testing.nix { inherit system; };
|
||||
with import ../lib/qemu-flags.nix;
|
||||
with pkgs.lib;
|
||||
|
||||
let
|
||||
|
||||
iso =
|
||||
(import ../lib/eval-config.nix {
|
||||
inherit system;
|
||||
modules =
|
||||
[ ../modules/installer/cd-dvd/installation-cd-minimal.nix
|
||||
../modules/testing/test-instrumentation.nix
|
||||
{ key = "serial";
|
||||
boot.loader.grub.timeout = mkOverride 0 0;
|
||||
|
||||
# The test cannot access the network, so any sources we
|
||||
# need must be included in the ISO.
|
||||
isoImage.storeContents =
|
||||
[ pkgs.glibcLocales
|
||||
pkgs.sudo
|
||||
pkgs.docbook5
|
||||
pkgs.docbook5_xsl
|
||||
pkgs.grub
|
||||
pkgs.perlPackages.XMLLibXML
|
||||
pkgs.unionfs-fuse
|
||||
pkgs.gummiboot
|
||||
];
|
||||
}
|
||||
];
|
||||
}).config.system.build.isoImage;
|
||||
|
||||
makeBootTest = name: machineConfig:
|
||||
makeTest {
|
||||
inherit iso;
|
||||
name = "boot-" + name;
|
||||
nodes = { };
|
||||
testScript =
|
||||
''
|
||||
my $machine = createMachine({ ${machineConfig}, qemuFlags => '-m 768' });
|
||||
$machine->start;
|
||||
$machine->waitForUnit("multi-user.target");
|
||||
$machine->shutdown;
|
||||
'';
|
||||
};
|
||||
in {
|
||||
bootBiosCdrom = makeBootTest "bios-cdrom" ''
|
||||
cdrom => glob("${iso}/iso/*.iso")
|
||||
'';
|
||||
bootBiosUsb = makeBootTest "bios-usb" ''
|
||||
usb => glob("${iso}/iso/*.iso")
|
||||
'';
|
||||
bootUefiCdrom = makeBootTest "uefi-cdrom" ''
|
||||
cdrom => glob("${iso}/iso/*.iso"),
|
||||
bios => '${pkgs.OVMF}/FV/OVMF.fd'
|
||||
'';
|
||||
bootUefiUsb = makeBootTest "uefi-usb" ''
|
||||
usb => glob("${iso}/iso/*.iso"),
|
||||
bios => '${pkgs.OVMF}/FV/OVMF.fd'
|
||||
'';
|
||||
}
|
||||
|
|
@ -109,7 +109,12 @@ import ./make-test.nix (
|
|||
$machine->waitUntilSucceeds("${xdo "check-startup" ''
|
||||
search --sync --onlyvisible --name "startup done"
|
||||
# close first start help popup
|
||||
key Escape
|
||||
key -delay 1000 Escape
|
||||
# XXX: This is to make sure the popup is closed, but we should take
|
||||
# screenshots to detect visual changes.
|
||||
key -delay 2000 Escape
|
||||
key -delay 3000 Escape
|
||||
key -delay 4000 Escape
|
||||
windowfocus --sync
|
||||
windowactivate --sync
|
||||
''}");
|
||||
|
|
|
@ -3,15 +3,15 @@ let
|
|||
carolPubKey = "n932l3pjvmhtxxcdrqq2qpw5zc58f01vvjx01h4dtd1bb0nnu2h0.k";
|
||||
carolPassword = "678287829ce4c67bc8b227e56d94422ee1b85fa11618157b2f591de6c6322b52";
|
||||
carolIp4 = "192.168.0.9";
|
||||
|
||||
|
||||
basicConfig =
|
||||
{ config, pkgs, ... }:
|
||||
{ services.cjdns.enable = true;
|
||||
|
||||
|
||||
# Turning off DHCP isn't very realistic but makes
|
||||
# the sequence of address assignment less stochastic.
|
||||
networking.useDHCP = false;
|
||||
|
||||
|
||||
networking.interfaces.eth1.prefixLength = 24;
|
||||
# CJDNS output is incompatible with the XML log.
|
||||
systemd.services.cjdns.serviceConfig.StandardOutput = "null";
|
||||
|
@ -41,19 +41,18 @@ import ./make-test.nix {
|
|||
# Bob explicitly connects to Carol over UDPInterface.
|
||||
bob =
|
||||
{ config, lib, nodes, ... }:
|
||||
|
||||
|
||||
let carolIp4 = lib.mkForce nodes.carol.config.networking.interfaces.eth1; in
|
||||
|
||||
|
||||
{ imports = [ basicConfig ];
|
||||
|
||||
|
||||
networking.interfaces.eth1.ipAddress = "192.168.0.2";
|
||||
|
||||
|
||||
services.cjdns =
|
||||
{ UDPInterface =
|
||||
{ bind = "0.0.0.0:1024";
|
||||
connectTo."192.168.0.1:1024}" =
|
||||
{ hostname = "carol.hype";
|
||||
password = carolPassword;
|
||||
{ password = carolPassword;
|
||||
publicKey = carolPubKey;
|
||||
};
|
||||
};
|
||||
|
@ -75,7 +74,7 @@ import ./make-test.nix {
|
|||
'';
|
||||
|
||||
networking.interfaces.eth1.ipAddress = "192.168.0.1";
|
||||
|
||||
|
||||
services.cjdns =
|
||||
{ authorizedPasswords = [ carolPassword ];
|
||||
ETHInterface.bind = "eth1";
|
||||
|
@ -106,13 +105,13 @@ import ./make-test.nix {
|
|||
my $carolIp6 = cjdnsIp $carol;
|
||||
|
||||
# ping a few times each to let the routing table establish itself
|
||||
|
||||
|
||||
$alice->succeed("ping6 -c 4 $carolIp6");
|
||||
$bob->succeed("ping6 -c 4 carol.hype");
|
||||
$bob->succeed("ping6 -c 4 $carolIp6");
|
||||
|
||||
$carol->succeed("ping6 -c 4 $aliceIp6");
|
||||
$carol->succeed("ping6 -c 4 $bobIp6");
|
||||
|
||||
|
||||
$alice->succeed("ping6 -c 4 $bobIp6");
|
||||
$bob->succeed("ping6 -c 4 $aliceIp6");
|
||||
|
||||
|
|
|
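Pulling the cjdns hunks together, Bob's side of the peering boils down to a configuration like the following. This is a sketch assembled from the lines shown above (option paths are assumed to match the cjdns module of that revision), not a verbatim excerpt from the commit:

  { services.cjdns = {
      enable = true;
      UDPInterface = {
        bind = "0.0.0.0:1024";
        # Peer with Carol using her announced password and public key.
        connectTo."192.168.0.1:1024" = {
          password = carolPassword;
          publicKey = carolPubKey;
        };
      };
    };
  }
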
@@ -327,12 +327,12 @@ in {
      $machine->succeed(
          "parted /dev/vda --"
          . " mklabel msdos"
          . " mkpart primary ext2 1M 30MB" # /boot
          . " mkpart extended 30M -1s"
          . " mkpart logical 31M 1531M" # md0 (root), first device
          . " mkpart logical 1540M 3040M" # md0 (root), second device
          . " mkpart logical 3050M 3306M" # md1 (swap), first device
          . " mkpart logical 3320M 3576M", # md1 (swap), second device
          . " mkpart primary ext2 1M 100MB" # /boot
          . " mkpart extended 100M -1s"
          . " mkpart logical 102M 1602M" # md0 (root), first device
          . " mkpart logical 1603M 3103M" # md0 (root), second device
          . " mkpart logical 3104M 3360M" # md1 (swap), first device
          . " mkpart logical 3361M 3617M", # md1 (swap), second device
          "udevadm settle",
          "ls -l /dev/vda* >&2",
          "cat /proc/partitions >&2",

@@ -13,7 +13,7 @@ import ./make-test.nix rec {
        id: redis-master-pod
        containers:
          - name: master
            image: master:5000/scratch
            image: master:5000/nix
            cpu: 100
            ports:
              - name: redis-server

@@ -50,8 +50,8 @@ import ./make-test.nix rec {
        virtualisation.memorySize = 768;
        services.kubernetes = {
          roles = ["master" "node"];
          dockerCfg = ''{"master:5000":{}}'';
          controllerManager.machines = ["master" "node"];
          kubelet.extraOpts = "-network_container_image=master:5000/pause";
          apiserver.address = "0.0.0.0";
          verbose = true;
        };

@@ -94,7 +94,8 @@ import ./make-test.nix rec {
      {
        services.kubernetes = {
          roles = ["node"];
          kubelet.extraOpts = "-network_container_image=master:5000/pause";
          dockerCfg = ''{"master:5000":{}}'';
          kubelet.apiServers = ["master:8080"];
          verbose = true;
        };
        virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0 --insecure-registry master:5000";

@@ -155,14 +156,14 @@ import ./make-test.nix rec {
      $node->waitForUnit("kubernetes-kubelet.service");
      $node->waitForUnit("kubernetes-proxy.service");

      $master->waitUntilSucceeds("kubecfg list minions | grep master");
      $master->waitUntilSucceeds("kubecfg list minions | grep node");
      $master->waitUntilSucceeds("kubectl get minions | grep master");
      $master->waitUntilSucceeds("kubectl get minions | grep node");

      $client->waitForUnit("docker.service");
      $client->succeed("tar cv --files-from /dev/null | docker import - scratch");
      $client->succeed("docker tag scratch master:5000/scratch");
      $client->succeed("tar cv --files-from /dev/null | docker import - nix");
      $client->succeed("docker tag nix master:5000/nix");
      $master->waitForUnit("docker-registry.service");
      $client->succeed("docker push master:5000/scratch");
      $client->succeed("docker push master:5000/nix");
      $client->succeed("mkdir -p /root/pause");
      $client->succeed("cp /etc/test/pause /root/pause/");
      $client->succeed("cp /etc/test/Dockerfile /root/pause/");

@@ -1,4 +1,4 @@
import ./make-test.nix ({ version, ... }:
import ./make-test.nix ({ version ? 4, ... }:

let

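The only change in this hunk is giving version a default value, so the test can be evaluated without the caller passing it explicitly. A minimal, self-contained sketch of that idiom (the names here are illustrative, not from the commit):

  let
    # `? 4` supplies a default; `...` tolerates extra attributes in the call.
    mkName = { version ? 4, ... }: "test-v${toString version}";
  in [ (mkName { }) (mkName { version = 3; }) ]   # => [ "test-v4" "test-v3" ]
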
@@ -31,8 +31,8 @@ import ./make-test.nix ({pkgs, ... }: {
      startAll;

      # Make sure that cups is up on both sides.
      $server->waitForUnit("cupsd.service");
      $client->waitForUnit("cupsd.service");
      $server->waitForUnit("cups.service");
      $client->waitForUnit("cups.service");
      $client->succeed("lpstat -r") =~ /scheduler is running/ or die;
      $client->succeed("lpstat -H") =~ "/var/run/cups/cups.sock" or die;
      $client->succeed("curl --fail http://localhost:631/");

@@ -39,9 +39,8 @@ import ./make-test.nix ({ pkgs, ... }: with pkgs.lib; let
    ];

    boot.initrd.extraUtilsCommands = ''
      cp -av -t "$out/bin/" \
        "${pkgs.linuxPackages.virtualboxGuestAdditions}/sbin/mount.vboxsf" \
        "${pkgs.utillinux}/bin/unshare"
      copy_bin_and_libs "${pkgs.linuxPackages.virtualboxGuestAdditions}/sbin/mount.vboxsf"
      copy_bin_and_libs "${pkgs.utillinux}/bin/unshare"
      ${(attrs.extraUtilsCommands or (const "")) pkgs}
    '';

@@ -2,8 +2,8 @@

rec {

  bitcoin = callPackage ./bitcoin.nix { openssl = pkgs.openssl_1_0_1j; withGui = true; };
  bitcoind = callPackage ./bitcoin.nix { openssl = pkgs.openssl_1_0_1j; withGui = false; };
  bitcoin = callPackage ./bitcoin.nix { withGui = true; };
  bitcoind = callPackage ./bitcoin.nix { withGui = false; };

  darkcoin = callPackage ./darkcoin.nix { withGui = true; };
  darkcoind = callPackage ./darkcoin.nix { withGui = false; };

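The hunk above stops pinning a specific OpenSSL for the bitcoin derivations: with callPackage, any argument not passed explicitly is filled in from the surrounding package set, so the default openssl is injected instead. As a rough, hypothetical sketch of the receiving side (the real ./bitcoin.nix takes more inputs; only the argument handling is illustrated):

  # Hypothetical signature for ./bitcoin.nix: `openssl` now comes from the
  # package set unless overridden, and `withGui` toggles the GUI build.
  { stdenv, fetchurl, openssl, withGui ? true }:

  stdenv.mkDerivation {
    name = if withGui then "bitcoin" else "bitcoind";
    # ... src, buildInputs and configure flags would follow here
  }
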
@@ -1,4 +1,5 @@
{ stdenv, fetchurl, alsaLib, dbus, jack2, pkgconfig, python }:
{ stdenv, fetchurl, makeWrapper, pkgconfig, alsaLib, dbus, jack2
, python, pythonDBus }:

stdenv.mkDerivation rec {
  name = "a2jmidid-${version}";

@@ -9,13 +10,16 @@ stdenv.mkDerivation rec {
    sha256 = "0pzm0qk5ilqhwz74pydg1jwrds27vm47185dakdrxidb5bv3b5ia";
  };

  buildInputs = [ alsaLib dbus jack2 pkgconfig python ];
  buildInputs = [ makeWrapper pkgconfig alsaLib dbus jack2 python pythonDBus ];

  configurePhase = "python waf configure --prefix=$out";

  buildPhase = "python waf";

  installPhase = "python waf install";
  installPhase = ''
    python waf install
    wrapProgram $out/bin/a2j_control --set PYTHONPATH $PYTHONPATH
  '';

  meta = with stdenv.lib; {
    homepage = http://home.gna.org/a2jmidid;

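For context on the new install phase: makeWrapper provides the wrapProgram shell function inside the build environment; it moves the real executable aside and installs a small wrapper script that sets the given environment variables before exec'ing it, which is how a2j_control finds its Python D-Bus bindings at run time. A minimal, illustrative sketch of the same pattern (the package name and script are made up for this example):

  stdenv.mkDerivation {
    name = "wrap-example-0.1";
    buildInputs = [ makeWrapper python pythonDBus ];
    unpackPhase = "true";        # no sources; this only demonstrates wrapping
    installPhase = ''
      mkdir -p $out/bin
      echo '#!${python}/bin/python' > $out/bin/example
      chmod +x $out/bin/example
      # Bake PYTHONPATH (populated by the Python inputs' setup hooks)
      # into the installed program via a generated wrapper script.
      wrapProgram $out/bin/example --set PYTHONPATH $PYTHONPATH
    '';
  }
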
@@ -17,7 +17,7 @@ stdenv.mkDerivation rec {
  QT_PLUGIN_PATH="${qtscriptgenerator}/lib/qt4/plugins";

  buildInputs = [ qtscriptgenerator stdenv.cc.libc gettext curl
    libxml2 mysql taglib taglib_extras loudmouth kdelibs automoc4 phonon strigi
    libxml2 mysql.lib taglib taglib_extras loudmouth kdelibs automoc4 phonon strigi
    soprano qca2 libmtp liblastfm libgpod pkgconfig qjson ffmpeg libofa nepomuk_core ];

  cmakeFlags = "-DKDE4_BUILD_TESTS=OFF";

@@ -0,0 +1,29 @@
{ stdenv, fetchurl, pkgconfig, gettext, gtk2, expat, intltool, libgcrypt,
  libunique, gnutls, libxml2, curl, mpd_clientlib, dbus_glib, libnotify,
  libsoup, avahi, taglib
}:

stdenv.mkDerivation rec {
  version = "1.5.1";
  name = "ario-${version}";

  src = fetchurl {
    url = "mirror://sourceforge/ario-player/${name}.tar.gz";
    sha256 = "07n97618jv1ilxnm5c6qj9zjz0imw3p304mn4hjbjkk3p0d2hc88";
  };

  patches = [ ./glib-single-include.patch ];

  buildInputs = [
    pkgconfig gettext gtk2 expat intltool libgcrypt libunique gnutls
    libxml2 curl mpd_clientlib dbus_glib libnotify libsoup avahi taglib
  ];

  meta = {
    description = "GTK2 client for MPD (Music player daemon)";
    homepage = "http://ario-player.sourceforge.net/";
    license = stdenv.lib.licenses.gpl2Plus;
    maintainers = [ stdenv.lib.maintainers.garrison ];
    platforms = stdenv.lib.platforms.all;
  };
}

@@ -0,0 +1,40 @@
From: Michael Biebl <biebl@debian.org>
Origin: vendor
Bug-Debian: http://bugs.debian.org/665506
Subject: Including individual glib headers no longer supported

--- a/src/ario-profiles.h
+++ b/src/ario-profiles.h
@@ -20,7 +20,7 @@
 #ifndef __ARIO_PROFILES_H
 #define __ARIO_PROFILES_H
 
-#include <glib/gslist.h>
+#include <glib.h>
 #include "servers/ario-server.h"
 
 G_BEGIN_DECLS
--- a/src/plugins/ario-plugin-info.c
+++ b/src/plugins/ario-plugin-info.c
@@ -27,7 +27,7 @@
 
 #include <string.h>
 #include <glib/gi18n.h>
-#include <glib/gkeyfile.h>
+#include <glib.h>
 
 #include "plugins/ario-plugin-info-priv.h"
 #include "ario-debug.h"
--- a/src/ario-util.h
+++ b/src/ario-util.h
@@ -18,8 +18,8 @@
 */
 
 #include "servers/ario-server.h"
-#include "glib/gslist.h"
-#include "gdk/gdkpixbuf.h"
+#include <glib.h>
+#include <gdk/gdkpixbuf.h>
 
 /* Number of covers used to generate the drag & drop image */
 #define MAX_COVERS_IN_DRAG 3

@@ -15,7 +15,7 @@ stdenv.mkDerivation rec {
  preConfigure = /* we prefer system-wide libs */ ''
    mv lib-src lib-src-rm
    mkdir lib-src
    mv lib-src-rm/{Makefile*,lib-widget-extra,portaudio-v19,portmixer,portsmf,FileDialog,sbsms} lib-src/
    mv lib-src-rm/{Makefile*,lib-widget-extra,portaudio-v19,portmixer,portsmf,FileDialog,sbsms,libnyquist} lib-src/
    rm -r lib-src-rm/
  '';

@@ -0,0 +1,17 @@
{ stdenv, fetchurl, pkgconfig, gtk2, libsndfile, portaudio }:

stdenv.mkDerivation rec {
  name = "gnaural-1.0.20110606";
  buildInputs = [ pkgconfig gtk2 libsndfile portaudio ];
  src = fetchurl {
    url = "mirror://sourceforge/gnaural/Gnaural/${name}.tar.gz";
    sha256 = "0p9rasz1jmxf16vnpj17g3vzdjygcyz3l6nmbq6wr402l61f1vy5";
  };
  meta = with stdenv.lib;
    { description = "Auditory binaural-beat generator";
      homepage = http://gnaural.sourceforge.net/;
      licenses = licenses.gpl2;
      maintainers = [ maintainers.emery ];
      platforms = platforms.linux;
    };
}

@@ -0,0 +1,35 @@
{stdenv, fetchurl, scons, boost, ladspaH, pkgconfig }:

stdenv.mkDerivation rec {
  version = "0.2-2";
  name = "nova-filters-${version}";

  src = fetchurl {
    url = http://klingt.org/~tim/nova-filters/nova-filters_0.2-2.tar.gz;
    sha256 = "16064vvl2w5lz4xi3lyjk4xx7fphwsxc14ajykvndiz170q32s6i";
  };

  buildInputs = [ scons boost ladspaH pkgconfig ];

  patchPhase = ''
    # remove TERM:
    sed -i -e '4d' SConstruct
    sed -i "s@mfpmath=sse@mfpmath=sse -I ${boost.dev}/include@g" SConstruct
    sed -i "s@ladspa.h@${ladspaH}/include/ladspa.h@g" filters.cpp
    sed -i "s/= check/= detail::filter_base<internal_type, checked>::check/" nova/source/dsp/filter.hpp
  '';

  buildPhase = ''
    scons
  '';

  installPhase = ''
    scons $sconsFlags "prefix=$out" install
  '';

  meta = {
    homepage = http://klingt.org/~tim/nova-filters/;
    description = "LADSPA plugins based on filters of nova";
    license = stdenv.lib.licenses.gpl2Plus;
  };
}

Some files were not shown because too many files have changed in this diff.