Merge branch 'master' into dapphub-hevm
This commit is contained in:
commit 0b8b72081b
@@ -785,7 +785,20 @@ example of such a situation is when `py.test` is used.

 #### Common issues

-- Non-working tests can often be deselected. In the case of `py.test`: `py.test -k 'not function_name and not other_function'`.
+- Non-working tests can often be deselected. By default `buildPythonPackage` runs `python setup.py test`.
+  Most python modules follow the standard test protocol where the pytest runner can be used instead.
+  `py.test` supports a `-k` parameter to ignore test methods or classes:
+
+  ```nix
+  buildPythonPackage {
+    # ...
+    # assumes the tests are located in tests
+    checkInputs = [ pytest ];
+    checkPhase = ''
+      py.test -k 'not function_name and not other_function' tests
+    '';
+  }
+  ```
 - Unicode issues can typically be fixed by including `glibcLocales` in `buildInputs` and exporting `LC_ALL=en_US.utf-8`.
 - Tests that attempt to access `$HOME` can be fixed by using the following work-around before running tests (e.g. `preCheck`): `export HOME=$(mktemp -d)`
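As an aside, a minimal sketch combining the two work-arounds above in one derivation (the attribute values are illustrative, not a particular package):

```nix
buildPythonPackage {
  # ...
  buildInputs = [ glibcLocales ];
  preCheck = ''
    # Avoid Unicode errors and $HOME access during the test phase.
    export LC_ALL=en_US.utf-8
    export HOME=$(mktemp -d)
  '';
}
```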
@@ -477,32 +477,18 @@ it. Place the resulting <filename>package.nix</filename> file into

   <varlistentry>
     <term>Using the FOSS Radeon or nouveau (nvidia) drivers</term>
-    <listitem><itemizedlist><listitem><para>
-      Both the open source radeon drivers as well as the nouveau drivers (nvidia)
-      need a newer libc++ than is provided by the default runtime, which leads to a
-      crash on launch. Use <programlisting>environment.systemPackages =
-      [(pkgs.steam.override { newStdcpp = true; })];</programlisting> in your config
-      if you get an error like
-      <programlisting>
-      libGL error: unable to load driver: radeonsi_dri.so
-      libGL error: driver pointer missing
-      libGL error: failed to load driver: radeonsi
-      libGL error: unable to load driver: swrast_dri.so
-      libGL error: failed to load driver: swrast</programlisting>
-      or
-      <programlisting>
-      libGL error: unable to load driver: nouveau_dri.so
-      libGL error: driver pointer missing
-      libGL error: failed to load driver: nouveau
-      libGL error: unable to load driver: swrast_dri.so
-      libGL error: failed to load driver: swrast</programlisting></para></listitem>
+    <listitem><itemizedlist>
+      <listitem><para>The <literal>newStdcpp</literal> parameter
+        was removed since NixOS 17.09 and should not be needed anymore.
+      </para></listitem>
+
       <listitem><para>
       Steam ships statically linked with a version of libcrypto that
       conflicts with the one dynamically loaded by radeonsi_dri.so.
       If you get the error
       <programlisting>steam.sh: line 713: 7842 Segmentation fault (core dumped)</programlisting>
       have a look at <link xlink:href="https://github.com/NixOS/nixpkgs/pull/20269">this pull request</link>.
     </para></listitem>
   </itemizedlist></listitem></varlistentry>

@@ -281,6 +281,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
     url = https://fedoraproject.org/wiki/Licensing/GPL_Classpath_Exception;
   };

+  hpnd = spdx {
+    spdxId = "HPND";
+    fullName = "Historic Permission Notice and Disclaimer";
+  };
+
   # Intel's license, seems free
   iasl = {
     fullName = "iASL";
@@ -107,6 +107,7 @@
   choochootrain = "Hurshal Patel <hurshal@imap.cc>";
   chris-martin = "Chris Martin <ch.martin@gmail.com>";
   chrisjefferson = "Christopher Jefferson <chris@bubblescope.net>";
+  chrisrosset = "Christopher Rosset <chris@rosset.org.uk>";
   christopherpoole = "Christopher Mark Poole <mail@christopherpoole.net>";
   ciil = "Simon Lackerbauer <simon@lackerbauer.com>";
   ckampka = "Christian Kampka <christian@kampka.net>";
@@ -186,17 +187,20 @@
   ellis = "Ellis Whitehead <nixos@ellisw.net>";
   eperuffo = "Emanuele Peruffo <info@emanueleperuffo.com>";
   epitrochoid = "Mabry Cervin <mpcervin@uncg.edu>";
+  eqyiel = "Ruben Maher <r@rkm.id.au>";
   ericbmerritt = "Eric Merritt <eric@afiniate.com>";
   ericsagnes = "Eric Sagnes <eric.sagnes@gmail.com>";
   erikryb = "Erik Rybakken <erik.rybakken@math.ntnu.no>";
   ertes = "Ertugrul Söylemez <esz@posteo.de>";
   ethercrow = "Dmitry Ivanov <ethercrow@gmail.com>";
+  etu = "Elis Hirwing <elis@hirwing.se>";
   exi = "Reno Reckling <nixos@reckling.org>";
   exlevan = "Alexey Levan <exlevan@gmail.com>";
   expipiplus1 = "Joe Hermaszewski <nix@monoid.al>";
   fadenb = "Tristan Helmich <tristan.helmich+nixos@gmail.com>";
-  fare = "Francois-Rene Rideau <fahree@gmail.com>";
   falsifian = "James Cook <james.cook@utoronto.ca>";
+  fare = "Francois-Rene Rideau <fahree@gmail.com>";
+  fgaz = "Francesco Gazzetta <francygazz@gmail.com>";
   florianjacob = "Florian Jacob <projects+nixos@florianjacob.de>";
   flosse = "Markus Kohlhase <mail@markus-kohlhase.de>";
   fluffynukeit = "Daniel Austin <dan@fluffynukeit.com>";
@@ -288,12 +292,12 @@
   jonafato = "Jon Banafato <jon@jonafato.com>";
   jpierre03 = "Jean-Pierre PRUNARET <nix@prunetwork.fr>";
   jpotier = "Martin Potier <jpo.contributes.to.nixos@marvid.fr>";
-  jyp = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
   jraygauthier = "Raymond Gauthier <jraygauthier@gmail.com>";
   jtojnar = "Jan Tojnar <jtojnar@gmail.com>";
   juliendehos = "Julien Dehos <dehos@lisic.univ-littoral.fr>";
   jwiegley = "John Wiegley <johnw@newartisans.com>";
   jwilberding = "Jordan Wilberding <jwilberding@afiniate.com>";
+  jyp = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
   jzellner = "Jeff Zellner <jeffz@eml.cc>";
   kaiha = "Kai Harries <kai.harries@gmail.com>";
   kamilchm = "Kamil Chmielewski <kamil.chm@gmail.com>";
@@ -333,6 +337,7 @@
   lovek323 = "Jason O'Conal <jason@oconal.id.au>";
   lowfatcomputing = "Andreas Wagner <andreas.wagner@lowfatcomputing.org>";
   lsix = "Lancelot SIX <lsix@lancelotsix.com>";
+  ltavard = "Laure Tavard <laure.tavard@univ-grenoble-alpes.fr>";
   lucas8 = "Luc Chabassier <luc.linux@mailoo.org>";
   ludo = "Ludovic Courtès <ludo@gnu.org>";
   lufia = "Kyohei Kadota <lufia@lufia.org>";
@@ -495,6 +500,7 @@
   renzo = "Renzo Carbonara <renzocarbonara@gmail.com>";
   retrry = "Tadas Barzdžius <retrry@gmail.com>";
   rht = "rht <rhtbot@protonmail.com>";
+  richardipsum = "Richard Ipsum <richardipsum@fastmail.co.uk>";
   rick68 = "Wei-Ming Yang <rick68@gmail.com>";
   rickynils = "Rickard Nilsson <rickynils@gmail.com>";
   ris = "Robert Scott <code@humanleg.org.uk>";
@@ -504,6 +510,7 @@
   robberer = "Longrin Wischnewski <robberer@freakmail.de>";
   robbinch = "Robbin C. <robbinch33@gmail.com>";
   roberth = "Robert Hensing <nixpkgs@roberthensing.nl>";
+  robertodr = "Roberto Di Remigio <roberto.diremigio@gmail.com>";
   robgssp = "Rob Glossop <robgssp@gmail.com>";
   roblabla = "Robin Lambertz <robinlambertz+dev@gmail.com>";
   roconnor = "Russell O'Connor <roconnor@theorem.ca>";
@@ -577,10 +584,9 @@
   taku0 = "Takuo Yonezawa <mxxouy6x3m_github@tatapa.org>";
   tari = "Peter Marheine <peter@taricorp.net>";
   tavyc = "Octavian Cerna <octavian.cerna@gmail.com>";
-  ltavard = "Laure Tavard <laure.tavard@univ-grenoble-alpes.fr>";
   teh = "Tom Hunger <tehunger@gmail.com>";
-  teto = "Matthieu Coudron <mcoudron@hotmail.com>";
   telotortium = "Robert Irelan <rirelan@gmail.com>";
+  teto = "Matthieu Coudron <mcoudron@hotmail.com>";
   thall = "Niclas Thall <niclas.thall@gmail.com>";
   thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
   the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>";
@@ -609,6 +615,7 @@
   #urkud = "Yury G. Kudryashov <urkud+nix@ya.ru>"; inactive since 2012
   uwap = "uwap <me@uwap.name>";
   vaibhavsagar = "Vaibhav Sagar <vaibhavsagar@gmail.com>";
+  valeriangalliat = "Valérian Galliat <val@codejam.info>";
   vandenoever = "Jos van den Oever <jos@vandenoever.info>";
   vanschelven = "Klaas van Schelven <klaas@vanschelven.com>";
   vanzef = "Ivan Solyankin <vanzef@gmail.com>";
@@ -625,7 +632,6 @@
   vlstill = "Vladimír Štill <xstill@fi.muni.cz>";
   vmandela = "Venkateswara Rao Mandela <venkat.mandela@gmail.com>";
   vmchale = "Vanessa McHale <tmchale@wisc.edu>";
-  valeriangalliat = "Valérian Galliat <val@codejam.info>";
   volhovm = "Mikhail Volkhov <volhovm.cs@gmail.com>";
   volth = "Jaroslavas Pocepko <jaroslavas@volth.com>";
   vozz = "Oliver Hunt <oliver.huntuk@gmail.com>";
@@ -647,6 +653,7 @@
   xvapx = "Marti Serra <marti.serra.coscollano@gmail.com>";
   xwvvvvwx = "David Terry <davidterry@posteo.de>";
   yarr = "Dmitry V. <savraz@gmail.com>";
+  yegortimoshenko = "Yegor Timoshenko <yegortimoshenko@gmail.com>";
   yochai = "Yochai <yochai@titat.info>";
   yorickvp = "Yorick van Pelt <yorickvanpelt@gmail.com>";
   yuriaisaka = "Yuri Aisaka <yuri.aisaka+nix@gmail.com>";
@@ -9,17 +9,15 @@ GNOME_FTP=ftp.gnome.org/pub/GNOME/sources
 NO_GNOME_MAJOR="ghex gtkhtml gdm"

 usage() {
-  echo "Usage: $0 gnome_dir <show project>|<update project>|<update-all> [major.minor]" >&2
-  echo "gnome_dir is for example pkgs/desktops/gnome-3/3.18" >&2
+  echo "Usage: $0 <show project>|<update project>|<update-all> [major.minor]" >&2
   exit 0
 }

-if [ "$#" -lt 2 ]; then
+if [ "$#" -lt 1 ]; then
   usage
 fi

-GNOME_TOP=$1
-shift
+GNOME_TOP=pkgs/desktops/gnome-3

 action=$1

@@ -13,10 +13,8 @@ from pyquery import PyQuery as pq


 maintainers_json = subprocess.check_output([
-    'nix-instantiate',
-    'lib/maintainers.nix',
-    '--eval',
-    '--json'])
+    'nix-instantiate', '-E', 'import ./lib/maintainers.nix {}', '--eval', '--json'
+])
 maintainers = json.loads(maintainers_json)
 MAINTAINERS = {v: k for k, v in maintainers.iteritems()}

@@ -9,6 +9,7 @@
 <para>This section lists the release notes for each stable version of NixOS
 and current unstable revision.</para>

+<xi:include href="rl-1803.xml" />
 <xi:include href="rl-1709.xml" />
 <xi:include href="rl-1703.xml" />
 <xi:include href="rl-1609.xml" />
@@ -45,6 +45,33 @@ has the following highlights: </para>
     even though <literal>HDMI-0</literal> is the first head in the list.
     </para>
   </listitem>
+  <listitem>
+    <para>
+      The handling of SSL in the nginx module has been cleaned up, renaming
+      the misnamed <literal>enableSSL</literal> to <literal>onlySSL</literal>,
+      which reflects its original intention. It is not to be used with the
+      already existing <literal>forceSSL</literal>, which creates a second
+      non-SSL virtual host redirecting to the SSL virtual host. That combination
+      had only worked by chance earlier, due to specific implementation details.
+      In case you had specified both, please remove the
+      <literal>enableSSL</literal> option to keep the previous behaviour.
+    </para>
+    <para>
+      Another <literal>addSSL</literal> option has been introduced to configure
+      both a non-SSL virtual host and an SSL virtual host.
+    </para>
+    <para>
+      Options to configure <literal>resolver</literal>s and
+      <literal>upstream</literal>s have been introduced. See their
+      documentation for further details.
+    </para>
+    <para>
+      The <literal>port</literal> option has been replaced by a more generic
+      <literal>listen</literal> option, which makes it possible to specify
+      multiple addresses, ports and SSL configs dependent on the new SSL
+      handling mentioned above.
+    </para>
+  </listitem>
 </itemizedlist>

 <para>The following new services were added since the last release:</para>
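As an illustration of the renamed options, a hedged sketch of a NixOS configuration (host names and paths are hypothetical; consult the nginx module options for the authoritative set):

```nix
{
  services.nginx = {
    enable = true;
    # forceSSL replaces the old enableSSL-plus-manual-redirect pattern:
    # a second non-SSL virtual host redirects to the SSL one.
    virtualHosts."example.org" = {
      forceSSL = true;
      enableACME = true;
      root = "/var/www/example.org";
    };
    # addSSL serves the same host over both HTTP and HTTPS.
    virtualHosts."static.example.org" = {
      addSSL = true;
      enableACME = true;
      root = "/var/www/static";
    };
  };
}
```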
@@ -62,10 +89,17 @@ following incompatible changes:
 <itemizedlist>
   <listitem>
     <para>
-      <literal>aiccu</literal> package was removed. This is due to SixXS
+      The <literal>aiccu</literal> package was removed. This is due to SixXS
       <link xlink:href="https://www.sixxs.net/main/">sunsetting</link> its IPv6 tunnel.
     </para>
   </listitem>
+  <listitem>
+    <para>
+      The <literal>fanctl</literal> package and <literal>fan</literal> module
+      have been removed because the developers did not upstream their iproute2
+      patches and lagged behind compatibility with recent iproute2 versions.
+    </para>
+  </listitem>
   <listitem>
     <para>
       Top-level <literal>idea</literal> package collection was renamed.
@@ -202,6 +236,112 @@ rmdir /var/lib/ipfs/.ipfs
       <command>gpgv</command>, etc.
     </para>
   </listitem>
+  <listitem>
+    <para>
+      <literal>services.mysql</literal> now has declarative
+      configuration of databases and users with the
+      <literal>ensureDatabases</literal> and <literal>ensureUsers</literal>
+      options.
+    </para>
+    <para>
+      These options will never delete existing databases and users,
+      especially not when the value of the options is changed.
+    </para>
+    <para>
+      The MySQL users will be identified using
+      <link xlink:href="https://mariadb.com/kb/en/library/authentication-plugin-unix-socket/">
+      Unix socket authentication</link>. This authenticates the
+      Unix user with the same name only, and that without the need
+      for a password.
+    </para>
+    <para>
+      If you have previously created a MySQL <literal>root</literal>
+      user <emphasis>with a password</emphasis>, you will need to add
+      a <literal>root</literal> user for unix socket authentication
+      before using the new options. This can be done by running the
+      following SQL script:
+
+      <programlisting language="sql">
+        CREATE USER 'root'@'%' IDENTIFIED BY '';
+        GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
+        FLUSH PRIVILEGES;
+
+        -- Optionally, delete the password-authenticated user:
+        -- DROP USER 'root'@'localhost';
+      </programlisting>
+    </para>
+  </listitem>
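A minimal sketch of the new declarative options (database and user names are made up for illustration; the permission string follows MySQL GRANT syntax):

```nix
{
  services.mysql = {
    enable = true;
    ensureDatabases = [ "exampledb" ];
    ensureUsers = [{
      # Matched against the Unix user of the same name via socket authentication.
      name = "exampleuser";
      ensurePermissions = {
        "exampledb.*" = "ALL PRIVILEGES";
      };
    }];
  };
}
```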
<listitem>
|
||||||
|
<para>
|
||||||
|
<literal>sha256</literal> argument value of
|
||||||
|
<literal>dockerTools.pullImage</literal> expression must be
|
||||||
|
updated since the mechanism to download the image has been
|
||||||
|
changed. Skopeo is now used to pull the image instead of the
|
||||||
|
Docker daemon.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
|
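For reference, the usage that appears later in this diff looks like the following; the sha256 has to be recomputed after the switch to Skopeo:

```nix
{ pkgs ? import <nixpkgs> {} }:

pkgs.dockerTools.pullImage {
  imageName = "gcr.io/google_containers/kubernetes-dashboard-amd64";
  imageTag = "v1.6.3";
  sha256 = "0b5v7xa3s91yi9yfsw2b8wijiprnicbb02f5kqa579h4yndb3gfz";
}
```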
<listitem>
|
||||||
|
<para>
|
||||||
|
Templated systemd services e.g <literal>container@name</literal> are
|
||||||
|
now handled currectly when switching to a new configuration, resulting
|
||||||
|
in them being reloaded.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>
|
||||||
|
<literal>services.mysqlBackup</literal> now works by default
|
||||||
|
without any user setup, including for users other than
|
||||||
|
<literal>mysql</literal>.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
By default, the <literal>mysql</literal> user is no longer the
|
||||||
|
user which performs the backup. Instead a system account
|
||||||
|
<literal>mysqlbackup</literal> is used.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
The <literal>mysqlBackup</literal> service is also now using
|
||||||
|
systemd timers instead of <literal>cron</literal>.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
Therefore, the <literal>services.mysqlBackup.period</literal>
|
||||||
|
option no longer exists, and has been replaced with
|
||||||
|
<literal>services.mysqlBackup.calendar</literal>, which is in
|
||||||
|
the format of <link
|
||||||
|
xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.time.html#Calendar%20Events">systemd.time(7)</link>.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
If you expect to be sent an e-mail when the backup fails,
|
||||||
|
consider using a script which monitors the systemd journal for
|
||||||
|
errors. Regretfully, at present there is no built-in
|
||||||
|
functionality for this.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
You can check that backups still work by running
|
||||||
|
<command>systemctl start mysql-backup</command> then
|
||||||
|
<command>systemctl status mysql-backup</command>.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
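A hedged sketch of the new timer-based setup (the database name is illustrative; the option names come from the module changes later in this diff):

```nix
{
  services.mysqlBackup = {
    enable = true;
    databases = [ "exampledb" ];
    # systemd.time(7) calendar expression; replaces the old cron-style `period`.
    calendar = "01:15:00";
    location = "/var/backup/mysql";
  };
}
```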
+  <listitem>
+    <para>Steam: the <literal>newStdcpp</literal> parameter
+    was removed and should not be needed anymore.</para>
+  </listitem>
+  <listitem>
+    <para>
+      Redis has been updated to version 4, which mandates a cluster
+      mass-restart due to changes in the network handling, in order
+      to ensure compatibility with networks NATing traffic.
+    </para>
+  </listitem>
 </itemizedlist>

 <para>Other notable improvements:</para>
@@ -257,11 +397,55 @@ rmdir /var/lib/ipfs/.ipfs
   </listitem>
   <listitem>
     <para>
-      <literal>sha256</literal> argument value of
-      <literal>dockerTools.pullImage</literal> expression must be
-      updated since the mechanism to download the image has been
-      changed. Skopeo is now used to pull the image instead of the
-      Docker daemon.
+      Definitions for <filename>/etc/hosts</filename> can now be specified
+      declaratively with <literal>networking.hosts</literal>.
+    </para>
+  </listitem>
+  <listitem>
+    <para>
+      Two new options have been added to the installer loader, in addition
+      to the default having changed. The kernel log verbosity has been lowered
+      to the upstream default for the default options, in order to not spam
+      the console when e.g. joining a network.
+    </para>
+    <para>
+      This therefore leads to adding a new <literal>debug</literal> option
+      to set the log level to the previous verbose mode, to make debugging
+      easier while remaining easily accessible.
+    </para>
+    <para>
+      Additionally a <literal>copytoram</literal> option has been added,
+      which makes it possible to remove the install medium after booting.
+      This allows tethering from your phone after booting from it.
+    </para>
+    <para>
+      <literal>services.gitlab-runner.configOptions</literal> has been added
+      to specify the configuration of gitlab-runners declaratively.
+    </para>
+    <para>
+      <literal>services.jenkins.plugins</literal> has been added
+      to install plugins easily; its value can be generated with jenkinsPlugins2nix.
+    </para>
+    <para>
+      <literal>services.postfix.config</literal> has been added
+      to specify the main.cf with NixOS options. Additionally, other options
+      have been added to the postfix module, and it has been improved further.
+    </para>
+    <para>
+      The GitLab package and module have been updated to the latest 9.5 release.
+    </para>
+    <para>
+      The <literal>systemd-boot</literal> boot loader now lists the NixOS
+      version, kernel version and build date of all bootable generations.
+    </para>
+  </listitem>
+  <listitem>
+    <para>
+      The dnscrypt-proxy service now defaults to using a random upstream resolver,
+      selected from the list of public non-logging resolvers with DNSSEC support.
+      Existing configurations can be migrated to this mode of operation by
+      omitting the <option>services.dnscrypt-proxy.resolverName</option> option
+      or setting it to <literal>"random"</literal>.
     </para>
   </listitem>

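A small sketch of the new declarative /etc/hosts option (addresses and names are examples only; the option maps an address to a list of host names):

```nix
{
  networking.hosts = {
    "127.0.0.1" = [ "example.test" ];
    "10.0.0.2" = [ "printer.local" ];
  };
}
```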
@@ -29,8 +29,7 @@ following incompatible changes:

 <itemizedlist>
   <listitem>
-    <para>
-    </para>
+    <para></para>
   </listitem>
 </itemizedlist>

@@ -77,7 +77,6 @@ let
   excludedOptions = [
     "boot.systemd.services"
     "systemd.services"
-    "environment.gnome3.packageSet"
     "kde.extraPackages"
   ];
   excludeOptions = list:
@@ -9,9 +9,7 @@ let
   cfg = config.networking;
   dnsmasqResolve = config.services.dnsmasq.enable &&
                    config.services.dnsmasq.resolveLocalQueries;
-  bindResolve = config.services.bind.enable &&
-                config.services.bind.resolveLocalQueries;
-  hasLocalResolver = bindResolve || dnsmasqResolve;
+  hasLocalResolver = config.services.bind.enable || dnsmasqResolve;

   resolvconfOptions = cfg.resolvconfOptions
     ++ optional cfg.dnsSingleRequest "single-request"
@@ -40,6 +40,12 @@ in
 {

   config = mkIf enabled {
+    assertions = [
+      {
+        assertion = services.xserver.displayManager.gdm.wayland;
+        message = "NVidia drivers don't support wayland";
+      }
+    ];

     services.xserver.drivers = singleton
       { name = "nvidia"; modules = [ nvidia_x11.bin ]; libPath = [ nvidia_x11 ]; };
@@ -62,11 +68,16 @@ in
     boot.extraModulePackages = [ nvidia_x11.bin ];

     # nvidia-uvm is required by CUDA applications.
-    boot.kernelModules = [ "nvidia-uvm" ];
+    boot.kernelModules = [ "nvidia-uvm" ] ++
+      lib.optionals config.services.xserver.enable [ "nvidia" "nvidia_modeset" "nvidia_drm" ];
+

     # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
     services.udev.extraRules =
       ''
+        KERNEL=="nvidia", RUN+="${pkgs.stdenv.shell} -c 'mknod -m 666 /dev/nvidiactl c $(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
+        KERNEL=="nvidia_modeset", RUN+="${pkgs.stdenv.shell} -c 'mknod -m 666 /dev/nvidia-modeset c $(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
+        KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="nvidia", RUN+="${pkgs.stdenv.shell} -c 'mknod -m 666 /dev/nvidia%n c $(grep nvidia-frontend /proc/devices | cut -d \ -f 1) %n'"
         KERNEL=="nvidia_uvm", RUN+="${pkgs.stdenv.shell} -c 'mknod -m 666 /dev/nvidia-uvm c $(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
       '';

@@ -46,17 +46,24 @@ let

       # A variant to boot with 'nomodeset'
       LABEL boot-nomodeset
-        MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (with nomodeset)
+        MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (nomodeset)
         LINUX /boot/bzImage
         APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
         INITRD /boot/initrd

       # A variant to boot with 'copytoram'
       LABEL boot-copytoram
-        MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (with copytoram)
+        MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (copytoram)
         LINUX /boot/bzImage
         APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram
         INITRD /boot/initrd
+
+      # A variant to boot with verbose logging to the console
+      LABEL boot-nomodeset
+        MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (debug)
+        LINUX /boot/bzImage
+        APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} loglevel=7
+        INITRD /boot/initrd
     '';

   isolinuxMemtest86Entry = ''
@@ -74,25 +81,43 @@ let
       cp -v ${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi
       mkdir -p $out/loader/entries

-      echo "title NixOS Live CD" > $out/loader/entries/nixos-livecd.conf
-      echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf
-      echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf
-      echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf
+      cat << EOF > $out/loader/entries/nixos-iso.conf
+      title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
+      linux /boot/bzImage
+      initrd /boot/initrd
+      options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
+      EOF

       # A variant to boot with 'nomodeset'
-      echo "title NixOS Live CD (with nomodeset)" > $out/loader/entries/nixos-livecd-nomodeset.conf
-      echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd-nomodeset.conf
-      echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd-nomodeset.conf
-      echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset" >> $out/loader/entries/nixos-livecd-nomodeset.conf
+      cat << EOF > $out/loader/entries/nixos-iso-nomodeset.conf
+      title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
+      version nomodeset
+      linux /boot/bzImage
+      initrd /boot/initrd
+      options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
+      EOF

       # A variant to boot with 'copytoram'
-      echo "title NixOS Live CD (with copytoram)" > $out/loader/entries/nixos-livecd-copytoram.conf
-      echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd-copytoram.conf
-      echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd-copytoram.conf
-      echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram" >> $out/loader/entries/nixos-livecd-copytoram.conf
+      cat << EOF > $out/loader/entries/nixos-iso-copytoram.conf
+      title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
+      version copytoram
+      linux /boot/bzImage
+      initrd /boot/initrd
+      options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram
+      EOF

-      echo "default nixos-livecd" > $out/loader/loader.conf
-      echo "timeout ${builtins.toString config.boot.loader.timeout}" >> $out/loader/loader.conf
+      # A variant to boot with verbose logging to the console
+      cat << EOF > $out/loader/entries/nixos-iso-debug.conf
+      title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (debug)
+      linux /boot/bzImage
+      initrd /boot/initrd
+      options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} loglevel=7
+      EOF
+
+      cat << EOF > $out/loader/loader.conf
+      default nixos-iso
+      timeout ${builtins.toString config.boot.loader.timeout}
+      EOF
     '';

   efiImg = pkgs.runCommand "efi-image_eltorito" { buildInputs = [ pkgs.mtools pkgs.libfaketime ]; }
@@ -336,6 +361,9 @@ in
       { source = config.isoImage.splashImage;
         target = "/isolinux/background.png";
       }
+      { source = pkgs.writeText "version" config.system.nixosVersion;
+        target = "/version.txt";
+      }
     ] ++ optionals config.isoImage.makeEfiBootable [
       { source = efiImg;
         target = "/boot/efi.img";
@@ -583,9 +583,15 @@ $bootLoaderConfig
   # List packages installed in system profile. To search by name, run:
   # \$ nix-env -qaP | grep wget
   # environment.systemPackages = with pkgs; [
-  #   wget
+  #   wget vim
   # ];

+  # Some programs need SUID wrappers, can be configured further or are
+  # started in user sessions.
+  # programs.bash.enableCompletion = true;
+  # programs.mtr.enable = true;
+  # programs.gnupg.agent = { enable = true; enableSSHSupport = true; };
+
   # List services that you want to enable:

   # Enable the OpenSSH daemon.
@@ -102,7 +102,7 @@ fi
 extraBuildFlags+=(--option "build-users-group" "$buildUsersGroup")

 # Inherit binary caches from the host
 # TODO: will this still work with Nix 1.12 now that it has no perl? Probably not...
 binary_caches="$(@perl@/bin/perl -I @nix@/lib/perl5/site_perl/*/* -e 'use Nix::Config; Nix::Config::readConfig; print $Nix::Config::config{"binary-caches"};')"
 extraBuildFlags+=(--option "binary-caches" "$binary_caches")

@@ -113,8 +113,33 @@ if [[ -z "$closure" ]]; then
 fi
 unset NIXOS_CONFIG

-# TODO: do I need to set NIX_SUBSTITUTERS here or is the --option binary-caches above enough?
+# These get created in nixos-prepare-root as well, but we want to make sure they're here in case we're
+# running with --chroot. TODO: --chroot should just be split into a separate tool.
+mkdir -m 0755 -p "$mountPoint/dev" "$mountPoint/proc" "$mountPoint/sys"
+
+# Set up some bind mounts we'll want regardless of chroot or not
+mount --rbind /dev "$mountPoint/dev"
+mount --rbind /proc "$mountPoint/proc"
+mount --rbind /sys "$mountPoint/sys"
+
+# If we asked for a chroot, that means we're not actually installing anything (yeah I was confused too)
+# and we just want to run a command in the context of a $mountPoint that we're assuming has already been
+# set up by a previous nixos-install invocation. In that case we set up some remaining bind mounts and
+# exec the requested command, skipping the rest of the installation procedure.
+if [ -n "$runChroot" ]; then
+    mount -t tmpfs -o "mode=0755" none $mountPoint/run
+    rm -rf $mountPoint/var/run
+    ln -s /run $mountPoint/var/run
+    for f in /etc/resolv.conf /etc/hosts; do rm -f $mountPoint/$f; [ -f "$f" ] && cp -Lf $f $mountPoint/etc/; done
+    for f in /etc/passwd /etc/group; do touch $mountPoint/$f; [ -f "$f" ] && mount --rbind -o ro $f $mountPoint/$f; done
+
+    if ! [ -L $mountPoint/nix/var/nix/profiles/system ]; then
+        echo "$0: installation not finished; cannot chroot into installation directory"
+        exit 1
+    fi
+    ln -s /nix/var/nix/profiles/system $mountPoint/run/current-system
+    exec chroot $mountPoint "${chrootCommand[@]}"
+fi

 # A place to drop temporary closures
 trap "rm -rf $tmpdir" EXIT
@@ -153,9 +178,7 @@ nix-store --export $channel_root > $channel_closure
 # nixos-prepare-root doesn't currently do anything with file ownership, so we set it up here instead
 chown @root_uid@:@nixbld_gid@ $mountPoint/nix/store

-mount --rbind /dev $mountPoint/dev
-mount --rbind /proc $mountPoint/proc
-mount --rbind /sys $mountPoint/sys

 # Grub needs an mtab.
 ln -sfn /proc/mounts $mountPoint/etc/mtab
@@ -426,7 +426,7 @@
       teamspeak = 124;
       influxdb = 125;
       nsd = 126;
-      #gitolite = 127; # unused
+      gitolite = 127;
       znc = 128;
       polipo = 129;
       mopidy = 130;
@@ -92,6 +92,7 @@
   ./programs/mosh.nix
   ./programs/mtr.nix
   ./programs/nano.nix
+  ./programs/npm.nix
   ./programs/oblogout.nix
   ./programs/qt5ct.nix
   ./programs/screen.nix
@@ -156,7 +157,9 @@
   ./services/backup/tarsnap.nix
   ./services/backup/znapzend.nix
   ./services/cluster/fleet.nix
-  ./services/cluster/kubernetes.nix
+  ./services/cluster/kubernetes/default.nix
+  ./services/cluster/kubernetes/dns.nix
+  ./services/cluster/kubernetes/dashboard.nix
   ./services/cluster/panamax.nix
   ./services/computing/boinc/client.nix
   ./services/computing/torque/server.nix
@@ -352,6 +355,7 @@
   ./services/monitoring/collectd.nix
   ./services/monitoring/das_watchdog.nix
   ./services/monitoring/dd-agent/dd-agent.nix
+  ./services/monitoring/fusion-inventory.nix
   ./services/monitoring/grafana.nix
   ./services/monitoring/graphite.nix
   ./services/monitoring/hdaps.nix
@@ -423,12 +427,12 @@
   ./services/networking/ddclient.nix
   ./services/networking/dhcpcd.nix
   ./services/networking/dhcpd.nix
+  ./services/networking/dnscache.nix
   ./services/networking/dnschain.nix
   ./services/networking/dnscrypt-proxy.nix
   ./services/networking/dnscrypt-wrapper.nix
   ./services/networking/dnsmasq.nix
   ./services/networking/ejabberd.nix
-  ./services/networking/fan.nix
   ./services/networking/fakeroute.nix
   ./services/networking/ferm.nix
   ./services/networking/firefox/sync-server.nix
@@ -524,6 +528,7 @@
   ./services/networking/tcpcrypt.nix
   ./services/networking/teamspeak3.nix
   ./services/networking/tinc.nix
+  ./services/networking/tinydns.nix
   ./services/networking/tftpd.nix
   ./services/networking/tox-bootstrapd.nix
   ./services/networking/toxvpn.nix
@@ -77,7 +77,6 @@ with lib;
   # Show all debug messages from the kernel but don't log refused packets
   # because we have the firewall enabled. This makes installs from the
   # console less cumbersome if the machine has a public IP.
-  boot.consoleLogLevel = mkDefault 7;
   networking.firewall.logRefusedConnections = mkDefault false;

   environment.systemPackages = [ pkgs.vim ];
nixos/modules/programs/npm.nix (new file, 44 lines)
@@ -0,0 +1,44 @@
{ config, lib, ... }:

with lib;

let
  cfg = config.programs.npm;
in

{
  ###### interface

  options = {
    programs.npm = {
      enable = mkEnableOption "<command>npm</command> global config";

      npmrc = lib.mkOption {
        type = lib.types.lines;
        description = ''
          The system-wide npm configuration.
          See <link xlink:href="https://docs.npmjs.com/misc/config"/>.
        '';
        default = ''
          prefix = ''${HOME}/.npm
        '';
        example = ''
          prefix = ''${HOME}/.npm
          https-proxy=proxy.example.com
          init-license=MIT
          init-author-url=http://npmjs.org
          color=true
        '';
      };
    };
  };

  ###### implementation

  config = lib.mkIf cfg.enable {
    environment.etc."npmrc".text = cfg.npmrc;

    environment.variables.NPM_CONFIG_GLOBALCONFIG = "/etc/npmrc";
  };

}
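A sketch of how the new module above might be enabled in a NixOS configuration (the proxy value is illustrative):

```nix
{
  programs.npm.enable = true;
  programs.npm.npmrc = ''
    prefix = ''${HOME}/.npm
    https-proxy=proxy.example.com
  '';
}
```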
@@ -17,19 +17,27 @@ with lib;
   };

   config = mkIf config.security.lockKernelModules {
+    boot.kernelModules = concatMap (x:
+      if x.device != null
+      then
+        if x.fsType == "vfat"
+        then [ "vfat" "nls-cp437" "nls-iso8859-1" ]
+        else [ x.fsType ]
+      else []) config.system.build.fileSystems;
+
     systemd.services.disable-kernel-module-loading = rec {
       description = "Disable kernel module loading";

       wantedBy = [ config.systemd.defaultUnit ];
       after = [ "systemd-udev-settle.service" "firewall.service" "systemd-modules-load.service" ] ++ wantedBy;

-      script = "echo -n 1 > /proc/sys/kernel/modules_disabled";
-
       unitConfig.ConditionPathIsReadWrite = "/proc/sys/kernel";

       serviceConfig = {
         Type = "oneshot";
         RemainAfterExit = true;
+        ExecStart = "/bin/sh -c 'echo -n 1 >/proc/sys/kernel/modules_disabled'";
       };
     };
   };
|
|||||||
RABBITMQ_MNESIA_BASE = "${cfg.dataDir}/mnesia";
|
RABBITMQ_MNESIA_BASE = "${cfg.dataDir}/mnesia";
|
||||||
RABBITMQ_NODE_IP_ADDRESS = cfg.listenAddress;
|
RABBITMQ_NODE_IP_ADDRESS = cfg.listenAddress;
|
||||||
RABBITMQ_NODE_PORT = toString cfg.port;
|
RABBITMQ_NODE_PORT = toString cfg.port;
|
||||||
RABBITMQ_SERVER_START_ARGS = "-rabbit error_logger tty -rabbit sasl_error_logger false";
|
RABBITMQ_LOGS = "-";
|
||||||
|
RABBITMQ_SASL_LOGS = "-";
|
||||||
RABBITMQ_PID_FILE = "${cfg.dataDir}/pid";
|
RABBITMQ_PID_FILE = "${cfg.dataDir}/pid";
|
||||||
SYS_PREFIX = "";
|
SYS_PREFIX = "";
|
||||||
RABBITMQ_ENABLED_PLUGINS_FILE = pkgs.writeText "enabled_plugins" ''
|
RABBITMQ_ENABLED_PLUGINS_FILE = pkgs.writeText "enabled_plugins" ''
|
||||||
@ -128,7 +129,7 @@ in {
|
|||||||
preStart = ''
|
preStart = ''
|
||||||
${optionalString (cfg.cookie != "") ''
|
${optionalString (cfg.cookie != "") ''
|
||||||
echo -n ${cfg.cookie} > ${cfg.dataDir}/.erlang.cookie
|
echo -n ${cfg.cookie} > ${cfg.dataDir}/.erlang.cookie
|
||||||
chmod 400 ${cfg.dataDir}/.erlang.cookie
|
chmod 600 ${cfg.dataDir}/.erlang.cookie
|
||||||
''}
|
''}
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
@@ -6,10 +6,28 @@ let

  inherit (pkgs) mysql gzip;

-  cfg = config.services.mysqlBackup ;
-  location = cfg.location ;
-  mysqlBackupCron = db : ''
-    ${cfg.period} ${cfg.user} ${mysql}/bin/mysqldump ${if cfg.singleTransaction then "--single-transaction" else ""} ${db} | ${gzip}/bin/gzip -c > ${location}/${db}.gz
+  cfg = config.services.mysqlBackup;
+  defaultUser = "mysqlbackup";
+
+  backupScript = ''
+    set -o pipefail
+    failed=""
+    ${concatMapStringsSep "\n" backupDatabaseScript cfg.databases}
+    if [ -n "$failed" ]; then
+      echo "Backup of database(s) failed:$failed"
+      exit 1
+    fi
+  '';
+  backupDatabaseScript = db: ''
+    dest="${cfg.location}/${db}.gz"
+    if ${mysql}/bin/mysqldump ${if cfg.singleTransaction then "--single-transaction" else ""} ${db} | ${gzip}/bin/gzip -c > $dest.tmp; then
+      mv $dest.tmp $dest
+      echo "Backed up to $dest"
+    else
+      echo "Failed to back up to $dest"
+      rm -f $dest.tmp
+      failed="$failed ${db}"
+    fi
   '';

 in
@@ -26,17 +44,16 @@ in
      '';
    };

-    period = mkOption {
-      default = "15 01 * * *";
+    calendar = mkOption {
+      type = types.str;
+      default = "01:15:00";
      description = ''
-        This option defines (in the format used by cron) when the
-        databases should be dumped.
-        The default is to update at 01:15 (at night) every day.
+        Configures when to run the backup service systemd unit (DayOfWeek Year-Month-Day Hour:Minute:Second).
      '';
    };

    user = mkOption {
-      default = "mysql";
+      default = defaultUser;
      description = ''
        User to be used to perform backup.
      '';
@@ -66,16 +83,49 @@ in

  };

-  config = mkIf config.services.mysqlBackup.enable {
+  config = mkIf cfg.enable {
+    users.extraUsers = optionalAttrs (cfg.user == defaultUser) (singleton
+      { name = defaultUser;
+        isSystemUser = true;
+        createHome = false;
+        home = cfg.location;
+        group = "nogroup";
+      });

-    services.cron.systemCronJobs = map mysqlBackupCron config.services.mysqlBackup.databases;
-
-    system.activationScripts.mysqlBackup = stringAfter [ "stdio" "users" ]
-      ''
-        mkdir -m 0700 -p ${config.services.mysqlBackup.location}
-        chown ${config.services.mysqlBackup.user} ${config.services.mysqlBackup.location}
-      '';
+    services.mysql.ensureUsers = [{
+      name = cfg.user;
+      ensurePermissions = with lib;
+        let
+          privs = "SELECT, SHOW VIEW, TRIGGER, LOCK TABLES";
+          grant = db: nameValuePair "${db}.*" privs;
+        in
+          listToAttrs (map grant cfg.databases);
+    }];
+
+    systemd = {
+      timers."mysql-backup" = {
+        description = "Mysql backup timer";
+        wantedBy = [ "timers.target" ];
+        timerConfig = {
+          OnCalendar = cfg.calendar;
+          AccuracySec = "5m";
+          Unit = "mysql-backup.service";
+        };
+      };
+      services."mysql-backup" = {
+        description = "Mysql backup service";
+        enable = true;
+        serviceConfig = {
+          User = cfg.user;
+          PermissionsStartOnly = true;
+        };
+        preStart = ''
+          mkdir -m 0700 -p ${cfg.location}
+          chown -R ${cfg.user} ${cfg.location}
+        '';
+        script = backupScript;
+      };
+    };
  };

}
160
nixos/modules/services/cluster/kubernetes/dashboard.nix
Normal file
160
nixos/modules/services/cluster/kubernetes/dashboard.nix
Normal file
@ -0,0 +1,160 @@
|
|||||||
|
{ config, pkgs, lib, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.kubernetes.addons.dashboard;
|
||||||
|
|
||||||
|
name = "gcr.io/google_containers/kubernetes-dashboard-amd64";
|
||||||
|
version = "v1.6.3";
|
||||||
|
|
||||||
|
image = pkgs.dockerTools.pullImage {
|
||||||
|
imageName = name;
|
||||||
|
imageTag = version;
|
||||||
|
sha256 = "0b5v7xa3s91yi9yfsw2b8wijiprnicbb02f5kqa579h4yndb3gfz";
|
||||||
|
};
|
||||||
|
in {
|
||||||
|
options.services.kubernetes.addons.dashboard = {
|
||||||
|
enable = mkEnableOption "kubernetes dashboard addon";
|
||||||
|
|
||||||
|
enableRBAC = mkOption {
|
||||||
|
description = "Whether to enable role based access control is enabled for kubernetes dashboard";
|
||||||
|
type = types.bool;
|
||||||
|
default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
services.kubernetes.kubelet.seedDockerImages = [image];
|
||||||
|
|
||||||
|
services.kubernetes.addonManager.addons = {
|
||||||
|
kubernetes-dashboard-deployment = {
|
||||||
|
kind = "Deployment";
|
||||||
|
apiVersion = "apps/v1beta1";
|
||||||
|
metadata = {
|
||||||
|
labels = {
|
||||||
|
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
|
||||||
|
k8s-app = "kubernetes-dashboard";
|
||||||
|
version = version;
|
||||||
|
"kubernetes.io/cluster-service" = "true";
|
||||||
|
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||||
|
};
|
||||||
|
name = "kubernetes-dashboard";
|
||||||
|
namespace = "kube-system";
|
||||||
|
};
|
||||||
|
spec = {
|
||||||
|
replicas = 1;
|
||||||
|
revisionHistoryLimit = 10;
|
||||||
|
selector.matchLabels."k8s-app" = "kubernetes-dashboard";
|
||||||
|
template = {
|
||||||
|
metadata = {
|
||||||
|
labels = {
|
||||||
|
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
|
||||||
|
k8s-app = "kubernetes-dashboard";
|
||||||
|
version = version;
|
||||||
|
"kubernetes.io/cluster-service" = "true";
|
||||||
|
};
|
||||||
|
annotations = {
|
||||||
|
"scheduler.alpha.kubernetes.io/critical-pod" = "";
|
||||||
|
#"scheduler.alpha.kubernetes.io/tolerations" = ''[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
spec = {
|
||||||
|
containers = [{
|
||||||
|
name = "kubernetes-dashboard";
|
||||||
|
image = "${name}:${version}";
|
||||||
|
ports = [{
|
||||||
|
containerPort = 9090;
|
||||||
|
protocol = "TCP";
|
||||||
|
}];
|
||||||
|
resources = {
|
||||||
|
limits = {
|
||||||
|
cpu = "100m";
|
||||||
|
memory = "50Mi";
|
||||||
|
};
|
||||||
|
requests = {
|
||||||
|
cpu = "100m";
|
||||||
|
memory = "50Mi";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
livenessProbe = {
|
||||||
|
httpGet = {
|
||||||
|
path = "/";
|
||||||
|
port = 9090;
|
||||||
|
};
|
||||||
|
initialDelaySeconds = 30;
|
||||||
|
timeoutSeconds = 30;
|
||||||
|
};
|
||||||
|
}];
|
||||||
|
serviceAccountName = "kubernetes-dashboard";
|
||||||
|
tolerations = [{
|
||||||
|
key = "node-role.kubernetes.io/master";
|
||||||
|
effect = "NoSchedule";
|
||||||
|
}];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
kubernetes-dashboard-svc = {
|
||||||
|
apiVersion = "v1";
|
||||||
|
kind = "Service";
|
||||||
|
metadata = {
|
||||||
|
labels = {
|
||||||
|
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
|
||||||
|
k8s-app = "kubernetes-dashboard";
|
||||||
|
"kubernetes.io/cluster-service" = "true";
|
||||||
|
"kubernetes.io/name" = "KubeDashboard";
|
||||||
|
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||||
|
};
|
||||||
|
name = "kubernetes-dashboard";
|
||||||
|
namespace = "kube-system";
|
||||||
|
};
|
||||||
|
spec = {
|
||||||
|
ports = [{
|
||||||
|
port = 80;
|
||||||
|
targetPort = 9090;
|
||||||
|
}];
|
||||||
|
selector.k8s-app = "kubernetes-dashboard";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
kubernetes-dashboard-sa = {
|
||||||
|
apiVersion = "v1";
|
||||||
|
kind = "ServiceAccount";
|
||||||
|
metadata = {
|
||||||
|
labels = {
|
||||||
|
k8s-app = "kubernetes-dashboard";
|
||||||
|
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
|
||||||
|
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||||
|
};
|
||||||
|
name = "kubernetes-dashboard";
|
||||||
|
namespace = "kube-system";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
} // (optionalAttrs cfg.enableRBAC {
|
||||||
|
kubernetes-dashboard-crb = {
|
||||||
|
apiVersion = "rbac.authorization.k8s.io/v1beta1";
|
||||||
|
kind = "ClusterRoleBinding";
|
||||||
|
metadata = {
|
||||||
|
name = "kubernetes-dashboard";
|
||||||
|
labels = {
|
||||||
|
k8s-app = "kubernetes-dashboard";
|
||||||
|
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
|
||||||
|
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
roleRef = {
|
||||||
|
apiGroup = "rbac.authorization.k8s.io";
|
||||||
|
kind = "ClusterRole";
|
||||||
|
name = "cluster-admin";
|
||||||
|
};
|
||||||
|
subjects = [{
|
||||||
|
kind = "ServiceAccount";
|
||||||
|
name = "kubernetes-dashboard";
|
||||||
|
namespace = "kube-system";
|
||||||
|
}];
|
||||||
|
};
|
||||||
|
});
|
||||||
|
};
|
||||||
|
}
|
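For orientation, the addon defined above is inert until its `enable` option is set from a cluster host's configuration. A minimal sketch of that (option names taken from the module above; the surrounding kubernetes master/node setup is assumed to exist elsewhere):

```nix
{
  # Enable the dashboard addon defined in dashboard.nix above.
  services.kubernetes.addons.dashboard.enable = true;
  # enableRBAC needs no explicit value: it defaults to true whenever "RBAC"
  # appears in services.kubernetes.apiserver.authorizationMode.
}
```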
File diff suppressed because it is too large
311 nixos/modules/services/cluster/kubernetes/dns.nix Normal file
@@ -0,0 +1,311 @@
{ config, pkgs, lib, ... }:

with lib;

let
  version = "1.14.4";

  k8s-dns-kube-dns = pkgs.dockerTools.pullImage {
    imageName = "gcr.io/google_containers/k8s-dns-kube-dns-amd64";
    imageTag = version;
    sha256 = "0g64jc2076ng28xl4w3w9svf7hc6s9h8rq9mhvvwpfy2p6lgj6gy";
  };

  k8s-dns-dnsmasq-nanny = pkgs.dockerTools.pullImage {
    imageName = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64";
    imageTag = version;
    sha256 = "0sdpsbj1vismihy7ass1cn96nwmav6sf3r5h6i4k2dxha0y0jsh5";
  };

  k8s-dns-sidecar = pkgs.dockerTools.pullImage {
    imageName = "gcr.io/google_containers/k8s-dns-sidecar-amd64";
    imageTag = version;
    sha256 = "01zpi189hpy2z62awl38fap908s8rrhc3v5gb6m90y2pycl4ad6q";
  };

  cfg = config.services.kubernetes.addons.dns;
in {
  options.services.kubernetes.addons.dns = {
    enable = mkEnableOption "kubernetes dns addon";

    clusterIp = mkOption {
      description = "Dns addon clusterIP";

      # this default is also what kubernetes uses
      default = (
        concatStringsSep "." (
          take 3 (splitString "." config.services.kubernetes.apiserver.serviceClusterIpRange
        ))
      ) + ".254";
      type = types.str;
    };

    clusterDomain = mkOption {
      description = "Dns cluster domain";
      default = "cluster.local";
      type = types.str;
    };
  };

  config = mkIf cfg.enable {
    services.kubernetes.kubelet.seedDockerImages = [
      k8s-dns-kube-dns
      k8s-dns-dnsmasq-nanny
      k8s-dns-sidecar
    ];

    services.kubernetes.addonManager.addons = {
      kubedns-deployment = {
        apiVersion = "apps/v1beta1";
        kind = "Deployment";
        metadata = {
          labels = {
            "addonmanager.kubernetes.io/mode" = "Reconcile";
            "k8s-app" = "kube-dns";
            "kubernetes.io/cluster-service" = "true";
          };
          name = "kube-dns";
          namespace = "kube-system";
        };
        spec = {
          selector.matchLabels."k8s-app" = "kube-dns";
          strategy = {
            rollingUpdate = {
              maxSurge = "10%";
              maxUnavailable = 0;
            };
          };
          template = {
            metadata = {
              annotations."scheduler.alpha.kubernetes.io/critical-pod" = "";
              labels.k8s-app = "kube-dns";
            };
            spec = {
              containers = [
                {
                  name = "kubedns";
                  args = [
                    "--domain=${cfg.clusterDomain}"
                    "--dns-port=10053"
                    "--config-dir=/kube-dns-config"
                    "--v=2"
                  ];
                  env = [
                    {
                      name = "PROMETHEUS_PORT";
                      value = "10055";
                    }
                  ];
                  image = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:${version}";
                  livenessProbe = {
                    failureThreshold = 5;
                    httpGet = {
                      path = "/healthcheck/kubedns";
                      port = 10054;
                      scheme = "HTTP";
                    };
                    initialDelaySeconds = 60;
                    successThreshold = 1;
                    timeoutSeconds = 5;
                  };
                  ports = [
                    {
                      containerPort = 10053;
                      name = "dns-local";
                      protocol = "UDP";
                    }
                    {
                      containerPort = 10053;
                      name = "dns-tcp-local";
                      protocol = "TCP";
                    }
                    {
                      containerPort = 10055;
                      name = "metrics";
                      protocol = "TCP";
                    }
                  ];
                  readinessProbe = {
                    httpGet = {
                      path = "/readiness";
                      port = 8081;
                      scheme = "HTTP";
                    };
                    initialDelaySeconds = 3;
                    timeoutSeconds = 5;
                  };
                  resources = {
                    limits.memory = "170Mi";
                    requests = {
                      cpu = "100m";
                      memory = "70Mi";
                    };
                  };
                  volumeMounts = [
                    {
                      mountPath = "/kube-dns-config";
                      name = "kube-dns-config";
                    }
                  ];
                }
                {
                  args = [
                    "-v=2"
                    "-logtostderr"
                    "-configDir=/etc/k8s/dns/dnsmasq-nanny"
                    "-restartDnsmasq=true"
                    "--"
                    "-k"
                    "--cache-size=1000"
                    "--log-facility=-"
                    "--server=/${cfg.clusterDomain}/127.0.0.1#10053"
                    "--server=/in-addr.arpa/127.0.0.1#10053"
                    "--server=/ip6.arpa/127.0.0.1#10053"
                  ];
                  image = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:${version}";
                  livenessProbe = {
                    failureThreshold = 5;
                    httpGet = {
                      path = "/healthcheck/dnsmasq";
                      port = 10054;
                      scheme = "HTTP";
                    };
                    initialDelaySeconds = 60;
                    successThreshold = 1;
                    timeoutSeconds = 5;
                  };
                  name = "dnsmasq";
                  ports = [
                    {
                      containerPort = 53;
                      name = "dns";
                      protocol = "UDP";
                    }
                    {
                      containerPort = 53;
                      name = "dns-tcp";
                      protocol = "TCP";
                    }
                  ];
                  resources = {
                    requests = {
                      cpu = "150m";
                      memory = "20Mi";
                    };
                  };
                  volumeMounts = [
                    {
                      mountPath = "/etc/k8s/dns/dnsmasq-nanny";
                      name = "kube-dns-config";
                    }
                  ];
                }
                {
                  name = "sidecar";
                  image = "gcr.io/google_containers/k8s-dns-sidecar-amd64:${version}";
                  args = [
                    "--v=2"
                    "--logtostderr"
                    "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
                    "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
                  ];
                  livenessProbe = {
                    failureThreshold = 5;
                    httpGet = {
                      path = "/metrics";
                      port = 10054;
                      scheme = "HTTP";
                    };
                    initialDelaySeconds = 60;
                    successThreshold = 1;
                    timeoutSeconds = 5;
                  };
                  ports = [
                    {
                      containerPort = 10054;
                      name = "metrics";
                      protocol = "TCP";
                    }
                  ];
                  resources = {
                    requests = {
                      cpu = "10m";
                      memory = "20Mi";
                    };
                  };
                }
              ];
              dnsPolicy = "Default";
              serviceAccountName = "kube-dns";
              tolerations = [
                {
                  key = "CriticalAddonsOnly";
                  operator = "Exists";
                }
              ];
              volumes = [
                {
                  configMap = {
                    name = "kube-dns";
                    optional = true;
                  };
                  name = "kube-dns-config";
                }
              ];
            };
          };
        };
      };

      kubedns-svc = {
        apiVersion = "v1";
        kind = "Service";
        metadata = {
          labels = {
            "addonmanager.kubernetes.io/mode" = "Reconcile";
            "k8s-app" = "kube-dns";
            "kubernetes.io/cluster-service" = "true";
            "kubernetes.io/name" = "KubeDNS";
          };
          name = "kube-dns";
          namespace = "kube-system";
        };
        spec = {
          clusterIP = cfg.clusterIp;
          ports = [
            {name = "dns"; port = 53; protocol = "UDP";}
            {name = "dns-tcp"; port = 53; protocol = "TCP";}
          ];
          selector.k8s-app = "kube-dns";
        };
      };

      kubedns-sa = {
        apiVersion = "v1";
        kind = "ServiceAccount";
        metadata = {
          name = "kube-dns";
          namespace = "kube-system";
          labels = {
            "kubernetes.io/cluster-service" = "true";
            "addonmanager.kubernetes.io/mode" = "Reconcile";
          };
        };
      };

      kubedns-cm = {
        apiVersion = "v1";
        kind = "ConfigMap";
        metadata = {
          name = "kube-dns";
          namespace = "kube-system";
          labels = {
            "addonmanager.kubernetes.io/mode" = "EnsureExists";
          };
        };
      };
    };

    services.kubernetes.kubelet.clusterDns = mkDefault cfg.clusterIp;
  };
}
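A matching sketch for the DNS addon above (again only options defined in this file; the clusterDomain value shown is just the module default restated, and clusterIp can normally be left to its computed default):

```nix
{
  services.kubernetes.addons.dns = {
    enable = true;
    clusterDomain = "cluster.local";  # module default, shown for clarity
    # clusterIp defaults to the .254 address derived from
    # services.kubernetes.apiserver.serviceClusterIpRange.
  };
}
```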
@@ -170,11 +170,16 @@ in
       mkdir -m 0770 -p ${cfg.dataDir}
       if [ "$(id -u)" = 0 ]; then chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}; fi
     '';
-    postStart = mkBefore ''
-      until ${pkgs.curl.bin}/bin/curl -s -o /dev/null ${if configOptions.http.https-enabled then "-k https" else "http"}://127.0.0.1${toString configOptions.http.bind-address}/ping; do
-        sleep 1;
-      done
-    '';
+    postStart =
+      let
+        scheme = if configOptions.http.https-enabled then "-k https" else "http";
+        bindAddr = (ba: if hasPrefix ":" ba then "127.0.0.1${ba}" else "${ba}")(toString configOptions.http.bind-address);
+      in
+      mkBefore ''
+        until ${pkgs.curl.bin}/bin/curl -s -o /dev/null ${scheme}://${bindAddr}/ping; do
+          sleep 1;
+        done
+      '';
   };

   users.extraUsers = optional (cfg.user == "influxdb") {
@@ -34,6 +34,8 @@ with lib;

   services.dbus.packages = [ pkgs.at_spi2_core ];

+  systemd.packages = [ pkgs.at_spi2_core ];
+
 };

 }
@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -33,11 +30,11 @@ in

   config = mkIf config.services.gnome3.evolution-data-server.enable {

-    environment.systemPackages = [ gnome3.evolution_data_server ];
+    environment.systemPackages = [ pkgs.gnome3.evolution_data_server ];

-    services.dbus.packages = [ gnome3.evolution_data_server ];
+    services.dbus.packages = [ pkgs.gnome3.evolution_data_server ];

-    systemd.packages = [ gnome3.evolution_data_server ];
+    systemd.packages = [ pkgs.gnome3.evolution_data_server ];

   };

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -33,9 +30,9 @@ in

   config = mkIf config.services.gnome3.gnome-disks.enable {

-    environment.systemPackages = [ gnome3.gnome-disk-utility ];
+    environment.systemPackages = [ pkgs.gnome3.gnome-disk-utility ];

-    services.dbus.packages = [ gnome3.gnome-disk-utility ];
+    services.dbus.packages = [ pkgs.gnome3.gnome-disk-utility ];

   };

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -33,9 +30,9 @@ in

   config = mkIf config.services.gnome3.gnome-documents.enable {

-    environment.systemPackages = [ gnome3.gnome-documents ];
+    environment.systemPackages = [ pkgs.gnome3.gnome-documents ];

-    services.dbus.packages = [ gnome3.gnome-documents ];
+    services.dbus.packages = [ pkgs.gnome3.gnome-documents ];

     services.gnome3.gnome-online-accounts.enable = true;

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -34,9 +31,9 @@ in

   config = mkIf config.services.gnome3.gnome-keyring.enable {

-    environment.systemPackages = [ gnome3.gnome_keyring ];
+    environment.systemPackages = [ pkgs.gnome3.gnome_keyring ];

-    services.dbus.packages = [ gnome3.gnome_keyring gnome3.gcr ];
+    services.dbus.packages = [ pkgs.gnome3.gnome_keyring pkgs.gnome3.gcr ];

   };

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -33,9 +30,9 @@ in

   config = mkIf config.services.gnome3.gnome-online-accounts.enable {

-    environment.systemPackages = [ gnome3.gnome_online_accounts ];
+    environment.systemPackages = [ pkgs.gnome3.gnome_online_accounts ];

-    services.dbus.packages = [ gnome3.gnome_online_accounts ];
+    services.dbus.packages = [ pkgs.gnome3.gnome_online_accounts ];

   };

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -33,9 +30,9 @@ in

   config = mkIf config.services.gnome3.gnome-online-miners.enable {

-    environment.systemPackages = [ gnome3.gnome-online-miners ];
+    environment.systemPackages = [ pkgs.gnome3.gnome-online-miners ];

-    services.dbus.packages = [ gnome3.gnome-online-miners ];
+    services.dbus.packages = [ pkgs.gnome3.gnome-online-miners ];

   };

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -33,11 +30,11 @@ in

   config = mkIf config.services.gnome3.gnome-terminal-server.enable {

-    environment.systemPackages = [ gnome3.gnome_terminal ];
+    environment.systemPackages = [ pkgs.gnome3.gnome_terminal ];

-    services.dbus.packages = [ gnome3.gnome_terminal ];
+    services.dbus.packages = [ pkgs.gnome3.gnome_terminal ];

-    systemd.packages = [ gnome3.gnome_terminal ];
+    systemd.packages = [ pkgs.gnome3.gnome_terminal ];

   };

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -33,9 +30,9 @@ in

   config = mkIf config.services.gnome3.gnome-user-share.enable {

-    environment.systemPackages = [ gnome3.gnome-user-share ];
+    environment.systemPackages = [ pkgs.gnome3.gnome-user-share ];

-    services.xserver.displayManager.sessionCommands = with gnome3; ''
+    services.xserver.displayManager.sessionCommands = with pkgs.gnome3; ''
       # Don't let gnome-control-center depend upon gnome-user-share
       export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${gnome-user-share}/share/gsettings-schemas/${gnome-user-share.name}
     '';
@@ -1,11 +1,8 @@
 # GPaste daemon.
-{ config, lib, ... }:
+{ config, lib, pkgs, ... }:

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {
   ###### interface
   options = {
@@ -22,9 +19,9 @@ in

   ###### implementation
   config = mkIf config.services.gnome3.gpaste.enable {
-    environment.systemPackages = [ gnome3.gpaste ];
+    environment.systemPackages = [ pkgs.gnome3.gpaste ];
-    services.dbus.packages = [ gnome3.gpaste ];
+    services.dbus.packages = [ pkgs.gnome3.gpaste ];
-    services.xserver.desktopManager.gnome3.sessionPath = [ gnome3.gpaste ];
+    services.xserver.desktopManager.gnome3.sessionPath = [ pkgs.gnome3.gpaste ];
-    systemd.packages = [ gnome3.gpaste ];
+    systemd.packages = [ pkgs.gnome3.gpaste ];
   };
 }
@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -33,11 +30,11 @@ in

   config = mkIf config.services.gnome3.gvfs.enable {

-    environment.systemPackages = [ gnome3.gvfs ];
+    environment.systemPackages = [ pkgs.gnome3.gvfs ];

-    services.dbus.packages = [ gnome3.gvfs ];
+    services.dbus.packages = [ pkgs.gnome3.gvfs ];

-    systemd.packages = [ gnome3.gvfs ];
+    systemd.packages = [ pkgs.gnome3.gvfs ];

     services.udev.packages = [ pkgs.libmtp.bin ];

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -32,9 +29,9 @@ in

   config = mkIf config.services.gnome3.seahorse.enable {

-    environment.systemPackages = [ gnome3.seahorse ];
+    environment.systemPackages = [ pkgs.gnome3.seahorse ];

-    services.dbus.packages = [ gnome3.seahorse ];
+    services.dbus.packages = [ pkgs.gnome3.seahorse ];

   };

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -32,9 +29,9 @@ in

   config = mkIf config.services.gnome3.sushi.enable {

-    environment.systemPackages = [ gnome3.sushi ];
+    environment.systemPackages = [ pkgs.gnome3.sushi ];

-    services.dbus.packages = [ gnome3.sushi ];
+    services.dbus.packages = [ pkgs.gnome3.sushi ];

   };

@@ -4,9 +4,6 @@

 with lib;

-let
-  gnome3 = config.environment.gnome3.packageSet;
-in
 {

   ###### interface
@@ -33,11 +30,11 @@ in

   config = mkIf config.services.gnome3.tracker.enable {

-    environment.systemPackages = [ gnome3.tracker ];
+    environment.systemPackages = [ pkgs.gnome3.tracker ];

-    services.dbus.packages = [ gnome3.tracker ];
+    services.dbus.packages = [ pkgs.gnome3.tracker ];

-    systemd.packages = [ gnome3.tracker ];
+    systemd.packages = [ pkgs.gnome3.tracker ];

   };

@@ -42,7 +42,7 @@

       Then you can Use this sieve filter:
           require ["fileinto", "reject", "envelope"];

           if header :contains "X-Spam-Flag" "YES" {
             fileinto "spam";
           }
@@ -67,11 +67,11 @@ in
     initPreConf = mkOption {
       type = types.str;
       description = "The SpamAssassin init.pre config.";
       default =
       ''
       #
       # to update this list, run this command in the rules directory:
       # grep 'loadplugin.*Mail::SpamAssassin::Plugin::.*' -o -h * | sort | uniq
       #

       #loadplugin Mail::SpamAssassin::Plugin::AccessDB
@@ -122,7 +122,11 @@
   config = mkIf cfg.enable {

     # Allow users to run 'spamc'.
-    environment.systemPackages = [ pkgs.spamassassin ];
+    environment = {
+      etc = singleton { source = spamdEnv; target = "spamassassin"; };
+      systemPackages = [ pkgs.spamassassin ];
+    };

     users.extraUsers = singleton {
       name = "spamd";
@@ -138,7 +142,7 @@ in

     systemd.services.sa-update = {
       script = ''
         set +e
         ${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/ --siteconfigpath=${spamdEnv}/" spamd

         v=$?
@@ -153,7 +157,7 @@ in
       '';
     };

     systemd.timers.sa-update = {
       description = "sa-update-service";
       partOf = [ "sa-update.service" ];
       wantedBy = [ "timers.target" ];
@@ -177,15 +181,10 @@ in
       # 0 and 1 no error, exitcode > 1 means error:
       # https://spamassassin.apache.org/full/3.1.x/doc/sa-update.html#exit_codes
       preStart = ''
-        # this abstraction requires no centralized config at all
-        if [ -d /etc/spamassassin ]; then
-          echo "This spamassassin does not support global '/etc/spamassassin' folder for configuration as this would be impure. Merge your configs into 'services.spamassassin' and remove the '/etc/spamassassin' folder to make this service work. Also see 'https://github.com/NixOS/nixpkgs/pull/26470'.";
-          exit 1
-        fi
         echo "Recreating '/var/lib/spamasassin' with creating '3.004001' (or similar) and 'sa-update-keys'"
         mkdir -p /var/lib/spamassassin
         chown spamd:spamd /var/lib/spamassassin -R
         set +e
         ${pkgs.su}/bin/su -s "${pkgs.bash}/bin/bash" -c "${pkgs.spamassassin}/bin/sa-update --gpghomedir=/var/lib/spamassassin/sa-update-keys/ --siteconfigpath=${spamdEnv}/" spamd
         v=$?
         set -e
@@ -41,6 +41,15 @@ in
       '';
     };

+    enableGitAnnex = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable git-annex support. Uses the <literal>extraGitoliteRc</literal> option
+        to apply the necessary configuration.
+      '';
+    };
+
     commonHooks = mkOption {
       type = types.listOf types.path;
       default = [];
@@ -49,6 +58,37 @@ in
       '';
     };

+    extraGitoliteRc = mkOption {
+      type = types.lines;
+      default = "";
+      example = literalExample ''
+        $RC{UMASK} = 0027;
+        $RC{SITE_INFO} = 'This is our private repository host';
+        push( @{$RC{ENABLE}}, 'Kindergarten' ); # enable the command/feature
+        @{$RC{ENABLE}} = grep { $_ ne 'desc' } @{$RC{ENABLE}}; # disable the command/feature
+      '';
+      description = ''
+        Extra configuration to append to the default <literal>~/.gitolite.rc</literal>.
+
+        This should be Perl code that modifies the <literal>%RC</literal>
+        configuration variable. The default <literal>~/.gitolite.rc</literal>
+        content is generated by invoking <literal>gitolite print-default-rc</literal>,
+        and extra configuration from this option is appended to it. The result
+        is placed in the Nix store, and the <literal>~/.gitolite.rc</literal> file
+        becomes a symlink to it.
+
+        If you already have a customized (or otherwise changed)
+        <literal>~/.gitolite.rc</literal> file, NixOS will refuse to replace
+        it with a symlink, and the `gitolite-init` initialization service
+        will fail. In this situation, in order to use this option, you
+        will need to take any customizations you may have in
+        <literal>~/.gitolite.rc</literal>, convert them to appropriate Perl
+        statements, add them to this option, and remove the file.
+
+        See also the <literal>enableGitAnnex</literal> option.
+      '';
+    };
+
     user = mkOption {
       type = types.str;
       default = "gitolite";
@@ -56,17 +96,59 @@ in
         Gitolite user account. This is the username of the gitolite endpoint.
       '';
     };
+
+    group = mkOption {
+      type = types.str;
+      default = "gitolite";
+      description = ''
+        Primary group of the Gitolite user account.
+      '';
+    };
     };
   };

-  config = mkIf cfg.enable {
+  config = mkIf cfg.enable (
+  let
+    manageGitoliteRc = cfg.extraGitoliteRc != "";
+    rcDir = pkgs.runCommand "gitolite-rc" { } rcDirScript;
+    rcDirScript =
+      ''
+        mkdir "$out"
+        export HOME=temp-home
+        mkdir -p "$HOME/.gitolite/logs" # gitolite can't run without it
+        '${pkgs.gitolite}'/bin/gitolite print-default-rc >>"$out/gitolite.rc.default"
+        cat <<END >>"$out/gitolite.rc"
+        # This file is managed by NixOS.
+        # Use services.gitolite options to control it.
+
+        END
+        cat "$out/gitolite.rc.default" >>"$out/gitolite.rc"
+      '' +
+      optionalString (cfg.extraGitoliteRc != "") ''
+        echo -n ${escapeShellArg ''
+
+          # Added by NixOS:
+          ${removeSuffix "\n" cfg.extraGitoliteRc}
+
+          # per perl rules, this should be the last line in such a file:
+          1;
+        ''} >>"$out/gitolite.rc"
+      '';
+  in {
+    services.gitolite.extraGitoliteRc = optionalString cfg.enableGitAnnex ''
+      # Enable git-annex support:
+      push( @{$RC{ENABLE}}, 'git-annex-shell ua');
+    '';
+
     users.extraUsers.${cfg.user} = {
       description = "Gitolite user";
       home = cfg.dataDir;
       createHome = true;
       uid = config.ids.uids.gitolite;
+      group = cfg.group;
       useDefaultShell = true;
     };
+    users.extraGroups."${cfg.group}".gid = config.ids.gids.gitolite;

     systemd.services."gitolite-init" = {
       description = "Gitolite initialization";
@@ -77,21 +159,62 @@ in
       serviceConfig.Type = "oneshot";
       serviceConfig.RemainAfterExit = true;

-      path = [ pkgs.gitolite pkgs.git pkgs.perl pkgs.bash config.programs.ssh.package ];
+      path = [ pkgs.gitolite pkgs.git pkgs.perl pkgs.bash pkgs.diffutils config.programs.ssh.package ];
-      script = ''
-        cd ${cfg.dataDir}
-        mkdir -p .gitolite/logs
-        if [ ! -d repositories ]; then
-          gitolite setup -pk ${pubkeyFile}
-        fi
-        if [ -n "${hooks}" ]; then
-          cp ${hooks} .gitolite/hooks/common/
-          chmod +x .gitolite/hooks/common/*
-        fi
-        gitolite setup # Upgrade if needed
-      '';
+      script =
+        let
+          rcSetupScriptIfCustomFile =
+            if manageGitoliteRc then ''
+              cat <<END
+              <3>ERROR: NixOS can't apply declarative configuration
+              <3>to your .gitolite.rc file, because it seems to be
+              <3>already customized manually.
+              <3>See the services.gitolite.extraGitoliteRc option
+              <3>in "man configuration.nix" for more information.
+              END
+              # Not sure if the line below addresses the issue directly or just
+              # adds a delay, but without it our error message often doesn't
+              # show up in `systemctl status gitolite-init`.
+              journalctl --flush
+              exit 1
+            '' else ''
+              :
+            '';
+          rcSetupScriptIfDefaultFileOrStoreSymlink =
+            if manageGitoliteRc then ''
+              ln -sf "${rcDir}/gitolite.rc" "$GITOLITE_RC"
+            '' else ''
+              [[ -L "$GITOLITE_RC" ]] && rm -f "$GITOLITE_RC"
+            '';
+        in
+        ''
+          cd ${cfg.dataDir}
+          mkdir -p .gitolite/logs
+
+          GITOLITE_RC=.gitolite.rc
+          GITOLITE_RC_DEFAULT=${rcDir}/gitolite.rc.default
+          if ( [[ ! -e "$GITOLITE_RC" ]] && [[ ! -L "$GITOLITE_RC" ]] ) ||
+             ( [[ -f "$GITOLITE_RC" ]] && diff -q "$GITOLITE_RC" "$GITOLITE_RC_DEFAULT" >/dev/null ) ||
+             ( [[ -L "$GITOLITE_RC" ]] && [[ "$(readlink "$GITOLITE_RC")" =~ ^/nix/store/ ]] )
+          then
+        '' + rcSetupScriptIfDefaultFileOrStoreSymlink +
+        ''
+          else
+        '' + rcSetupScriptIfCustomFile +
+        ''
+          fi
+
+          if [ ! -d repositories ]; then
+            gitolite setup -pk ${pubkeyFile}
+          fi
+          if [ -n "${hooks}" ]; then
+            cp ${hooks} .gitolite/hooks/common/
+            chmod +x .gitolite/hooks/common/*
+          fi
+          gitolite setup # Upgrade if needed
+        '';
     };

-    environment.systemPackages = [ pkgs.gitolite pkgs.git ];
-  };
+    environment.systemPackages = [ pkgs.gitolite pkgs.git ]
+      ++ optional cfg.enableGitAnnex pkgs.gitAndTools.git-annex;
+  });
 }
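The gitolite options added above compose; a hedged sketch of using them together in a host configuration (the UMASK value is illustrative, and the rest of a real gitolite setup, such as the admin public key, is elided):

```nix
{
  services.gitolite = {
    enable = true;
    # Turns on the git-annex-shell command via extraGitoliteRc, per the module above.
    enableGitAnnex = true;
    # Perl appended to the generated ~/.gitolite.rc:
    extraGitoliteRc = ''
      $RC{UMASK} = 0027;
    '';
  };
}
```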
@@ -428,7 +428,7 @@ in
       fi
     '';

-    nix.nrBuildUsers = mkDefault (lib.max 10 cfg.maxJobs);
+    nix.nrBuildUsers = mkDefault (lib.max 32 cfg.maxJobs);

     users.extraUsers = nixbldUsers;

66 nixos/modules/services/monitoring/fusion-inventory.nix Normal file
@@ -0,0 +1,66 @@
# Fusion Inventory daemon.
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.fusionInventory;

  configFile = pkgs.writeText "fusion_inventory.conf" ''
    server = ${concatStringsSep ", " cfg.servers}

    logger = stderr

    ${cfg.extraConfig}
  '';

in {

  ###### interface

  options = {

    services.fusionInventory = {

      enable = mkEnableOption "Fusion Inventory Agent";

      servers = mkOption {
        type = types.listOf types.str;
        description = ''
          The URLs of the OCS/GLPI servers to connect to.
        '';
      };

      extraConfig = mkOption {
        default = "";
        type = types.lines;
        description = ''
          Configuration that is injected verbatim into the configuration file.
        '';
      };
    };
  };

  ###### implementation

  config = mkIf cfg.enable {

    users.extraUsers = singleton {
      name = "fusion-inventory";
      description = "FusionInventory user";
    };

    systemd.services."fusion-inventory" = {
      description = "Fusion Inventory Agent";
      wantedBy = [ "multi-user.target" ];

      environment = {
        OPTIONS = "--no-category=software";
      };
      serviceConfig = {
        ExecStart = "${pkgs.fusionInventory}/bin/fusioninventory-agent --conf-file=${configFile} --daemon --no-fork";
      };
    };
  };
}
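A short sketch of enabling the new FusionInventory module (the server URL and extra settings are placeholders; only `enable`, `servers`, and `extraConfig` exist as options above):

```nix
{
  services.fusionInventory = {
    enable = true;
    servers = [ "https://glpi.example.org/plugins/fusioninventory/" ];  # placeholder URL
    extraConfig = ''
      tag = nixos-host
    '';
  };
}
```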
@@ -17,20 +17,22 @@ in
     };
     config = mkOption {
       default = "";
-      description = "monit.conf content";
+      description = "monitrc content";
     };
   };
   };

   config = mkIf config.services.monit.enable {

+    environment.systemPackages = [ pkgs.monit ];
+
     environment.etc = [
       {
         source = pkgs.writeTextFile {
-          name = "monit.conf";
+          name = "monitrc";
           text = config.services.monit.config;
         };
-        target = "monit.conf";
+        target = "monitrc";
         mode = "0400";
       }
     ];
@@ -40,9 +42,9 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        ExecStart = "${pkgs.monit}/bin/monit -I -c /etc/monit.conf";
-        ExecStop = "${pkgs.monit}/bin/monit -c /etc/monit.conf quit";
-        ExecReload = "${pkgs.monit}/bin/monit -c /etc/monit.conf reload";
+        ExecStart = "${pkgs.monit}/bin/monit -I -c /etc/monitrc";
+        ExecStop = "${pkgs.monit}/bin/monit -c /etc/monitrc quit";
+        ExecReload = "${pkgs.monit}/bin/monit -c /etc/monitrc reload";
         KillMode = "process";
         Restart = "always";
       };
@@ -27,6 +27,14 @@ in
       '';
     };

+    extraNfsdConfig = mkOption {
+      type = types.str;
+      default = "";
+      description = ''
+        Extra configuration options for the [nfsd] section of /etc/nfs.conf.
+      '';
+    };
+
     exports = mkOption {
       type = types.lines;
       default = "";
@@ -107,6 +115,7 @@ in
         [nfsd]
         threads=${toString cfg.nproc}
         ${optionalString (cfg.hostName != null) "host=${cfg.hostName}"}
+        ${cfg.extraNfsdConfig}

         [mountd]
         ${optionalString (cfg.mountdPort != null) "port=${toString cfg.mountdPort}"}
@@ -151,15 +151,6 @@ in
         ";
       };

-      resolveLocalQueries = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Whether bind should resolve local queries (i.e. add 127.0.0.1 to
-          /etc/resolv.conf, overriding networking.nameserver).
-        '';
-      };
-
     };

   };
@@ -183,6 +183,7 @@ in
         ExecReload = "${cfg.package.bin}/bin/consul reload";
         PermissionsStartOnly = true;
         User = if cfg.dropPrivileges then "consul" else null;
+        Restart = "on-failure";
         TimeoutStartSec = "0";
       } // (optionalAttrs (cfg.leaveOnStop) {
         ExecStop = "${cfg.package.bin}/bin/consul leave";
86 nixos/modules/services/networking/dnscache.nix Normal file
@@ -0,0 +1,86 @@
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.dnscache;

  dnscache-root = pkgs.runCommand "dnscache-root" {} ''
    mkdir -p $out/{servers,ip}

    ${concatMapStrings (ip: ''
      echo > "$out/ip/"${lib.escapeShellArg ip}
    '') cfg.clientIps}

    ${concatStrings (mapAttrsToList (host: ips: ''
      ${concatMapStrings (ip: ''
        echo ${lib.escapeShellArg ip} > "$out/servers/"${lib.escapeShellArg host}
      '') ips}
    '') cfg.domainServers)}

    # djbdns contains an outdated list of root servers;
    # if one was not provided in config, provide a current list
    if [ ! -e servers/@ ]; then
      awk '/^.?.ROOT-SERVERS.NET/ { print $4 }' ${pkgs.dns-root-data}/root.hints > $out/servers/@
    fi
  '';

in {

  ###### interface

  options = {
    services.dnscache = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = "Whether to run the dnscache caching dns server";
      };

      ip = mkOption {
        default = "0.0.0.0";
        type = types.str;
        description = "IP address on which to listen for connections";
      };

      clientIps = mkOption {
        default = [ "127.0.0.1" ];
        type = types.listOf types.str;
        description = "client IP addresses (or prefixes) from which to accept connections";
        example = ["192.168" "172.23.75.82"];
      };

      domainServers = mkOption {
        default = { };
        type = types.attrsOf (types.listOf types.str);
        description = "table of {hostname: server} pairs to use as authoritative servers for hosts (and subhosts)";
        example = {
          "example.com" = ["8.8.8.8" "8.8.4.4"];
        };
      };
    };
  };

  ###### implementation

  config = mkIf config.services.dnscache.enable {
    environment.systemPackages = [ pkgs.djbdns ];
    users.extraUsers.dnscache = {};

    systemd.services.dnscache = {
      description = "djbdns dnscache server";
      wantedBy = [ "multi-user.target" ];
      path = with pkgs; [ bash daemontools djbdns ];
      preStart = ''
        rm -rf /var/lib/dnscache
        dnscache-conf dnscache dnscache /var/lib/dnscache ${config.services.dnscache.ip}
        rm -rf /var/lib/dnscache/root
        ln -sf ${dnscache-root} /var/lib/dnscache/root
      '';
      script = ''
        cd /var/lib/dnscache/
        exec ./run
      '';
    };
  };
}
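A sketch of the dnscache module above in use (the addresses are illustrative; the option names come straight from the file):

```nix
{
  services.dnscache = {
    enable = true;
    clientIps = [ "127.0.0.1" "192.168" ];            # accept the local prefix too
    domainServers."example.com" = [ "203.0.113.53" ]; # delegate one zone to a specific server
  };
}
```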
@@ -42,7 +42,7 @@ in
         default = true;
         description = ''
           Whether dnsmasq should resolve local queries (i.e. add 127.0.0.1 to
-          /etc/resolv.conf overriding networking.nameservers).
+          /etc/resolv.conf).
         '';
       };

@@ -1,60 +0,0 @@
{ config, lib, pkgs, ... }:

with lib;

let

  cfg = config.networking.fan;
  modprobe = "${pkgs.kmod}/bin/modprobe";

in

{

  ###### interface

  options = {

    networking.fan = {

      enable = mkEnableOption "FAN Networking";

    };

  };


  ###### implementation

  config = mkIf cfg.enable {

    environment.systemPackages = [ pkgs.fanctl ];

    systemd.services.fan = {
      description = "FAN Networking";
      wantedBy = [ "multi-user.target" ];
      after = [ "network-online.target" ];
      before = [ "docker.service" ];
      restartIfChanged = false;
      preStart = ''
        if [ ! -f /proc/sys/net/fan/version ]; then
          ${modprobe} ipip
          if [ ! -f /proc/sys/net/fan/version ]; then
            echo "The Fan Networking patches have not been applied to this kernel!" 1>&2
            exit 1
          fi
        fi

        mkdir -p /var/lib/fan-networking
      '';
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        ExecStart = "${pkgs.fanctl}/bin/fanctl up -a";
        ExecStop = "${pkgs.fanctl}/bin/fanctl down -a";
      };
    };

  };

}
@@ -9,7 +9,7 @@ let
   confFile = pkgs.writeText "radicale.conf" cfg.config;

   # This enables us to default to version 2 while still not breaking configurations of people with version 1
-  defaultPackage = if versionAtLeast "17.09" config.system.stateVersion then {
+  defaultPackage = if versionAtLeast config.system.stateVersion "17.09" then {
     pkg = pkgs.radicale2;
     text = "pkgs.radicale2";
   } else {
@@ -141,7 +141,6 @@ in
             ${optionalString (data.ed25519PrivateKeyFile != null) "Ed25519PrivateKeyFile = ${data.ed25519PrivateKeyFile}"}
             ${optionalString (data.listenAddress != null) "ListenAddress = ${data.listenAddress}"}
             ${optionalString (data.bindToAddress != null) "BindToAddress = ${data.bindToAddress}"}
-            Device = /dev/net/tun
             Interface = tinc.${network}
             ${data.extraConfig}
           '';
@@ -164,10 +163,17 @@ in
           wantedBy = [ "multi-user.target" ];
           after = [ "network.target" ];
           path = [ data.package ];
+          restartTriggers =
+            let
+              drvlist = [ config.environment.etc."tinc/${network}/tinc.conf".source ]
+                ++ mapAttrsToList (host: _: config.environment.etc."tinc/${network}/hosts/${host}".source) data.hosts;
+            in # drvlist might be too long to be used directly
+              [ (builtins.hashString "sha256" (concatMapStrings (d: d.outPath) drvlist)) ];
           serviceConfig = {
             Type = "simple";
             Restart = "always";
             RestartSec = "3";
+            ExecStart = "${data.package}/bin/tincd -D -U tinc.${network} -n ${network} ${optionalString (data.chroot) "-R"} --pidfile /run/tinc.${network}.pid -d ${toString data.debugLevel}";
           };
           preStart = ''
             mkdir -p /etc/tinc/${network}/hosts
@@ -187,9 +193,6 @@ in
               [ -f "/etc/tinc/${network}/rsa_key.priv" ] || tincd -n ${network} -K 4096
             fi
           '';
-          script = ''
-            tincd -D -U tinc.${network} -n ${network} ${optionalString (data.chroot) "-R"} --pidfile /run/tinc.${network}.pid -d ${toString data.debugLevel}
-          '';
         })
       );

54 nixos/modules/services/networking/tinydns.nix Normal file
@@ -0,0 +1,54 @@
{ config, lib, pkgs, ... }:

with lib;

{
  ###### interface

  options = {
    services.tinydns = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = "Whether to run the tinydns dns server";
      };

      data = mkOption {
        type = types.lines;
        default = "";
        description = "The DNS data to serve, in the format described by tinydns-data(8)";
      };

      ip = mkOption {
        default = "0.0.0.0";
        type = types.str;
        description = "IP address on which to listen for connections";
      };
    };
  };

  ###### implementation

  config = mkIf config.services.tinydns.enable {
    environment.systemPackages = [ pkgs.djbdns ];

    users.extraUsers.tinydns = {};

    systemd.services.tinydns = {
      description = "djbdns tinydns server";
      wantedBy = [ "multi-user.target" ];
      path = with pkgs; [ daemontools djbdns ];
      preStart = ''
        rm -rf /var/lib/tinydns
        tinydns-conf tinydns tinydns /var/lib/tinydns ${config.services.tinydns.ip}
        cd /var/lib/tinydns/root/
        ln -sf ${pkgs.writeText "tinydns-data" config.services.tinydns.data} data
        tinydns-data
      '';
      script = ''
        cd /var/lib/tinydns
        exec ./run
      '';
    };
  };
}
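And a matching sketch for the authoritative tinydns module above, using only its three options (the record is an illustrative tinydns-data line, not something taken from this diff):

```nix
{
  services.tinydns = {
    enable = true;
    ip = "0.0.0.0";
    # One A record for example.org with TTL 300, in tinydns-data syntax.
    data = ''
      +example.org:203.0.113.10:300
    '';
  };
}
```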
@@ -95,6 +95,14 @@ let
       type = with types; listOf (submodule peerOpts);
     };

+    allowedIPsAsRoutes = mkOption {
+      example = false;
+      default = true;
+      type = types.bool;
+      description = ''
+        Determines whether to add allowed IPs as routes or not.
+      '';
+    };
   };

 };
@@ -217,11 +225,11 @@ let

       "${ipCommand} link set up dev ${name}"

-      (map (peer:
+      (optionals (values.allowedIPsAsRoutes != false) (map (peer:
         (map (allowedIP:
           "${ipCommand} route replace ${allowedIP} dev ${name} table ${values.table}"
         ) peer.allowedIPs)
-      ) values.peers)
+      ) values.peers))

       values.postSetup
     ]);
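For reference, a hedged sketch of switching the new flag off for one interface (the interface name and the rest of its definition are placeholders; only `allowedIPsAsRoutes` comes from the hunk above):

```nix
{
  networking.wireguard.interfaces.wg0 = {
    # ...ips, privateKeyFile and peers as usual for this interface...
    allowedIPsAsRoutes = false;  # manage routes for peers' allowedIPs by hand
  };
}
```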
@@ -148,6 +148,7 @@ in
       wants = [ "network.target" ];
       requires = lib.concatMap deviceUnit ifaces;
       wantedBy = [ "multi-user.target" ];
+      stopIfChanged = false;

       path = [ pkgs.wpa_supplicant ];

@@ -212,6 +212,14 @@ in
       '';
     };

+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to open ports in the firewall for ZNC.
+      '';
+    };
+
     zncConf = mkOption {
       default = "";
       example = "See: http://wiki.znc.in/Configuration";
@@ -276,14 +284,6 @@ in
       '';
     };

-    openFirewall = mkOption {
-      type = types.bool;
-      default = false;
-      description = ''
-        Whether to open ports in the firewall for ZNC.
-      '';
-    };
-
     passBlock = mkOption {
       example = defaultPassBlock;
       type = types.string;
@@ -359,7 +359,7 @@ in
   config = mkIf cfg.enable {

     networking.firewall = mkIf cfg.openFirewall {
-      allowedTCPPorts = [ cfg.port ];
+      allowedTCPPorts = [ cfg.confOptions.port ];
     };

     systemd.services.znc = {
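A brief sketch of the relocated ZNC option in use (the port number is illustrative; `openFirewall` and `confOptions.port` are the names referenced by the hunks above):

```nix
{
  services.znc = {
    enable = true;
    confOptions.port = 5000;  # illustrative listener port
    openFirewall = true;      # opens confOptions.port, per the change above
  };
}
```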
@@ -83,11 +83,11 @@ let

   # Unpack Mediawiki and put the config file in its root directory.
   mediawikiRoot = pkgs.stdenv.mkDerivation rec {
-    name= "mediawiki-1.27.3";
+    name= "mediawiki-1.29.1";

     src = pkgs.fetchurl {
-      url = "http://download.wikimedia.org/mediawiki/1.27/${name}.tar.gz";
-      sha256 = "08x8mvc0y1gwq8rg0zm98wc6hc5j8imb6dcpx6s7392j5dc71m0i";
+      url = "http://download.wikimedia.org/mediawiki/1.29/${name}.tar.gz";
+      sha256 = "03mpazbxvb011s2nmlw5p6dc43yjgl5yrsilmj1imyykm57bwb3m";
     };

     skins = config.skins;
@@ -4,7 +4,6 @@ with lib;

 let
   cfg = config.services.xserver.desktopManager.gnome3;
-  gnome3 = config.environment.gnome3.packageSet;

   # Remove packages of ys from xs, based on their names
   removePackagesByName = xs: ys:
@@ -28,7 +27,7 @@ let
   nixos-gsettings-desktop-schemas = pkgs.runCommand "nixos-gsettings-desktop-schemas" {}
     ''
      mkdir -p $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
-     cp -rf ${gnome3.gsettings_desktop_schemas}/share/gsettings-schemas/gsettings-desktop-schemas*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
+     cp -rf ${pkgs.gnome3.gsettings_desktop_schemas}/share/gsettings-schemas/gsettings-desktop-schemas*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas

      ${concatMapStrings (pkg: "cp -rf ${pkg}/share/gsettings-schemas/*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas\n") cfg.extraGSettingsOverridePackages}

@@ -61,7 +60,7 @@ in {
       example = literalExample "[ pkgs.gnome3.gpaste ]";
       description = "Additional list of packages to be added to the session search path.
                      Useful for gnome shell extensions or gsettings-conditionated autostart.";
-      apply = list: list ++ [ gnome3.gnome_shell gnome3.gnome-shell-extensions ];
+      apply = list: list ++ [ pkgs.gnome3.gnome_shell pkgs.gnome3.gnome-shell-extensions ];
     };

     extraGSettingsOverrides = mkOption {
@@ -79,13 +78,6 @@ in {
       debug = mkEnableOption "gnome-session debug messages";
     };

-    environment.gnome3.packageSet = mkOption {
-      default = null;
-      example = literalExample "pkgs.gnome3_22";
-      description = "Which GNOME 3 package set to use.";
-      apply = p: if p == null then pkgs.gnome3 else p;
-    };
-
     environment.gnome3.excludePackages = mkOption {
       default = [];
       example = literalExample "[ pkgs.gnome3.totem ]";
@@ -169,26 +161,26 @@ in {
       # Update user dirs as described in http://freedesktop.org/wiki/Software/xdg-user-dirs/
       ${pkgs.xdg-user-dirs}/bin/xdg-user-dirs-update

-      ${gnome3.gnome_session}/bin/gnome-session ${optionalString cfg.debug "--debug"} &
+      ${pkgs.gnome3.gnome_session}/bin/gnome-session ${optionalString cfg.debug "--debug"} &
       waitPID=$!
     '';
   };

   services.xserver.updateDbusEnvironment = true;

-  environment.variables.GIO_EXTRA_MODULES = [ "${lib.getLib gnome3.dconf}/lib/gio/modules"
-                                               "${gnome3.glib_networking.out}/lib/gio/modules"
-                                               "${gnome3.gvfs}/lib/gio/modules" ];
-  environment.systemPackages = gnome3.corePackages ++ cfg.sessionPath
-    ++ (removePackagesByName gnome3.optionalPackages config.environment.gnome3.excludePackages);
+  environment.variables.GIO_EXTRA_MODULES = [ "${lib.getLib pkgs.gnome3.dconf}/lib/gio/modules"
+                                               "${pkgs.gnome3.glib_networking.out}/lib/gio/modules"
+                                               "${pkgs.gnome3.gvfs}/lib/gio/modules" ];
+  environment.systemPackages = pkgs.gnome3.corePackages ++ cfg.sessionPath
+    ++ (removePackagesByName pkgs.gnome3.optionalPackages config.environment.gnome3.excludePackages);

   # Use the correct gnome3 packageSet
   networking.networkmanager.basePackages =
     { inherit (pkgs) networkmanager modemmanager wpa_supplicant;
-      inherit (gnome3) networkmanager_openvpn networkmanager_vpnc
+      inherit (pkgs.gnome3) networkmanager_openvpn networkmanager_vpnc
         networkmanager_openconnect networkmanager_fortisslvpn
         networkmanager_pptp networkmanager_iodine
|
networkmanager_pptp networkmanager_iodine
|
||||||
networkmanager_l2tp; };
|
networkmanager_l2tp; };
|
||||||
|
|
||||||
# Needed for themes and backgrounds
|
# Needed for themes and backgrounds
|
||||||
environment.pathsToLink = [ "/share" ];
|
environment.pathsToLink = [ "/share" ];
|
||||||
|
@ -5,8 +5,7 @@ with lib;
|
|||||||
let
|
let
|
||||||
|
|
||||||
cfg = config.services.xserver.displayManager;
|
cfg = config.services.xserver.displayManager;
|
||||||
gnome3 = config.environment.gnome3.packageSet;
|
gdm = pkgs.gnome3.gdm;
|
||||||
gdm = gnome3.gdm;
|
|
||||||
|
|
||||||
in
|
in
|
||||||
|
|
||||||
@ -65,6 +64,14 @@ in
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
wayland = mkOption {
|
||||||
|
default = true;
|
||||||
|
description = ''
|
||||||
|
Allow GDM run on Wayland instead of Xserver
|
||||||
|
'';
|
||||||
|
type = types.bool;
|
||||||
|
};
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
};
|
};
|
||||||
@ -95,6 +102,7 @@ in
|
|||||||
# GDM needs different xserverArgs, presumable because using wayland by default.
|
# GDM needs different xserverArgs, presumable because using wayland by default.
|
||||||
services.xserver.tty = null;
|
services.xserver.tty = null;
|
||||||
services.xserver.display = null;
|
services.xserver.display = null;
|
||||||
|
services.xserver.verbose = null;
|
||||||
|
|
||||||
services.xserver.displayManager.job =
|
services.xserver.displayManager.job =
|
||||||
{
|
{
|
||||||
@ -103,7 +111,7 @@ in
|
|||||||
(filter (arg: arg != "-terminate") cfg.xserverArgs);
|
(filter (arg: arg != "-terminate") cfg.xserverArgs);
|
||||||
GDM_SESSIONS_DIR = "${cfg.session.desktops}";
|
GDM_SESSIONS_DIR = "${cfg.session.desktops}";
|
||||||
# Find the mouse
|
# Find the mouse
|
||||||
XCURSOR_PATH = "~/.icons:${gnome3.adwaita-icon-theme}/share/icons";
|
XCURSOR_PATH = "~/.icons:${pkgs.gnome3.adwaita-icon-theme}/share/icons";
|
||||||
};
|
};
|
||||||
execCmd = "exec ${gdm}/bin/gdm";
|
execCmd = "exec ${gdm}/bin/gdm";
|
||||||
};
|
};
|
||||||
@ -127,7 +135,7 @@ in
|
|||||||
StandardError = "inherit";
|
StandardError = "inherit";
|
||||||
};
|
};
|
||||||
|
|
||||||
systemd.services.display-manager.path = [ gnome3.gnome_session ];
|
systemd.services.display-manager.path = [ pkgs.gnome3.gnome_session ];
|
||||||
|
|
||||||
services.dbus.packages = [ gdm ];
|
services.dbus.packages = [ gdm ];
|
||||||
|
|
||||||
@ -140,6 +148,7 @@ in
|
|||||||
# presented and there's a little delay.
|
# presented and there's a little delay.
|
||||||
environment.etc."gdm/custom.conf".text = ''
|
environment.etc."gdm/custom.conf".text = ''
|
||||||
[daemon]
|
[daemon]
|
||||||
|
WaylandEnable=${if cfg.gdm.wayland then "true" else "false"}
|
||||||
${optionalString cfg.gdm.autoLogin.enable (
|
${optionalString cfg.gdm.autoLogin.enable (
|
||||||
if cfg.gdm.autoLogin.delay > 0 then ''
|
if cfg.gdm.autoLogin.delay > 0 then ''
|
||||||
TimedLoginEnable=true
|
TimedLoginEnable=true
|
||||||
@ -186,7 +195,7 @@ in
|
|||||||
auth required pam_env.so envfile=${config.system.build.pamEnvironment}
|
auth required pam_env.so envfile=${config.system.build.pamEnvironment}
|
||||||
|
|
||||||
auth required pam_succeed_if.so uid >= 1000 quiet
|
auth required pam_succeed_if.so uid >= 1000 quiet
|
||||||
auth optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
|
auth optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
|
||||||
auth ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
|
auth ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
|
||||||
${optionalString config.security.pam.enableEcryptfs
|
${optionalString config.security.pam.enableEcryptfs
|
||||||
"auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
|
"auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
|
||||||
@ -206,7 +215,7 @@ in
|
|||||||
"session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
|
"session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
|
||||||
session required pam_loginuid.so
|
session required pam_loginuid.so
|
||||||
session optional ${pkgs.systemd}/lib/security/pam_systemd.so
|
session optional ${pkgs.systemd}/lib/security/pam_systemd.so
|
||||||
session optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
|
session optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
|
||||||
'';
|
'';
|
||||||
|
|
||||||
gdm-password.text = ''
|
gdm-password.text = ''
|
||||||
@ -214,7 +223,7 @@ in
|
|||||||
auth required pam_env.so envfile=${config.system.build.pamEnvironment}
|
auth required pam_env.so envfile=${config.system.build.pamEnvironment}
|
||||||
|
|
||||||
auth required pam_succeed_if.so uid >= 1000 quiet
|
auth required pam_succeed_if.so uid >= 1000 quiet
|
||||||
auth optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
|
auth optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
|
||||||
auth ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
|
auth ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
|
||||||
${optionalString config.security.pam.enableEcryptfs
|
${optionalString config.security.pam.enableEcryptfs
|
||||||
"auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
|
"auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
|
||||||
@ -233,7 +242,7 @@ in
|
|||||||
"session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
|
"session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
|
||||||
session required pam_loginuid.so
|
session required pam_loginuid.so
|
||||||
session optional ${pkgs.systemd}/lib/security/pam_systemd.so
|
session optional ${pkgs.systemd}/lib/security/pam_systemd.so
|
||||||
session optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
|
session optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
|
||||||
'';
|
'';
|
||||||
|
|
||||||
gdm-autologin.text = ''
|
gdm-autologin.text = ''
|
||||||
|
@ -480,6 +480,15 @@ in
|
|||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
|
verbose = mkOption {
|
||||||
|
type = types.nullOr types.int;
|
||||||
|
default = 3;
|
||||||
|
example = 7;
|
||||||
|
description = ''
|
||||||
|
Controls verbosity of X logging.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
useGlamor = mkOption {
|
useGlamor = mkOption {
|
||||||
type = types.bool;
|
type = types.bool;
|
||||||
default = false;
|
default = false;
|
||||||
@ -631,10 +640,11 @@ in
|
|||||||
[ "-config ${configFile}"
|
[ "-config ${configFile}"
|
||||||
"-xkbdir" "${cfg.xkbDir}"
|
"-xkbdir" "${cfg.xkbDir}"
|
||||||
# Log at the default verbosity level to stderr rather than /var/log/X.*.log.
|
# Log at the default verbosity level to stderr rather than /var/log/X.*.log.
|
||||||
"-verbose" "3" "-logfile" "/dev/null"
|
"-logfile" "/dev/null"
|
||||||
] ++ optional (cfg.display != null) ":${toString cfg.display}"
|
] ++ optional (cfg.display != null) ":${toString cfg.display}"
|
||||||
++ optional (cfg.tty != null) "vt${toString cfg.tty}"
|
++ optional (cfg.tty != null) "vt${toString cfg.tty}"
|
||||||
++ optional (cfg.dpi != null) "-dpi ${toString cfg.dpi}"
|
++ optional (cfg.dpi != null) "-dpi ${toString cfg.dpi}"
|
||||||
|
++ optional (cfg.verbose != null) "-verbose ${toString cfg.verbose}"
|
||||||
++ optional (!cfg.enableTCP) "-nolisten tcp"
|
++ optional (!cfg.enableTCP) "-nolisten tcp"
|
||||||
++ optional (cfg.autoRepeatDelay != null) "-ardelay ${toString cfg.autoRepeatDelay}"
|
++ optional (cfg.autoRepeatDelay != null) "-ardelay ${toString cfg.autoRepeatDelay}"
|
||||||
++ optional (cfg.autoRepeatInterval != null) "-arinterval ${toString cfg.autoRepeatInterval}"
|
++ optional (cfg.autoRepeatInterval != null) "-arinterval ${toString cfg.autoRepeatInterval}"
|
||||||
|
@ -235,6 +235,16 @@ in
|
|||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
|
boot.initrd.luks.forceLuksSupportInInitrd = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
internal = true;
|
||||||
|
description = ''
|
||||||
|
Whether to configure luks support in the initrd, when no luks
|
||||||
|
devices are configured.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
boot.initrd.luks.devices = mkOption {
|
boot.initrd.luks.devices = mkOption {
|
||||||
default = { };
|
default = { };
|
||||||
example = { "luksroot".device = "/dev/disk/by-uuid/430e9eff-d852-4f68-aa3b-2fa3599ebe08"; };
|
example = { "luksroot".device = "/dev/disk/by-uuid/430e9eff-d852-4f68-aa3b-2fa3599ebe08"; };
|
||||||
@ -417,7 +427,7 @@ in
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
config = mkIf (luks.devices != {}) {
|
config = mkIf (luks.devices != {} || luks.forceLuksSupportInInitrd) {
|
||||||
|
|
||||||
# actually, sbp2 driver is the one enabling the DMA attack, but this needs to be tested
|
# actually, sbp2 driver is the one enabling the DMA attack, but this needs to be tested
|
||||||
boot.blacklistedKernelModules = optionals luks.mitigateDMAAttacks
|
boot.blacklistedKernelModules = optionals luks.mitigateDMAAttacks
|
||||||
|
@ -639,11 +639,7 @@ in
|
|||||||
Rules for creating and cleaning up temporary files
|
Rules for creating and cleaning up temporary files
|
||||||
automatically. See
|
automatically. See
|
||||||
<citerefentry><refentrytitle>tmpfiles.d</refentrytitle><manvolnum>5</manvolnum></citerefentry>
|
<citerefentry><refentrytitle>tmpfiles.d</refentrytitle><manvolnum>5</manvolnum></citerefentry>
|
||||||
for the exact format. You should not use this option to create
|
for the exact format.
|
||||||
files required by systemd services, since there is no
|
|
||||||
guarantee that <command>systemd-tmpfiles</command> runs when
|
|
||||||
the system is reconfigured using
|
|
||||||
<command>nixos-rebuild</command>.
|
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -879,7 +875,12 @@ in
|
|||||||
systemd.services.systemd-remount-fs.restartIfChanged = false;
|
systemd.services.systemd-remount-fs.restartIfChanged = false;
|
||||||
systemd.services.systemd-update-utmp.restartIfChanged = false;
|
systemd.services.systemd-update-utmp.restartIfChanged = false;
|
||||||
systemd.services.systemd-user-sessions.restartIfChanged = false; # Restart kills all active sessions.
|
systemd.services.systemd-user-sessions.restartIfChanged = false; # Restart kills all active sessions.
|
||||||
systemd.services.systemd-logind.restartTriggers = [ config.environment.etc."systemd/logind.conf".source ];
|
# Restarting systemd-logind breaks X11
|
||||||
|
# - upstream commit: https://cgit.freedesktop.org/xorg/xserver/commit/?id=dc48bd653c7e101
|
||||||
|
# - systemd announcement: https://github.com/systemd/systemd/blob/22043e4317ecd2bc7834b48a6d364de76bb26d91/NEWS#L103-L112
|
||||||
|
# - this might be addressed in the future by xorg
|
||||||
|
#systemd.services.systemd-logind.restartTriggers = [ config.environment.etc."systemd/logind.conf".source ];
|
||||||
|
systemd.services.systemd-logind.restartIfChanged = false;
|
||||||
systemd.services.systemd-logind.stopIfChanged = false;
|
systemd.services.systemd-logind.stopIfChanged = false;
|
||||||
systemd.services.systemd-journald.restartTriggers = [ config.environment.etc."systemd/journald.conf".source ];
|
systemd.services.systemd-journald.restartTriggers = [ config.environment.etc."systemd/journald.conf".source ];
|
||||||
systemd.services.systemd-journald.stopIfChanged = false;
|
systemd.services.systemd-journald.stopIfChanged = false;
|
||||||
|
@ -56,11 +56,19 @@ in
|
|||||||
};
|
};
|
||||||
|
|
||||||
config = mkIf anyEncrypted {
|
config = mkIf anyEncrypted {
|
||||||
|
assertions = map (dev: {
|
||||||
|
assertion = dev.label != null;
|
||||||
|
message = ''
|
||||||
|
The filesystem for ${dev.mountPoint} has encrypted.enable set to true, but no encrypted.label set
|
||||||
|
'';
|
||||||
|
}) encDevs;
|
||||||
|
|
||||||
boot.initrd = {
|
boot.initrd = {
|
||||||
luks = {
|
luks = {
|
||||||
devices =
|
devices =
|
||||||
map (dev: { name = dev.encrypted.label; device = dev.encrypted.blkDev; } ) keylessEncDevs;
|
map (dev: { name = dev.encrypted.label; device = dev.encrypted.blkDev; } ) keylessEncDevs;
|
||||||
cryptoModules = [ "aes" "sha256" "sha1" "xts" ];
|
cryptoModules = [ "aes" "sha256" "sha1" "xts" ];
|
||||||
|
forceLuksSupportInInitrd = true;
|
||||||
};
|
};
|
||||||
postMountCommands =
|
postMountCommands =
|
||||||
concatMapStrings (dev: "cryptsetup luksOpen --key-file ${dev.encrypted.keyFile} ${dev.encrypted.blkDev} ${dev.encrypted.label};\n") keyedEncDevs;
|
concatMapStrings (dev: "cryptsetup luksOpen --key-file ${dev.encrypted.keyFile} ${dev.encrypted.blkDev} ${dev.encrypted.label};\n") keyedEncDevs;
|
||||||
|
@ -140,6 +140,17 @@ in
|
|||||||
this once.
|
this once.
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
|
requestEncryptionCredentials = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = config.boot.zfs.enableUnstable;
|
||||||
|
description = ''
|
||||||
|
Request encryption keys or passwords for all encrypted datasets on import.
|
||||||
|
|
||||||
|
Dataset encryption is only supported in zfsUnstable at the moment.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
services.zfs.autoSnapshot = {
|
services.zfs.autoSnapshot = {
|
||||||
@ -263,6 +274,10 @@ in
|
|||||||
assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
|
assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
|
||||||
message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
|
message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
|
||||||
}
|
}
|
||||||
|
{
|
||||||
|
assertion = cfgZfs.requestEncryptionCredentials -> cfgZfs.enableUnstable;
|
||||||
|
message = "This feature is only available for zfs unstable. Set the NixOS option boot.zfs.enableUnstable.";
|
||||||
|
}
|
||||||
];
|
];
|
||||||
|
|
||||||
boot = {
|
boot = {
|
||||||
@ -306,6 +321,9 @@ in
|
|||||||
done
|
done
|
||||||
echo
|
echo
|
||||||
if [[ -n "$msg" ]]; then echo "$msg"; fi
|
if [[ -n "$msg" ]]; then echo "$msg"; fi
|
||||||
|
${lib.optionalString cfgZfs.requestEncryptionCredentials ''
|
||||||
|
zfs load-key -a
|
||||||
|
''}
|
||||||
'') rootPools));
|
'') rootPools));
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -98,22 +98,10 @@ in
|
|||||||
'') config.i18n.consoleColors}
|
'') config.i18n.consoleColors}
|
||||||
'';
|
'';
|
||||||
|
|
||||||
/* XXX: systemd-vconsole-setup needs a "main" terminal. By default
|
|
||||||
* /dev/tty0 is used which wouldn't work when the service is restarted
|
|
||||||
* from X11. We set this to /dev/tty1; not ideal because it may also be
|
|
||||||
* owned by X11 or something else.
|
|
||||||
*
|
|
||||||
* See #22470.
|
|
||||||
*/
|
|
||||||
systemd.services."systemd-vconsole-setup" =
|
systemd.services."systemd-vconsole-setup" =
|
||||||
{ wantedBy = [ "sysinit.target" ];
|
{ before = [ "display-manager.service" ];
|
||||||
before = [ "display-manager.service" ];
|
|
||||||
after = [ "systemd-udev-settle.service" ];
|
after = [ "systemd-udev-settle.service" ];
|
||||||
restartTriggers = [ vconsoleConf kbdEnv ];
|
restartTriggers = [ vconsoleConf kbdEnv ];
|
||||||
serviceConfig.ExecStart = [
|
|
||||||
""
|
|
||||||
"${pkgs.systemd}/lib/systemd/systemd-vconsole-setup /dev/tty1"
|
|
||||||
];
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -9,6 +9,12 @@ let
|
|||||||
interfaces = attrValues cfg.interfaces;
|
interfaces = attrValues cfg.interfaces;
|
||||||
hasVirtuals = any (i: i.virtual) interfaces;
|
hasVirtuals = any (i: i.virtual) interfaces;
|
||||||
|
|
||||||
|
slaves = concatMap (i: i.interfaces) (attrValues cfg.bonds)
|
||||||
|
++ concatMap (i: i.interfaces) (attrValues cfg.bridges)
|
||||||
|
++ concatMap (i: i.interfaces) (attrValues cfg.vswitches)
|
||||||
|
++ concatMap (i: [i.interface]) (attrValues cfg.macvlans)
|
||||||
|
++ concatMap (i: [i.interface]) (attrValues cfg.vlans);
|
||||||
|
|
||||||
# We must escape interfaces due to the systemd interpretation
|
# We must escape interfaces due to the systemd interpretation
|
||||||
subsystemDevice = interface:
|
subsystemDevice = interface:
|
||||||
"sys-subsystem-net-devices-${escapeSystemdPath interface}.device";
|
"sys-subsystem-net-devices-${escapeSystemdPath interface}.device";
|
||||||
@ -105,7 +111,7 @@ let
|
|||||||
''
|
''
|
||||||
# Set the static DNS configuration, if given.
|
# Set the static DNS configuration, if given.
|
||||||
${pkgs.openresolv}/sbin/resolvconf -m 1 -a static <<EOF
|
${pkgs.openresolv}/sbin/resolvconf -m 1 -a static <<EOF
|
||||||
${optionalString (cfg.domain != null) ''
|
${optionalString (cfg.nameservers != [] && cfg.domain != null) ''
|
||||||
domain ${cfg.domain}
|
domain ${cfg.domain}
|
||||||
''}
|
''}
|
||||||
${optionalString (cfg.search != []) ("search " + concatStringsSep " " cfg.search)}
|
${optionalString (cfg.search != []) ("search " + concatStringsSep " " cfg.search)}
|
||||||
@ -116,24 +122,32 @@ let
|
|||||||
|
|
||||||
# Set the default gateway.
|
# Set the default gateway.
|
||||||
${optionalString (cfg.defaultGateway != null && cfg.defaultGateway.address != "") ''
|
${optionalString (cfg.defaultGateway != null && cfg.defaultGateway.address != "") ''
|
||||||
# FIXME: get rid of "|| true" (necessary to make it idempotent).
|
${optionalString (cfg.defaultGateway.interface != null) ''
|
||||||
ip route add default ${optionalString (cfg.defaultGateway.metric != null)
|
ip route replace ${cfg.defaultGateway.address} dev ${cfg.defaultGateway.interface} ${optionalString (cfg.defaultGateway.metric != null)
|
||||||
|
"metric ${toString cfg.defaultGateway.metric}"
|
||||||
|
} proto static
|
||||||
|
''}
|
||||||
|
ip route replace default ${optionalString (cfg.defaultGateway.metric != null)
|
||||||
"metric ${toString cfg.defaultGateway.metric}"
|
"metric ${toString cfg.defaultGateway.metric}"
|
||||||
} via "${cfg.defaultGateway.address}" ${
|
} via "${cfg.defaultGateway.address}" ${
|
||||||
optionalString (cfg.defaultGatewayWindowSize != null)
|
optionalString (cfg.defaultGatewayWindowSize != null)
|
||||||
"window ${toString cfg.defaultGatewayWindowSize}"} ${
|
"window ${toString cfg.defaultGatewayWindowSize}"} ${
|
||||||
optionalString (cfg.defaultGateway.interface != null)
|
optionalString (cfg.defaultGateway.interface != null)
|
||||||
"dev ${cfg.defaultGateway.interface}"} proto static || true
|
"dev ${cfg.defaultGateway.interface}"} proto static
|
||||||
''}
|
''}
|
||||||
${optionalString (cfg.defaultGateway6 != null && cfg.defaultGateway6.address != "") ''
|
${optionalString (cfg.defaultGateway6 != null && cfg.defaultGateway6.address != "") ''
|
||||||
# FIXME: get rid of "|| true" (necessary to make it idempotent).
|
${optionalString (cfg.defaultGateway6.interface != null) ''
|
||||||
ip -6 route add ::/0 ${optionalString (cfg.defaultGateway6.metric != null)
|
ip -6 route replace ${cfg.defaultGateway6.address} dev ${cfg.defaultGateway6.interface} ${optionalString (cfg.defaultGateway6.metric != null)
|
||||||
|
"metric ${toString cfg.defaultGateway6.metric}"
|
||||||
|
} proto static
|
||||||
|
''}
|
||||||
|
ip -6 route replace default ${optionalString (cfg.defaultGateway6.metric != null)
|
||||||
"metric ${toString cfg.defaultGateway6.metric}"
|
"metric ${toString cfg.defaultGateway6.metric}"
|
||||||
} via "${cfg.defaultGateway6.address}" ${
|
} via "${cfg.defaultGateway6.address}" ${
|
||||||
optionalString (cfg.defaultGatewayWindowSize != null)
|
optionalString (cfg.defaultGatewayWindowSize != null)
|
||||||
"window ${toString cfg.defaultGatewayWindowSize}"} ${
|
"window ${toString cfg.defaultGatewayWindowSize}"} ${
|
||||||
optionalString (cfg.defaultGateway6.interface != null)
|
optionalString (cfg.defaultGateway6.interface != null)
|
||||||
"dev ${cfg.defaultGateway6.interface}"} proto static || true
|
"dev ${cfg.defaultGateway6.interface}"} proto static
|
||||||
''}
|
''}
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
@ -152,7 +166,11 @@ let
|
|||||||
in
|
in
|
||||||
nameValuePair "network-addresses-${i.name}"
|
nameValuePair "network-addresses-${i.name}"
|
||||||
{ description = "Address configuration of ${i.name}";
|
{ description = "Address configuration of ${i.name}";
|
||||||
wantedBy = [ "network-setup.service" ];
|
wantedBy = [
|
||||||
|
"network-setup.service"
|
||||||
|
"network-link-${i.name}.service"
|
||||||
|
"network.target"
|
||||||
|
];
|
||||||
# propagate stop and reload from network-setup
|
# propagate stop and reload from network-setup
|
||||||
partOf = [ "network-setup.service" ];
|
partOf = [ "network-setup.service" ];
|
||||||
# order before network-setup because the routes that are configured
|
# order before network-setup because the routes that are configured
|
||||||
@ -206,7 +224,7 @@ let
|
|||||||
after = [ "dev-net-tun.device" "network-pre.target" ];
|
after = [ "dev-net-tun.device" "network-pre.target" ];
|
||||||
wantedBy = [ "network-setup.service" (subsystemDevice i.name) ];
|
wantedBy = [ "network-setup.service" (subsystemDevice i.name) ];
|
||||||
partOf = [ "network-setup.service" ];
|
partOf = [ "network-setup.service" ];
|
||||||
before = [ "network-setup.service" (subsystemDevice i.name) ];
|
before = [ "network-setup.service" ];
|
||||||
path = [ pkgs.iproute ];
|
path = [ pkgs.iproute ];
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
Type = "oneshot";
|
Type = "oneshot";
|
||||||
@ -232,7 +250,7 @@ let
|
|||||||
partOf = [ "network-setup.service" ] ++ optional v.rstp "mstpd.service";
|
partOf = [ "network-setup.service" ] ++ optional v.rstp "mstpd.service";
|
||||||
after = [ "network-pre.target" ] ++ deps ++ optional v.rstp "mstpd.service"
|
after = [ "network-pre.target" ] ++ deps ++ optional v.rstp "mstpd.service"
|
||||||
++ concatMap (i: [ "network-addresses-${i}.service" "network-link-${i}.service" ]) v.interfaces;
|
++ concatMap (i: [ "network-addresses-${i}.service" "network-link-${i}.service" ]) v.interfaces;
|
||||||
before = [ "network-setup.service" (subsystemDevice n) ];
|
before = [ "network-setup.service" ];
|
||||||
serviceConfig.Type = "oneshot";
|
serviceConfig.Type = "oneshot";
|
||||||
serviceConfig.RemainAfterExit = true;
|
serviceConfig.RemainAfterExit = true;
|
||||||
path = [ pkgs.iproute ];
|
path = [ pkgs.iproute ];
|
||||||
@ -331,7 +349,7 @@ let
|
|||||||
partOf = [ "network-setup.service" ];
|
partOf = [ "network-setup.service" ];
|
||||||
after = [ "network-pre.target" ] ++ deps
|
after = [ "network-pre.target" ] ++ deps
|
||||||
++ concatMap (i: [ "network-addresses-${i}.service" "network-link-${i}.service" ]) v.interfaces;
|
++ concatMap (i: [ "network-addresses-${i}.service" "network-link-${i}.service" ]) v.interfaces;
|
||||||
before = [ "network-setup.service" (subsystemDevice n) ];
|
before = [ "network-setup.service" ];
|
||||||
serviceConfig.Type = "oneshot";
|
serviceConfig.Type = "oneshot";
|
||||||
serviceConfig.RemainAfterExit = true;
|
serviceConfig.RemainAfterExit = true;
|
||||||
path = [ pkgs.iproute pkgs.gawk ];
|
path = [ pkgs.iproute pkgs.gawk ];
|
||||||
@ -369,7 +387,7 @@ let
|
|||||||
bindsTo = deps;
|
bindsTo = deps;
|
||||||
partOf = [ "network-setup.service" ];
|
partOf = [ "network-setup.service" ];
|
||||||
after = [ "network-pre.target" ] ++ deps;
|
after = [ "network-pre.target" ] ++ deps;
|
||||||
before = [ "network-setup.service" (subsystemDevice n) ];
|
before = [ "network-setup.service" ];
|
||||||
serviceConfig.Type = "oneshot";
|
serviceConfig.Type = "oneshot";
|
||||||
serviceConfig.RemainAfterExit = true;
|
serviceConfig.RemainAfterExit = true;
|
||||||
path = [ pkgs.iproute ];
|
path = [ pkgs.iproute ];
|
||||||
@ -394,7 +412,7 @@ let
|
|||||||
bindsTo = deps;
|
bindsTo = deps;
|
||||||
partOf = [ "network-setup.service" ];
|
partOf = [ "network-setup.service" ];
|
||||||
after = [ "network-pre.target" ] ++ deps;
|
after = [ "network-pre.target" ] ++ deps;
|
||||||
before = [ "network-setup.service" (subsystemDevice n) ];
|
before = [ "network-setup.service" ];
|
||||||
serviceConfig.Type = "oneshot";
|
serviceConfig.Type = "oneshot";
|
||||||
serviceConfig.RemainAfterExit = true;
|
serviceConfig.RemainAfterExit = true;
|
||||||
path = [ pkgs.iproute ];
|
path = [ pkgs.iproute ];
|
||||||
@ -422,7 +440,7 @@ let
|
|||||||
bindsTo = deps;
|
bindsTo = deps;
|
||||||
partOf = [ "network-setup.service" ];
|
partOf = [ "network-setup.service" ];
|
||||||
after = [ "network-pre.target" ] ++ deps;
|
after = [ "network-pre.target" ] ++ deps;
|
||||||
before = [ "network-setup.service" (subsystemDevice n) ];
|
before = [ "network-setup.service" ];
|
||||||
serviceConfig.Type = "oneshot";
|
serviceConfig.Type = "oneshot";
|
||||||
serviceConfig.RemainAfterExit = true;
|
serviceConfig.RemainAfterExit = true;
|
||||||
path = [ pkgs.iproute ];
|
path = [ pkgs.iproute ];
|
||||||
@ -465,5 +483,8 @@ in
|
|||||||
config = mkMerge [
|
config = mkMerge [
|
||||||
bondWarnings
|
bondWarnings
|
||||||
(mkIf (!cfg.useNetworkd) normalConfig)
|
(mkIf (!cfg.useNetworkd) normalConfig)
|
||||||
|
{ # Ensure slave interfaces are brought up
|
||||||
|
networking.interfaces = genAttrs slaves (i: {});
|
||||||
|
}
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
@ -271,7 +271,7 @@ in rec {
|
|||||||
tests.kernel-latest = callTest tests/kernel-latest.nix {};
|
tests.kernel-latest = callTest tests/kernel-latest.nix {};
|
||||||
tests.kernel-lts = callTest tests/kernel-lts.nix {};
|
tests.kernel-lts = callTest tests/kernel-lts.nix {};
|
||||||
tests.keystone = callTest tests/keystone.nix {};
|
tests.keystone = callTest tests/keystone.nix {};
|
||||||
tests.kubernetes = hydraJob (import tests/kubernetes.nix { system = "x86_64-linux"; });
|
tests.kubernetes = hydraJob (import tests/kubernetes/default.nix { system = "x86_64-linux"; });
|
||||||
tests.latestKernel.login = callTest tests/login.nix { latestKernel = true; };
|
tests.latestKernel.login = callTest tests/login.nix { latestKernel = true; };
|
||||||
tests.ldap = callTest tests/ldap.nix {};
|
tests.ldap = callTest tests/ldap.nix {};
|
||||||
#tests.lightdm = callTest tests/lightdm.nix {};
|
#tests.lightdm = callTest tests/lightdm.nix {};
|
||||||
@ -283,6 +283,7 @@ in rec {
|
|||||||
tests.mumble = callTest tests/mumble.nix {};
|
tests.mumble = callTest tests/mumble.nix {};
|
||||||
tests.munin = callTest tests/munin.nix {};
|
tests.munin = callTest tests/munin.nix {};
|
||||||
tests.mysql = callTest tests/mysql.nix {};
|
tests.mysql = callTest tests/mysql.nix {};
|
||||||
|
tests.mysqlBackup = callTest tests/mysql-backup.nix {};
|
||||||
tests.mysqlReplication = callTest tests/mysql-replication.nix {};
|
tests.mysqlReplication = callTest tests/mysql-replication.nix {};
|
||||||
tests.nat.firewall = callTest tests/nat.nix { withFirewall = true; };
|
tests.nat.firewall = callTest tests/nat.nix { withFirewall = true; };
|
||||||
tests.nat.firewall-conntrack = callTest tests/nat.nix { withFirewall = true; withConntrackHelpers = true; };
|
tests.nat.firewall-conntrack = callTest tests/nat.nix { withFirewall = true; withConntrackHelpers = true; };
|
||||||
|
@ -10,6 +10,17 @@ import ./make-test.nix ({ pkgs, ...} : {
|
|||||||
{ users.users.alice = { isNormalUser = true; extraGroups = [ "proc" ]; };
|
{ users.users.alice = { isNormalUser = true; extraGroups = [ "proc" ]; };
|
||||||
users.users.sybil = { isNormalUser = true; group = "wheel"; };
|
users.users.sybil = { isNormalUser = true; group = "wheel"; };
|
||||||
imports = [ ../modules/profiles/hardened.nix ];
|
imports = [ ../modules/profiles/hardened.nix ];
|
||||||
|
virtualisation.emptyDiskImages = [ 4096 ];
|
||||||
|
boot.initrd.postDeviceCommands = ''
|
||||||
|
${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb
|
||||||
|
'';
|
||||||
|
fileSystems = lib.mkVMOverride {
|
||||||
|
"/efi" = {
|
||||||
|
device = "/dev/disk/by-label/EFISYS";
|
||||||
|
fsType = "vfat";
|
||||||
|
options = [ "noauto" ];
|
||||||
|
};
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
testScript =
|
testScript =
|
||||||
@ -42,5 +53,13 @@ import ./make-test.nix ({ pkgs, ...} : {
|
|||||||
subtest "kcore", sub {
|
subtest "kcore", sub {
|
||||||
$machine->fail("cat /proc/kcore");
|
$machine->fail("cat /proc/kcore");
|
||||||
};
|
};
|
||||||
|
|
||||||
|
# Test deferred mount
|
||||||
|
subtest "mount", sub {
|
||||||
|
$machine->fail("mountpoint -q /efi"); # was deferred
|
||||||
|
$machine->execute("mkdir -p /efi");
|
||||||
|
$machine->succeed("mount /dev/disk/by-label/EFISYS /efi");
|
||||||
|
$machine->succeed("mountpoint -q /efi"); # now mounted
|
||||||
|
};
|
||||||
'';
|
'';
|
||||||
})
|
})
|
||||||
|
@ -1,409 +0,0 @@
|
|||||||
{ system ? builtins.currentSystem }:
|
|
||||||
|
|
||||||
with import ../lib/testing.nix { inherit system; };
|
|
||||||
with import ../lib/qemu-flags.nix;
|
|
||||||
with pkgs.lib;
|
|
||||||
|
|
||||||
let
|
|
||||||
redisPod = pkgs.writeText "redis-master-pod.json" (builtins.toJSON {
|
|
||||||
kind = "Pod";
|
|
||||||
apiVersion = "v1";
|
|
||||||
metadata.name = "redis";
|
|
||||||
metadata.labels.name = "redis";
|
|
||||||
spec.containers = [{
|
|
||||||
name = "redis";
|
|
||||||
image = "redis";
|
|
||||||
args = ["--bind" "0.0.0.0"];
|
|
||||||
imagePullPolicy = "Never";
|
|
||||||
ports = [{
|
|
||||||
name = "redis-server";
|
|
||||||
containerPort = 6379;
|
|
||||||
}];
|
|
||||||
}];
|
|
||||||
});
|
|
||||||
|
|
||||||
redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
|
|
||||||
kind = "Service";
|
|
||||||
apiVersion = "v1";
|
|
||||||
metadata.name = "redis";
|
|
||||||
spec = {
|
|
||||||
ports = [{port = 6379; targetPort = 6379;}];
|
|
||||||
selector = {name = "redis";};
|
|
||||||
};
|
|
||||||
});
|
|
||||||
|
|
||||||
redisImage = pkgs.dockerTools.buildImage {
|
|
||||||
name = "redis";
|
|
||||||
tag = "latest";
|
|
||||||
contents = pkgs.redis;
|
|
||||||
config.Entrypoint = "/bin/redis-server";
|
|
||||||
};
|
|
||||||
|
|
||||||
testSimplePod = ''
|
|
||||||
$kubernetes->execute("docker load < ${redisImage}");
|
|
||||||
$kubernetes->waitUntilSucceeds("kubectl create -f ${redisPod}");
|
|
||||||
$kubernetes->succeed("kubectl create -f ${redisService}");
|
|
||||||
$kubernetes->waitUntilSucceeds("kubectl get pod redis | grep Running");
|
|
||||||
$kubernetes->succeed("nc -z \$\(dig \@10.10.0.1 redis.default.svc.cluster.local +short\) 6379");
|
|
||||||
'';
|
|
||||||
in {
|
|
||||||
# This test runs kubernetes on a single node
|
|
||||||
trivial = makeTest {
|
|
||||||
name = "kubernetes-trivial";
|
|
||||||
|
|
||||||
nodes = {
|
|
||||||
kubernetes =
|
|
||||||
{ config, pkgs, lib, nodes, ... }:
|
|
||||||
{
|
|
||||||
virtualisation.memorySize = 768;
|
|
||||||
virtualisation.diskSize = 2048;
|
|
||||||
|
|
||||||
programs.bash.enableCompletion = true;
|
|
||||||
environment.systemPackages = with pkgs; [ netcat bind ];
|
|
||||||
|
|
||||||
services.kubernetes.roles = ["master" "node"];
|
|
||||||
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0";
|
|
||||||
|
|
||||||
networking.bridges.cbr0.interfaces = [];
|
|
||||||
networking.interfaces.cbr0 = {};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
testScript = ''
|
|
||||||
startAll;
|
|
||||||
|
|
||||||
$kubernetes->waitUntilSucceeds("kubectl get nodes | grep kubernetes | grep Ready");
|
|
||||||
|
|
||||||
${testSimplePod}
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
cluster = let
|
|
||||||
runWithOpenSSL = file: cmd: pkgs.runCommand file {
|
|
||||||
buildInputs = [ pkgs.openssl ];
|
|
||||||
} cmd;
|
|
||||||
|
|
||||||
ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
|
|
||||||
ca_pem = runWithOpenSSL "ca.pem" ''
|
|
||||||
openssl req \
|
|
||||||
-x509 -new -nodes -key ${ca_key} \
|
|
||||||
-days 10000 -out $out -subj "/CN=etcd-ca"
|
|
||||||
'';
|
|
||||||
etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
|
|
||||||
etcd_csr = runWithOpenSSL "etcd.csr" ''
|
|
||||||
openssl req \
|
|
||||||
-new -key ${etcd_key} \
|
|
||||||
-out $out -subj "/CN=etcd" \
|
|
||||||
-config ${openssl_cnf}
|
|
||||||
'';
|
|
||||||
etcd_cert = runWithOpenSSL "etcd.pem" ''
|
|
||||||
openssl x509 \
|
|
||||||
-req -in ${etcd_csr} \
|
|
||||||
-CA ${ca_pem} -CAkey ${ca_key} \
|
|
||||||
-CAcreateserial -out $out \
|
|
||||||
-days 365 -extensions v3_req \
|
|
||||||
-extfile ${openssl_cnf}
|
|
||||||
'';
|
|
||||||
|
|
||||||
etcd_client_key = runWithOpenSSL "etcd-client-key.pem"
|
|
||||||
"openssl genrsa -out $out 2048";
|
|
||||||
|
|
||||||
etcd_client_csr = runWithOpenSSL "etcd-client-key.pem" ''
|
|
||||||
openssl req \
|
|
||||||
-new -key ${etcd_client_key} \
|
|
||||||
-out $out -subj "/CN=etcd-client" \
|
|
||||||
-config ${client_openssl_cnf}
|
|
||||||
'';
|
|
||||||
|
|
||||||
etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
|
|
||||||
openssl x509 \
|
|
||||||
-req -in ${etcd_client_csr} \
|
|
||||||
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
|
|
||||||
-out $out -days 365 -extensions v3_req \
|
|
||||||
-extfile ${client_openssl_cnf}
|
|
||||||
'';
|
|
||||||
|
|
||||||
apiserver_key = runWithOpenSSL "apiserver-key.pem" "openssl genrsa -out $out 2048";
|
|
||||||
|
|
||||||
apiserver_csr = runWithOpenSSL "apiserver.csr" ''
|
|
||||||
openssl req \
|
|
||||||
-new -key ${apiserver_key} \
|
|
||||||
-out $out -subj "/CN=kube-apiserver" \
|
|
||||||
-config ${apiserver_cnf}
|
|
||||||
'';
|
|
||||||
|
|
||||||
apiserver_cert = runWithOpenSSL "apiserver.pem" ''
|
|
||||||
openssl x509 \
|
|
||||||
-req -in ${apiserver_csr} \
|
|
||||||
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
|
|
||||||
-out $out -days 365 -extensions v3_req \
|
|
||||||
-extfile ${apiserver_cnf}
|
|
||||||
'';
|
|
||||||
|
|
||||||
worker_key = runWithOpenSSL "worker-key.pem" "openssl genrsa -out $out 2048";
|
|
||||||
|
|
||||||
worker_csr = runWithOpenSSL "worker.csr" ''
|
|
||||||
openssl req \
|
|
||||||
-new -key ${worker_key} \
|
|
||||||
-out $out -subj "/CN=kube-worker" \
|
|
||||||
-config ${worker_cnf}
|
|
||||||
'';
|
|
||||||
|
|
||||||
worker_cert = runWithOpenSSL "worker.pem" ''
|
|
||||||
openssl x509 \
|
|
||||||
-req -in ${worker_csr} \
|
|
||||||
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
|
|
||||||
-out $out -days 365 -extensions v3_req \
|
|
||||||
-extfile ${worker_cnf}
|
|
||||||
'';
|
|
||||||
|
|
||||||
openssl_cnf = pkgs.writeText "openssl.cnf" ''
|
|
||||||
[req]
|
|
||||||
req_extensions = v3_req
|
|
||||||
distinguished_name = req_distinguished_name
|
|
||||||
[req_distinguished_name]
|
|
||||||
[ v3_req ]
|
|
||||||
basicConstraints = CA:FALSE
|
|
||||||
keyUsage = digitalSignature, keyEncipherment
|
|
||||||
extendedKeyUsage = serverAuth
|
|
||||||
subjectAltName = @alt_names
|
|
||||||
[alt_names]
|
|
||||||
DNS.1 = etcd1
|
|
||||||
DNS.2 = etcd2
|
|
||||||
DNS.3 = etcd3
|
|
||||||
IP.1 = 127.0.0.1
|
|
||||||
'';
|
|
||||||
|
|
||||||
client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
|
|
||||||
[req]
|
|
||||||
req_extensions = v3_req
|
|
||||||
distinguished_name = req_distinguished_name
|
|
||||||
[req_distinguished_name]
|
|
||||||
[ v3_req ]
|
|
||||||
basicConstraints = CA:FALSE
|
|
||||||
keyUsage = digitalSignature, keyEncipherment
|
|
||||||
extendedKeyUsage = clientAuth
|
|
||||||
'';
|
|
||||||
|
|
||||||
apiserver_cnf = pkgs.writeText "apiserver-openssl.cnf" ''
|
|
||||||
[req]
|
|
||||||
req_extensions = v3_req
|
|
||||||
distinguished_name = req_distinguished_name
|
|
||||||
[req_distinguished_name]
|
|
||||||
[ v3_req ]
|
|
||||||
basicConstraints = CA:FALSE
|
|
||||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
|
||||||
subjectAltName = @alt_names
|
|
||||||
[alt_names]
|
|
||||||
DNS.1 = kubernetes
|
|
||||||
DNS.2 = kubernetes.default
|
|
||||||
DNS.3 = kubernetes.default.svc
|
|
||||||
DNS.4 = kubernetes.default.svc.cluster.local
|
|
||||||
IP.1 = 10.10.10.1
|
|
||||||
'';
|
|
||||||
|
|
||||||
worker_cnf = pkgs.writeText "worker-openssl.cnf" ''
|
|
||||||
[req]
|
|
||||||
req_extensions = v3_req
|
|
||||||
distinguished_name = req_distinguished_name
|
|
||||||
[req_distinguished_name]
|
|
||||||
[ v3_req ]
|
|
||||||
basicConstraints = CA:FALSE
|
|
||||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
|
||||||
subjectAltName = @alt_names
|
|
||||||
[alt_names]
|
|
||||||
DNS.1 = kubeWorker1
|
|
||||||
DNS.2 = kubeWorker2
|
|
||||||
'';
|
|
||||||
|
|
||||||
etcdNodeConfig = {
|
|
||||||
virtualisation.memorySize = 128;
|
|
||||||
|
|
||||||
services = {
|
|
||||||
etcd = {
|
|
||||||
enable = true;
|
|
||||||
keyFile = etcd_key;
|
|
||||||
certFile = etcd_cert;
|
|
||||||
trustedCaFile = ca_pem;
|
|
||||||
peerClientCertAuth = true;
|
|
||||||
listenClientUrls = ["https://0.0.0.0:2379"];
|
|
||||||
listenPeerUrls = ["https://0.0.0.0:2380"];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
environment.variables = {
|
|
||||||
ETCDCTL_CERT_FILE = "${etcd_client_cert}";
|
|
||||||
ETCDCTL_KEY_FILE = "${etcd_client_key}";
|
|
||||||
ETCDCTL_CA_FILE = "${ca_pem}";
|
|
||||||
ETCDCTL_PEERS = "https://127.0.0.1:2379";
|
|
||||||
};
|
|
||||||
|
|
||||||
networking.firewall.allowedTCPPorts = [ 2379 2380 ];
|
|
||||||
};
|
|
||||||
|
|
||||||
kubeConfig = {
|
|
||||||
virtualisation.diskSize = 2048;
|
|
||||||
programs.bash.enableCompletion = true;
|
|
||||||
|
|
||||||
services.flannel = {
|
|
||||||
enable = true;
|
|
||||||
network = "10.10.0.0/16";
|
|
||||||
iface = "eth1";
|
|
||||||
etcd = {
|
|
||||||
endpoints = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
|
|
||||||
keyFile = etcd_client_key;
|
|
||||||
certFile = etcd_client_cert;
|
|
||||||
caFile = ca_pem;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# vxlan
|
|
||||||
networking.firewall.allowedUDPPorts = [ 8472 ];
|
|
||||||
|
|
||||||
systemd.services.docker.after = ["flannel.service"];
|
|
||||||
systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/subnet.env";
|
|
||||||
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --bip $FLANNEL_SUBNET";
|
|
||||||
|
|
||||||
services.kubernetes.verbose = true;
|
|
||||||
services.kubernetes.etcd = {
|
|
||||||
servers = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
|
|
||||||
keyFile = etcd_client_key;
|
|
||||||
certFile = etcd_client_cert;
|
|
||||||
caFile = ca_pem;
|
|
||||||
};
|
|
||||||
|
|
||||||
environment.systemPackages = [ pkgs.bind pkgs.tcpdump pkgs.utillinux ];
|
|
||||||
};
|
|
||||||
|
|
||||||
kubeMasterConfig = {pkgs, ...}: {
|
|
||||||
require = [kubeConfig];
|
|
||||||
|
|
||||||
# kube apiserver
|
|
||||||
networking.firewall.allowedTCPPorts = [ 443 ];
|
|
||||||
|
|
||||||
virtualisation.memorySize = 512;
|
|
||||||
|
|
||||||
services.kubernetes = {
|
|
||||||
roles = ["master"];
|
|
||||||
scheduler.leaderElect = true;
|
|
||||||
controllerManager.leaderElect = true;
|
|
||||||
|
|
||||||
apiserver = {
|
|
||||||
publicAddress = "0.0.0.0";
|
|
||||||
advertiseAddress = "192.168.1.8";
|
|
||||||
tlsKeyFile = apiserver_key;
|
|
||||||
tlsCertFile = apiserver_cert;
|
|
||||||
clientCaFile = ca_pem;
|
|
||||||
kubeletClientCaFile = ca_pem;
|
|
||||||
kubeletClientKeyFile = worker_key;
|
|
||||||
kubeletClientCertFile = worker_cert;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
kubeWorkerConfig = { pkgs, ... }: {
|
|
||||||
require = [kubeConfig];
|
|
||||||
|
|
||||||
virtualisation.memorySize = 512;
|
|
||||||
|
|
||||||
# kubelet
|
|
||||||
networking.firewall.allowedTCPPorts = [ 10250 ];
|
|
||||||
|
|
||||||
services.kubernetes = {
|
|
||||||
roles = ["node"];
|
|
||||||
kubeconfig = {
|
|
||||||
server = "https://kubernetes:443";
|
|
||||||
caFile = ca_pem;
|
|
||||||
certFile = worker_cert;
|
|
||||||
keyFile = worker_key;
|
|
||||||
};
|
|
||||||
kubelet = {
|
|
||||||
tlsKeyFile = worker_key;
|
|
||||||
tlsCertFile = worker_cert;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
in makeTest {
|
|
||||||
name = "kubernetes-cluster";
|
|
||||||
|
|
||||||
nodes = {
|
|
||||||
etcd1 = { config, pkgs, nodes, ... }: {
|
|
||||||
require = [etcdNodeConfig];
|
|
||||||
services.etcd = {
|
|
||||||
advertiseClientUrls = ["https://etcd1:2379"];
|
|
||||||
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
|
|
||||||
initialAdvertisePeerUrls = ["https://etcd1:2380"];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
etcd2 = { config, pkgs, ... }: {
|
|
||||||
require = [etcdNodeConfig];
|
|
||||||
services.etcd = {
|
|
||||||
advertiseClientUrls = ["https://etcd2:2379"];
|
|
||||||
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
|
|
||||||
initialAdvertisePeerUrls = ["https://etcd2:2380"];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
etcd3 = { config, pkgs, ... }: {
|
|
||||||
require = [etcdNodeConfig];
|
|
||||||
services.etcd = {
|
|
||||||
advertiseClientUrls = ["https://etcd3:2379"];
|
|
||||||
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
|
|
||||||
initialAdvertisePeerUrls = ["https://etcd3:2380"];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
kubeMaster1 = { config, pkgs, lib, nodes, ... }: {
|
|
||||||
require = [kubeMasterConfig];
|
|
||||||
};
|
|
||||||
|
|
||||||
kubeMaster2 = { config, pkgs, lib, nodes, ... }: {
|
|
||||||
require = [kubeMasterConfig];
|
|
||||||
};
|
|
||||||
|
|
||||||
# Kubernetes TCP load balancer
|
|
||||||
kubernetes = { config, pkgs, ... }: {
|
|
||||||
# kubernetes
|
|
||||||
networking.firewall.allowedTCPPorts = [ 443 ];
|
|
||||||
|
|
||||||
services.haproxy.enable = true;
|
|
||||||
services.haproxy.config = ''
|
|
||||||
global
|
|
||||||
log 127.0.0.1 local0 notice
|
|
||||||
user haproxy
|
|
||||||
group haproxy
|
|
||||||
|
|
||||||
defaults
|
|
||||||
log global
|
|
||||||
retries 2
|
|
||||||
timeout connect 3000
|
|
||||||
timeout server 5000
|
|
||||||
timeout client 5000
|
|
||||||
|
|
||||||
listen kubernetes
|
|
||||||
bind 0.0.0.0:443
|
|
||||||
mode tcp
|
|
||||||
option ssl-hello-chk
|
|
||||||
balance roundrobin
|
|
||||||
server kube-master-1 kubeMaster1:443 check
|
|
||||||
server kube-master-2 kubeMaster2:443 check
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
kubeWorker1 = { config, pkgs, lib, nodes, ... }: {
|
|
||||||
require = [kubeWorkerConfig];
|
|
||||||
};
|
|
||||||
|
|
||||||
kubeWorker2 = { config, pkgs, lib, nodes, ... }: {
|
|
||||||
require = [kubeWorkerConfig];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
testScript = ''
|
|
||||||
startAll;
|
|
||||||
|
|
||||||
${testSimplePod}
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
}
|
|
113
nixos/tests/kubernetes/base.nix
Normal file
113
nixos/tests/kubernetes/base.nix
Normal file
@ -0,0 +1,113 @@
|
|||||||
|
{ system ? builtins.currentSystem }:
|
||||||
|
|
||||||
|
with import ../../lib/testing.nix { inherit system; };
|
||||||
|
with import ../../lib/qemu-flags.nix;
|
||||||
|
with pkgs.lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
mkKubernetesBaseTest =
|
||||||
|
{ name, domain ? "my.zyx", test, machines
|
||||||
|
, pkgs ? import <nixpkgs> { inherit system; }
|
||||||
|
, certs ? import ./certs.nix { inherit pkgs; externalDomain = domain; }
|
||||||
|
, extraConfiguration ? null }:
|
||||||
|
let
|
||||||
|
masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
|
||||||
|
master = machines.${masterName};
|
||||||
|
extraHosts = ''
|
||||||
|
${master.ip} etcd.${domain}
|
||||||
|
${master.ip} api.${domain}
|
||||||
|
${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip} ${machineName}.${domain}") (attrNames machines)}
|
||||||
|
'';
|
||||||
|
in makeTest {
|
||||||
|
inherit name;
|
||||||
|
|
||||||
|
nodes = mapAttrs (machineName: machine:
|
||||||
|
{ config, pkgs, lib, nodes, ... }:
|
||||||
|
mkMerge [
|
||||||
|
{
|
||||||
|
virtualisation.memorySize = mkDefault 768;
|
||||||
|
virtualisation.diskSize = mkDefault 4096;
|
||||||
|
networking = {
|
||||||
|
inherit domain extraHosts;
|
||||||
|
primaryIPAddress = mkForce machine.ip;
|
||||||
|
|
||||||
|
firewall = {
|
||||||
|
allowedTCPPorts = [
|
||||||
|
10250 # kubelet
|
||||||
|
];
|
||||||
|
trustedInterfaces = ["docker0"];
|
||||||
|
|
||||||
|
extraCommands = concatMapStrings (node: ''
|
||||||
|
iptables -A INPUT -s ${node.config.networking.primaryIPAddress} -j ACCEPT
|
||||||
|
'') (attrValues nodes);
|
||||||
|
};
|
||||||
|
};
|
||||||
|
programs.bash.enableCompletion = true;
|
||||||
|
environment.variables = {
|
||||||
|
ETCDCTL_CERT_FILE = "${certs.worker}/etcd-client.pem";
|
||||||
|
ETCDCTL_KEY_FILE = "${certs.worker}/etcd-client-key.pem";
|
||||||
|
ETCDCTL_CA_FILE = "${certs.worker}/ca.pem";
|
||||||
|
ETCDCTL_PEERS = "https://etcd.${domain}:2379";
|
||||||
|
};
|
||||||
|
services.flannel.iface = "eth1";
|
||||||
|
services.kubernetes.apiserver.advertiseAddress = master.ip;
|
||||||
|
}
|
||||||
|
(optionalAttrs (any (role: role == "master") machine.roles) {
|
||||||
|
networking.firewall.allowedTCPPorts = [
|
||||||
|
2379 2380 # etcd
|
||||||
|
443 # kubernetes apiserver
|
||||||
|
];
|
||||||
|
services.etcd = {
|
||||||
|
enable = true;
|
||||||
|
certFile = "${certs.master}/etcd.pem";
|
||||||
|
keyFile = "${certs.master}/etcd-key.pem";
|
||||||
|
trustedCaFile = "${certs.master}/ca.pem";
|
||||||
|
peerClientCertAuth = true;
|
||||||
|
listenClientUrls = ["https://0.0.0.0:2379"];
|
||||||
|
listenPeerUrls = ["https://0.0.0.0:2380"];
|
||||||
|
advertiseClientUrls = ["https://etcd.${config.networking.domain}:2379"];
|
||||||
|
initialCluster = ["${masterName}=https://etcd.${config.networking.domain}:2380"];
|
||||||
|
initialAdvertisePeerUrls = ["https://etcd.${config.networking.domain}:2380"];
|
||||||
|
};
|
||||||
|
})
|
||||||
|
(import ./kubernetes-common.nix { inherit (machine) roles; inherit pkgs config certs; })
|
||||||
|
(optionalAttrs (machine ? "extraConfiguration") (machine.extraConfiguration { inherit config pkgs lib nodes; }))
|
||||||
|
(optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
|
||||||
|
]
|
||||||
|
) machines;
|
||||||
|
|
||||||
|
testScript = ''
|
||||||
|
startAll;
|
||||||
|
|
||||||
|
${test}
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
mkKubernetesMultiNodeTest = attrs: mkKubernetesBaseTest ({
|
||||||
|
machines = {
|
||||||
|
machine1 = {
|
||||||
|
roles = ["master"];
|
||||||
|
ip = "192.168.1.1";
|
||||||
|
};
|
||||||
|
machine2 = {
|
||||||
|
roles = ["node"];
|
||||||
|
ip = "192.168.1.2";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
} // attrs // {
|
||||||
|
name = "kubernetes-${attrs.name}-multinode";
|
||||||
|
});
|
||||||
|
|
||||||
|
mkKubernetesSingleNodeTest = attrs: mkKubernetesBaseTest ({
|
||||||
|
machines = {
|
||||||
|
machine1 = {
|
||||||
|
roles = ["master" "node"];
|
||||||
|
ip = "192.168.1.1";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
} // attrs // {
|
||||||
|
name = "kubernetes-${attrs.name}-singlenode";
|
||||||
|
});
|
||||||
|
in {
|
||||||
|
inherit mkKubernetesBaseTest mkKubernetesSingleNodeTest mkKubernetesMultiNodeTest;
|
||||||
|
}
|
185
nixos/tests/kubernetes/certs.nix
Normal file
185
nixos/tests/kubernetes/certs.nix
Normal file
@ -0,0 +1,185 @@
|
|||||||
|
{
|
||||||
|
pkgs ? import <nixpkgs> {},
|
||||||
|
internalDomain ? "cloud.yourdomain.net",
|
||||||
|
externalDomain ? "myawesomecluster.cluster.yourdomain.net",
|
||||||
|
serviceClusterIp ? "10.0.0.1"
|
||||||
|
}:
|
||||||
|
let
|
||||||
|
runWithCFSSL = name: cmd:
|
||||||
|
builtins.fromJSON (builtins.readFile (
|
||||||
|
pkgs.runCommand "${name}-cfss.json" {
|
||||||
|
buildInputs = [ pkgs.cfssl ];
|
||||||
|
} "cfssl ${cmd} > $out"
|
||||||
|
));
|
||||||
|
|
||||||
|
writeCFSSL = content:
|
||||||
|
pkgs.runCommand content.name {
|
||||||
|
buildInputs = [ pkgs.cfssl ];
|
||||||
|
} ''
|
||||||
|
mkdir -p $out
|
||||||
|
cd $out
|
||||||
|
cat ${writeFile content} | cfssljson -bare ${content.name}
|
||||||
|
'';
|
||||||
|
|
||||||
|
noCSR = content: pkgs.lib.filterAttrs (n: v: n != "csr") content;
|
||||||
|
noKey = content: pkgs.lib.filterAttrs (n: v: n != "key") content;
|
||||||
|
|
||||||
|
writeFile = content: pkgs.writeText "content" (
|
||||||
|
if pkgs.lib.isAttrs content then builtins.toJSON content
|
||||||
|
else toString content
|
||||||
|
);
|
||||||
|
|
||||||
|
createServingCertKey = { ca, cn, hosts? [], size ? 2048, name ? cn }:
|
||||||
|
noCSR (
|
||||||
|
(runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=server -config=${writeFile ca.config} ${writeFile {
|
||||||
|
CN = cn;
|
||||||
|
hosts = hosts;
|
||||||
|
key = { algo = "rsa"; inherit size; };
|
||||||
|
}}") // { inherit name; }
|
||||||
|
);
|
||||||
|
|
||||||
|
createClientCertKey = { ca, cn, groups ? [], size ? 2048, name ? cn }:
|
||||||
|
noCSR (
|
||||||
|
(runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=client -config=${writeFile ca.config} ${writeFile {
|
||||||
|
CN = cn;
|
||||||
|
names = map (group: {O = group;}) groups;
|
||||||
|
hosts = [""];
|
||||||
|
key = { algo = "rsa"; inherit size; };
|
||||||
|
}}") // { inherit name; }
|
||||||
|
);
|
||||||
|
|
||||||
|
createSigningCertKey = { C ? "xx", ST ? "x", L ? "x", O ? "x", OU ? "x", CN ? "ca", emailAddress ? "x", expiry ? "43800h", size ? 2048, name ? CN }:
|
||||||
|
(noCSR (runWithCFSSL CN "genkey -initca ${writeFile {
|
||||||
|
key = { algo = "rsa"; inherit size; };
names = [{ inherit C ST L O OU CN emailAddress; }];
}}")) // {
inherit name;
config.signing = {
default.expiry = expiry;
profiles = {
server = {
inherit expiry;
usages = [
"signing"
"key encipherment"
"server auth"
];
};
client = {
inherit expiry;
usages = [
"signing"
"key encipherment"
"client auth"
];
};
peer = {
inherit expiry;
usages = [
"signing"
"key encipherment"
"server auth"
"client auth"
];
};
};
};
};

ca = createSigningCertKey {};

kube-apiserver = createServingCertKey {
inherit ca;
cn = "kube-apiserver";
hosts = ["kubernetes.default" "kubernetes.default.svc" "localhost" "api.${externalDomain}" serviceClusterIp];
};

kubelet = createServingCertKey {
inherit ca;
cn = "kubelet";
hosts = ["*.${externalDomain}"];
};

service-accounts = createServingCertKey {
inherit ca;
cn = "kube-service-accounts";
};

etcd = createServingCertKey {
inherit ca;
cn = "etcd";
hosts = ["etcd.${externalDomain}"];
};

etcd-client = createClientCertKey {
inherit ca;
cn = "etcd-client";
};

kubelet-client = createClientCertKey {
inherit ca;
cn = "kubelet-client";
groups = ["system:masters"];
};

apiserver-client = {
kubelet = createClientCertKey {
inherit ca;
cn = "apiserver-client-kubelet";
groups = ["system:nodes"];
};

kube-proxy = createClientCertKey {
inherit ca;
name = "apiserver-client-kube-proxy";
cn = "system:kube-proxy";
groups = ["system:kube-proxy" "system:nodes"];
};

kube-controller-manager = createClientCertKey {
inherit ca;
name = "apiserver-client-kube-controller-manager";
cn = "system:kube-controller-manager";
groups = ["system:masters"];
};

kube-scheduler = createClientCertKey {
inherit ca;
name = "apiserver-client-kube-scheduler";
cn = "system:kube-scheduler";
groups = ["system:kube-scheduler"];
};

admin = createClientCertKey {
inherit ca;
cn = "admin";
groups = ["system:masters"];
};
};

in {
master = pkgs.buildEnv {
name = "master-keys";
paths = [
(writeCFSSL (noKey ca))
(writeCFSSL kube-apiserver)
(writeCFSSL kubelet-client)
(writeCFSSL apiserver-client.kube-controller-manager)
(writeCFSSL apiserver-client.kube-scheduler)
(writeCFSSL service-accounts)
(writeCFSSL etcd)
];
};

worker = pkgs.buildEnv {
name = "worker-keys";
paths = [
(writeCFSSL (noKey ca))
(writeCFSSL kubelet)
(writeCFSSL apiserver-client.kubelet)
(writeCFSSL apiserver-client.kube-proxy)
(writeCFSSL etcd-client)
];
};

admin = writeCFSSL apiserver-client.admin;
}

7  nixos/tests/kubernetes/default.nix  Normal file
@ -0,0 +1,7 @@
{ system ? builtins.currentSystem }:
{
dns = import ./dns.nix { inherit system; };
# e2e = import ./e2e.nix { inherit system; }; # TODO: make it pass
# the following test(s) can be removed when e2e is working:
rbac = import ./rbac.nix { inherit system; };
}

127  nixos/tests/kubernetes/dns.nix  Normal file
@ -0,0 +1,127 @@
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
let
domain = "my.zyx";

certs = import ./certs.nix { externalDomain = domain; };

redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "redis";
metadata.labels.name = "redis";
spec.containers = [{
name = "redis";
image = "redis";
args = ["--bind" "0.0.0.0"];
imagePullPolicy = "Never";
ports = [{
name = "redis-server";
containerPort = 6379;
}];
}];
});

redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
kind = "Service";
apiVersion = "v1";
metadata.name = "redis";
spec = {
ports = [{port = 6379; targetPort = 6379;}];
selector = {name = "redis";};
};
});

redisImage = pkgs.dockerTools.buildImage {
name = "redis";
tag = "latest";
contents = [ pkgs.redis pkgs.bind.host ];
config.Entrypoint = "/bin/redis-server";
};

probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "probe";
metadata.labels.name = "probe";
spec.containers = [{
name = "probe";
image = "probe";
args = [ "-f" ];
tty = true;
imagePullPolicy = "Never";
}];
});

probeImage = pkgs.dockerTools.buildImage {
name = "probe";
tag = "latest";
contents = [ pkgs.bind.host pkgs.busybox ];
config.Entrypoint = "/bin/tail";
};

extraConfiguration = { config, pkgs, lib, nodes, ... }: {
environment.systemPackages = [ pkgs.bind.host ];
# virtualisation.docker.extraOptions = "--dns=${config.services.kubernetes.addons.dns.clusterIp}";
services.dnsmasq.enable = true;
services.dnsmasq.servers = [
"/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
];
};

base = {
name = "dns";
inherit domain certs extraConfiguration;
};

singleNodeTest = {
test = ''
# prepare machine1 for test
$machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
$machine1->execute("docker load < ${redisImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
$machine1->execute("docker load < ${probeImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${probePod}");

# check if pods are running
$machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
$machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
$machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'kube-dns.*3/3'");

# check dns on host (dnsmasq)
$machine1->succeed("host redis.default.svc.cluster.local");

# check dns inside the container
$machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
'';
};

multiNodeTest = {
test = ''
# prepare machines for test
$machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
$machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
$machine2->execute("docker load < ${redisImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
$machine2->execute("docker load < ${probeImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${probePod}");

# check if pods are running
$machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
$machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
$machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'kube-dns.*3/3'");

# check dns on hosts (dnsmasq)
$machine1->succeed("host redis.default.svc.cluster.local");
$machine2->succeed("host redis.default.svc.cluster.local");

# check dns inside the container
$machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
'';
};
in {
singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
}

40  nixos/tests/kubernetes/e2e.nix  Normal file
@ -0,0 +1,40 @@
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
let
domain = "my.zyx";
certs = import ./certs.nix { externalDomain = domain; };
kubeconfig = pkgs.writeText "kubeconfig.json" (builtins.toJSON {
apiVersion = "v1";
kind = "Config";
clusters = [{
name = "local";
cluster.certificate-authority = "${certs.master}/ca.pem";
cluster.server = "https://api.${domain}";
}];
users = [{
name = "kubelet";
user = {
client-certificate = "${certs.admin}/admin.pem";
client-key = "${certs.admin}/admin-key.pem";
};
}];
contexts = [{
context = {
cluster = "local";
user = "kubelet";
};
current-context = "kubelet-context";
}];
});

base = {
name = "e2e";
inherit domain certs;
test = ''
$machine1->succeed("e2e.test -kubeconfig ${kubeconfig} -provider local -ginkgo.focus '\\[Conformance\\]' -ginkgo.skip '\\[Flaky\\]|\\[Serial\\]'");
'';
};
in {
singlenode = mkKubernetesSingleNodeTest base;
multinode = mkKubernetesMultiNodeTest base;
}

59  nixos/tests/kubernetes/kubernetes-common.nix  Normal file
@ -0,0 +1,59 @@
{ roles, config, pkgs, certs }:
with pkgs.lib;
let
base = {
inherit roles;
featureGates = ["AllAlpha"];
flannel.enable = true;
addons.dashboard.enable = true;
verbose = true;

caFile = "${certs.master}/ca.pem";
apiserver = {
tlsCertFile = "${certs.master}/kube-apiserver.pem";
tlsKeyFile = "${certs.master}/kube-apiserver-key.pem";
kubeletClientCertFile = "${certs.master}/kubelet-client.pem";
kubeletClientKeyFile = "${certs.master}/kubelet-client-key.pem";
serviceAccountKeyFile = "${certs.master}/kube-service-accounts.pem";
};
etcd = {
servers = ["https://etcd.${config.networking.domain}:2379"];
certFile = "${certs.worker}/etcd-client.pem";
keyFile = "${certs.worker}/etcd-client-key.pem";
};
kubeconfig = {
server = "https://api.${config.networking.domain}";
};
kubelet = {
tlsCertFile = "${certs.worker}/kubelet.pem";
tlsKeyFile = "${certs.worker}/kubelet-key.pem";
hostname = "${config.networking.hostName}.${config.networking.domain}";
kubeconfig = {
certFile = "${certs.worker}/apiserver-client-kubelet.pem";
keyFile = "${certs.worker}/apiserver-client-kubelet-key.pem";
};
};
controllerManager = {
serviceAccountKeyFile = "${certs.master}/kube-service-accounts-key.pem";
kubeconfig = {
certFile = "${certs.master}/apiserver-client-kube-controller-manager.pem";
keyFile = "${certs.master}/apiserver-client-kube-controller-manager-key.pem";
};
};
scheduler = {
kubeconfig = {
certFile = "${certs.master}/apiserver-client-kube-scheduler.pem";
keyFile = "${certs.master}/apiserver-client-kube-scheduler-key.pem";
};
};
proxy = {
kubeconfig = {
certFile = "${certs.worker}/apiserver-client-kube-proxy.pem";
keyFile = "${certs.worker}//apiserver-client-kube-proxy-key.pem";
};
};
};

in {
services.kubernetes = base;
}

137  nixos/tests/kubernetes/rbac.nix  Normal file
@ -0,0 +1,137 @@
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
let

roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
kind = "ServiceAccount";
apiVersion = "v1";
metadata = {
name = "read-only";
namespace = "default";
};
});

roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "RoleBinding";
metadata = {
name = "read-pods";
namespace = "default";
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "Role";
name = "pod-reader";
};
subjects = [{
kind = "ServiceAccount";
name = "read-only";
namespace = "default";
}];
});

roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "Role";
metadata = {
name = "pod-reader";
namespace = "default";
};
rules = [{
apiGroups = [""];
resources = ["pods"];
verbs = ["get" "list" "watch"];
}];
});

kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl";
metadata.namespace = "default";
metadata.labels.name = "kubectl";
spec.serviceAccountName = "read-only";
spec.containers = [{
name = "kubectl";
image = "kubectl:latest";
command = ["/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});

kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl-2";
metadata.namespace = "default";
metadata.labels.name = "kubectl-2";
spec.serviceAccountName = "read-only";
spec.containers = [{
name = "kubectl-2";
image = "kubectl:latest";
command = ["/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});

kubectl = pkgs.runCommand "copy-kubectl" { buildInputs = [ pkgs.kubernetes ]; } ''
mkdir -p $out/bin
cp ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl
'';

kubectlImage = pkgs.dockerTools.buildImage {
name = "kubectl";
tag = "latest";
contents = [ kubectl pkgs.busybox kubectlPod2 ];
config.Entrypoint = "/bin/sh";
};

base = {
name = "rbac";
};

singlenode = base // {
test = ''
$machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");

$machine1->execute("docker load < ${kubectlImage}");

$machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
$machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");

$machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");

$machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
$machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
$machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
'';
};

multinode = base // {
test = ''
$machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
$machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");

$machine2->execute("docker load < ${kubectlImage}");

$machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
$machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");

$machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");

$machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
$machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
$machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
'';
};

in {
singlenode = mkKubernetesSingleNodeTest singlenode;
multinode = mkKubernetesMultiNodeTest multinode;
}

@ -56,9 +56,7 @@ import ./make-test.nix ({ pkgs, ...} : rec {
src = ./mesos_test.py;
phases = [ "installPhase" "fixupPhase" ];
installPhase = ''
- mkdir $out
- cp $src $out/mesos_test.py
- chmod +x $out/mesos_test.py
+ install -Dvm 0755 $src $out/bin/mesos_test.py

echo "done" > test.result
tar czf $out/test.tar.gz test.result

@ -74,18 +72,18 @@ import ./make-test.nix ({ pkgs, ...} : rec {
$master->waitForOpenPort(5050);
$slave->waitForOpenPort(5051);

- # is slave registred?
+ # is slave registered?
$master->waitUntilSucceeds("curl -s --fail http://master:5050/master/slaves".
" | grep -q \"\\\"hostname\\\":\\\"slave\\\"\"");

# try to run docker image
$master->succeed("${pkgs.mesos}/bin/mesos-execute --master=master:5050".
" --resources=\"cpus:0.1;mem:32\" --name=simple-docker".
" --containerizer=mesos --docker_image=echo:latest".
" --shell=true --command=\"echo done\" | grep -q TASK_FINISHED");

# simple command with .tar.gz uri
- $master->succeed("${testFramework}/mesos_test.py master ".
+ $master->succeed("${testFramework}/bin/mesos_test.py master ".
"${testFramework}/test.tar.gz");
'';
})

42  nixos/tests/mysql-backup.nix  Normal file
@ -0,0 +1,42 @@
# Test whether mysqlBackup option works
import ./make-test.nix ({ pkgs, ... } : {
name = "mysql-backup";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ rvl ];
};

nodes = {
master = { config, pkgs, ... }: {
services.mysql = {
enable = true;
initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
package = pkgs.mysql;
};

services.mysqlBackup = {
enable = true;
databases = [ "doesnotexist" "testdb" ];
};
};
};

testScript =
'' startAll;

# Need to have mysql started so that it can be populated with data.
$master->waitForUnit("mysql.service");

# Wait for testdb to be populated.
$master->sleep(10);

# Do a backup and wait for it to finish.
$master->startJob("mysql-backup.service");
$master->waitForJob("mysql-backup.service");

# Check that data appears in backup
$master->succeed("${pkgs.gzip}/bin/zcat /var/backup/mysql/testdb.gz | grep hello");

# Check that a failed backup is logged
$master->succeed("journalctl -u mysql-backup.service | grep 'fail.*doesnotexist' > /dev/null");
'';
})

@ -43,6 +43,7 @@ in
});
})
];
+ system.stateVersion = "17.03";
};
radicale1_export = lib.recursiveUpdate radicale1 {
services.radicale.extraArgs = [

@ -8,3 +8,4 @@ insert into tests values (1, 'a');
insert into tests values (2, 'b');
insert into tests values (3, 'c');
insert into tests values (4, 'd');
+ insert into tests values (5, 'hello');

43  pkgs/applications/altcoins/bitcoin-abc.nix  Normal file
@ -0,0 +1,43 @@
{ stdenv, fetchFromGitHub, pkgconfig, autoreconfHook, openssl, db48, boost
, zlib, miniupnpc, qt5, utillinux, protobuf, qrencode, libevent
, withGui }:

with stdenv.lib;

stdenv.mkDerivation rec {

name = "bitcoin" + (toString (optional (!withGui) "d")) + "-abc-" + version;
version = "0.15.0";

src = fetchFromGitHub {
owner = "bitcoin-ABC";
repo = "bitcoin-abc";
rev = "v${version}";
sha256 = "1fygn6cc99iasg5g5jyps5ps873hfnn4ln4hsmcwlwiqd591qxyv";
};

patches = [ ./fix-bitcoin-qt-build.patch ];

nativeBuildInputs = [ pkgconfig autoreconfHook ];
buildInputs = [ openssl db48 boost zlib
miniupnpc utillinux protobuf libevent ]
++ optionals withGui [ qt5.qtbase qt5.qttools qrencode ];

configureFlags = [ "--with-boost-libdir=${boost.out}/lib" ]
++ optionals withGui [ "--with-gui=qt5" ];

meta = {
description = "Peer-to-peer electronic cash system (Cash client)";
longDescription= ''
Bitcoin ABC is the name of open source software which enables the use of Bitcoin.
It is designed to facilite a hard fork to increase Bitcoin's block size limit.
"ABC" stands for "Adjustable Blocksize Cap".

Bitcoin ABC is a fork of the Bitcoin Core software project.
'';
homepage = https://bitcoinabc.org/;
maintainers = with maintainers; [ lassulus ];
license = licenses.mit;
platforms = platforms.unix;
};
}

@ -5,13 +5,11 @@
with stdenv.lib;
stdenv.mkDerivation rec{
name = "bitcoin" + (toString (optional (!withGui) "d")) + "-" + version;
- version = "0.15.0";
+ version = "0.15.0.1";

src = fetchurl {
- urls = [ "https://bitcoin.org/bin/bitcoin-core-${version}/bitcoin-${version}.tar.gz"
- "mirror://sourceforge/bitcoin/Bitcoin/bitcoin-${version}/bitcoin-${version}.tar.gz"
- ];
- sha256 = "18gj5gdscarv2a1hdgjps50czwi4hrmrrmhssaag55ysh94zbdjl";
+ url = "https://bitcoin.org/bin/bitcoin-core-${version}/bitcoin-${version}.tar.gz";
+ sha256 = "16si3skhm6jhw1pkniv2b9y1kkdhjmhj392palphir0qc1srwzmm";
};

nativeBuildInputs = [ pkgconfig autoreconfHook ];

@ -5,6 +5,9 @@ rec {
bitcoin = callPackage ./bitcoin.nix { withGui = true; };
bitcoind = callPackage ./bitcoin.nix { withGui = false; };

+ bitcoin-abc = callPackage ./bitcoin-abc.nix { withGui = true; };
+ bitcoind-abc = callPackage ./bitcoin-abc.nix { withGui = false; };
+
bitcoin-unlimited = callPackage ./bitcoin-unlimited.nix { withGui = true; };
bitcoind-unlimited = callPackage ./bitcoin-unlimited.nix { withGui = false; };

15  pkgs/applications/altcoins/fix-bitcoin-qt-build.patch  Normal file
@ -0,0 +1,15 @@
--- bitcoin-abc-v0.15.0-src/build-aux/m4/bitcoin_qt.m4 1970-01-01 01:00:01.000000000 +0100
+++ bitcoin-abc-v0.15.0-src.org/build-aux/m4/bitcoin_qt.m4 2017-09-27 23:38:44.748384197 +0100
@@ -35,11 +35,7 @@
dnl Output: $1 is set to the path of $2 if found. $2 are searched in order.
AC_DEFUN([BITCOIN_QT_PATH_PROGS],[
BITCOIN_QT_CHECK([
- if test "x$3" != "x"; then
- AC_PATH_PROGS($1,$2,,$3)
- else
- AC_PATH_PROGS($1,$2)
- fi
+ AC_PATH_PROGS($1,$2)
if test "x$$1" = "x" && test "x$4" != "xyes"; then
BITCOIN_QT_FAIL([$1 not found])
fi

@ -39,6 +39,6 @@ stdenv.mkDerivation rec {
license = stdenv.lib.licenses.gpl2Plus;

maintainers = [ ];
- platforms = stdenv.lib.platforms.unix;
+ platforms = stdenv.lib.platforms.linux;
};
}

37  pkgs/applications/audio/playbar2/default.nix  Normal file
@ -0,0 +1,37 @@
{ stdenv
, cmake
, extra-cmake-modules
, plasma-framework
, kwindowsystem
, fetchFromGitHub
}:

stdenv.mkDerivation rec {
name = "playbar2-${version}";
version = "2.5";

src = fetchFromGitHub {
owner = "audoban";
repo = "PlayBar2";
rev = "v${version}";
sha256 = "0iv2m4flgaz2r0k7f6l0ca8p6cw8j8j2gin1gci2pg3l5g5khbch";
};

nativeBuildInputs = [
cmake
extra-cmake-modules
];

buildInputs = [
plasma-framework
kwindowsystem
];

meta = with stdenv.lib; {
description = "Mpris2 Client for Plasma5";
homepage = https://github.com/audoban/PlayBar2;
license = licenses.gpl3;
platforms = platforms.linux;
maintainers = with maintainers; [ pjones ];
};
}

15  pkgs/applications/editors/emacs/clean-env.patch  Normal file
@ -0,0 +1,15 @@
Dump temacs in an empty environment to prevent -dev paths from ending
up in the dumped image.

diff -ru -x '*~' emacs-25.3/src/Makefile.in emacs-25.3-new/src/Makefile.in
--- emacs-25.3/src/Makefile.in 2017-04-14 17:02:47.000000000 +0200
+++ emacs-25.3-new/src/Makefile.in 2017-09-25 19:03:02.173861038 +0200
@@ -532,7 +532,7 @@
ifeq ($(CANNOT_DUMP),yes)
ln -f temacs$(EXEEXT) $@
else
- LC_ALL=C $(RUN_TEMACS) -batch -l loadup dump
+ env -i LC_ALL=C $(RUN_TEMACS) -batch -l loadup dump
ifneq ($(PAXCTL_dumped),)
$(PAXCTL_dumped) $@
endif

@ -34,7 +34,11 @@ stdenv.mkDerivation rec {
sha256 = "02y00y9q42g1iqgz5qhmsja75hwxd88yrn9zp14lanay0zkwafi5";
};

- patches = (lib.optional stdenv.isDarwin ./at-fdcwd.patch);
+ enableParallelBuilding = true;

+ patches =
+ [ ./clean-env.patch ]
+ ++ lib.optional stdenv.isDarwin ./at-fdcwd.patch;
+
nativeBuildInputs = [ pkgconfig ]
++ lib.optionals srcRepo [ autoconf automake texinfo ]

@ -53,6 +53,9 @@ let
patchelf --set-interpreter $interp \
--set-rpath "${lib.makeLibraryPath [ stdenv.cc.cc.lib zlib ]}:$lldbLibPath" \
bin/clang/clang-tidy
+
+ wrapProgram $out/bin/clion \
+ --set CL_JDK "${jdk}"
)
'';
});

@ -229,15 +232,15 @@ in

datagrip = buildDataGrip rec {
name = "datagrip-${version}";
- version = "2017.1.5"; /* updated by script */
+ version = "2017.2.2"; /* updated by script */
description = "Your Swiss Army Knife for Databases and SQL";
license = stdenv.lib.licenses.unfree;
src = fetchurl {
url = "https://download.jetbrains.com/datagrip/${name}.tar.gz";
- sha256 = "8847c35761fcf6fc7a1d3f2bed0fa3971fbf28721c144f41d21feb473bb212dc"; /* updated by script */
+ sha256 = "1l8y65fw9g5ckzwpcgigm2qwy8fhpw2hil576rphsnx6qvnh4swn"; /* updated by script */
};
wmClass = "jetbrains-datagrip";
- update-channel = "datagrip_2017_1";
+ update-channel = "datagrip_2017_2";
};

gogland = buildGogland rec {

@ -27,46 +27,61 @@ sub get_latest_versions {
next unless $latest_build;

# version as in download url
+ my ($version) = $latest_build =~ /^<build [^>]*version="([^"]+)"/;
+ my ($fullNumber) = $latest_build =~ /^<build [^>]*fullNumber="([^"]+)"/;
+ my $latest_version_full1 = "$version-$fullNumber";
+ $latest_version_full1 =~ s/\s*EAP//;
+
my ($latest_version) = $latest_build =~ /^<build [^>]*version="([^"]+)"/;
($latest_version) = $latest_build =~ /^<build [^>]*fullNumber="([^"]+)"/ if $latest_version =~ / /;

$h{$id} = $latest_version;
+ $h{"full1_" . $id} = $latest_version_full1;
}
return %h;
}

my %latest_versions = get_latest_versions();
- #for my $ch (sort keys %latest_versions) {
+ # for my $ch (sort keys %latest_versions) {
# print("$ch $latest_versions{$ch}\n");
- #}
+ # }

sub update_nix_block {
my ($block) = @_;
my ($channel) = $block =~ /update-channel\s*=\s*"([^"]+)"/;
if ($channel) {
- die "unknown update-channel $channel" unless $latest_versions{$channel};
+ if ($latest_versions{$channel}) {
my ($version) = $block =~ /version\s*=\s*"([^"]+)"/;
die "no version in $block" unless $version;
if ($version eq $latest_versions{$channel}) {
print("$channel is up to date at $version\n");
+ } else {
+ print("updating $channel: $version -> $latest_versions{$channel}\n");
+ my ($url) = $block =~ /url\s*=\s*"([^"]+)"/;
+ # try to interpret some nix
+ my ($name) = $block =~ /name\s*=\s*"([^"]+)"/;
+ $name =~ s/\$\{version\}/$latest_versions{$channel}/;
+ $url =~ s/\$\{name\}/$name/;
+ $url =~ s/\$\{version\}/$latest_versions{$channel}/;
+ die "$url still has some interpolation" if $url =~ /\$/;
+ my ($sha256) = get("$url.sha256") =~ /^([0-9a-f]{64})/;
+ my $version_string = $latest_versions{$channel};
+ unless ( $sha256 ) {
+ my $full_version = $latest_versions{"full1_" . $channel};
+ $url =~ s/$version_string/$full_version/;
+ ($sha256) = get("$url.sha256") =~ /^([0-9a-f]{64})/;
+ $version_string = $full_version;
+ }
+ die "invalid sha256 in $url.sha256" unless $sha256;
+ my ($sha256Base32) = readpipe("nix-hash --type sha256 --to-base32 $sha256");
+ chomp $sha256Base32;
+ print "Jetbrains published SHA256: $sha256\n";
+ print "Conversion into base32 yields: $sha256Base32\n";
+ $block =~ s#version\s*=\s*"([^"]+)".+$#version = "$version_string"; /* updated by script */#m;
+ $block =~ s#sha256\s*=\s*"([^"]+)".+$#sha256 = "$sha256Base32"; /* updated by script */#m;
+ }
} else {
- print("updating $channel: $version -> $latest_versions{$channel}\n");
+ warn "unknown update-channel $channel";
- my ($url) = $block =~ /url\s*=\s*"([^"]+)"/;
- # try to interpret some nix
- my ($name) = $block =~ /name\s*=\s*"([^"]+)"/;
- $name =~ s/\$\{version\}/$latest_versions{$channel}/;
- $url =~ s/\$\{name\}/$name/;
- $url =~ s/\$\{version\}/$latest_versions{$channel}/;
- die "$url still has some interpolation" if $url =~ /\$/;
- my ($sha256) = get("$url.sha256") =~ /^([0-9a-f]{64})/;
- my ($sha256Base32) = readpipe("nix-hash --type sha256 --to-base32 $sha256");
- chomp $sha256Base32;
- print "Jetbrains published SHA256: $sha256\n";
- print "Conversion into base32 yeilds: $sha256Base32\n";
- $block =~ s#version\s*=\s*"([^"]+)".+$#version = "$latest_versions{$channel}"; /* updated by script */#m;
- $block =~ s#sha256\s*=\s*"([^"]+)".+$#sha256 = "$sha256Base32"; /* updated by script */#m;
}
}
return $block;

@ -1,33 +1,30 @@
- { fetchurl, stdenv, ncurses, pkgconfig, libbsd }:
+ { stdenv, fetchurl, pkgconfig, libbsd, ncurses }:

stdenv.mkDerivation rec {
name = "mg-${version}";
- version = "20161005";
+ version = "20170828";

src = fetchurl {
url = "http://homepage.boetes.org/software/mg/${name}.tar.gz";
- sha256 = "0qaydk2cy765n9clghmi5gdnpwn15y2v0fj6r0jcm0v7d89vbz5p";
+ sha256 = "139nc58l5ifj3d3478nhqls0lic52skmxfxggznzxaz9camqd20z";
};

- NIX_CFLAGS_COMPILE = "-Wno-error";
+ enableParallelBuilding = true;

- preConfigure = ''
- substituteInPlace GNUmakefile \
- --replace /usr/bin/pkg-config ${pkgconfig}/bin/pkg-config
- '';
+ makeFlags = [ "PKG_CONFIG=${pkgconfig}/bin/pkg-config" ];

installPhase = ''
- mkdir -p $out/bin
- cp mg $out/bin
- mkdir -p $out/share/man/man1
- cp mg.1 $out/share/man/man1
+ install -m 555 -Dt $out/bin mg
+ install -m 444 -Dt $out/share/man/man1 mg.1
'';

nativeBuildInputs = [ pkgconfig ];
- buildInputs = [ ncurses libbsd ];
+ buildInputs = [ libbsd ncurses ];

meta = with stdenv.lib; {
- homepage = http://homepage.boetes.org/software/mg/;
description = "Micro GNU/emacs, a portable version of the mg maintained by the OpenBSD team";
+ homepage = "https://homepage.boetes.org/software/mg";
license = licenses.publicDomain;
platforms = platforms.all;
};

@ -50,7 +50,7 @@ stdenv.mkDerivation rec {
'';
homepage = https://github.com/blakemcbride/TECOC;
maintainers = [ maintainers.AndersonTorres ];
- platforms = platforms.unix;
+ platforms = platforms.linux;
};
}
# TODO: test in other platforms - especially Darwin

@ -1,12 +1,12 @@
{ lib, fetchFromGitHub }:
rec {
- version = "8.0.0442";
+ version = "8.0.1150";

src = fetchFromGitHub {
owner = "vim";
repo = "vim";
rev = "v${version}";
- sha256 = "1pyyrkb7k5vhm1ijrh4v2f50lxhrgga5mm0gvmz4v704z0h585yg";
+ sha256 = "1k1qkmb2jbymqikrp99q1yjagdf508xzabrw7b08dlh926b2v23j";
};

enableParallelBuilding = true;

@ -14,8 +14,8 @@ let
else throw "ImageMagick is not supported on this platform.";

cfg = {
- version = "7.0.6-4";
+ version = "7.0.7-4";
- sha256 = "0fvkx9lf8g0sa9bccd9s5qyhcy0g1mqnkbpqly55ryxyg1ywxqaz";
+ sha256 = "074w4jm5s98b8dxwjl8lljvdhmm3mbg1ikgjy1mw3c1sb08z3nc8";
patches = [];
};
in

@ -14,8 +14,8 @@ let
else throw "ImageMagick is not supported on this platform.";

cfg = {
- version = "6.9.9-7";
+ version = "6.9.9-15";
- sha256 = "1lwsz9b8clygdppgawv2hsry4aykgmawjlwhg3fj70rndv4a8rw4";
+ sha256 = "0bxgdc1qiyvag6a2iiqcbwp4ak0m1mzi9qhs51fbrvv6syy12m6c";
patches = [];
}
# Freeze version on mingw so we don't need to port the patch too often.

@ -2,16 +2,21 @@ Get the environment propagated to scons forked childs, and correct the dicom plu
a typedef of size_t that failed at least on x86_64-linux.

diff --git a/SConstruct b/SConstruct
- index 16eccd9..603e931 100644
+ index 9e752d6..f93f27f 100644
--- a/SConstruct
+++ b/SConstruct
- @@ -7,8 +7,7 @@ else:
+ @@ -9,13 +9,7 @@ else:
- cppflags = ['-O2']
- variant = 'Release'
+ commit_id = os.popen('git rev-parse HEAD').read().replace('\n','')

-env = Environment(LIBPATH=[],
- - CPPFLAGS = cppflags)
+ - CPPFLAGS = cppflags + ['-Wno-deprecated-declarations',
+ - '-Wno-reorder',
+ - '-Wno-unused-but-set-variable',
+ - '-Wno-unused-function'],
+ - CXXFLAGS=['-std=c++1y']
+ - )
+env = Environment(ENV = os.environ)

env['SBOX'] = False
+ env['COMMITIDSHORT'] = commit_id[0:6]

@ -2,13 +2,14 @@
pcre, cfitsio, perl, gob2, vala_0_23, libtiff, json_glib }:

stdenv.mkDerivation rec {
- name = "giv-20150811-git";
+ name = "giv-${version}";
+ version = "0.9.26";

src = fetchFromGitHub {
owner = "dov";
repo = "giv";
- rev = "64648bfbbf10ec4a9adfbc939c96c7d1dbdce57a";
+ rev = "v${version}";
- sha256 = "1sz2n7jbmg3g97bs613xxjpzqbsl5rvpg6v7g3x3ycyd35r8vsfp";
+ sha256 = "1sfm8j3hvqij6z3h8xz724d7hjqqbzljl2a6pp4yjpnnrxksnic2";
};

hardeningDisable = [ "format" ];

@ -1,40 +1,46 @@
- { fetchurl, stdenv, cmake, qt4
- , hdf5
- , mpich2
- , python
- , libxml2
- , mesa, libXt
- }:
+ {stdenv, fetchFromGitHub, cmake
+ ,full, python, mesa, libXt }:

stdenv.mkDerivation rec {
- name = "paraview-4.0.1";
- src = fetchurl {
- url = "http://paraview.org/files/v4.0/ParaView-v4.0.1-source.tgz";
- sha256 = "1qj8dq8gqpsw75sv4sdc7xm1xcpv0ilsddnrcfhha0zfhp0gq10y";
+ name = "paraview-${version}";
+ version = "5.4.0";
+
+ # fetching from GitHub instead of taking an "official" source
+ # tarball because of missing submodules there
+ src = fetchFromGitHub {
+ owner = "Kitware";
+ repo = "ParaView";
+ rev = "v${version}";
+ sha256 = "0h1vkgwm10mc5mnr3djp81lxr5pi0hyj776z77hiib6xm5596q9n";
+ fetchSubmodules = true;
};

- # [ 5%] Generating vtkGLSLShaderLibrary.h
- # ../../../bin/ProcessShader: error while loading shared libraries: libvtksys.so.pv3.10: cannot open shared object file: No such file or directory
- preConfigure = ''
- export NIX_LDFLAGS="$NIX_LDFLAGS -rpath $out/lib/paraview-3.98 -rpath ../../../../../../lib -rpath ../../../../../lib -rpath ../../../../lib -rpath ../../../lib -rpath ../../lib -rpath ../lib"
- '';
- cmakeFlags = [
- "-DPARAVIEW_USE_SYSTEM_HDF5:BOOL=ON"
- "-DVTK_USE_SYSTEM_LIBXML2:BOOL=ON"
- "-DPARAVIEW_ENABLE_PYTHON:BOOL=ON"
- # use -DPARAVIEW_INSTALL_THIRD_PARTY_LIBRARIES:BOOL=OFF \ to fix make install error: http://www.cmake.org/pipermail/paraview/2011-February/020268.html
- "-DPARAVIEW_INSTALL_THIRD_PARTY_LIBRARIES:BOOL=OFF"
- "-DCMAKE_SKIP_BUILD_RPATH=ON"
- "-DVTK_USE_RPATH:BOOL=ON"
- "-DPARAVIEW_INSTALL_DEVELOPMENT=ON"
- ];
+ cmakeFlags = [
+ "-DPARAVIEW_ENABLE_PYTHON=ON"
+ "-DPARAVIEW_INSTALL_DEVELOPMENT_FILES=ON"
+ ];

- # https://bugzilla.redhat.com/show_bug.cgi?id=1138466
- NIX_CFLAGS_COMPILE = "-DGLX_GLXEXT_LEGACY";
+ # During build, binaries are called that rely on freshly built
+ # libraries. These reside in build/lib, and are not found by
+ # default.
+ preBuild = ''
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/lib
+ '';

enableParallelBuilding = true;

- buildInputs = [ cmake qt4 hdf5 mpich2 python libxml2 mesa libXt ];
+ buildInputs = [ cmake
+ python
+ mesa
+ libXt
+
+ # theoretically the following should be fine, but there is an error
+ # due to missing libqminimal when not using qt5.full
+
+ # qtbase qtx11extras qttools
+ full
+ ];

meta = {
homepage = http://www.paraview.org/;

@ -21,6 +21,6 @@ stdenv.mkDerivation (rec {
homepage = http://www.pberndt.com/Programme/Linux/pqiv;
license = licenses.gpl3;
maintainers = [ maintainers.ndowens ];
- platforms = platforms.unix;
+ platforms = platforms.linux;
};
})

Some files were not shown because too many files have changed in this diff.