diff --git a/doc/functions.xml b/doc/functions.xml
index 7f40ba33cd4..5a350a23e0a 100644
--- a/doc/functions.xml
+++ b/doc/functions.xml
@@ -291,4 +291,340 @@ c = lib.makeOverridable f { a = 1; b = 2; }
+
+ pkgs.dockerTools
+
+
+ pkgs.dockerTools is a set of functions for creating and
+ manipulating Docker images according to the
+
+ Docker Image Specification v1.0.0.
+ Docker itself is not used to perform any of the operations done by these
+ functions.
+
+
+
+
+ The dockerTools API is unstable and may be subject to
+ backwards-incompatible changes in the future.
+
+
+
+
+ buildImage
+
+
+ This function is analogous to the docker build command,
+ in that it can be used to build a Docker-compatible repository tarball containing
+ a single image with one or multiple layers. As such, the result
+ is suitable for being loaded in Docker with docker load.
+
+
+
+ The parameters of buildImage, with representative example values, are
+ described below:
+
+
+ Docker build
+
+ buildImage {
+ name = "redis";
+ tag = "latest";
+
+ fromImage = someBaseImage;
+ fromImageName = null;
+ fromImageTag = "latest";
+
+ contents = pkgs.redis;
+ runAsRoot = ''
+ #!${stdenv.shell}
+ mkdir -p /data
+ '';
+
+ config = {
+ Cmd = [ "/bin/redis-server" ];
+ WorkingDir = "/data";
+ Volumes = {
+ "/data" = {};
+ };
+ };
+ }
+
+
+
+ The above example will build a Docker image redis/latest
+ from the given base image. Loading and running this image in Docker results in
+ redis-server being started automatically.
+
+
+
+
+
+ name specifies the name of the resulting image.
+ This is the only required argument for buildImage.
+
+
+
+
+
+ tag specifies the tag of the resulting image.
+ By default it's latest.
+
+
+
+
+
+ fromImage is the repository tarball containing the base image.
+ It must be a valid Docker image, such as one exported by docker save.
+ By default it's null, which can be seen as equivalent
+ to FROM scratch in a Dockerfile.
+
+
+
+
+
+ fromImageName can be used to further specify
+ the base image within the repository, in case it contains multiple images.
+ By default it's null, in which case
+ buildImage will pick the first image available
+ in the repository.
+
+
+
+
+
+ fromImageTag can be used to further specify the tag
+ of the base image within the repository, in case the repository contains multiple tags for that image.
+ By default it's null, in which case
+ buildImage will pick the first tag available for the base image.
+
+
+
+
+
+ contents is a derivation that will be copied into the new
+ layer of the resulting image. This is similar to
+ ADD contents/ / in a Dockerfile.
+ By default it's null.
+
+
+
+
+
+ runAsRoot is a bash script that will run as root
+ in an environment that overlays the existing layers of the base image with
+ the new resulting layer, including the previously copied
+ contents derivation.
+ This is similar to
+ RUN ... in a Dockerfile.
+
+
+
+ Using this parameter requires the kvm
+ device to be available.
+
+
+
+
+
+
+
+ config is used to specify the configuration of the
+ containers that will be started off the built image in Docker.
+ The available options are listed in the
+
+ Docker Image Specification v1.0.0.
+
+
+
+
+
+
+ After the new layer has been created, its closure
+ (to which contents, config and
+ runAsRoot contribute) will be copied into the layer itself.
+ Only new dependencies that are not already in the existing layers will be copied.
+
+
+
+ At the end of the process, only a single new layer will be produced and
+ added to the resulting image.
+
+
+
+ The resulting repository will only list the single image
+ image/tag. In the case of
+ the example above, it would be redis/latest.
+
+
+
+ It is possible to inspect the arguments with which an image was built
+ using its buildArgs attribute.
+
+
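+
+ For example, the tag an image was built with can be recovered from
+ buildArgs. The following is a sketch; redisImage is a
+ hypothetical binding introduced here for illustration:
+
+
+ Inspecting build arguments
+
+ let
+ redisImage = buildImage {
+ name = "redis";
+ tag = "latest";
+ contents = pkgs.redis;
+ };
+ in
+ redisImage.buildArgs.tag
+
+
+ The expression evaluates to "latest": buildArgs simply echoes back the
+ attribute set that was originally passed to buildImage, so any of its
+ attributes can be inspected in the same way.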
+
+
+
+ pullImage
+
+
+ This function is analogous to the docker pull command,
+ in that it can be used to fetch a Docker image from a Docker registry.
+ Currently only registry v1 is supported.
+ By default Docker Hub
+ is used to pull images.
+
+
+
+ Its parameters are described in the example below:
+
+
+ Docker pull
+
+ pullImage {
+ imageName = "debian";
+ imageTag = "jessie";
+ imageId = null;
+ sha256 = "1bhw5hkz6chrnrih0ymjbmn69hyfriza2lr550xyvpdrnbzr4gk2";
+
+ indexUrl = "https://index.docker.io";
+ registryUrl = "https://registry-1.docker.io";
+ registryVersion = "v1";
+ }
+
+
+
+
+
+
+ imageName specifies the name of the image to be downloaded,
+ which can also include the registry namespace (e.g. library/debian).
+ This argument is required.
+
+
+
+
+
+ imageTag specifies the tag of the image to be downloaded.
+ By default it's latest.
+
+
+
+
+
+ imageId, if specified, is the exact image to be fetched, instead
+ of the one resolved from imageName/imageTag. However, the resulting repository
+ will still be named imageName/imageTag.
+ By default it's null.
+
+
+
+
+
+ sha256 is the checksum of the whole fetched image.
+ This argument is required.
+
+
+
+ The checksum is computed on the unpacked directory, not on the final tarball.
+
+
+
+
+
+
+ In the above example the default values are shown for the variables indexUrl,
+ registryUrl and registryVersion.
+ Hence, by default, the Docker Hub registry is used to pull images.
+
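+
+ A pulled image can also serve as the base of an image built with
+ buildImage. The following is a sketch that reuses the checksum from the
+ example above; the my-app name and pkgs.hello payload are placeholders:
+
+
+ Building on a pulled image
+
+ buildImage {
+ name = "my-app";
+ tag = "latest";
+
+ fromImage = pullImage {
+ imageName = "debian";
+ imageTag = "jessie";
+ sha256 = "1bhw5hkz6chrnrih0ymjbmn69hyfriza2lr550xyvpdrnbzr4gk2";
+ };
+
+ contents = pkgs.hello;
+ }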
+
+
+
+
+
+
+ exportImage
+
+
+ This function is analogous to the docker export command,
+ in that it can be used to flatten a Docker image that contains multiple layers.
+ The result is the merge of all the layers of the image into a single filesystem.
+ As such, the result is suitable for being imported in Docker
+ with docker import.
+
+
+
+
+ Using this function requires the kvm
+ device to be available.
+
+
+
+
+ The parameters of exportImage are the following:
+
+
+ Docker export
+
+ exportImage {
+ fromImage = someLayeredImage;
+ fromImageName = null;
+ fromImageTag = null;
+
+ name = someLayeredImage.name;
+ }
+
+
+
+
+ The parameters relating to the base image have the same meaning as
+ described in buildImage, except that
+ fromImage is the only required argument in this case.
+
+
+
+ The name argument is the name of the derivation output,
+ which defaults to fromImage.name.
+
+
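+
+ For instance, a multi-layer image pulled from a registry can be flattened
+ into a single importable tarball. The following is a sketch that reuses the
+ pullImage example from above; the explicit name is optional and shown only
+ for illustration:
+
+
+ Flattening a pulled image
+
+ exportImage {
+ fromImage = pullImage {
+ imageName = "debian";
+ imageTag = "jessie";
+ sha256 = "1bhw5hkz6chrnrih0ymjbmn69hyfriza2lr550xyvpdrnbzr4gk2";
+ };
+
+ name = "debian-flattened";
+ }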
+
+
+ shadowSetup
+
+
+ This constant string is a helper for setting up the base files needed to manage
+ users and groups, if such files don't already exist.
+ It is suitable for use in a
+ runAsRoot script, as
+ in the example below:
+
+
+ Shadow base files
+
+ buildImage {
+ name = "shadow-basic";
+
+ runAsRoot = ''
+ #!${stdenv.shell}
+ ${shadowSetup}
+ groupadd -r redis
+ useradd -r -g redis redis
+ mkdir /data
+ chown redis:redis /data
+ '';
+ }
+
+
+
+
+ Creating base files like /etc/passwd or
+ /etc/login.defs is necessary for shadow-utils to
+ manipulate users and groups.
+
+
+
+
+
+
diff --git a/pkgs/build-support/docker/default.nix b/pkgs/build-support/docker/default.nix
new file mode 100644
index 00000000000..55344aad566
--- /dev/null
+++ b/pkgs/build-support/docker/default.nix
@@ -0,0 +1,365 @@
+{ stdenv, lib, callPackage, runCommand, writeReferencesToFile, writeText, vmTools, writeScript
+, docker, shadow, utillinux, coreutils, jshon, e2fsprogs, goPackages }:
+
+# WARNING: this API is unstable and may be subject to backwards-incompatible changes in the future.
+
+rec {
+
+ pullImage = callPackage ./pull.nix {};
+
+ # We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
+ # And we cannot untar it, because then we cannot preserve permissions, etc.
+ tarsum = runCommand "tarsum" {
+ buildInputs = [ goPackages.go ];
+ } ''
+ mkdir tarsum
+ cd tarsum
+
+ cp ${./tarsum.go} tarsum.go
+ export GOPATH=$(pwd)
+ mkdir src
+ ln -sT ${docker.src}/pkg/tarsum src/tarsum
+ go build
+
+ cp tarsum $out
+ '';
+
+ # buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
+ mergeDrvs = { drvs, onlyDeps ? false }:
+ runCommand "merge-drvs" {
+ inherit drvs onlyDeps;
+ } ''
+ if [ -n "$onlyDeps" ]; then
+ echo $drvs > $out
+ exit 0
+ fi
+
+ mkdir $out
+ for drv in $drvs; do
+ echo Merging $drv
+ if [ -d "$drv" ]; then
+ cp -drf --preserve=mode -f $drv/* $out/
+ else
+ tar -C $out -xpf $drv || true
+ fi
+ done
+ '';
+
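+ # Pack the closure of a derivation (and, unless onlyDeps is set, the
+ # derivation's own contents at the root) into a compressed tarball.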
+ mkTarball = { name ? "docker-tar", drv, onlyDeps ? false }:
+ runCommand "${name}.tar.gz" rec {
+ inherit drv onlyDeps;
+
+ drvClosure = writeReferencesToFile drv;
+
+ } ''
+ while read dep; do
+ echo Copying $dep
+ dir="$(dirname "$dep")"
+ mkdir -p "rootfs/$dir"
+ cp -drf --preserve=mode $dep "rootfs/$dir/"
+ done < "$drvClosure"
+
+ if [ -z "$onlyDeps" ]; then
+ cp -drf --preserve=mode $drv/* rootfs/
+ fi
+
+ tar -C rootfs/ -cpzf $out .
+ '';
+
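+ # Wrap a snippet of shell code in a script that runs under stdenv's shell
+ # with errexit and a minimal PATH (coreutils only).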
+ shellScript = text:
+ writeScript "script.sh" ''
+ #!${stdenv.shell}
+ set -e
+ export PATH=${coreutils}/bin:/bin
+
+ ${text}
+ '';
+
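+ # Helper string for runAsRoot scripts: puts the shadow utilities on PATH and
+ # creates minimal passwd/group/PAM files when they don't exist yet, so that
+ # useradd and groupadd work inside the image being built.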
+ shadowSetup = ''
+ export PATH=${shadow}/bin:$PATH
+ mkdir -p /etc/pam.d
+ if [ ! -f /etc/passwd ]; then
+ echo "root:x:0:0::/root:/bin/sh" > /etc/passwd
+ echo "root:!x:::::::" > /etc/shadow
+ fi
+ if [ ! -f /etc/group ]; then
+ echo "root:x:0:" > /etc/group
+ echo "root:x::" > /etc/gshadow
+ fi
+ if [ ! -f /etc/pam.d/other ]; then
+ cat > /etc/pam.d/other <<EOF
+ account sufficient pam_unix.so
+ auth sufficient pam_rootok.so
+ password requisite pam_unix.so nullok sha512
+ session required pam_unix.so
+ EOF
+ fi
+ if [ ! -f /etc/login.defs ]; then
+ touch /etc/login.defs
+ fi
+ '';
+
+ # Merge a list of tarballs into a single compressed tarball, so that their
+ # combined contents can be appended to a single layer.
+ mergeTarballs = tarballs:
+ runCommand "merge-tarballs" { inherit tarballs; } ''
+ mkdir merged
+ for t in $tarballs; do
+ tar -C merged -xpf $t
+ done
+ tar -C merged -cpzf $out .
+ '';
+
+ # Run a script in a VM with the layers of the base image mounted as a
+ # read-only overlay and a fresh directory as the writable upper layer,
+ # with hooks before mounting, after mounting and after unmounting.
+ runWithOverlay = { name, fromImage ? null, fromImageName ? null, fromImageTag ? null
+ , diskSize ? 1024, preMount ? "", postMount ? "", postUmount ? "" }:
+ vmTools.runInLinuxVM (
+ runCommand name {
+ preVM = vmTools.createEmptyImage { size = diskSize; fullName = "docker-run-disk"; };
+
+ inherit fromImage fromImageName fromImageTag;
+
+ buildInputs = [ utillinux e2fsprogs jshon ];
+ } ''
+ rm -rf $out
+
+ mkdir disk
+ mkfs /dev/${vmTools.hd}
+ mount /dev/${vmTools.hd} disk
+ cd disk
+
+ if [ -n "$fromImage" ]; then
+ echo Unpacking base image
+ mkdir image
+ tar -C image -xpf "$fromImage"
+
+ # If no base name or tag is given, use the first one in the repository.
+ if [ -z "$fromImageName" ]; then
+ fromImageName=$(jshon -k < image/repositories|head -n1)
+ fi
+ if [ -z "$fromImageTag" ]; then
+ fromImageTag=$(jshon -e $fromImageName -k < image/repositories|head -n1)
+ fi
+ parentID=$(jshon -e $fromImageName -e $fromImageTag -u < image/repositories)
+ fi
+
+ # Unpack every ancestor layer and collect it as an overlay lower directory.
+ lowerdir=""
+ while [ -n "$parentID" ]; do
+ echo Unpacking layer $parentID
+ mkdir -p image/$parentID/layer
+ tar -C image/$parentID/layer -xpf image/$parentID/layer.tar
+ rm image/$parentID/layer.tar
+
+ # Turn whiteout files back into character devices understood by overlayfs.
+ find image/$parentID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
+
+ lowerdir=$lowerdir''${lowerdir:+:}image/$parentID/layer
+ parentID=$(cat image/$parentID/json|(jshon -e parent -u 2>/dev/null || true))
+ done
+
+ mkdir work
+ mkdir layer
+ mkdir mnt
+
+ ${preMount}
+
+ if [ -n "$lowerdir" ]; then
+ mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
+ else
+ mount --bind layer mnt
+ fi
+
+ ${postMount}
+
+ umount mnt
+
+ pushd layer
+ find . -type c -exec bash -c 'name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"' \;
+ popd
+
+ ${postUmount}
+ '');
+
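+ # Flatten an image: mount the merged view of all its layers in a VM and pack
+ # the resulting root filesystem into a single tarball, suitable for
+ # `docker import`.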
+ exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }:
+ runWithOverlay {
+ inherit name fromImage fromImageName fromImageTag diskSize;
+
+ postMount = ''
+ echo Packing raw image
+ tar -C mnt -czf $out .
+ '';
+ };
+
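+ # Create a layer without running anything as root: copy the contents, run
+ # extraCommands, pack layer.tar and record its tarsum checksum in the
+ # layer's json metadata.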
+ mkPureLayer = { baseJson, contents ? null, extraCommands ? "" }:
+ runCommand "docker-layer" {
+ inherit baseJson contents extraCommands;
+
+ buildInputs = [ jshon ];
+ } ''
+ mkdir layer
+ if [ -n "$contents" ]; then
+ echo Adding contents
+ for c in $contents; do
+ cp -drf $c/* layer/
+ chmod -R ug+w layer/
+ done
+ fi
+
+ pushd layer
+ ${extraCommands}
+ popd
+
+ echo Packing layer
+ mkdir $out
+ tar -C layer -cf $out/layer.tar .
+ ts=$(${tarsum} < $out/layer.tar)
+ cat ${baseJson} | jshon -s "$ts" -i checksum > $out/json
+ echo -n "1.0" > $out/VERSION
+ '';
+
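+ # Create a layer by running the runAsRoot script as root in a VM, chrooted
+ # into an overlay of the base image's layers plus the copied contents.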
+ mkRootLayer = { runAsRoot, baseJson, fromImage ? null, fromImageName ? null, fromImageTag ? null
+ , diskSize ? 1024, contents ? null, extraCommands ? "" }:
+ let runAsRootScript = writeScript "run-as-root.sh" runAsRoot;
+ in runWithOverlay {
+ name = "docker-layer";
+
+ inherit fromImage fromImageName fromImageTag diskSize;
+
+ preMount = lib.optionalString (contents != null) ''
+ echo Adding contents
+ for c in ${builtins.toString contents}; do
+ cp -drf $c/* layer/
+ chmod -R ug+w layer/
+ done
+ '';
+
+ postMount = ''
+ mkdir -p mnt/{dev,proc,sys,nix/store}
+ mount --rbind /dev mnt/dev
+ mount --rbind /sys mnt/sys
+ mount --rbind /nix/store mnt/nix/store
+
+ unshare -imnpuf --mount-proc chroot mnt ${runAsRootScript}
+ umount -R mnt/dev mnt/sys mnt/nix/store
+ rmdir --ignore-fail-on-non-empty mnt/dev mnt/proc mnt/sys mnt/nix/store mnt/nix
+ '';
+
+ postUmount = ''
+ pushd layer
+ ${extraCommands}
+ popd
+
+ echo Packing layer
+ mkdir $out
+ tar -C layer -cf $out/layer.tar .
+ ts=$(${tarsum} < $out/layer.tar)
+ cat ${baseJson} | jshon -s "$ts" -i checksum > $out/json
+ echo -n "1.0" > $out/VERSION
+ '';
+ };
+
+ # 1. extract the base image
+ # 2. create the layer
+ # 3. add layer deps to the layer itself, diffing with the base image
+ # 4. compute the layer id
+ # 5. put the layer in the image
+ # 6. repack the image
+ buildImage = args@{ name, tag ? "latest"
+ , fromImage ? null, fromImageName ? null, fromImageTag ? null
+ , contents ? null, tarballs ? [], config ? null
+ , runAsRoot ? null, diskSize ? 1024, extraCommands ? "" }:
+
+ let
+
+ baseJson = writeText "${name}-config.json" (builtins.toJSON {
+ created = "1970-01-01T00:00:01Z";
+ architecture = "amd64";
+ os = "linux";
+ config = config;
+ });
+
+ layer = (if runAsRoot == null
+ then mkPureLayer { inherit baseJson contents extraCommands; }
+ else mkRootLayer { inherit baseJson fromImage fromImageName fromImageTag contents runAsRoot diskSize extraCommands; });
+ depsTarball = mkTarball { name = "${name}-deps";
+ drv = layer;
+ onlyDeps = true; };
+
+ result = runCommand "${name}.tar.gz" {
+ buildInputs = [ jshon ];
+
+ imageName = name;
+ imageTag = tag;
+ inherit fromImage baseJson;
+
+ mergedTarball = if tarballs == [] then depsTarball else mergeTarballs ([ depsTarball ] ++ tarballs);
+
+ passthru = {
+ buildArgs = args;
+ };
+ } ''
+ mkdir image
+ touch baseFiles
+ if [ -n "$fromImage" ]; then
+ echo Unpacking base image
+ tar -C image -xpf "$fromImage"
+
+ if [ -z "$fromImageName" ]; then
+ fromImageName=$(jshon -k < image/repositories|head -n1)
+ fi
+ if [ -z "$fromImageTag" ]; then
+ fromImageTag=$(jshon -e $fromImageName -k < image/repositories|head -n1)
+ fi
+ parentID=$(jshon -e $fromImageName -e $fromImageTag -u < image/repositories)
+
+ for l in image/*/layer.tar; do
+ tar -tf $l >> baseFiles
+ done
+ fi
+
+ chmod -R ug+rw image
+
+ mkdir temp
+ cp ${layer}/* temp/
+ chmod ug+w temp/*
+
+ echo Adding dependencies
+ tar -tf temp/layer.tar >> baseFiles
+ tar -tf "$mergedTarball" | grep -v ${layer} > layerFiles
+ if [ "$(wc -l layerFiles|cut -d ' ' -f 1)" -gt 3 ]; then
+ sed -i -e 's|^[\./]\+||' baseFiles layerFiles
+ comm <(sort -n baseFiles|uniq) <(sort -n layerFiles|uniq) -1 -3 > newFiles
+ mkdir deps
+ pushd deps
+ tar -xpf "$mergedTarball" --no-recursion --files-from ../newFiles 2>/dev/null || true
+ tar -rf ../temp/layer.tar --no-recursion --files-from ../newFiles 2>/dev/null || true
+ popd
+ else
+ echo No new deps, no diffing needed
+ fi
+
+ echo Adding meta
+
+ if [ -n "$parentID" ]; then
+ cat temp/json | jshon -s "$parentID" -i parent > tmpjson
+ mv tmpjson temp/json
+ fi
+
+ layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
+ size=$(stat --printf="%s" temp/layer.tar)
+ cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
+ mv tmpjson temp/json
+
+ mv temp image/$layerID
+
+ jshon -n object \
+ -n object -s "$layerID" -i "$imageTag" \
+ -i "$imageName" > image/repositories
+
+ chmod -R a-w image
+
+ echo Cooking the image
+ tar -C image -czf $out .
+ '';
+
+ in
+
+ result;
+
+}
diff --git a/pkgs/build-support/docker/detjson.py b/pkgs/build-support/docker/detjson.py
new file mode 100644
index 00000000000..ba2c20a475a
--- /dev/null
+++ b/pkgs/build-support/docker/detjson.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Deterministic layer json: https://github.com/docker/hub-feedback/issues/488
+
+import sys
+reload(sys)
+sys.setdefaultencoding('UTF8')
+import json
+
+# If any of the keys below are equal to a certain value
+# then we can delete it because it's the default value
+SAFEDELS = {
+ "Size": 0,
+ "config": {
+ "ExposedPorts": None,
+ "MacAddress": "",
+ "NetworkDisabled": False,
+ "PortSpecs": None,
+ "VolumeDriver": ""
+ }
+}
+SAFEDELS["container_config"] = SAFEDELS["config"]
+
+def makedet(j, safedels):
+ for k,v in safedels.items():
+ if type(v) == dict:
+ makedet(j[k], v)
+ elif k in j and j[k] == v:
+ del j[k]
+
+def main():
+ j = json.load(sys.stdin)
+ makedet(j, SAFEDELS)
+ json.dump(j, sys.stdout, sort_keys=True)
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/pkgs/build-support/docker/pull.nix b/pkgs/build-support/docker/pull.nix
new file mode 100644
index 00000000000..7115a83df42
--- /dev/null
+++ b/pkgs/build-support/docker/pull.nix
@@ -0,0 +1,50 @@
+{ stdenv, lib, curl, jshon, python, runCommand }:
+
+# Inspired and simplified version of fetchurl.
+# For simplicity we only support sha256.
+
+# Currently only registry v1 is supported, compatible with Docker Hub.
+
+{ imageName, imageTag ? "latest", imageId ? null
+, sha256, name ? "${imageName}-${imageTag}"
+, indexUrl ? "https://index.docker.io"
+, registryUrl ? "https://registry-1.docker.io"
+, registryVersion ? "v1"
+, curlOpts ? "" }:
+
+let layer = stdenv.mkDerivation {
+ inherit name imageName imageTag imageId
+ indexUrl registryUrl registryVersion curlOpts;
+
+ builder = ./pull.sh;
+ detjson = ./detjson.py;
+
+ buildInputs = [ curl jshon python ];
+
+ outputHashAlgo = "sha256";
+ outputHash = sha256;
+ outputHashMode = "recursive";
+
+ impureEnvVars = [
+ # We borrow these environment variables from the caller to allow
+ # easy proxy configuration. This is impure, but a fixed-output
+ # derivation like fetchurl is allowed to do so since its result is
+ # by definition pure.
+ "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
+
+ # This variable allows the user to pass additional options to curl
+ "NIX_CURL_FLAGS"
+
+ # This variable allows overriding the timeout for connecting to
+ # the hashed mirrors.
+ "NIX_CONNECT_TIMEOUT"
+ ];
+
+ # Doing the download on a remote machine just duplicates network
+ # traffic, so don't do that.
+ preferLocalBuild = true;
+};
+
+in runCommand "${name}.tar.gz" {} ''
+ tar -C ${layer} -czf $out .
+''
diff --git a/pkgs/build-support/docker/pull.sh b/pkgs/build-support/docker/pull.sh
new file mode 100644
index 00000000000..8a0782780af
--- /dev/null
+++ b/pkgs/build-support/docker/pull.sh
@@ -0,0 +1,75 @@
+# Reference: docker src contrib/download-frozen-image.sh
+
+source $stdenv/setup
+
+# Curl flags to handle redirects, not use EPSV, handle cookies for
+ # servers that need them during redirects, and work on SSL without a
+# certificate (this isn't a security problem because we check the
+# cryptographic hash of the output anyway).
+curl="curl \
+ --location --max-redirs 20 \
+ --retry 3 \
+ --fail \
+ --disable-epsv \
+ --cookie-jar cookies \
+ --insecure \
+ $curlOpts \
+ $NIX_CURL_FLAGS"
+
+baseUrl="$registryUrl/$registryVersion"
+
+fetchLayer() {
+ local url="$1"
+ local dest="$2"
+ local curlexit=18;
+
+ # if we get error code 18, resume partial download
+ while [ $curlexit -eq 18 ]; do
+ # keep this inside an if statement, since on failure it doesn't abort the script
+ if $curl -H "Authorization: Token $token" "$url" --output "$dest"; then
+ return 0
+ else
+ curlexit=$?;
+ fi
+ done
+
+ return $curlexit
+}
+
+token="$($curl -o /dev/null -D- -H 'X-Docker-Token: true' "$indexUrl/$registryVersion/repositories/$imageName/images" | grep X-Docker-Token | tr -d '\r' | cut -d ' ' -f 2)"
+
+if [ -z "$token" ]; then
+ echo "error: registry returned no token"
+ exit 1
+fi
+
+# token="${token//\"/\\\"}"
+
+if [ -z "$imageId" ]; then
+ imageId="$($curl -H "Authorization: Token $token" "$baseUrl/repositories/$imageName/tags/$imageTag")"
+ imageId="${imageId//\"/}"
+ if [ -z "$imageId" ]; then
+ echo "error: no image ID found for ${imageName}:${imageTag}"
+ exit 1
+ fi
+
+ echo "found image ${imageName}:${imageTag}@$imageId"
+fi
+
+mkdir -p $out
+
+jshon -n object \
+ -n object -s "$imageId" -i "$imageTag" \
+ -i "$imageName" > $out/repositories
+
+$curl -H "Authorization: Token $token" "$baseUrl/images/$imageId/ancestry" -o ancestry.json
+
+layerIds=$(jshon -a -u < ancestry.json)
+for layerId in $layerIds; do
+ echo "fetching layer $layerId"
+
+ mkdir "$out/$layerId"
+ echo '1.0' > "$out/$layerId/VERSION"
+ $curl -H "Authorization: Token $token" "$baseUrl/images/$layerId/json" | python $detjson > "$out/$layerId/json"
+ fetchLayer "$baseUrl/images/$layerId/layer" "$out/$layerId/layer.tar"
+done
\ No newline at end of file
diff --git a/pkgs/build-support/docker/tarsum.go b/pkgs/build-support/docker/tarsum.go
new file mode 100644
index 00000000000..4c25f11b71e
--- /dev/null
+++ b/pkgs/build-support/docker/tarsum.go
@@ -0,0 +1,24 @@
+package main
+
+import (
+ "tarsum"
+ "io"
+ "io/ioutil"
+ "fmt"
+ "os"
+)
+
+func main() {
+ ts, err := tarsum.NewTarSum(os.Stdin, false, tarsum.Version1)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ if _, err = io.Copy(ioutil.Discard, ts); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ fmt.Println(ts.Sum(nil))
+}
\ No newline at end of file
diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix
index 9474f0556e9..e86ff62805d 100644
--- a/pkgs/top-level/all-packages.nix
+++ b/pkgs/top-level/all-packages.nix
@@ -289,6 +289,8 @@ let
cmark = callPackage ../development/libraries/cmark { };
+ dockerTools = callPackage ../build-support/docker { };
+
dotnetenv = callPackage ../build-support/dotnetenv {
dotnetfx = dotnetfx40;
};