dockerTools: nix functions for manipulating docker images
This commit is contained in:
365
pkgs/build-support/docker/default.nix
Normal file
365
pkgs/build-support/docker/default.nix
Normal file
@@ -0,0 +1,365 @@
|
||||
{ stdenv, lib, callPackage, runCommand, writeReferencesToFile, writeText, vmTools, writeScript
|
||||
, docker, shadow, utillinux, coreutils, jshon, e2fsprogs, goPackages }:
|
||||
|
||||
# WARNING: this API is unstable and may be subject to backwards-incompatible changes in the future.
|
||||
|
||||
rec {
|
||||
|
||||
# Fetch an image from a Docker registry (see ./pull.nix for the parameters).
pullImage = callPackage ./pull.nix {};
|
||||
|
||||
# We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
# And we cannot untar it, because then we cannot preserve permissions ecc.
#
# Builds a small Go helper (see ./tarsum.go) against the tarsum package
# shipped inside the docker source tree; the resulting binary reads a tar
# stream on stdin and prints its tarsum checksum on stdout.
tarsum = runCommand "tarsum" {
  buildInputs = [ goPackages.go ];
} ''
  mkdir tarsum
  cd tarsum

  cp ${./tarsum.go} tarsum.go
  export GOPATH=$(pwd)
  mkdir src
  # Reuse docker's own tarsum implementation so our checksums match docker's.
  ln -sT ${docker.src}/pkg/tarsum src/tarsum
  go build

  cp tarsum $out
'';
|
||||
|
||||
# buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
#
# Merges the outputs of several derivations into a single directory tree
# by copying them (preserving modes); store paths that are not directories
# are assumed to be tarballs and unpacked instead.  With onlyDeps = true,
# only the list of derivation paths is recorded.
mergeDrvs = { drvs, onlyDeps ? false }:
  runCommand "merge-drvs" {
    inherit drvs onlyDeps;
  } ''
    if [ -n "$onlyDeps" ]; then
      # Only the list of inputs is wanted, not their contents.
      echo $drvs > $out
      exit 0
    fi

    mkdir $out
    for drv in $drvs; do
      echo Merging $drv
      if [ -d "$drv" ]; then
        # -d keeps symlinks as symlinks; --preserve=mode keeps permissions.
        cp -drf --preserve=mode -f $drv/* $out/
      else
        # Not a directory: treat as a tarball; tolerate extraction errors
        # (e.g. paths already present from an earlier drv).
        tar -C $out -xpf $drv || true
      fi
    done
  '';
|
||||
|
||||
# Packs a derivation together with its entire runtime closure into a
# gzipped tarball rooted at /.  With onlyDeps = true the derivation's own
# top-level files are left out and only the closure is packed.
mkTarball = { name ? "docker-tar", drv, onlyDeps ? false }:
  runCommand "${name}.tar.gz" rec {
    inherit drv onlyDeps;

    # File listing every store path in drv's runtime closure, one per line.
    drvClosure = writeReferencesToFile drv;

  } ''
    while read dep; do
      echo Copying $dep
      dir="$(dirname "$dep")"
      mkdir -p "rootfs/$dir"
      cp -drf --preserve=mode $dep "rootfs/$dir/"
    done < "$drvClosure"

    if [ -z "$onlyDeps" ]; then
      # Also overlay the derivation's own files onto the image root.
      cp -drf --preserve=mode $drv/* rootfs/
    fi

    tar -C rootfs/ -cpzf $out .
  '';
|
||||
|
||||
# Wraps a shell snippet into an executable script that runs under the
# stdenv shell with coreutils on PATH and errexit (set -e) enabled.
shellScript = text:
  writeScript "script.sh" ''
    #!${stdenv.shell}
    set -e
    export PATH=${coreutils}/bin:/bin

    ${text}
  '';
|
||||
|
||||
# Shell snippet that prepares a minimal user database (passwd, shadow,
# group, gshadow, a permissive PAM config, and login.defs) so that the
# shadow tools (useradd, groupadd, ...) work inside the image being
# built.  Files that already exist are left untouched.
shadowSetup = ''
  export PATH=${shadow}/bin:$PATH
  mkdir -p /etc/pam.d
  if [ ! -f /etc/passwd ]; then
    echo "root:x:0:0::/root:/bin/sh" > /etc/passwd
    echo "root:!x:::::::" > /etc/shadow
  fi
  if [ ! -f /etc/group ]; then
    echo "root:x:0:" > /etc/group
    echo "root:x::" > /etc/gshadow
  fi
  if [ ! -f /etc/pam.d/other ]; then
    cat > /etc/pam.d/other <<EOF
account sufficient pam_unix.so
auth sufficient pam_rootok.so
password requisite pam_unix.so nullok sha512
session required pam_unix.so
EOF
  fi
  if [ ! -f /etc/login.defs ]; then
    touch /etc/login.defs
  fi
'';
|
||||
|
||||
# Append to tar instead of unpacking
#
# Unpacks the given tarballs into one tree (-k: never overwrite existing
# files, so earlier tarballs win on conflicts) and repacks the result as
# a single gzipped tarball.
mergeTarballs = tarballs:
  runCommand "merge-tars" { inherit tarballs; } ''
    mkdir tmp
    for tb in $tarballs; do
      tar -C tmp -xkpf $tb
    done
    tar -C tmp -cpzf $out .
  '';
|
||||
|
||||
# Runs a build inside a Linux VM with the layers of a base image stacked
# via an overlayfs mount.  Hook points for callers:
#   preMount   - runs before the overlay is mounted (cwd = scratch disk root);
#   postMount  - runs with the merged filesystem mounted at ./mnt;
#   postUmount - runs after unmounting, with this run's changes captured
#                in ./layer (overlayfs whiteout devices already converted
#                back to docker-style .wh.* files).
runWithOverlay = { name , fromImage ? null, fromImageName ? null, fromImageTag ? null
  , diskSize ? 1024, preMount ? "", postMount ? "", postUmount ? "" }:
  vmTools.runInLinuxVM (
    runCommand name {
      preVM = vmTools.createEmptyImage { size = diskSize; fullName = "docker-run-disk"; };

      inherit fromImage fromImageName fromImageTag;

      buildInputs = [ utillinux e2fsprogs jshon ];
    } ''
      rm -rf $out

      # Work on a scratch ext filesystem: overlayfs needs a real backing fs.
      mkdir disk
      mkfs /dev/${vmTools.hd}
      mount /dev/${vmTools.hd} disk
      cd disk

      if [ -n "$fromImage" ]; then
        echo Unpacking base image
        mkdir image
        tar -C image -xpf "$fromImage"

        # Default to the first repository and tag found in the metadata.
        if [ -z "$fromImageName" ]; then
          fromImageName=$(jshon -k < image/repositories|head -n1)
        fi
        if [ -z "$fromImageTag" ]; then
          fromImageTag=$(jshon -e $fromImageName -k < image/repositories|head -n1)
        fi
        # ID of the topmost layer; the loop below walks the parent chain.
        parentID=$(jshon -e $fromImageName -e $fromImageTag -u < image/repositories)
      fi

      # Unpack every ancestor layer and accumulate the overlayfs lowerdir
      # chain (colon-separated, topmost layer first).
      lowerdir=""
      while [ -n "$parentID" ]; do
        echo Unpacking layer $parentID
        mkdir -p image/$parentID/layer
        tar -C image/$parentID/layer -xpf image/$parentID/layer.tar
        rm image/$parentID/layer.tar

        # Convert docker whiteout markers (.wh.foo) into overlayfs whiteouts
        # (character device 0/0 named foo) so deletions apply when stacked.
        find image/$parentID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;

        lowerdir=$lowerdir''${lowerdir:+:}image/$parentID/layer
        parentID=$(cat image/$parentID/json|(jshon -e parent -u 2>/dev/null || true))
      done

      mkdir work
      mkdir layer
      mkdir mnt

      ${preMount}

      if [ -n "$lowerdir" ]; then
        mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
      else
        # No base image: expose the (initially empty) upper layer directly.
        mount --bind layer mnt
      fi

      ${postMount}

      umount mnt

      # Translate overlayfs whiteout devices back into docker .wh.* markers
      # before the layer gets packed.
      pushd layer
      find . -type c -exec bash -c 'name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"' \;
      popd

      ${postUmount}
    '');
|
||||
|
||||
# Flattens a (possibly multi-layer) docker image into a single gzipped
# tarball of its final filesystem contents.
exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }:
  runWithOverlay {
    inherit name fromImage fromImageName fromImageTag diskSize;

    # Everything visible at mnt is the merged view of all layers.
    postMount = ''
      echo Packing raw image
      tar -C mnt -czf $out .
    '';
  };
|
||||
|
||||
# Builds a docker layer without a VM or root privileges: copies the given
# store paths in, runs extraCommands inside the layer, and packs the
# result as layer.tar plus its json metadata (annotated with the tarsum
# checksum docker expects).
mkPureLayer = { baseJson, contents ? null, extraCommands ? "" }:
  runCommand "docker-layer" {
    inherit baseJson contents extraCommands;

    buildInputs = [ jshon ];
  } ''
    mkdir layer
    if [ -n "$contents" ]; then
      echo Adding contents
      for c in $contents; do
        cp -drf $c/* layer/
        # Store paths are read-only; re-open them for later edits.
        chmod -R ug+w layer/
      done
    fi

    pushd layer
    ${extraCommands}
    popd

    echo Packing layer
    mkdir $out
    tar -C layer -cf $out/layer.tar .
    # Docker identifies layer content by its tarsum; record it in the json.
    ts=$(${tarsum} < $out/layer.tar)
    cat ${baseJson} | jshon -s "$ts" -i checksum > $out/json
    echo -n "1.0" > $out/VERSION
  '';
|
||||
|
||||
# Builds a docker layer by executing the runAsRoot script inside a VM,
# chrooted into the image filesystem (base-image layers stacked through
# runWithOverlay).  Only the changes made on top of the base image end up
# in the resulting layer.
mkRootLayer = { runAsRoot, baseJson, fromImage ? null, fromImageName ? null, fromImageTag ? null
  , diskSize ? 1024, contents ? null, extraCommands ? "" }:
  let runAsRootScript = writeScript "run-as-root.sh" runAsRoot;
  in runWithOverlay {
    name = "docker-layer";

    inherit fromImage fromImageName fromImageTag diskSize;

    # Copy the requested store paths into the upper layer before mounting.
    preMount = lib.optionalString (contents != null) ''
      echo Adding contents
      for c in ${builtins.toString contents}; do
        cp -drf $c/* layer/
        chmod -R ug+w layer/
      done
    '';

    postMount = ''
      # Provide the virtual filesystems and the host nix store inside the
      # chroot so runAsRoot can execute store binaries.
      mkdir -p mnt/{dev,proc,sys,nix/store}
      mount --rbind /dev mnt/dev
      mount --rbind /sys mnt/sys
      mount --rbind /nix/store mnt/nix/store

      # Run in fresh ipc/mount/net/pid/uts namespaces with a private /proc
      # so the script is contained to the chroot.
      unshare -imnpuf --mount-proc chroot mnt ${runAsRootScript}
      umount -R mnt/dev mnt/sys mnt/nix/store
      # Remove the mount points again if empty, so they do not leak into
      # the packed layer.
      rmdir --ignore-fail-on-non-empty mnt/dev mnt/proc mnt/sys mnt/nix/store mnt/nix
    '';

    postUmount = ''
      pushd layer
      ${extraCommands}
      popd

      echo Packing layer
      mkdir $out
      tar -C layer -cf $out/layer.tar .
      # Same metadata handling as mkPureLayer: record the tarsum checksum.
      ts=$(${tarsum} < $out/layer.tar)
      cat ${baseJson} | jshon -s "$ts" -i checksum > $out/json
      echo -n "1.0" > $out/VERSION
    '';
  };
|
||||
|
||||
# Builds a loadable docker image tarball.  Outline:
# 1. extract the base image
# 2. create the layer
# 3. add layer deps to the layer itself, diffing with the base image
# 4. compute the layer id
# 5. put the layer in the image
# 6. repack the image
buildImage = args@{ name, tag ? "latest"
  , fromImage ? null, fromImageName ? null, fromImageTag ? null
  , contents ? null, tarballs ? [], config ? null
  , runAsRoot ? null, diskSize ? 1024, extraCommands ? "" }:

  let

    # Layer metadata template; the fixed creation date keeps the build
    # output deterministic.
    baseJson = writeText "${name}-config.json" (builtins.toJSON {
      created = "1970-01-01T00:00:01Z";
      architecture = "amd64";
      os = "linux";
      config = config;
    });

    # The VM/root path is only needed when a runAsRoot script was given.
    layer = (if runAsRoot == null
      then mkPureLayer { inherit baseJson contents extraCommands; }
      else mkRootLayer { inherit baseJson fromImage fromImageName fromImageTag contents runAsRoot diskSize extraCommands; });
    # Runtime closure of the layer, used below to pull missing store
    # dependencies into the image.
    depsTarball = mkTarball { name = "${name}-deps";
                              drv = layer;
                              onlyDeps = true; };

    result = runCommand "${name}.tar.gz" {
      buildInputs = [ jshon ];

      imageName = name;
      imageTag = tag;
      inherit fromImage baseJson;

      mergedTarball = if tarballs == [] then depsTarball else mergeTarballs ([ depsTarball ] ++ tarballs);

      # Expose the original arguments so callers can rebuild or extend
      # the image later.
      passthru = {
        buildArgs = args;
      };
    } ''
      mkdir image
      touch baseFiles
      if [ -n "$fromImage" ]; then
        echo Unpacking base image
        tar -C image -xpf "$fromImage"

        if [ -z "$fromImageName" ]; then
          fromImageName=$(jshon -k < image/repositories|head -n1)
        fi
        if [ -z "$fromImageTag" ]; then
          fromImageTag=$(jshon -e $fromImageName -k < image/repositories|head -n1)
        fi
        parentID=$(jshon -e $fromImageName -e $fromImageTag -u < image/repositories)

        # Record every file already provided by the base image's layers.
        for l in image/*/layer.tar; do
          tar -tf $l >> baseFiles
        done
      fi

      chmod -R ug+rw image

      mkdir temp
      cp ${layer}/* temp/
      chmod ug+w temp/*

      echo Adding dependencies
      tar -tf temp/layer.tar >> baseFiles
      tar -tf "$mergedTarball" | grep -v ${layer} > layerFiles
      # NOTE(review): the 3-line threshold presumably treats a listing of
      # just ./, ./nix, ./nix/store as "no real dependencies" — confirm.
      if [ "$(wc -l layerFiles|cut -d ' ' -f 1)" -gt 3 ]; then
        sed -i -e 's|^[\./]\+||' baseFiles layerFiles
        # Keep only files not already present in the base image or layer.
        comm <(sort -n baseFiles|uniq) <(sort -n layerFiles|uniq) -1 -3 > newFiles
        mkdir deps
        pushd deps
        tar -xpf "$mergedTarball" --no-recursion --files-from ../newFiles 2>/dev/null || true
        tar -rf ../temp/layer.tar --no-recursion --files-from ../newFiles 2>/dev/null || true
        popd
      else
        echo No new deps, no diffing needed
      fi

      echo Adding meta

      if [ -n "$parentID" ]; then
        cat temp/json | jshon -s "$parentID" -i parent > tmpjson
        mv tmpjson temp/json
      fi

      # The layer id is the sha256 of its (parent-annotated) json metadata.
      layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
      size=$(stat --printf="%s" temp/layer.tar)
      cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
      mv tmpjson temp/json

      mv temp image/$layerID

      # repositories maps image name -> tag -> topmost layer id.
      jshon -n object \
        -n object -s "$layerID" -i "$imageTag" \
        -i "$imageName" > image/repositories

      chmod -R a-w image

      echo Cooking the image
      tar -C image -czf $out .
    '';

  in

  result;
|
||||
|
||||
}
|
||||
38
pkgs/build-support/docker/detjson.py
Normal file
38
pkgs/build-support/docker/detjson.py
Normal file
@@ -0,0 +1,38 @@
|
||||
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Deterministic layer json: https://github.com/docker/hub-feedback/issues/488
#
# Filter script: reads docker layer json on stdin, removes keys that hold
# their default value, and prints the result with sorted keys so that
# semantically identical json serializes byte-for-byte identically.

import sys
# Python 2 only: force UTF-8 as the default string encoding for json I/O.
reload(sys)
sys.setdefaultencoding('UTF8')
import json

# If any of the keys below are equal to a certain value
# then we can delete it because it's the default value
SAFEDELS = {
    "Size": 0,
    "config": {
        "ExposedPorts": None,
        "MacAddress": "",
        "NetworkDisabled": False,
        "PortSpecs": None,
        "VolumeDriver": ""
    }
}
# container_config shares the same set of default values as config.
SAFEDELS["container_config"] = SAFEDELS["config"]
|
||||
def makedet(j, safedels):
    # Recursively delete keys from j whose value equals the known default
    # given in safedels, so that semantically identical layer json
    # serializes identically.  Mutates j in place; returns None.
    #
    # j        -- dict parsed from the layer json
    # safedels -- dict mapping key -> default value, or key -> nested dict
    #             of defaults for a sub-object
    for k, v in safedels.items():
        if type(v) == dict:
            # Only recurse when the section is actually present; the
            # original unconditional j[k] raised KeyError for json that
            # lacks e.g. "container_config".
            if k in j:
                makedet(j[k], v)
        elif k in j and j[k] == v:
            del j[k]
||||
|
||||
def main():
    # Filter: read layer json from stdin, strip default-valued keys, and
    # emit it with sorted keys so the output is byte-for-byte stable.
    j = json.load(sys.stdin)
    makedet(j, SAFEDELS)
    json.dump(j, sys.stdout, sort_keys=True)

if __name__ == '__main__':
    main()
|
||||
50
pkgs/build-support/docker/pull.nix
Normal file
50
pkgs/build-support/docker/pull.nix
Normal file
@@ -0,0 +1,50 @@
|
||||
{ stdenv, lib, curl, jshon, python, runCommand }:

# Inspired and simplified version of fetchurl.
# For simplicity we only support sha256.

# Currently only registry v1 is supported, compatible with Docker Hub.

# Fetches a docker image (all ancestor layers plus metadata) as a
# fixed-output derivation and repacks it into a tarball suitable for
# `docker load`.
{ imageName, imageTag ? "latest", imageId ? null
, sha256, name ? "${imageName}-${imageTag}"
, indexUrl ? "https://index.docker.io"
, registryUrl ? "https://registry-1.docker.io"
, registryVersion ? "v1"
, curlOpts ? "" }:

let layer = stdenv.mkDerivation {
  inherit name imageName imageTag imageId
          indexUrl registryUrl registryVersion curlOpts;

  builder = ./pull.sh;
  # Normalizes each layer's json (see detjson.py) so the fixed-output
  # hash is reproducible across downloads.
  detjson = ./detjson.py;

  buildInputs = [ curl jshon python ];

  # Fixed-output derivation: network access is permitted because the
  # result is pinned by its sha256.
  outputHashAlgo = "sha256";
  outputHash = sha256;
  outputHashMode = "recursive";

  impureEnvVars = [
    # We borrow these environment variables from the caller to allow
    # easy proxy configuration.  This is impure, but a fixed-output
    # derivation like fetchurl is allowed to do so since its result is
    # by definition pure.
    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"

    # This variable allows the user to pass additional options to curl
    "NIX_CURL_FLAGS"

    # This variable allows overriding the timeout for connecting to
    # the hashed mirrors.
    "NIX_CONNECT_TIMEOUT"
  ];

  # Doing the download on a remote machine just duplicates network
  # traffic, so don't do that.
  preferLocalBuild = true;
};

# Repack the fetched directory tree as a gzipped tarball.
in runCommand "${name}.tar.gz" {} ''
  tar -C ${layer} -czf $out .
''
|
||||
75
pkgs/build-support/docker/pull.sh
Normal file
75
pkgs/build-support/docker/pull.sh
Normal file
@@ -0,0 +1,75 @@
|
||||
# Reference: docker src contrib/download-frozen-image.sh
#
# Builder for pull.nix: downloads a docker image from a v1 registry.
# Inputs arrive via the environment (imageName, imageTag, imageId,
# indexUrl, registryUrl, registryVersion, curlOpts, detjson); the image
# tree is written to $out.

source $stdenv/setup

# Curl flags to handle redirects, not use EPSV, handle cookies for
# servers to need them during redirects, and work on SSL without a
# certificate (this isn't a security problem because we check the
# cryptographic hash of the output anyway).
curl="curl \
 --location --max-redirs 20 \
 --retry 3 \
 --fail \
 --disable-epsv \
 --cookie-jar cookies \
 --insecure \
 $curlOpts \
 $NIX_CURL_FLAGS"

baseUrl="$registryUrl/$registryVersion"
|
||||
|
||||
# Downloads a single layer tarball from $1 (url) to $2 (dest), retrying
# while curl exits with code 18 (partial transfer).  Uses the $token and
# $curl globals set up above.
# NOTE(review): no `--continue-at -` is passed, so each retry restarts
# the transfer from scratch rather than resuming — confirm whether
# resuming was intended.
fetchLayer() {
    local url="$1"
    local dest="$2"
    local curlexit=18;

    # if we get error code 18, resume partial download
    while [ $curlexit -eq 18 ]; do
        # keep this inside an if statement, since on failure it doesn't abort the script
        if $curl -H "Authorization: Token $token" "$url" --output "$dest"; then
            return 0
        else
            curlexit=$?;
        fi
    done

    # Propagate the last non-18 curl exit code to the caller.
    return $curlexit
}
|
||||
|
||||
# Ask the index for a pull token; it comes back in the X-Docker-Token
# response header.
token="$($curl -o /dev/null -D- -H 'X-Docker-Token: true' "$indexUrl/$registryVersion/repositories/$imageName/images" | grep X-Docker-Token | tr -d '\r' | cut -d ' ' -f 2)"

if [ -z "$token" ]; then
  echo "error: registry returned no token"
  exit 1
fi

# token="${token//\"/\\\"}"

# Resolve the tag to a concrete image id unless one was given explicitly.
if [ -z "$imageId" ]; then
  imageId="$($curl -H "Authorization: Token $token" "$baseUrl/repositories/$imageName/tags/$imageTag")"
  # The registry answers with a JSON string; strip the quotes.
  imageId="${imageId//\"/}"
  if [ -z "$imageId" ]; then
    echo "error: no image ID found for ${imageName}:${imageTag}"
    exit 1
  fi

  echo "found image ${imageName}:${imageTag}@$imageId"
fi

mkdir -p $out

# repositories maps image name -> tag -> image id, as `docker load` expects.
jshon -n object \
      -n object -s "$imageId" -i "$imageTag" \
      -i "$imageName" > $out/repositories

# The ancestry endpoint lists this image id and all its parent layers.
$curl -H "Authorization: Token $token" "$baseUrl/images/$imageId/ancestry" -o ancestry.json

# Fetch metadata and layer tarball for every layer in the ancestry.
layerIds=$(jshon -a -u < ancestry.json)
for layerId in $layerIds; do
  echo "fetching layer $layerId"

  mkdir "$out/$layerId"
  echo '1.0' > "$out/$layerId/VERSION"
  # detjson strips default-valued keys so the fixed-output hash is stable.
  $curl -H "Authorization: Token $token" "$baseUrl/images/$layerId/json" | python $detjson > "$out/$layerId/json"
  fetchLayer "$baseUrl/images/$layerId/layer" "$out/$layerId/layer.tar"
done
|
||||
24
pkgs/build-support/docker/tarsum.go
Normal file
24
pkgs/build-support/docker/tarsum.go
Normal file
@@ -0,0 +1,24 @@
|
||||
// Command tarsum reads a tar archive on stdin and prints its docker
// tarsum (Version1) checksum on stdout.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"tarsum"
)

func main() {
	// Wrap stdin in docker's tarsum reader; false = input is not gzipped.
	ts, err := tarsum.NewTarSum(os.Stdin, false, tarsum.Version1)
	if err != nil {
		// Errors must go to stderr: stdout is reserved for the checksum,
		// which callers capture via command substitution (ts=$(tarsum ...)).
		// The original fmt.Println wrote the error to stdout, where it
		// would be consumed as a bogus checksum.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Drain the stream; the checksum is accumulated as a side effect of
	// reading through the tarsum wrapper.
	if _, err = io.Copy(ioutil.Discard, ts); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Println(ts.Sum(nil))
}
|
||||
@@ -289,6 +289,8 @@ let
|
||||
|
||||
cmark = callPackage ../development/libraries/cmark { };
|
||||
|
||||
dockerTools = callPackage ../build-support/docker { };
|
||||
|
||||
dotnetenv = callPackage ../build-support/dotnetenv {
|
||||
dotnetfx = dotnetfx40;
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user