Merge branch 'master' into staging

Daiderd Jordan
2018-06-08 18:06:47 +02:00
165 changed files with 2577 additions and 2052 deletions

View File

@@ -11,7 +11,19 @@
, ncurses
}:
buildPythonPackage rec {
let
excludedTests = []
# cython's testsuite is not working very well with libc++
# We are however optimistic about things outside of testsuite still working
++ stdenv.lib.optionals (stdenv.cc.isClang or false) [ "cpdef_extern_func" "libcpp_algo" ]
# Some tests in the test suite aren't working on aarch64. Disable them for
# now until upstream finds a workaround.
# Upstream issue here: https://github.com/cython/cython/issues/2308
++ stdenv.lib.optionals stdenv.isAarch64 [ "numpy_memoryview" ]
++ stdenv.lib.optionals stdenv.isi686 [ "future_division" "overflow_check_longlong" ]
;
in buildPythonPackage rec {
pname = "Cython";
version = "0.28.3";
@@ -29,12 +41,11 @@ buildPythonPackage rec {
buildInputs = [ glibcLocales gdb ];
LC_ALL = "en_US.UTF-8";
# cython's testsuite is not working very well with libc++
# We are however optimistic about things outside of testsuite still working
checkPhase = ''
export HOME="$NIX_BUILD_TOP"
${python.interpreter} runtests.py \
${if stdenv.cc.isClang or false then ''--exclude="(cpdef_extern_func|libcpp_algo)"'' else ""}
${stdenv.lib.optionalString (builtins.length excludedTests != 0)
''--exclude="(${builtins.concatStringsSep "|" excludedTests})"''}
'';
meta = {
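
A rough illustration of how the new exclude logic renders (assuming an aarch64 build with a non-clang stdenv, and writing ${python.interpreter} simply as python): excludedTests evaluates to [ "numpy_memoryview" ], concatStringsSep "|" joins the names, and the checkPhase comes out approximately as

    export HOME="$NIX_BUILD_TOP"
    python runtests.py \
      --exclude="(numpy_memoryview)"

On a platform where none of the optionals apply, the list is empty and the length check suppresses the --exclude flag altogether.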

View File

@@ -0,0 +1,24 @@
{ stdenv, buildPythonPackage, fetchPypi, numpy, future, nose }:
buildPythonPackage rec {
pname = "autograd";
version = "1.2";
src = fetchPypi {
inherit pname version;
sha256 = "0zd4lhz9dpll4i63jjijbzkzbgmg8h88il7lr7kmcylvadnzm2x0";
};
propagatedBuildInputs = [ numpy future ];
# Currently, the PyPI tarball doesn't contain the tests. When that has been
# fixed, enable testing. See: https://github.com/HIPS/autograd/issues/404
doCheck = false;
meta = with stdenv.lib; {
homepage = https://github.com/HIPS/autograd;
description = "Compute derivatives of NumPy code efficiently";
license = licenses.mit;
maintainers = with maintainers; [ jluttine ];
};
}

View File

@@ -1,16 +1,13 @@
{ lib, buildPythonPackage, fetchFromGitHub
{ lib, buildPythonPackage, fetchPypi
, click, jinja2, terminaltables }:
buildPythonPackage rec {
pname = "envs";
version = "1.2.4";
version = "1.2.6";
# move to fetchPyPi when https://github.com/capless/envs/issues/8 is fixed
src = fetchFromGitHub {
owner = "capless";
repo = "envs";
rev = "e1f6cbad7f20316fc44324d2c50826d57c2817a8";
sha256 = "0p88a79amj0jxll3ssq1dzg78y7zwgc8yqyr7cf53nv2i7kmpakv";
src = fetchPypi {
inherit pname version;
sha256 = "5fe059d6df1ae01c422d32b10ec7f539baad0e7d339f4c8b2de4ad8cbb07c8ba";
};
checkInputs = [ click jinja2 terminaltables ];

View File

@@ -1,5 +1,5 @@
{ stdenv, fetchPypi, fetchpatch, python, buildPythonPackage
, numpy, hdf5, cython, six, pkgconfig
{ stdenv, fetchPypi, fetchpatch, isPy27, python, buildPythonPackage
, numpy, hdf5, cython, six, pkgconfig, unittest2
, mpi4py ? null, openssh }:
assert hdf5.mpiSupport -> mpi4py != null && hdf5.mpi == mpi4py.mpi;
@@ -10,12 +10,12 @@ let
mpi = hdf5.mpi;
mpiSupport = hdf5.mpiSupport;
in buildPythonPackage rec {
version = "2.7.1";
version = "2.8.0";
pname = "h5py";
src = fetchPypi {
inherit pname version;
sha256 = "180a688311e826ff6ae6d3bda9b5c292b90b28787525ddfcb10a29d5ddcae2cc";
sha256 = "0mdr6wrq02ac93m1aqx9kad0ppfzmm4imlxqgyy1x4l7hmdcc9p6";
};
configure_flags = "--hdf5=${hdf5}" + optionalString mpiSupport " --mpi";
@@ -30,18 +30,13 @@ in buildPythonPackage rec {
preBuild = if mpiSupport then "export CC=${mpi}/bin/mpicc" else "";
checkInputs = optional isPy27 unittest2;
nativeBuildInputs = [ pkgconfig ];
buildInputs = [ hdf5 cython ]
++ optional mpiSupport mpi;
propagatedBuildInputs = [ numpy six]
++ optionals mpiSupport [ mpi4py openssh ];
patches = [
# Patch is based on upstream patch. The tox.ini hunk had to be removed.
# https://github.com/h5py/h5py/commit/5009e062a6f7d4e074cab0fcb42a780ac2b1d7d4.patch
./numpy-1.14.patch
];
meta = {
description =
"Pythonic interface to the HDF5 binary data format";

View File

@@ -1,94 +0,0 @@
From 5009e062a6f7d4e074cab0fcb42a780ac2b1d7d4 Mon Sep 17 00:00:00 2001
From: James Tocknell <aragilar@gmail.com>
Date: Thu, 28 Dec 2017 20:55:55 +1100
Subject: [PATCH] FIX: Don't reorder compound types, breaks on numpy 1.14
---
h5py/h5t.pyx | 25 +++++++------------------
setup.py | 2 +-
tox.ini | 4 ++--
3 files changed, 10 insertions(+), 21 deletions(-)
diff --git a/h5py/h5t.pyx b/h5py/h5t.pyx
index cc2344e1..7445e9eb 100644
--- a/h5py/h5t.pyx
+++ b/h5py/h5t.pyx
@@ -1136,12 +1136,6 @@ cdef class TypeCompoundID(TypeCompositeID):
else:
if sys.version[0] == '3':
field_names = [x.decode('utf8') for x in field_names]
- if len(field_names) > 0:
- collated_fields = zip(field_names, field_types, field_offsets)
- ordered_fields = sorted(
- collated_fields, key=operator.itemgetter(2))
- field_names, field_types, field_offsets = \
- map(list, zip(*ordered_fields))
typeobj = dtype({
'names': field_names,
'formats': field_types,
@@ -1458,8 +1452,7 @@ cdef TypeCompoundID _c_compound(dtype dt, int logical, int aligned):
cdef dtype member_dt
cdef size_t member_offset = 0
- cdef dict offsets = {}
- cdef list fields = []
+ cdef dict fields = {}
# The challenge with correctly converting a numpy/h5py dtype to a HDF5 type
# which is composed of subtypes has three aspects we must consider
@@ -1468,19 +1461,14 @@ cdef TypeCompoundID _c_compound(dtype dt, int logical, int aligned):
# 2. For correct round-tripping of aligned dtypes, we need to consider how
# much padding we need by looking at the field offsets
# 3. There is no requirement that the offsets be monotonically increasing
- # (so we start by sorting the names as a function of increasing offset)
#
# The code below tries to cover these aspects
- # Get offsets for each compound member
- for name, field in dt.fields.items():
- offsets[name] = field[1]
-
# Build list of names, offsets, and types, sorted by increasing offset
# (i.e. the position of the member in the struct)
- for name in sorted(dt.names, key=offsets.__getitem__):
+ for name in sorted(dt.names, key=(lambda n: dt.fields[n][1])):
field = dt.fields[name]
- name = name.encode('utf8') if isinstance(name, unicode) else name
+ h5_name = name.encode('utf8') if isinstance(name, unicode) else name
# Get HDF5 data types and set the offset for each member
member_dt = field[0]
@@ -1489,7 +1477,7 @@ cdef TypeCompoundID _c_compound(dtype dt, int logical, int aligned):
if aligned and (member_offset > field[1]
or member_dt.itemsize != member_type.get_size()):
raise TypeError("Enforced alignment not compatible with HDF5 type")
- fields.append((name, member_offset, member_type))
+ fields[name] = (h5_name, member_offset, member_type)
# Update member offset based on the HDF5 type size
member_offset += member_type.get_size()
@@ -1500,8 +1488,9 @@ cdef TypeCompoundID _c_compound(dtype dt, int logical, int aligned):
# Create compound with the necessary size, and insert its members
tid = H5Tcreate(H5T_COMPOUND, member_offset)
- for (name, member_offset, member_type) in fields:
- H5Tinsert(tid, name, member_offset, member_type.id)
+ for name in dt.names:
+ h5_name, member_offset, member_type = fields[name]
+ H5Tinsert(tid, h5_name, member_offset, member_type.id)
return TypeCompoundID(tid)
diff --git a/setup.py b/setup.py
index ec2a78a7..bbb086f6 100755
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@
# these are required to build h5py
# RUN_REQUIRES is included as setup.py test needs RUN_REQUIRES for testing
# RUN_REQUIRES can be removed when setup.py test is removed
-SETUP_REQUIRES = RUN_REQUIRES + [NUMPY_DEP, 'Cython>=0.19', 'pkgconfig']
+SETUP_REQUIRES = RUN_REQUIRES + [NUMPY_DEP, 'Cython>=0.23', 'pkgconfig']
# Needed to avoid trying to install numpy/cython on pythons which the latest
# versions don't support

View File

@@ -0,0 +1,25 @@
{ stdenv, buildPythonPackage, fetchPypi, numpy, pandas, pytz, six, pytest }:
buildPythonPackage rec {
pname = "pvlib";
version = "0.5.2";
src = fetchPypi {
inherit pname version;
sha256 = "1897v9qq97nk5n0hfm9089yz8pffd42795mnhcyq48g9bsyap1xi";
};
checkInputs = [ pytest ];
propagatedBuildInputs = [ numpy pandas pytz six ];
# Currently, the PyPI tarball doesn't contain the tests. When that has been
# fixed, enable testing. See: https://github.com/pvlib/pvlib-python/issues/473
doCheck = false;
meta = with stdenv.lib; {
homepage = http://pvlib-python.readthedocs.io;
description = "Simulate the performance of photovoltaic energy systems";
license = licenses.bsd3;
maintainers = with maintainers; [ jluttine ];
};
}

View File

@@ -0,0 +1,33 @@
{ buildPythonPackage, isPy3k, fetchFromGitHub, stdenv,
netcdf, hdf5, libminc, ezminc,
cython, numpy, scipy
}:
buildPythonPackage rec {
pname = "pyezminc";
version = "1.2.01";
disabled = isPy3k;
src = fetchFromGitHub {
owner = "BIC-MNI";
repo = "pyezminc";
rev = "release-${version}";
sha256 = "13smvramacisbwj8qsl160dnvv6ynngn1jmqwhvy146nmadphyv1";
};
nativeBuildInputs = [ cython ];
buildInputs = [ netcdf hdf5 libminc ezminc ];
propagatedBuildInputs = [ numpy scipy ];
NIX_CFLAGS_COMPILE = "-fpermissive";
doCheck = false; # e.g., expects test data in /opt
meta = {
homepage = https://github.com/BIC-MNI/pyezminc;
description = "Python API for libminc using EZMINC";
license = stdenv.lib.licenses.gpl2;
maintainers = with stdenv.lib.maintainers; [ bcdarwin ];
};
}

View File

@@ -2,7 +2,7 @@
, lib
, fetchurl
, buildPythonPackage
, isPy3k, isPy36, pythonOlder
, isPy3k, isPy35, isPy36, pythonOlder
, numpy
, six
, protobuf
@@ -12,20 +12,40 @@
, enum34
, tensorflow-tensorboard
, cudaSupport ? false
, cudatoolkit ? null
, cudnn ? null
, nvidia_x11 ? null
, zlib
, python
, symlinkJoin
}:
# tensorflow is built from a downloaded wheel because the source
# build doesn't yet work on Darwin.
# We keep this binary build for two reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain
buildPythonPackage rec {
assert cudaSupport -> cudatoolkit != null
&& cudnn != null
&& nvidia_x11 != null;
let
cudatoolkit_joined = symlinkJoin {
name = "unsplit_cudatoolkit";
paths = [ cudatoolkit.out
cudatoolkit.lib ];};
in buildPythonPackage rec {
pname = "tensorflow";
version = "1.5.0";
version = "1.7.1";
format = "wheel";
src = fetchurl {
url = "https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-${version}-py3-none-any.whl";
sha256 = "1mapv45n9wmgcq3i3im0pv0gmhwkxw5z69nsnxb1gfxbj1mz5h9m";
};
src = let
pyVerNoDot = lib.strings.stringAsChars (x: if x == "." then "" else x) "${python.majorVersion}";
version = if stdenv.isDarwin then builtins.substring 0 1 pyVerNoDot else pyVerNoDot;
platform = if stdenv.isDarwin then "mac" else "linux";
unit = if cudaSupport then "gpu" else "cpu";
key = "${platform}_py_${version}_${unit}";
dls = import ./tf1.7.1-hashes.nix;
in fetchurl dls.${key};
propagatedBuildInputs = [ numpy six protobuf absl-py ]
++ lib.optional (!isPy3k) mock
@@ -38,14 +58,28 @@ buildPythonPackage rec {
# bleach) Hence we disable dependency checking for now.
installFlags = lib.optional isPy36 "--no-dependencies";
# Note that we need to run *after* the fixup phase because the
# libraries are loaded at runtime. If we run in preFixup then
# patchelf --shrink-rpath will remove the cuda libraries.
postFixup = let
rpath = stdenv.lib.makeLibraryPath
([ stdenv.cc.cc.lib zlib ] ++ lib.optionals cudaSupport [ cudatoolkit_joined cudnn nvidia_x11 ]);
in
lib.optionalString (stdenv.isLinux) ''
rrPath="$out/${python.sitePackages}/tensorflow/:${rpath}"
internalLibPath="$out/${python.sitePackages}/tensorflow/python/_pywrap_tensorflow_internal.so"
find $out -name '*.${stdenv.hostPlatform.extensions.sharedLibrary}' -exec patchelf --set-rpath "$rrPath" {} \;
'';
meta = with stdenv.lib; {
description = "Computation using data flow graphs for scalable machine learning";
homepage = http://tensorflow.org;
license = licenses.asl20;
maintainers = with maintainers; [ jyp abbradar ];
platforms = platforms.darwin;
platforms = with platforms; linux ++ lib.optionals (!cudaSupport) darwin;
# Python 2.7 build uses different string encoding.
# See https://github.com/NixOS/nixpkgs/pull/37044#issuecomment-373452253
broken = cudaSupport || !isPy3k;
broken = stdenv.isDarwin && !isPy3k;
};
}
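
A rough sketch of how the new src selection resolves, assuming an x86_64-linux build with Python 3.6 and cudaSupport = true: pyVerNoDot is "36", platform is "linux", unit is "gpu", so key becomes "linux_py_36_gpu" and fetchurl is called with the matching attribute from ./tf1.7.1-hashes.nix:

    fetchurl {
      url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.7.1-cp36-cp36m-linux_x86_64.whl";
      sha256 = "1kkqx8m7h03b8l9l6dki4g4r7sgi3wbb4dp9gvk6l08n4vnlvc50";
    }

On Darwin only the first digit of the Python version is kept, giving keys such as mac_py_3_cpu.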

View File

@@ -0,0 +1,29 @@
version=1.7.1
hashfile=tf${version}-hashes.nix
rm -f $hashfile
echo "{" >> $hashfile
for sys in "linux" "mac"; do
for tfpref in "cpu/tensorflow" "gpu/tensorflow_gpu"; do
for pykind in "py2-none-any" "py3-none-any" "cp27-none-linux_x86_64" "cp35-cp35m-linux_x86_64" "cp36-cp36m-linux_x86_64"; do
if [ $sys == "mac" ]; then
[[ $pykind =~ py.* ]] && [[ $tfpref =~ cpu.* ]]
result=$?
pyver=${pykind:2:1}
flavour=cpu
else
[[ $pykind =~ .*linux.* ]]
result=$?
pyver=${pykind:2:2}
flavour=${tfpref:0:3}
fi
if [ $result == 0 ]; then
url=https://storage.googleapis.com/tensorflow/$sys/$tfpref-$version-$pykind.whl
hash=$(nix-prefetch-url $url)
echo "${sys}_py_${pyver}_${flavour} = {" >> $hashfile
echo " url = \"$url\";" >> $hashfile
echo " sha256 = \"$hash\";" >> $hashfile
echo "};" >> $hashfile
fi
done
done
done
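
The script's file name is not shown in this view; assuming it is saved next to the expression as, say, generate-hashes.sh (a hypothetical name), a run would look roughly like

    bash generate-hashes.sh        # prefetches each published wheel with nix-prefetch-url
    echo "}" >> tf1.7.1-hashes.nix # the script writes only the opening brace

since the generated tf1.7.1-hashes.nix shown next ends with a closing brace that the loop itself never emits.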

View File

@@ -0,0 +1,34 @@
{
linux_py_27_cpu = {
url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.7.1-cp27-none-linux_x86_64.whl";
sha256 = "0p8n5x74qmdv9g63y176xqpfdc1gawzjysn79bvk46knrks3pa2b";
};
linux_py_35_cpu = {
url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.7.1-cp35-cp35m-linux_x86_64.whl";
sha256 = "050qv8fjpnw2y8da7s910jv4nsxg56d3xdpl09jim47kbwqabr5m";
};
linux_py_36_cpu = {
url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.7.1-cp36-cp36m-linux_x86_64.whl";
sha256 = "00d5cij1mh64hh0zc2qfl8z2hpr3nna6lhpsc6qh4am1g7wz4ndn";
};
linux_py_27_gpu = {
url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.7.1-cp27-none-linux_x86_64.whl";
sha256 = "0ami6nlp9cwg631a8f5rfpzpwb9ls9zxhsx61cimw46xljx3l2b5";
};
linux_py_35_gpu = {
url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.7.1-cp35-cp35m-linux_x86_64.whl";
sha256 = "1xfc8dww52fy8g4b0j8r20q7yj2bfg20hlk9p7sk3k9z8swfw0kc";
};
linux_py_36_gpu = {
url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.7.1-cp36-cp36m-linux_x86_64.whl";
sha256 = "1kkqx8m7h03b8l9l6dki4g4r7sgi3wbb4dp9gvk6l08n4vnlvc50";
};
mac_py_2_cpu = {
url = "https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.7.1-py2-none-any.whl";
sha256 = "1icbsvvwkkc09s6bdd43drvnhc6v6xmnqwjzipgc8rmpj1z71yz5";
};
mac_py_3_cpu = {
url = "https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.7.1-py3-none-any.whl";
sha256 = "0s5dy956jvwazqflc90v15i912zvhwsbzlf0cl8k7isq52j6g3kp";
};
}