pytorch: 0.2.0 → 0.3.1 with CUDA and cuDNN (#38530)
* pytorch-0.3 with optional cuda and cudnn

* pytorch tests reenabled if compiling without cuda

* pytorch: Conditionalize cudnn dependency on cudaSupport

  Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Compile with the same GCC version used by CUDA if cudaSupport

  Fixes this error:

  In file included from /nix/store/gv7w3c71jg627cpcff04yi6kwzpzjyap-cudatoolkit-9.1.85.1/include/host_config.h:50:0,
                   from /nix/store/gv7w3c71jg627cpcff04yi6kwzpzjyap-cudatoolkit-9.1.85.1/include/cuda_runtime.h:78,
                   from <command-line>:0:
  /nix/store/gv7w3c71jg627cpcff04yi6kwzpzjyap-cudatoolkit-9.1.85.1/include/crt/host_config.h:121:2: error: #error -- unsupported GNU version! gcc versions later than 6 are not supported!
   #error -- unsupported GNU version! gcc versions later than 6 are not supported!
    ^~~~~

  Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Build with joined cudatoolkit

  Similar to #30058 for TensorFlow.

  Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: 0.3.0 -> 0.3.1

  Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Patch for “refcounted file mapping not supported” failure

  Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Skip distributed tests

  Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Use the stub libcuda.so from cudatoolkit for running tests

  Signed-off-by: Anders Kaseorg <andersk@mit.edu>
Parent: 026dc16b85
Commit: ce00943916
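For context, a minimal consumer-side sketch (not part of this commit, and assuming the attribute names land exactly as in the python-packages.nix hunk below): an environment that pulls in the CUDA-enabled variant explicitly.

with import <nixpkgs> { };

# Hedged sketch: a Python environment using the new CUDA-enabled attribute.
# Swap in ps.pytorchWithoutCuda for a CPU-only build, or plain ps.pytorch,
# which now follows the global cudaSupport flag.
python3.withPackages (ps: [ ps.pytorchWithCuda ])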
pkgs/development/python-modules/pytorch/default.nix:

@@ -1,8 +1,31 @@
-{ buildPythonPackage, fetchFromGitHub, lib, numpy, pyyaml, cffi, cmake,
-  git, stdenv }:
+{ buildPythonPackage,
+  cudaSupport ? false, cudatoolkit ? null, cudnn ? null,
+  fetchFromGitHub, fetchpatch, lib, numpy, pyyaml, cffi, cmake,
+  git, stdenv, linkFarm, symlinkJoin,
+  utillinux, which }:
 
-buildPythonPackage rec {
-  version = "0.2.0";
+assert cudnn == null || cudatoolkit != null;
+assert !cudaSupport || cudatoolkit != null;
+
+let
+  cudatoolkit_joined = symlinkJoin {
+    name = "${cudatoolkit.name}-unsplit";
+    paths = [ cudatoolkit.out cudatoolkit.lib ];
+  };
+
+  # Normally libcuda.so.1 is provided at runtime by nvidia-x11 via
+  # LD_LIBRARY_PATH=/run/opengl-driver/lib. We only use the stub
+  # libcuda.so from cudatoolkit for running tests, so that we don’t have
+  # to recompile pytorch on every update to nvidia-x11 or the kernel.
+  cudaStub = linkFarm "cuda-stub" [{
+    name = "libcuda.so.1";
+    path = "${cudatoolkit}/lib/stubs/libcuda.so";
+  }];
+  cudaStubEnv = lib.optionalString cudaSupport
+    "LD_LIBRARY_PATH=${cudaStub}\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} ";
+
+in buildPythonPackage rec {
+  version = "0.3.1";
   pname = "pytorch";
   name = "${pname}-${version}";
 
@@ -10,18 +33,41 @@ buildPythonPackage rec {
     owner = "pytorch";
     repo = "pytorch";
     rev = "v${version}";
-    sha256 = "1s3f46ga1f4lfrcj3lpvvhgkdr1pi8i2hjd9xj9qiz3a9vh2sj4n";
+    fetchSubmodules = true;
+    sha256 = "1k8fr97v5pf7rni5cr2pi21ixc3pdj3h3lkz28njbjbgkndh7mr3";
   };
 
-  checkPhase = ''
-    ${stdenv.shell} test/run_test.sh
+  patches = [
+    (fetchpatch {
+      # make sure stdatomic.h is included when checking for ATOMIC_INT_LOCK_FREE
+      # Fixes this test failure:
+      # RuntimeError: refcounted file mapping not supported on your system at /tmp/nix-build-python3.6-pytorch-0.3.0.drv-0/source/torch/lib/TH/THAllocator.c:525
+      url = "https://github.com/pytorch/pytorch/commit/502aaf39cf4a878f9e4f849e5f409573aa598aa9.patch";
+      stripLen = 3;
+      extraPrefix = "torch/lib/";
+      sha256 = "1miz4lhy3razjwcmhxqa4xmlcmhm65lqyin1czqczj8g16d3f62f";
+    })
+  ];
+
+  postPatch = ''
+    substituteInPlace test/run_test.sh --replace \
+      "INIT_METHOD='file://'\$TEMP_DIR'/shared_init_file' \$PYCMD ./test_distributed.py" \
+      "echo Skipped for Nix package"
+  '';
+
+  preConfigure = lib.optionalString cudaSupport ''
+    export CC=${cudatoolkit.cc}/bin/gcc
+  '' + lib.optionalString (cudaSupport && cudnn != null) ''
+    export CUDNN_INCLUDE_DIR=${cudnn}/include
   '';
 
   buildInputs = [
     cmake
     git
     numpy.blas
-  ];
+    utillinux
+    which
+  ] ++ lib.optionals cudaSupport [cudatoolkit_joined cudnn];
 
   propagatedBuildInputs = [
     cffi
@@ -29,8 +75,8 @@ buildPythonPackage rec {
     pyyaml
   ];
 
-  preConfigure = ''
-    export NO_CUDA=1
+  checkPhase = ''
+    ${cudaStubEnv}${stdenv.shell} test/run_test.sh
   '';
 
   meta = {
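Because the expression now accepts cudaSupport, cudatoolkit and cudnn as arguments (defaulting to false/null, guarded by the two asserts), a downstream user can pin a specific toolkit/cuDNN pair via .override. A hedged sketch only; the cudatoolkit9 and cudnn_cudatoolkit9 attribute names are assumptions about the surrounding nixpkgs, not something this commit adds.

with import <nixpkgs> { };

# Hedged sketch: force CUDA support and pin the toolkit/cuDNN pair.
# The asserts in the expression above require a non-null cudatoolkit
# whenever cudaSupport is true or a cudnn is given.
python3Packages.pytorch.override {
  cudaSupport = true;
  cudatoolkit = cudatoolkit9;        # assumed attribute name
  cudnn       = cudnn_cudatoolkit9;  # assumed attribute name
}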
pkgs/top-level/python-packages.nix:

@@ -5623,7 +5623,17 @@ in {
     };
   };
 
-  pytorch = callPackage ../development/python-modules/pytorch { };
+  pytorch = callPackage ../development/python-modules/pytorch {
+    cudaSupport = pkgs.config.cudaSupport or false;
+  };
+
+  pytorchWithCuda = self.pytorch.override {
+    cudaSupport = true;
+  };
+
+  pytorchWithoutCuda = self.pytorch.override {
+    cudaSupport = false;
+  };
 
   python2-pythondialog = buildPythonPackage rec {
     name = "python2-pythondialog-${version}";
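Finally, since plain pytorch now follows pkgs.config.cudaSupport, the switch can also be flipped globally instead of selecting pytorchWithCuda by hand. A hedged sketch of a user-level ~/.config/nixpkgs/config.nix:

{
  # Read by `cudaSupport = pkgs.config.cudaSupport or false;` in the hunk above.
  cudaSupport = true;
}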