diff --git a/3rdparty/mshadow/CMakeLists.txt b/3rdparty/mshadow/CMakeLists.txt index 3b898a4772b2..0fe4eec98621 100644 --- a/3rdparty/mshadow/CMakeLists.txt +++ b/3rdparty/mshadow/CMakeLists.txt @@ -42,7 +42,7 @@ else() target_compile_definitions(mshadow INTERFACE MSHADOW_USE_SSE=0) endif() if(USE_CUDNN) - target_compile_definitions(mshadow INTERFACE MSHADOW_USE_CUDNN) + target_compile_definitions(mshadow INTERFACE MSHADOW_USE_CUDNN=1) endif() if(MSHADOW_IN_CXX11) target_compile_definitions(mshadow INTERFACE MSHADOW_IN_CXX11) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4fc790a7213b..98c10d756cb0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -142,6 +142,7 @@ if(CMAKE_BUILD_TYPE STREQUAL "Distribution" AND UNIX AND NOT APPLE) # Enforce DT_PATH instead of DT_RUNPATH set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--disable-new-dtags") set(CMAKE_EXE_LINKER_FLAGS "-Wl,--disable-new-dtags") + set(Protobuf_USE_STATIC_LIBS ON) endif() set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/upstream;${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules;${CMAKE_MODULE_PATH}") diff --git a/cd/Jenkinsfile_cd_pipeline b/cd/Jenkinsfile_cd_pipeline index 6ef1104ab03d..03f2fb12280e 100644 --- a/cd/Jenkinsfile_cd_pipeline +++ b/cd/Jenkinsfile_cd_pipeline @@ -36,7 +36,7 @@ pipeline { parameters { // Release parameters - string(defaultValue: "cpu,native,cu92,cu100,cu101,cu102,cu110", description: "Comma separated list of variants", name: "MXNET_VARIANTS") + string(defaultValue: "cpu,native,cu100,cu101,cu102,cu110,cu112", description: "Comma separated list of variants", name: "MXNET_VARIANTS") booleanParam(defaultValue: false, description: 'Whether this is a release build or not', name: "RELEASE_BUILD") } diff --git a/cd/Jenkinsfile_release_job b/cd/Jenkinsfile_release_job index a2dd674f3df3..21af1d5bf871 100644 --- a/cd/Jenkinsfile_release_job +++ b/cd/Jenkinsfile_release_job @@ -43,7 +43,7 @@ pipeline { // any disruption caused by different COMMIT_ID values chaning the job parameter 
configuration on // Jenkins. string(defaultValue: "mxnet_lib", description: "Pipeline to build", name: "RELEASE_JOB_TYPE") - string(defaultValue: "cpu,native,cu100,cu101,cu102,cu110", description: "Comma separated list of variants", name: "MXNET_VARIANTS") + string(defaultValue: "cpu,native,cu100,cu101,cu102,cu110,cu112", description: "Comma separated list of variants", name: "MXNET_VARIANTS") booleanParam(defaultValue: false, description: 'Whether this is a release build or not', name: "RELEASE_BUILD") string(defaultValue: "nightly_v1.x", description: "String used for naming docker images", name: "VERSION") } diff --git a/cd/README.md b/cd/README.md index 2c86c6efb953..f782a21d70a4 100644 --- a/cd/README.md +++ b/cd/README.md @@ -25,7 +25,7 @@ MXNet aims to support a variety of frontends, e.g. Python, Java, Perl, R, etc. a The CD process is driven by the [CD pipeline job](Jenkinsfile_cd_pipeline), which orchestrates the order in which the artifacts are delivered. For instance, first publish the libmxnet library before publishing the pip package. It does this by triggering the [release job](Jenkinsfile_release_job) with a specific set of parameters for each delivery channel. The release job executes the specific release pipeline for a delivery channel across all MXNet *variants*. -A variant is a specific environment or features for which MXNet is compiled. For instance CPU, GPU with CUDA v10.0, CUDA v9.0 with MKL-DNN support, etc. +A variant is a specific environment or features for which MXNet is compiled. For instance CPU, GPU with CUDA v10.0, CUDA v11.0 with MKL-DNN support, etc. Currently, below variants are supported. All of these variants except native have MKL-DNN backend enabled. @@ -36,6 +36,7 @@ Currently, below variants are supported. 
All of these variants except native hav * *cu101*: CUDA 10.1 * *cu102*: CUDA 10.2 * *cu110*: CUDA 11.0 +* *cu112*: CUDA 11.2 *For more on variants, see [here](https://github.com/apache/incubator-mxnet/issues/8671)* @@ -121,7 +122,7 @@ The "first mile" of the CD process is posting the mxnet binaries to the [artifac ##### Timeout -We shouldn't set global timeouts for the pipelines. Rather, the `step` being executed should be rapped with a `timeout` function (as in the pipeline example above). The `max_time` is a global variable set at the [release job](Jenkinsfile_release_job) level. +We shouldn't set global timeouts for the pipelines. Rather, the `step` being executed should be wrapped with a `timeout` function (as in the pipeline example above). The `max_time` is a global variable set at the [release job](Jenkinsfile_release_job) level. ##### Node of execution diff --git a/cd/python/pypi/pypi_package.sh b/cd/python/pypi/pypi_package.sh index 1e8103bdfca7..4c2a3d9eb7d8 100755 --- a/cd/python/pypi/pypi_package.sh +++ b/cd/python/pypi/pypi_package.sh @@ -18,7 +18,7 @@ set -ex -# variant = cpu, native, cu80, cu100, etc. +# variant = cpu, native, cu100, cu101, cu102, cu110, cu112, etc. export mxnet_variant=${1:?"Please specify the mxnet variant"} # Due to this PR: https://github.com/apache/incubator-mxnet/pull/14899 diff --git a/cd/utils/artifact_repository.md b/cd/utils/artifact_repository.md index 49399bbb5551..f9818918a5ba 100644 --- a/cd/utils/artifact_repository.md +++ b/cd/utils/artifact_repository.md @@ -17,7 +17,7 @@ # Artifact Repository - Pushing and Pulling libmxnet -The artifact repository is an S3 bucket accessible only to restricted Jenkins nodes. It is used to store compiled MXNet artifacts that can be used by downstream CD pipelines to package the compiled libraries for different delivery channels (e.g. DockerHub, PyPI, Maven, etc.). 
The S3 object keys for the files being posted will be prefixed with the following distinguishing characteristics of the binary: branch, commit id, operating system, variant and dependency linking strategy (static or dynamic). For instance, s3://bucket/73b29fa90d3eac0b1fae403b7583fdd1529942dc/ubuntu16.04/cu92mkl/static/libmxnet.so +The artifact repository is an S3 bucket accessible only to restricted Jenkins nodes. It is used to store compiled MXNet artifacts that can be used by downstream CD pipelines to package the compiled libraries for different delivery channels (e.g. DockerHub, PyPI, Maven, etc.). The S3 object keys for the files being posted will be prefixed with the following distinguishing characteristics of the binary: branch, commit id, operating system, variant and dependency linking strategy (static or dynamic). For instance, s3://bucket/73b29fa90d3eac0b1fae403b7583fdd1529942dc/ubuntu18.04/cu100/static/libmxnet.so An MXNet artifact is defined as the following set of files: @@ -53,13 +53,13 @@ If not set, derived through the value of sys.platform (https://docs.python.org/3 **Variant** -Manually configured through the --variant argument. The current variants are: cpu, native, cu92, cu100, cu101, cu102 and cu110. +Manually configured through the --variant argument. The current variants are: cpu, native, cu100, cu101, cu102, cu110 and cu112. As long as the tool is being run from the MXNet code base, the runtime feature detection tool (https://github.com/larroy/mxnet/blob/dd432b7f241c9da2c96bcb877c2dc84e6a1f74d4/docs/api/python/libinfo/libinfo.md) can be used to detect whether the library has been compiled with MKL (library has MKL-DNN feature enabled) and/or CUDA support (compiled with CUDA feature enabled). -If it has been compiled with CUDA support, the output of /usr/local/cuda/bin/nvcc --version can be mined for the exact CUDA version (eg. 8.0, 9.0, etc.). 
+If it has been compiled with CUDA support, the output of /usr/local/cuda/bin/nvcc --version can be mined for the exact CUDA version (eg. 10.0, 11.0, etc.). -By knowing which features are enabled on the binary, and if necessary, which CUDA version is installed on the machine, the value for the variant argument can be calculated. Eg. if CUDA features are enabled, and nvcc reports cuda version 10, then the variant would be cu100. If neither MKL-DNN nor CUDA features are enabled, the variant would be native. +By knowing which features are enabled on the binary, and if necessary, which CUDA version is installed on the machine, the value for the variant argument can be calculated. Eg. if CUDA features are enabled, and nvcc reports cuda version 10.0, then the variant would be cu100. If neither MKL-DNN nor CUDA features are enabled, the variant would be native. **Dependency Linking** diff --git a/cd/utils/mxnet_base_image.sh b/cd/utils/mxnet_base_image.sh index 5073a3783b2c..5632fe8d024a 100755 --- a/cd/utils/mxnet_base_image.sh +++ b/cd/utils/mxnet_base_image.sh @@ -33,6 +33,9 @@ case ${mxnet_variant} in cu110*) echo "nvidia/cuda:11.0-cudnn8-runtime-ubuntu18.04" ;; + cu112*) + echo "nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu18.04" + ;; cpu) echo "ubuntu:18.04" ;; diff --git a/cd/utils/test_artifact_repository.py b/cd/utils/test_artifact_repository.py index 2ab5d910c87b..827457e335e7 100644 --- a/cd/utils/test_artifact_repository.py +++ b/cd/utils/test_artifact_repository.py @@ -144,9 +144,9 @@ def test_get_cuda_version(self, mock): cuda_version = get_cuda_version() self.assertEqual(cuda_version, '100') - mock.return_value = b'Cuda compilation tools, release 9.2, V9.2.148' + mock.return_value = b'Cuda compilation tools, release 11.0, V11.0.148' cuda_version = get_cuda_version() - self.assertEqual(cuda_version, '92') + self.assertEqual(cuda_version, '110') @patch('artifact_repository.check_output') def test_get_cuda_version_not_found(self, mock): diff --git 
a/ci/docker/Dockerfile.build.ubuntu_build_cuda b/ci/docker/Dockerfile.build.ubuntu_build_cuda index 4840d3703d12..7fa89f1e7fa7 100644 --- a/ci/docker/Dockerfile.build.ubuntu_build_cuda +++ b/ci/docker/Dockerfile.build.ubuntu_build_cuda @@ -16,12 +16,12 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to build MXNet on Ubuntu 16.04 for GPU but on +# Dockerfile to build MXNet on Ubuntu 18.04 for GPU but on # a CPU-only instance. This restriction is caused by the CPP- # package generation, requiring the actual CUDA library to be # present -FROM nvidia/cuda:10.1-devel-ubuntu16.04 +FROM nvidia/cuda:10.1-devel-ubuntu18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu b/ci/docker/Dockerfile.build.ubuntu_cpu index 6893499d70a8..c8f23edc1146 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu +++ b/ci/docker/Dockerfile.build.ubuntu_cpu @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU +# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU -FROM ubuntu:16.04 +FROM ubuntu:18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_c b/ci/docker/Dockerfile.build.ubuntu_cpu_c index c7969da1bb1d..afe9eea260c1 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_c +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_c @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. 
# -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU +# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU -FROM ubuntu:16.04 +FROM ubuntu:18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll b/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll index bc91286ecf21..080039d3f3bd 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU +# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU -FROM ubuntu:16.04 +FROM ubuntu:18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_julia b/ci/docker/Dockerfile.build.ubuntu_cpu_julia index 6893499d70a8..c8f23edc1146 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_julia +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_julia @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU +# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU -FROM ubuntu:16.04 +FROM ubuntu:18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_lite b/ci/docker/Dockerfile.build.ubuntu_cpu_lite index ca5618ac1cd7..ff97e68da985 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_lite +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_lite @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. 
# -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU +# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU -FROM ubuntu:16.04 +FROM ubuntu:18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_python b/ci/docker/Dockerfile.build.ubuntu_cpu_python index 6b217d4d341d..74eae602f7b2 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_python +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_python @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU +# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU -FROM ubuntu:16.04 +FROM ubuntu:18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_r b/ci/docker/Dockerfile.build.ubuntu_cpu_r index f41b651585cf..3d17e09d8775 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_r +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_r @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU +# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU -FROM ubuntu:16.04 +FROM ubuntu:18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_scala b/ci/docker/Dockerfile.build.ubuntu_cpu_scala index 38874d290e1d..4a5ffe278486 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_scala +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_scala @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. 
# -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU +# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU -FROM ubuntu:16.04 +FROM ubuntu:18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 index b792bae44a04..c10e76ec7950 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU +# Dockerfile to run MXNet on Ubuntu 18.04 for GPU -FROM nvidia/cuda:10.0-devel-ubuntu16.04 +FROM nvidia/cuda:10.0-devel-ubuntu18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 index a3e0ece760f0..ef0794889495 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU +# Dockerfile to run MXNet on Ubuntu 18.04 for GPU -FROM nvidia/cuda:10.1-devel-ubuntu16.04 +FROM nvidia/cuda:10.1-devel-ubuntu18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 index a929a486a2f3..845a58d3ddf8 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. 
# -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU +# Dockerfile to run MXNet on Ubuntu 18.04 for GPU -FROM nvidia/cuda:10.2-devel-ubuntu16.04 +FROM nvidia/cuda:10.2-devel-ubuntu18.04 WORKDIR /work/deps @@ -65,7 +65,7 @@ COPY install/ubuntu_tutorials.sh /work/ RUN /work/ubuntu_tutorials.sh ENV CUDA_VERSION=10.2.89 -ENV CUDNN_VERSION=7.6.5.32 +ENV CUDNN_VERSION=8.0.4.30 COPY install/ubuntu_cudnn.sh /work/ RUN /work/ubuntu_cudnn.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu110 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu110 index 38a5ea85831a..b4d9ddfc225b 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu110 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu110 @@ -16,9 +16,9 @@ # specific language governing permissions and limitations # under the License. # -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU +# Dockerfile to run MXNet on Ubuntu 18.04 for GPU -FROM nvidia/cuda:11.0-cudnn8-devel-ubuntu16.04 +FROM nvidia/cuda:11.0-cudnn8-devel-ubuntu18.04 WORKDIR /work/deps diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu112 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu112 new file mode 100644 index 000000000000..f691465b4177 --- /dev/null +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu112 @@ -0,0 +1,48 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Dockerfile to run MXNet on Ubuntu 18.04 for GPU + +FROM nvidia/cuda:11.2.1-cudnn8-devel-ubuntu18.04 + +WORKDIR /work/deps + +COPY install/requirements /work/ + +COPY install/ubuntu_core.sh /work/ +RUN /work/ubuntu_core.sh + +COPY install/deb_ubuntu_ccache.sh /work/ +RUN /work/deb_ubuntu_ccache.sh + +COPY install/ubuntu_python.sh /work/ +RUN /work/ubuntu_python.sh + +COPY install/ubuntu_docs.sh /work/ +RUN /work/ubuntu_docs.sh + +# Always last +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh + +COPY runtime_functions.sh /work/ + +WORKDIR /work/mxnet +ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/compat diff --git a/ci/docker/install/requirements b/ci/docker/install/requirements index a5ff7f507922..1abdc0051a82 100644 --- a/ci/docker/install/requirements +++ b/ci/docker/install/requirements @@ -31,4 +31,5 @@ pylint==2.3.1 # pylint and astroid need to be aligned astroid==2.3.3 # pylint and astroid need to be aligned requests<2.19.0,>=2.18.4 scipy==1.2.1 -setuptools<50 +setuptools +coverage diff --git a/ci/docker/install/ubuntu_caffe.sh b/ci/docker/install/ubuntu_caffe.sh index bda1c0b8aef3..503b57f2ed7f 100755 --- a/ci/docker/install/ubuntu_caffe.sh +++ b/ci/docker/install/ubuntu_caffe.sh @@ -40,6 +40,7 @@ git clone http://github.com/BVLC/caffe.git cd caffe cp Makefile.config.example Makefile.config +echo "OPENCV_VERSION := 3" >> Makefile.config echo "CPU_ONLY := 1" >> Makefile.config diff --git a/ci/docker/install/ubuntu_clang.sh b/ci/docker/install/ubuntu_clang.sh index ac1bdac46d9e..2788395e92a5 100755 --- a/ci/docker/install/ubuntu_clang.sh +++ b/ci/docker/install/ubuntu_clang.sh @@ -25,11 +25,9 @@ set -ex apt-get update || true # Install clang 3.9 (the same version as in XCode 8.*) and 6.0 (latest major release) wget -qO - http://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \ - 
apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-3.9 main" && \ - apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main" && \ + apt-add-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-6.0 main" && \ apt-get update && \ - apt-get install -y clang-3.9 clang-6.0 clang-tidy-6.0 && \ - clang-3.9 --version && \ + apt-get install -y clang-6.0 clang-tidy-6.0 && \ clang-6.0 --version # Use llvm's master version of run-clang-tidy.py. This version has mostly minor updates, but diff --git a/ci/docker/install/ubuntu_core.sh b/ci/docker/install/ubuntu_core.sh index 2773aa26246c..53b5d4a44f77 100755 --- a/ci/docker/install/ubuntu_core.sh +++ b/ci/docker/install/ubuntu_core.sh @@ -38,10 +38,14 @@ apt-get install -y \ libcurl4-openssl-dev \ libjemalloc-dev \ libhdf5-dev \ + libomp5 \ + libomp-dev \ liblapack-dev \ libopenblas-dev \ libopencv-dev \ - libturbojpeg \ + libjpeg-turbo8-dev \ + libjpeg8-dev \ + libturbojpeg0-dev \ libzmq3-dev \ libtinfo-dev \ zlib1g-dev \ @@ -52,11 +56,12 @@ apt-get install -y \ sudo \ unzip \ vim-nox \ + openjdk-8-jdk \ + openjdk-8-jre \ wget # Use libturbojpeg package as it is correctly compiled with -fPIC flag # https://github.com/HaxeFoundation/hashlink/issues/147 -ln -s /usr/lib/x86_64-linux-gnu/libturbojpeg.so.0.1.0 /usr/lib/x86_64-linux-gnu/libturbojpeg.so # CMake 3.13.2+ is required diff --git a/ci/docker/install/ubuntu_cudnn.sh b/ci/docker/install/ubuntu_cudnn.sh index 1cc0b722bfc7..0699d80eb86e 100755 --- a/ci/docker/install/ubuntu_cudnn.sh +++ b/ci/docker/install/ubuntu_cudnn.sh @@ -38,7 +38,7 @@ case ${CUDA_VERSION} in export libcudnn_dev_version="${CUDNN_VERSION}-1+cuda11.0" ;; 10\.2*) - export libcudnn_package="libcudnn7" + export libcudnn_package="libcudnn8" export libcudnn_version="${CUDNN_VERSION}-1+cuda10.2" export libcudnn_dev_version="${CUDNN_VERSION}-1+cuda10.2" ;; diff --git a/ci/docker/install/ubuntu_llvm.sh b/ci/docker/install/ubuntu_llvm.sh index 
8b6e765b56c5..476b3a269c81 100755 --- a/ci/docker/install/ubuntu_llvm.sh +++ b/ci/docker/install/ubuntu_llvm.sh @@ -17,9 +17,9 @@ # specific language governing permissions and limitations # under the License. -echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\ +echo deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-5.0 main\ >> /etc/apt/sources.list.d/llvm.list -echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\ +echo deb-src http://apt.llvm.org/bionic/ llvm-toolchain-bionic-5.0 main\ >> /etc/apt/sources.list.d/llvm.list wget -qO - http://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - diff --git a/ci/docker/install/ubuntu_onnx.sh b/ci/docker/install/ubuntu_onnx.sh index 31eb5e8c9380..81c8755cf35c 100755 --- a/ci/docker/install/ubuntu_onnx.sh +++ b/ci/docker/install/ubuntu_onnx.sh @@ -30,5 +30,4 @@ echo "Installing libprotobuf-dev and protobuf-compiler ..." apt-get update || true apt-get install -y libprotobuf-dev protobuf-compiler -echo "Installing pytest, pytest-cov, protobuf, Pillow, ONNX, tabulate and onnxruntime..." 
-pip3 install pytest==3.6.3 pytest-cov==2.5.1 protobuf==3.5.2 onnx==1.7.0 Pillow==5.0.0 tabulate==0.7.5 onnxruntime==1.4.0 +pip3 install pytest==6.2.2 pytest-cov==2.11.1 pytest-xdist==2.2.1 protobuf==3.5.2 onnx==1.7.0 Pillow==5.0.0 tabulate==0.7.5 onnxruntime==1.6.0 'numpy>1.16.0,<1.19.0' gluonnlp==0.10.0 gluoncv==0.8.0 diff --git a/ci/docker/install/ubuntu_r.sh b/ci/docker/install/ubuntu_r.sh index 6105da8c4813..44c0357d1cfa 100755 --- a/ci/docker/install/ubuntu_r.sh +++ b/ci/docker/install/ubuntu_r.sh @@ -27,7 +27,7 @@ set -ex cd "$(dirname "$0")" # install libraries for mxnet's r package on ubuntu -echo "deb http://cran.rstudio.com/bin/linux/ubuntu trusty/" >> /etc/apt/sources.list +echo "deb http://cran.rstudio.com/bin/linux/ubuntu bionic-cran40/" >> /etc/apt/sources.list apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index c230ccdccd44..c4fe2db4b784 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -224,7 +224,7 @@ build_ubuntu_gpu_mkldnn_release() { # Compiles the dynamic mxnet library # Parameters: -# $1 -> mxnet_variant: the mxnet variant to build, e.g. cpu, cu100, cu92mkl, etc. +# $1 -> mxnet_variant: the mxnet variant to build, e.g. cpu, native, cu100, cu101, cu102, cu110, cu112, etc. 
build_dynamic_libmxnet() { set -ex @@ -489,6 +489,8 @@ build_ubuntu_cpu_openblas() { USE_DIST_KVSTORE=1 \ USE_LIBJPEG_TURBO=1 \ USE_SIGNAL_HANDLER=1 \ + USE_LAPACK_PATH=/usr/lib/x86_64-linux-gnu \ + USE_LIBJPEG_TURBO_PATH=/usr/lib/x86_64-linux-gnu \ -j$(nproc) make cython PYTHON=python3 } @@ -506,6 +508,7 @@ build_ubuntu_cpu_mkl() { USE_INTEL_PATH=/opt/intel \ USE_DIST_KVSTORE=1 \ USE_SIGNAL_HANDLER=1 \ + USE_LAPACK_PATH=/usr/lib/x86_64-linux-gnu \ -j$(nproc) } @@ -680,6 +683,7 @@ build_ubuntu_cpu_mkldnn() { USE_TVM_OP=1 \ USE_BLAS=openblas \ USE_SIGNAL_HANDLER=1 \ + USE_LAPACK_PATH=/usr/lib/x86_64-linux-gnu \ -j$(nproc) } @@ -695,6 +699,7 @@ build_ubuntu_cpu_mkldnn_mkl() { USE_BLAS=mkl \ USE_SIGNAL_HANDLER=1 \ USE_INTEL_PATH=/opt/intel/ \ + USE_LAPACK_PATH=/usr/lib/x86_64-linux-gnu \ -j$(nproc) } @@ -768,6 +773,7 @@ build_ubuntu_gpu_mkldnn() { USE_TVM_OP=0 \ CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \ USE_SIGNAL_HANDLER=1 \ + USE_LAPACK_PATH=/usr/lib/x86_64-linux-gnu \ -j$(nproc) } @@ -785,6 +791,7 @@ build_ubuntu_gpu_mkldnn_nocudnn() { USE_TVM_OP=0 \ CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \ USE_SIGNAL_HANDLER=1 \ + USE_LAPACK_PATH=/usr/lib/x86_64-linux-gnu \ -j$(nproc) } @@ -802,6 +809,7 @@ build_ubuntu_gpu_cuda101_cudnn7() { USE_DIST_KVSTORE=1 \ CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \ USE_SIGNAL_HANDLER=1 \ + USE_LAPACK_PATH=/usr/lib/x86_64-linux-gnu \ -j$(nproc) make cython PYTHON=python3 } @@ -1659,6 +1667,8 @@ build_ubuntu_cpu_docs() { USE_DIST_KVSTORE=1 \ USE_LIBJPEG_TURBO=1 \ USE_SIGNAL_HANDLER=1 \ + USE_LAPACK_PATH=/usr/lib/x86_64-linux-gnu \ + USE_LIBJPEG_TURBO_PATH=/usr/lib/x86_64-linux-gnu \ -j$(nproc) } @@ -1984,6 +1994,7 @@ build_static_libmxnet() { set -ex pushd . 
local mxnet_variant=${1:?"This function requires a python command as the first argument"} + CMAKE_STATICBUILD=1 source tools/staticbuild/build.sh ${mxnet_variant} popd } diff --git a/ci/jenkins/Jenkinsfile_clang b/ci/jenkins/Jenkinsfile_clang index 029c7208107b..86320321b4b4 100644 --- a/ci/jenkins/Jenkinsfile_clang +++ b/ci/jenkins/Jenkinsfile_clang @@ -34,10 +34,8 @@ utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu', linux_ utils.main_wrapper( core_logic: { utils.parallel_stage('Build', [ - custom_steps.compile_unix_clang_3_9_cpu(), custom_steps.compile_unix_clang_6_cpu(), custom_steps.compile_unix_clang_tidy_cpu(), - custom_steps.compile_unix_clang_3_9_mkldnn_cpu(), custom_steps.compile_unix_clang_6_mkldnn_cpu() ]) } diff --git a/ci/jenkins/Jenkinsfile_miscellaneous b/ci/jenkins/Jenkinsfile_miscellaneous index dbf2a9e41c76..fa0f390c8dd1 100644 --- a/ci/jenkins/Jenkinsfile_miscellaneous +++ b/ci/jenkins/Jenkinsfile_miscellaneous @@ -38,11 +38,7 @@ core_logic: { custom_steps.compile_unix_asan_cpu(), custom_steps.compile_unix_amalgamation_min(), custom_steps.compile_unix_amalgamation() - ]) - - utils.parallel_stage('Tests', [ - custom_steps.misc_asan_cpu() - ]) + ]) } , failure_handler: { diff --git a/ci/jenkins/Jenkinsfile_unix_cpu b/ci/jenkins/Jenkinsfile_unix_cpu index 71917de58e82..2fa66c1bbc00 100644 --- a/ci/jenkins/Jenkinsfile_unix_cpu +++ b/ci/jenkins/Jenkinsfile_unix_cpu @@ -45,7 +45,6 @@ core_logic: { utils.parallel_stage('Tests', [ custom_steps.test_unix_python3_cpu(), - custom_steps.test_unix_python3_debug_cpu(), custom_steps.test_unix_python3_mkl_cpu(), custom_steps.test_unix_python3_mkldnn_cpu(), custom_steps.test_unix_python3_mkldnn_mkl_cpu(), diff --git a/cmake/ChooseBlas.cmake b/cmake/ChooseBlas.cmake index e16594794ae8..3848a3c3ef07 100644 --- a/cmake/ChooseBlas.cmake +++ b/cmake/ChooseBlas.cmake @@ -45,6 +45,50 @@ elseif(BLAS STREQUAL "Open" OR BLAS STREQUAL "open") add_definitions(-DMSHADOW_USE_CBLAS=1) 
add_definitions(-DMSHADOW_USE_MKL=0) add_definitions(-DMXNET_USE_BLAS_OPEN=1) + if(NOT MSVC AND CMAKE_BUILD_TYPE STREQUAL "Distribution") + # check if we need to link to omp + execute_process(COMMAND ${CMAKE_NM} -g ${OpenBLAS_LIB} + COMMAND grep omp_get_num_threads + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE OPENBLAS_USES_OMP_OUT + RESULT_VARIABLE OPENBLAS_USES_OMP_RET) + if(NOT OPENBLAS_USES_OMP_OUT STREQUAL "" AND NOT OPENBLAS_USES_OMP_RET AND NOT USE_OPENMP) + message("Openblas uses OMP, automatically linking to it") + find_package(OpenMP REQUIRED) + message("OpenMP_CXX_LIBRARIES is ${OpenMP_CXX_LIBRARIES}") + list(APPEND mshadow_LINKER_LIBS "${OpenMP_CXX_LIBRARIES}") + endif() + # check if we need to link to gfortran + execute_process(COMMAND ${CMAKE_NM} -g ${OpenBLAS_LIB} + COMMAND grep gfortran + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE OPENBLAS_USES_GFORTRAN_OUT + RESULT_VARIABLE OPENBLAS_USES_GFORTRAN_RET) + if(NOT OPENBLAS_USES_GFORTRAN_OUT STREQUAL "" AND NOT OPENBLAS_USES_GFORTRAN_RET) + message("Openblas uses GFortran, automatically linking to it") + file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/temp/CMakeLists.txt" + "cmake_minimum_required(VERSION ${CMAKE_VERSION}) +project(CheckFortran Fortran) +set(CMAKE_Fortran_COMPILER gfortran) +file(WRITE \"${CMAKE_CURRENT_BINARY_DIR}/temp/FortranDir.cmake\" +\" +set(FORTRAN_DIR \\\"\$\{CMAKE_Fortran_IMPLICIT_LINK_DIRECTORIES\}\\\") +\") +") + execute_process( + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/temp/ + COMMAND ${CMAKE_COMMAND} . 
+ ) + set(FORTRAN_DIR "") + include(build/temp/FortranDir.cmake) + find_library(FORTRAN_LIB NAMES gfortran HINTS ${FORTRAN_DIR}) + message("FORTRAN_DIR is ${FORTRAN_DIR}") + message("FORTRAN_LIB is ${FORTRAN_LIB}") + list(APPEND mshadow_LINKER_LIBS ${FORTRAN_LIB}) + file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/temp/") + endif() + endif() + elseif(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl") find_package(MKL REQUIRED) include_directories(SYSTEM ${MKL_INCLUDE_DIR}) diff --git a/config/distribution/linux_cpu.cmake b/config/distribution/linux_cpu.cmake index cad348578454..5a8673bfbf91 100644 --- a/config/distribution/linux_cpu.cmake +++ b/config/distribution/linux_cpu.cmake @@ -28,3 +28,4 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support") set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") +set(USE_DIST_KVSTORE ON CACHE BOOL "Build with DIST_KVSTORE support") diff --git a/config/distribution/linux_cu100.cmake b/config/distribution/linux_cu100.cmake index d26b4d73eee7..e4b0095b32ed 100644 --- a/config/distribution/linux_cu100.cmake +++ b/config/distribution/linux_cu100.cmake @@ -21,6 +21,7 @@ set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") @@ -29,6 +30,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support") set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - +set(USE_LIBJPEG_TURBO ON CACHE 
BOOL "Build with libjpeg-turbo") +set(USE_DIST_KVSTORE ON CACHE BOOL "Build with DIST_KVSTORE support") set(CUDACXX "/usr/local/cuda-10.0/bin/nvcc" CACHE STRING "Cuda compiler") set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.5" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu101.cmake b/config/distribution/linux_cu101.cmake index aaf76cc10df1..c6336e2712ae 100644 --- a/config/distribution/linux_cu101.cmake +++ b/config/distribution/linux_cu101.cmake @@ -23,6 +23,7 @@ set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") @@ -31,6 +32,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support") set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - +set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo") +set(USE_DIST_KVSTORE ON CACHE BOOL "Build with DIST_KVSTORE support") set(CUDACXX "/usr/local/cuda-10.1/bin/nvcc" CACHE STRING "Cuda compiler") set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.5" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu102.cmake b/config/distribution/linux_cu102.cmake index 6b575683e919..a405a507d979 100644 --- a/config/distribution/linux_cu102.cmake +++ b/config/distribution/linux_cu102.cmake @@ -21,6 +21,7 @@ set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE 
BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") @@ -29,6 +30,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support") set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - +set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo") +set(USE_DIST_KVSTORE ON CACHE BOOL "Build with DIST_KVSTORE support") set(CUDACXX "/usr/local/cuda-10.2/bin/nvcc" CACHE STRING "Cuda compiler") set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.5" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu110.cmake b/config/distribution/linux_cu110.cmake index 7d44a993abf1..c58ec9ddabec 100644 --- a/config/distribution/linux_cu110.cmake +++ b/config/distribution/linux_cu110.cmake @@ -21,6 +21,7 @@ set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") @@ -29,6 +30,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support") set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - +set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo") +set(USE_DIST_KVSTORE ON CACHE BOOL "Build with DIST_KVSTORE support") set(CUDACXX "/usr/local/cuda-11.0/bin/nvcc" CACHE STRING "Cuda compiler") set(MXNET_CUDA_ARCH "5.0;6.0;7.0;8.0" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu92.cmake b/config/distribution/linux_cu112.cmake similarity index 82% 
rename from config/distribution/linux_cu92.cmake rename to config/distribution/linux_cu112.cmake index 63ab9fce20d8..b0d3d86b67e1 100644 --- a/config/distribution/linux_cu92.cmake +++ b/config/distribution/linux_cu112.cmake @@ -21,6 +21,7 @@ set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") set(USE_CUDA ON CACHE BOOL "Build with CUDA support") set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") +set(USE_NCCL ON CACHE BOOL "Build with NCCL support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") @@ -29,6 +30,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support") set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - -set(CUDACXX "/usr/local/cuda-9.2/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.2" CACHE STRING "Cuda architectures") +set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo") +set(USE_DIST_KVSTORE ON CACHE BOOL "Build with DIST_KVSTORE support") +set(CUDACXX "/usr/local/cuda-11.2/bin/nvcc" CACHE STRING "Cuda compiler") +set(MXNET_CUDA_ARCH "5.0;6.0;7.0;8.0;8.6" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu80.cmake b/config/distribution/linux_cu80.cmake deleted file mode 100644 index ce8e0083bcad..000000000000 --- a/config/distribution/linux_cu80.cmake +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") -set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") -set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") - -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") -set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") -set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") -set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") -set(USE_LAPACK ON CACHE BOOL "Build with lapack support") -set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") -set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") -set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - -set(CUDACXX "/usr/local/cuda-8.0/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;5.0;6.0;6.2" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu90.cmake b/config/distribution/linux_cu90.cmake deleted file mode 100644 index 01097cb882e4..000000000000 --- a/config/distribution/linux_cu90.cmake +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") -set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") -set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") - -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") -set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") -set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") -set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") -set(USE_LAPACK ON CACHE BOOL "Build with lapack support") -set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") -set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") -set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - -set(CUDACXX "/usr/local/cuda-9.0/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.2" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu91.cmake b/config/distribution/linux_cu91.cmake deleted file mode 100644 index f6301fa9f720..000000000000 --- a/config/distribution/linux_cu91.cmake +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") -set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") -set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") - -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") -set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") -set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") -set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") -set(USE_LAPACK ON CACHE BOOL "Build with lapack support") -set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") -set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") -set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - -set(CUDACXX "/usr/local/cuda-9.1/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.2" CACHE STRING "Cuda architectures") diff --git a/config/distribution/linux_cu75.cmake b/config/distribution/linux_native.cmake similarity index 81% rename from config/distribution/linux_cu75.cmake rename to config/distribution/linux_native.cmake index 45ba2b9de5d7..5673f19a9274 100644 --- a/config/distribution/linux_cu75.cmake +++ b/config/distribution/linux_native.cmake @@ -19,16 +19,12 @@ set(CMAKE_BUILD_TYPE "Distribution" CACHE STRING "Build type") set(CFLAGS "-mno-avx" CACHE STRING "CFLAGS") set(CXXFLAGS "-mno-avx" CACHE STRING "CXXFLAGS") -set(USE_CUDA ON CACHE BOOL "Build with CUDA support") -set(USE_CUDNN ON CACHE BOOL "Build with CUDA support") 
+set(USE_CUDA OFF CACHE BOOL "Build with CUDA support") set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support") set(USE_OPENMP ON CACHE BOOL "Build with Openmp support") set(USE_MKL_IF_AVAILABLE OFF CACHE BOOL "Use Intel MKL if found") -set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support") +set(USE_MKLDNN OFF CACHE BOOL "Build with MKL-DNN support") set(USE_LAPACK ON CACHE BOOL "Build with lapack support") set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.") set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support") set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support") - -set(CUDACXX "/usr/local/cuda-7.5/bin/nvcc" CACHE STRING "Cuda compiler") -set(MXNET_CUDA_ARCH "3.0;3.5;5.0;5.2" CACHE STRING "Cuda architectures") diff --git a/cpp-package/example/get_data.sh b/cpp-package/example/get_data.sh index e11077234ade..fda69ce2f087 100755 --- a/cpp-package/example/get_data.sh +++ b/cpp-package/example/get_data.sh @@ -53,10 +53,10 @@ download () { # MNIST dataset from: http://yann.lecun.com/exdb/mnist/ FILES=( - "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz" - "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz" - "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz" - "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz" + "https://web.archive.org/web/20160828233817/http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz" + "https://web.archive.org/web/20160828233817/http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz" + "https://web.archive.org/web/20160828233817/http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz" + "https://web.archive.org/web/20160828233817/http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz" "http://data.mxnet.io/data/mnist_train.csv.gz") for FILE in ${FILES[@]}; do diff --git a/cpp-package/tests/ci_test.sh b/cpp-package/tests/ci_test.sh index 39f9e06861b3..58f04b341654 100755 --- a/cpp-package/tests/ci_test.sh +++ 
b/cpp-package/tests/ci_test.sh @@ -60,8 +60,9 @@ cp ../../build/cpp-package/example/test_score . cp ../../build/cpp-package/example/test_ndarray_copy . ./test_ndarray_copy -cp ../../build/cpp-package/example/test_regress_label . -./test_regress_label +# skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/20011 +#cp ../../build/cpp-package/example/test_regress_label . +#./test_regress_label sh unittests/unit_test_mlp_csv.sh diff --git a/example/image-classification/train_mnist.py b/example/image-classification/train_mnist.py index d47521fc8ef8..2b256e22653e 100755 --- a/example/image-classification/train_mnist.py +++ b/example/image-classification/train_mnist.py @@ -32,7 +32,7 @@ def read_data(label, image): """ download and read data into numpy """ - base_url = 'http://yann.lecun.com/exdb/mnist/' + base_url = 'https://web.archive.org/web/20160828233817/http://yann.lecun.com/exdb/mnist/' with gzip.open(download_file(base_url+label, os.path.join('data',label))) as flbl: magic, num = struct.unpack(">II", flbl.read(8)) label = np.fromstring(flbl.read(), dtype=np.int8) diff --git a/make/staticbuild/linux_cu92.mk b/make/staticbuild/linux_cu112.mk similarity index 99% rename from make/staticbuild/linux_cu92.mk rename to make/staticbuild/linux_cu112.mk index bbaa4bfcd772..874e1576d34f 100644 --- a/make/staticbuild/linux_cu92.mk +++ b/make/staticbuild/linux_cu112.mk @@ -66,7 +66,7 @@ USE_CUDA = 1 # add the path to CUDA library to link and compile flag # if you have already add them to environment variable, leave it as NONE # USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-9.2 +USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-11.2 # whether to use CuDNN library USE_CUDNN = 1 @@ -170,3 +170,4 @@ EXTRA_OPERATORS = # git@github.com:dato-code/SFrame.git # SFRAME_PATH = $(HOME)/SFrame # MXNET_PLUGINS += plugin/sframe/plugin.mk + diff --git a/make/staticbuild/linux_cu75.mk b/make/staticbuild/linux_cu75.mk deleted file mode 
100644 index e263794600df..000000000000 --- a/make/staticbuild/linux_cu75.mk +++ /dev/null @@ -1,167 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. 
segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-7.5 - -# whether use CuDNN R3 library -USE_CUDNN = 1 - -# CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. 
Python) -ENABLE_CUDA_RTC = 1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. 
-# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/make/staticbuild/linux_cu80.mk b/make/staticbuild/linux_cu80.mk deleted file mode 100644 index a42220d3d467..000000000000 --- a/make/staticbuild/linux_cu80.mk +++ /dev/null @@ -1,170 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-8.0 - -# whether to use CuDNN library -USE_CUDNN = 1 - -# whether to use NCCL library -USE_NCCL = 1 - -# CUDA architecture setting: going with all of them. 
-# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. Python) -ENABLE_CUDA_RTC = 1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. 
If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. -# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/make/staticbuild/linux_cu90.mk b/make/staticbuild/linux_cu90.mk deleted file mode 100644 index c46c10f6358b..000000000000 --- a/make/staticbuild/linux_cu90.mk +++ /dev/null @@ -1,172 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during 
compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-9.0 - -# whether to use CuDNN library -USE_CUDNN = 1 - -# whether to use NCCL library -USE_NCCL = 1 - -# CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. Python) -ENABLE_CUDA_RTC = 1 - -USE_NVTX=1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. 
If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. -# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/make/staticbuild/linux_cu91.mk b/make/staticbuild/linux_cu91.mk deleted file mode 100644 index b2a33d7e36c8..000000000000 --- a/make/staticbuild/linux_cu91.mk +++ /dev/null @@ -1,172 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -#------------------------------------------------------------------------------- -# Template configuration for compiling mxnet for making python wheel -#------------------------------------------------------------------------------- - -#--------------------- -# choice of compiler -#-------------------- - -export CC = gcc -export CXX = g++ -export NVCC = nvcc - -# whether compile with options for MXNet developer -DEV = 0 - -# whether compile with debug -DEBUG = 0 - -# whether to turn on signal handler (e.g. segfault logger) -USE_SIGNAL_HANDLER = 1 - -# the additional link flags you want to add -ADD_LDFLAGS += -L$(DEPS_PATH)/lib $(DEPS_PATH)/lib/libculibos.a -lpng -ltiff -ljpeg -lz -ldl -lgfortran -Wl,--version-script=$(CURDIR)/make/config/libmxnet.ver,-rpath,'$${ORIGIN}',--gc-sections - -# the additional compile flags you want to add -ADD_CFLAGS += -I$(DEPS_PATH)/include -ffunction-sections -fdata-sections - -#--------------------------------------------- -# matrix computation libraries for CPU/GPU -#--------------------------------------------- - -# choose the version of blas you want to use -# can be: mkl, blas, atlas, openblas -# in default use atlas for linux while apple for osx -USE_BLAS=openblas - -# whether use opencv during compilation -# you can disable it, however, you will not able to use -# imbin iterator -USE_OPENCV = 1 -# Add OpenCV include path, in which the directory `opencv2` exists -USE_OPENCV_INC_PATH = NONE -# Add OpenCV shared library path, in which the shared library exists -USE_OPENCV_LIB_PATH = NONE - -# whether use CUDA during 
compile -USE_CUDA = 1 - -# add the path to CUDA library to link and compile flag -# if you have already add them to environment variable, leave it as NONE -# USE_CUDA_PATH = /usr/local/cuda -USE_CUDA_PATH = $(DEPS_PATH)/usr/local/cuda-9.1 - -# whether to use CuDNN library -USE_CUDNN = 1 - -# whether to use NCCL library -USE_NCCL = 1 - -# CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. -# CUDA_ARCH := - -# whether use cuda runtime compiling for writing kernels in native language (i.e. Python) -ENABLE_CUDA_RTC = 1 - -USE_NVTX=1 - -# use openmp for parallelization -USE_OPENMP = 1 -USE_OPERATOR_TUNING = 1 -USE_LIBJPEG_TURBO = 1 - -# whether use MKL-DNN library -USE_MKLDNN = 1 - -# whether use NNPACK library -USE_NNPACK = 0 - -# whether use lapack during compilation -# only effective when compiled with blas versions openblas/apple/atlas/mkl -USE_LAPACK = 1 - -# path to lapack library in case of a non-standard installation -USE_LAPACK_PATH = $(DEPS_PATH)/lib - -# add path to intel library, you may need it for MKL, if you did not add the path -# to environment variable -USE_INTEL_PATH = NONE - -# If use MKL, choose static link automatically to allow python wrapper -ifeq ($(USE_BLAS), mkl) -USE_STATIC_MKL = 1 -else -USE_STATIC_MKL = NONE -endif - -#---------------------------- -# Settings for power and arm arch -#---------------------------- -ARCH := $(shell uname -a) -ifneq (,$(filter $(ARCH), armv6l armv7l powerpc64le ppc64le aarch64)) - USE_SSE=0 -else - USE_SSE=1 -endif - -#---------------------------- -# distributed computing -#---------------------------- - -# whether or not to enable multi-machine supporting -USE_DIST_KVSTORE = 1 - -# whether or not allow to read and write HDFS directly. If yes, then hadoop is -# required -USE_HDFS = 0 - -# path to libjvm.so. required if USE_HDFS=1 -LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server - -# whether or not allow to read and write AWS S3 directly. 
If yes, then -# libcurl4-openssl-dev is required, it can be installed on Ubuntu by -# sudo apt-get install -y libcurl4-openssl-dev -USE_S3 = 1 - -#---------------------------- -# additional operators -#---------------------------- - -# path to folders containing projects specific operators that you don't want to put in src/operators -EXTRA_OPERATORS = - - -#---------------------------- -# plugins -#---------------------------- - -# whether to use caffe integration. This requires installing caffe. -# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH -# CAFFE_PATH = $(HOME)/caffe -# MXNET_PLUGINS += plugin/caffe/caffe.mk - -# whether to use torch integration. This requires installing torch. -# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH -# TORCH_PATH = $(HOME)/torch -# MXNET_PLUGINS += plugin/torch/torch.mk - -# WARPCTC_PATH = $(HOME)/warp-ctc -# MXNET_PLUGINS += plugin/warpctc/warpctc.mk - -# whether to use sframe integration. This requires build sframe -# git@github.com:dato-code/SFrame.git -# SFRAME_PATH = $(HOME)/SFrame -# MXNET_PLUGINS += plugin/sframe/plugin.mk diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py index 5fee473554e4..ceb8c6ee5d51 100644 --- a/tests/python/gpu/test_operator_gpu.py +++ b/tests/python/gpu/test_operator_gpu.py @@ -2248,6 +2248,7 @@ def kernel_error_check_symbolic(): f.forward() g = f.outputs[0].asnumpy() +@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/20011') def test_kernel_error_checking(): # Running tests that may throw exceptions out of worker threads will stop CI testing # if not run in a separate process (with its own address space for CUDA compatibility). diff --git a/tools/dependencies/openblas.sh b/tools/dependencies/openblas.sh index cdc63b6a8355..8871d0439fa3 100755 --- a/tools/dependencies/openblas.sh +++ b/tools/dependencies/openblas.sh @@ -42,8 +42,6 @@ if [[ ((! 
-e $DEPS_PATH/lib/libopenblas.a) && -z "$CMAKE_STATICBUILD") || fi $MAKE PREFIX=$DEPS_PATH install - - if [[ -z "$CMAKE_STATICBUILD" ]]; then # Manually removing .so to avoid linking against it rm $DEPS_PATH/lib/libopenblasp-r${OPENBLAS_VERSION}.so diff --git a/tools/dependencies/zmq.sh b/tools/dependencies/zmq.sh index 11d7063200b5..a0cba2e1d0c9 100755 --- a/tools/dependencies/zmq.sh +++ b/tools/dependencies/zmq.sh @@ -20,6 +20,12 @@ # This script builds the static library of zeroMQ that can be used as dependency of mxnet. set -ex ZEROMQ_VERSION=4.2.2 +if [[ $PLATFORM == 'darwin' ]]; then + DY_EXT="dylib" +else + DY_EXT="so" +fi + if [[ ! -f $DEPS_PATH/lib/libzmq.a ]]; then # Download and build zmq >&2 echo "Building zmq..." @@ -37,5 +43,14 @@ if [[ ! -f $DEPS_PATH/lib/libzmq.a ]]; then -D BUILD_SHARED_LIBS=OFF .. $MAKE $MAKE install + + if [[ ! -f $DEPS_PATH/lib/libzmq.a ]]; then + rm $DEPS_PATH/lib64/*zmq*$DY_EXT* + mkdir -p $DEPS_PATH/lib + cp $DEPS_PATH/lib64/*zmq* $DEPS_PATH/lib + else + rm $DEPS_PATH/lib/*zmq*$DY_EXT* + fi + popd -fi +fi \ No newline at end of file diff --git a/tools/pip/doc/CPU_ADDITIONAL.md b/tools/pip/doc/CPU_ADDITIONAL.md index 090186e652ae..224ca9892f7d 100644 --- a/tools/pip/doc/CPU_ADDITIONAL.md +++ b/tools/pip/doc/CPU_ADDITIONAL.md @@ -18,11 +18,11 @@ Prerequisites ------------- This package supports Linux, Mac OSX, and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. 
- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To use this package on Linux you need the `libquadmath.so.0` shared library. On @@ -33,7 +33,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. Installation ------------ diff --git a/tools/pip/doc/CU100_ADDITIONAL.md b/tools/pip/doc/CU100_ADDITIONAL.md index c7638e83b0c1..b0399e54743f 100644 --- a/tools/pip/doc/CU100_ADDITIONAL.md +++ b/tools/pip/doc/CU100_ADDITIONAL.md @@ -18,11 +18,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). 
diff --git a/tools/pip/doc/CU101_ADDITIONAL.md b/tools/pip/doc/CU101_ADDITIONAL.md index 44ebb894c2db..7014f3293556 100644 --- a/tools/pip/doc/CU101_ADDITIONAL.md +++ b/tools/pip/doc/CU101_ADDITIONAL.md @@ -18,11 +18,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.0 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,7 +35,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.incubator.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. 
Installation ------------ diff --git a/tools/pip/doc/CU102_ADDITIONAL.md b/tools/pip/doc/CU102_ADDITIONAL.md index 4a81de827d60..ce3f52811f5f 100644 --- a/tools/pip/doc/CU102_ADDITIONAL.md +++ b/tools/pip/doc/CU102_ADDITIONAL.md @@ -18,11 +18,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,7 +35,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.incubator.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. 
Installation ------------ diff --git a/tools/pip/doc/CU110_ADDITIONAL.md b/tools/pip/doc/CU110_ADDITIONAL.md index 8eaa7b204d35..2745a68ca6d7 100644 --- a/tools/pip/doc/CU110_ADDITIONAL.md +++ b/tools/pip/doc/CU110_ADDITIONAL.md @@ -18,11 +18,12 @@ Prerequisites ------------- This package supports Linux and Windows platforms. You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,7 +35,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.incubator.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. 
Installation ------------ diff --git a/tools/pip/doc/CU92_ADDITIONAL.md b/tools/pip/doc/CU112_ADDITIONAL.md similarity index 92% rename from tools/pip/doc/CU92_ADDITIONAL.md rename to tools/pip/doc/CU112_ADDITIONAL.md index a141e5686915..5e7c135f783c 100644 --- a/tools/pip/doc/CU92_ADDITIONAL.md +++ b/tools/pip/doc/CU112_ADDITIONAL.md @@ -23,6 +23,7 @@ This package supports Linux and Windows platforms. You may also want to check: - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. - [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). +- [mxnet-native](https://pypi.python.org/pypi/mxnet-native/) CPU variant without MKLDNN. To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,11 +35,11 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. Installation ------------ To install: ```bash -pip install mxnet-cu92 +pip install mxnet-cu112 ``` diff --git a/tools/pip/doc/NATIVE_ADDITIONAL.md b/tools/pip/doc/NATIVE_ADDITIONAL.md index 23c592b811ba..88677f7fa207 100644 --- a/tools/pip/doc/NATIVE_ADDITIONAL.md +++ b/tools/pip/doc/NATIVE_ADDITIONAL.md @@ -18,10 +18,11 @@ Prerequisites ------------- This package supports Linux and Windows platforms. 
You may also want to check: +- [mxnet-cu112](https://pypi.python.org/pypi/mxnet-cu112/) with CUDA-11.2 support. - [mxnet-cu110](https://pypi.python.org/pypi/mxnet-cu110/) with CUDA-11.0 support. - [mxnet-cu102](https://pypi.python.org/pypi/mxnet-cu102/) with CUDA-10.2 support. - [mxnet-cu101](https://pypi.python.org/pypi/mxnet-cu101/) with CUDA-10.1 support. -- [mxnet-cu92](https://pypi.python.org/pypi/mxnet-cu92/) with CUDA-9.2 support. +- [mxnet-cu100](https://pypi.python.org/pypi/mxnet-cu100/) with CUDA-10.0 support. - [mxnet](https://pypi.python.org/pypi/mxnet/). To download CUDA, check [CUDA download](https://developer.nvidia.com/cuda-downloads). For more instructions, check [CUDA Toolkit online documentation](http://docs.nvidia.com/cuda/index.html). @@ -34,7 +35,7 @@ a GPL library and MXNet part of the Apache Software Foundation, MXNet must not redistribute `libquadmath.so.0` as part of the Pypi package and users must manually install it. -To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master/install/index.html) for instructions on building from source. +To install for other platforms (e.g. Windows, Raspberry Pi/ARM) or other versions, check [Installing MXNet](https://mxnet.apache.org/versions/master) for instructions on building from source. 
Installation ------------ diff --git a/tools/pip/setup.py b/tools/pip/setup.py index 1950f1dd28ac..9f56e20d9e4b 100644 --- a/tools/pip/setup.py +++ b/tools/pip/setup.py @@ -137,7 +137,9 @@ def skip_markdown_comments(md): if variant == 'CPU': libraries.append('openblas') else: - if variant.startswith('CU110'): + if variant.startswith('CU112'): + libraries.append('CUDA-11.2') + elif variant.startswith('CU110'): libraries.append('CUDA-11.0') elif variant.startswith('CU102'): libraries.append('CUDA-10.2') @@ -145,18 +147,10 @@ def skip_markdown_comments(md): libraries.append('CUDA-10.1') elif variant.startswith('CU100'): libraries.append('CUDA-10.0') - elif variant.startswith('CU92'): - libraries.append('CUDA-9.2') - elif variant.startswith('CU91'): - libraries.append('CUDA-9.1') - elif variant.startswith('CU90'): - libraries.append('CUDA-9.0') - elif variant.startswith('CU80'): - libraries.append('CUDA-8.0') - elif variant.startswith('CU75'): - libraries.append('CUDA-7.5') - if variant.endswith('MKL'): - libraries.append('MKLDNN') + +from mxnet.runtime import Features +if Features().is_enabled("MKLDNN"): + libraries.append('MKLDNN') short_description += ' This version uses {0}.'.format(' and '.join(libraries)) diff --git a/tools/setup_gpu_build_tools.sh b/tools/setup_gpu_build_tools.sh index 754022a9b516..bd31fc085403 100755 --- a/tools/setup_gpu_build_tools.sh +++ b/tools/setup_gpu_build_tools.sh @@ -29,7 +29,17 @@ VARIANT=$1 DEPS_PATH=$2 >&2 echo "Setting CUDA versions for $VARIANT" -if [[ $VARIANT == cu110* ]]; then +if [[ $VARIANT == cu112* ]]; then + CUDA_VERSION='11.2.67-1' + CUDA_PATCH_VERSION='11.4.1.1026-1' + CUDA_LIBS_VERSION='10.2.3.135-1' + CUDA_SOLVER_VERSION='11.1.0.135-1' + LIBCUDA_VERSION='460.32.03-0ubuntu1' + LIBCUDNN_VERSION='8.1.0.77-1+cuda11.2' + LIBNCCL_VERSION='2.8.3-1+cuda11.2' + LIBCUDART_VERSION='11.2.72-1' + LIBCUFFT_VERSION='10.4.0.135-1' +elif [[ $VARIANT == cu110* ]]; then CUDA_VERSION='11.0.221-1' CUDA_PATCH_VERSION='11.2.0.252-1' 
CUDA_LIBS_VERSION='10.2.1.245-1' @@ -56,36 +66,6 @@ elif [[ $VARIANT == cu100* ]]; then LIBCUDA_VERSION='410.48-0ubuntu1' LIBCUDNN_VERSION='7.6.5.32-1+cuda10.0' LIBNCCL_VERSION='2.5.6-1+cuda10.0' -elif [[ $VARIANT == cu92* ]]; then - CUDA_VERSION='9.2.148-1' - CUDA_PATCH_VERSION='9.2.148.1-1' - LIBCUDA_VERSION='396.44-0ubuntu1' - LIBCUDNN_VERSION='7.6.5.32-1+cuda9.2' - LIBNCCL_VERSION='2.4.8-1+cuda9.2' -elif [[ $VARIANT == cu91* ]]; then - CUDA_VERSION='9.1.85-1' - CUDA_PATCH_VERSION='9.1.85.3-1' - LIBCUDA_VERSION='396.44-0ubuntu1' - LIBCUDNN_VERSION='7.1.3.16-1+cuda9.1' - LIBNCCL_VERSION='2.2.12-1+cuda9.1' -elif [[ $VARIANT == cu90* ]]; then - CUDA_VERSION='9.0.176-1' - CUDA_PATCH_VERSION='9.0.176.3-1' - LIBCUDA_VERSION='384.145-0ubuntu1' - LIBCUDNN_VERSION='7.6.5.32-1+cuda9.0' - LIBNCCL_VERSION='2.5.6-1+cuda9.0' -elif [[ $VARIANT == cu80* ]]; then - CUDA_VERSION='8.0.61-1' - CUDA_PATCH_VERSION='8.0.61.2-1' - LIBCUDA_VERSION='375.88-0ubuntu1' - LIBCUDNN_VERSION='7.2.1.38-1+cuda8.0' - LIBNCCL_VERSION='2.3.4-1+cuda8.0' -elif [[ $VARIANT == cu75* ]]; then - CUDA_VERSION='7.5-18' - CUDA_PATCH_VERSION='7.5-18' - LIBCUDA_VERSION='375.88-0ubuntu1' - LIBCUDNN_VERSION='6.0.21-1+cuda7.5' - LIBNCCL_VERSION='' fi if [[ $VARIANT == cu* ]]; then CUDA_MAJOR_VERSION=$(echo $CUDA_VERSION | tr '-' '.' | cut -d. 
-f1,2) @@ -97,7 +77,7 @@ if [[ $VARIANT == cu* ]]; then os_name=$(cat /etc/*release | grep '^ID=' | sed 's/^.*=//g') os_version=$(cat /etc/*release | grep VERSION_ID | sed 's/^.*"\([0-9]*\)\.\([0-9]*\)"/\1\2/g') os_id="${os_name}${os_version}" - if [[ $CUDA_MAJOR_DASH == 9-* ]] || [[ $CUDA_MAJOR_DASH == 10-* ]] || [[ $CUDA_MAJOR_DASH == 11-* ]]; then + if [[ $CUDA_MAJOR_DASH == 9-* ]] || [[ $CUDA_MAJOR_DASH == 10-* ]] || [[ $CUDA_MAJOR_DASH == 11-* ]] ; then os_id="ubuntu1604" fi export PATH=/usr/lib/binutils-2.26/bin/:${PATH}:$DEPS_PATH/usr/local/cuda-$CUDA_MAJOR_VERSION/bin @@ -109,7 +89,33 @@ if [[ $VARIANT == cu* ]]; then fi # list of debs to download from nvidia -if [[ $VARIANT == cu110* ]]; then +if [[ $VARIANT == cu112* ]]; then + cuda_files=( \ + "libcublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ + "libcublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ + "cuda-cudart-${CUDA_MAJOR_DASH}_${LIBCUDART_VERSION}_amd64.deb" \ + "cuda-cudart-dev-${CUDA_MAJOR_DASH}_${LIBCUDART_VERSION}_amd64.deb" \ + "libcurand-${CUDA_MAJOR_DASH}_${CUDA_LIBS_VERSION}_amd64.deb" \ + "libcurand-dev-${CUDA_MAJOR_DASH}_${CUDA_LIBS_VERSION}_amd64.deb" \ + "libcufft-${CUDA_MAJOR_DASH}_${LIBCUFFT_VERSION}_amd64.deb" \ + "libcufft-dev-${CUDA_MAJOR_DASH}_${LIBCUFFT_VERSION}_amd64.deb" \ + "cuda-nvrtc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ + "cuda-nvrtc-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ + "libcusolver-${CUDA_MAJOR_DASH}_${CUDA_SOLVER_VERSION}_amd64.deb" \ + "libcusolver-dev-${CUDA_MAJOR_DASH}_${CUDA_SOLVER_VERSION}_amd64.deb" \ + "cuda-nvcc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ + "cuda-nvtx-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ + "libcuda1-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ + "cuda-nvprof-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ + "nvidia-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ + "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ + 
"libcudnn${LIBCUDNN_MAJOR}_${LIBCUDNN_VERSION}_amd64.deb" \ + ) + ml_files=( \ + "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ + "libnccl2_${LIBNCCL_VERSION}_amd64.deb" \ + ) +elif [[ $VARIANT == cu110* ]]; then cuda_files=( \ "libcublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ "libcublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ @@ -131,7 +137,9 @@ if [[ $VARIANT == cu110* ]]; then ) ml_files=( \ "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ + "libcudnn${LIBCUDNN_MAJOR}_${LIBCUDNN_VERSION}_amd64.deb" \ "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ + "libnccl2_${LIBNCCL_VERSION}_amd64.deb" \ ) elif [[ $VARIANT == cu102* ]]; then cuda_files=( \ @@ -156,7 +164,9 @@ elif [[ $VARIANT == cu102* ]]; then ) ml_files=( \ "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ + "libcudnn${LIBCUDNN_MAJOR}_${LIBCUDNN_VERSION}_amd64.deb" \ "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ + "libnccl2_${LIBNCCL_VERSION}_amd64.deb" \ ) elif [[ $VARIANT == cu101* ]]; then cuda_files=( \ @@ -181,7 +191,9 @@ elif [[ $VARIANT == cu101* ]]; then ) ml_files=( \ "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ + "libcudnn${LIBCUDNN_MAJOR}_${LIBCUDNN_VERSION}_amd64.deb" \ "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ + "libnccl2_${LIBNCCL_VERSION}_amd64.deb" \ ) elif [[ $VARIANT == cu100* ]]; then cuda_files=( \ @@ -206,125 +218,9 @@ elif [[ $VARIANT == cu100* ]]; then ) ml_files=( \ "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ + "libcudnn${LIBCUDNN_MAJOR}_${LIBCUDNN_VERSION}_amd64.deb" \ "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ - ) -elif [[ $VARIANT == cu92* ]]; then - cuda_files=( \ - "cuda-core-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cudart-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - 
"cuda-cudart-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-curand-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-misc-headers-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvcc-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-nvtx-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "libcuda1-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - "nvidia-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - ) - ml_files=( \ - "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ - "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ - ) -elif [[ $VARIANT == cu91* ]]; then - cuda_files=( \ - "cuda-core-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cudart-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cudart-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - 
"cuda-misc-headers-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvcc-${CUDA_MAJOR_DASH}_9.1.85.2-1_amd64.deb" \ - "cuda-nvtx-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "libcuda1-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - "nvidia-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - ) - ml_files=( \ - "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ - "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ - ) -elif [[ $VARIANT == cu90* ]]; then - cuda_files=( \ - "cuda-core-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cublas-${CUDA_MAJOR_DASH}_9.0.176.4-1_amd64.deb" \ - "cuda-cublas-dev-${CUDA_MAJOR_DASH}_9.0.176.4-1_amd64.deb" \ - "cuda-cudart-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cudart-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-misc-headers-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "libcuda1-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - "nvidia-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - ) - ml_files=( \ - "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ - "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ - ) -elif [[ $VARIANT == cu80* ]]; then - cuda_files=( \ - "cuda-core-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cudart-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - 
"cuda-cudart-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-misc-headers-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "libcuda1-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - "nvidia-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - ) - ml_files=( \ - "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ - "libnccl-dev_${LIBNCCL_VERSION}_amd64.deb" \ - ) -elif [[ $VARIANT == cu75* ]]; then - cuda_files=( \ - "cuda-core-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cublas-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cublas-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cudart-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-cudart-dev-${CUDA_MAJOR_DASH}_${CUDA_PATCH_VERSION}_amd64.deb" \ - "cuda-curand-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-curand-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cufft-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-nvrtc-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-cusolver-dev-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "cuda-misc-headers-${CUDA_MAJOR_DASH}_${CUDA_VERSION}_amd64.deb" \ - "libcuda1-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - 
"nvidia-${LIBCUDA_MAJOR}_${LIBCUDA_VERSION}_amd64.deb" \ - ) - ml_files=( \ - "libcudnn${LIBCUDNN_MAJOR}-dev_${LIBCUDNN_VERSION}_amd64.deb" \ + "libnccl2_${LIBNCCL_VERSION}_amd64.deb" \ ) fi @@ -342,7 +238,12 @@ if [[ ! -d $DEPS_PATH/usr/local/cuda-${CUDA_MAJOR_VERSION} ]]; then for item in ${ml_files[*]} do echo "Installing $item" - curl -sL "http://developer.download.nvidia.com/compute/machine-learning/repos/${os_id}/x86_64/${item}" -o package.deb + if [[ $item == libnccl* ]] && [[ $VARIANT == cu112* ]] ; then + echo "variant ${VARIANT} and installing ${item}" + curl -sL "http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/${item}" -o package.deb + else + curl -sL "http://developer.download.nvidia.com/compute/machine-learning/repos/${os_id}/x86_64/${item}" -o package.deb + fi dpkg -X package.deb ${prefix} rm package.deb done @@ -355,9 +256,7 @@ if [[ ! -d $DEPS_PATH/usr/local/cuda-${CUDA_MAJOR_VERSION} ]]; then done fi cp -f ${prefix}/usr/include/x86_64-linux-gnu/cudnn_v${LIBCUDNN_MAJOR}.h ${prefix}/include/cudnn.h - ln -sf libcudnn_static_v${LIBCUDNN_MAJOR}.a ${prefix}/usr/lib/x86_64-linux-gnu/libcudnn.a - cp -f ${prefix}/usr/local/cuda-${CUDA_MAJOR_VERSION}/lib64/*.a ${prefix}/lib/ + ln -sf ${prefix}/usr/lib/x86_64-linux-gnu/libcudnn.so.${LIBCUDNN_MAJOR} ${prefix}/lib/libcudnn.so cp -f ${prefix}/usr/include/nccl.h ${prefix}/include/nccl.h - ln -sf libnccl_static.a ${prefix}/usr/lib/x86_64-linux-gnu/libnccl.a fi diff --git a/tools/staticbuild/README.md b/tools/staticbuild/README.md index e21abdfa0b3e..2ca892331a74 100644 --- a/tools/staticbuild/README.md +++ b/tools/staticbuild/README.md @@ -25,9 +25,9 @@ automatically identifing the system version, number of cores, and all environment variable settings. Here are examples you can run with this script: ``` -tools/staticbuild/build.sh cu102 +tools/staticbuild/build.sh cu112 ``` -This would build the mxnet package based on CUDA 10.2. 
Currently, we support variants cpu, native, cu92, cu100, cu101, cu102 and cu110. All of these variants expect native have MKL-DNN backend enabled. +This would build the mxnet package based on CUDA 11.2. Currently, we support variants cpu, native, cu100, cu101, cu102, cu110, and cu112. All of these variants except native have MKL-DNN backend enabled. ``` tools/staticbuild/build.sh cpu