author    | V3n3RiX <venerix@koprulu.sector> | 2024-06-07 00:08:01 +0100
committer | V3n3RiX <venerix@koprulu.sector> | 2024-06-07 00:08:01 +0100
commit    | 941687623283212199cbe6e36f7d61b89d6222b2 (patch)
tree      | ba7fb5198fd46b0b501a9c4362be2c26738408ad /sci-libs/caffe2
parent    | 44cec61076bf2bbb3cea1fc89943dd6818a5acc5 (diff)
gentoo auto-resync : 07:06:2024 - 00:08:01
Diffstat (limited to 'sci-libs/caffe2')
-rw-r--r-- | sci-libs/caffe2/Manifest            |   2
-rw-r--r-- | sci-libs/caffe2/caffe2-2.3.1.ebuild | 274
2 files changed, 276 insertions, 0 deletions
diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 09293913770e..460740d2eb1a 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -13,6 +13,8 @@ AUX caffe2-2.3.0-cudnn_include_fix.patch 587 BLAKE2B 8ffeb9080ee77b953e7a77c9bea
 AUX caffe2-2.3.0-rocm-fix-std-cpp17.patch 3378 BLAKE2B 9e88fa1bf68c397c8122ea5b3504a22b3f6ef92c77dad8bd84ee03b4f75792b0e1281d8b1aa981ad1bf65060179fa08ef14e776e82abdec9147dfbb3bf37a7ae SHA512 7797a140abf736f2a4628cd727cf0c58ed39c9764b9ce3b67d17fc0c9b9965e647266c815e5322f96f807680120e25ccdbbc66b66c7c6cf84edb811330ad452c
 DIST pytorch-2.2.2.tar.gz 116367503 BLAKE2B 0be22f2ec4b9aac6f5e976664cae01facf07929a32565cd57d7cc5b2d9888e9ae71ca301853752fe8f31d174d04c9974eb9ed2f3d452360a50ccf024f200726a SHA512 7990e0f9484038c3458c0bda2c863bf2b19e56edab81fc5938c6e0f08b17558287f853bb67350e8cca8f42bec0f1d4ba0e94e50a145db8da44bdd4bd703d91d0
 DIST pytorch-2.3.0.tar.gz 117029829 BLAKE2B 8f9c0d71ee0a9219b495eddccdcc65107f7ad537c43c68100b229f3d27b0e6c01ccb1659c7fffc356a48d80f2adc0a10361305dc8f1df20446de837d380f89f6 SHA512 67f7e9a096c3ffb952206ebf9105bedebb68c24ad82456083adf1d1d210437fcaa9dd52b68484cfc97d408c9eebc9541c76868c34a7c9982494dc3f424cfb07c
+DIST pytorch-2.3.1.tar.gz 117035696 BLAKE2B d419d7fa1342f1fb317ffce09ec9dc1447414627cc83d36578fe60f68c283c620b2b4d49f414cd206d537b90b16432a06cd1941662720db05d5e2b6c493325f5 SHA512 e1bcae44f9939fc7ccb1360a9b1970d92426f25e5de73e36964df3dd15ad5d8d9f5bd2f9a7dda6b8f64e2bba3674005bd869f542489cc442ad0125a02676f587
 EBUILD caffe2-2.2.2-r1.ebuild 7452 BLAKE2B 6b4c14a8304ef36349f6442616bb1e2a9563596293ea507ffd1d9083fc01c251dd237db3babbdc9841273b7c31b8ac4b420e3047e6ffd128763ca9afc4b6aa16 SHA512 cfa525d40f3dfbcd4dcf7e31cb772dcd809d528981925b50239961e91ae2147723d261b71f433e5a22e73f59b0bd156a97d84b385d1d5bfffd51db9fc6db2b57
 EBUILD caffe2-2.3.0-r3.ebuild 7542 BLAKE2B 6b230db4d2a033b5078e8c61e40955a0981aa3b6b06734145e4bec8f78719329a3825218045a8f57efce9bfddad7b252ee4447d82cc0637db8656d5f8178b9cc SHA512 d3861111081d26a883790e7a5097ddf77ef2e491eb4471a3cc95d25e659ad8c283c22bda5674cb09d575768da5b6a401ea5347febd4f194dbedd4196d1ac628d
+EBUILD caffe2-2.3.1.ebuild 7542 BLAKE2B 6b230db4d2a033b5078e8c61e40955a0981aa3b6b06734145e4bec8f78719329a3825218045a8f57efce9bfddad7b252ee4447d82cc0637db8656d5f8178b9cc SHA512 d3861111081d26a883790e7a5097ddf77ef2e491eb4471a3cc95d25e659ad8c283c22bda5674cb09d575768da5b6a401ea5347febd4f194dbedd4196d1ac628d
 MISC metadata.xml 1225 BLAKE2B ab7fb0bf8b2d37ddaa1a9ecc815eb094e85465d20d3a30af081b42e0b60ade9858d0053b101ba0e7750a90cb48b5b79db9bdc2729bf66d0420732489da62fe54 SHA512 dfb58597fb4bcdd7df0fcc3f2514518e118e8fc9b1cd24868aab60c32a62ff419b8b72a7c294925eff4c8871cc8df606af7fa60bfa99901091d8195101ee1153
diff --git a/sci-libs/caffe2/caffe2-2.3.1.ebuild b/sci-libs/caffe2/caffe2-2.3.1.ebuild
new file mode 100644
index 000000000000..c01e904d8eb0
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.3.1.ebuild
@@ -0,0 +1,274 @@
+# Copyright 2022-2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{10..12} )
+ROCM_VERSION=5.7
+inherit python-single-r1 cmake cuda flag-o-matic prefix rocm
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+	-> ${MYP}.tar.gz"
+
+S="${WORKDIR}"/${MYP}
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed fbgemm ffmpeg flash gloo mkl mpi nnpack +numpy onednn openblas opencl
+	opencv openmp qnnpack rocm xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+	${PYTHON_REQUIRED_USE}
+	ffmpeg? ( opencv )
+	mpi? ( distributed )
+	gloo? ( distributed )
+	?? ( cuda rocm )
+	rocm? (
+		|| ( ${ROCM_REQUIRED_USE} )
+		!flash
+	)
+"
+
+# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
+RDEPEND="
+	${PYTHON_DEPS}
+	dev-cpp/gflags:=
+	>=dev-cpp/glog-0.5.0
+	dev-libs/cpuinfo
+	dev-libs/libfmt
+	dev-libs/protobuf:=
+	dev-libs/pthreadpool
+	dev-libs/sleef
+	virtual/lapack
+	sci-libs/onnx
+	sci-libs/foxi
+	cuda? (
+		dev-libs/cudnn
+		>=dev-libs/cudnn-frontend-1.0.3:0/8
+		<dev-util/nvidia-cuda-toolkit-12.4.0:=[profiler]
+	)
+	fbgemm? ( >=dev-libs/FBGEMM-2023.12.01 )
+	ffmpeg? ( media-video/ffmpeg:= )
+	gloo? ( sci-libs/gloo[cuda?] )
+	mpi? ( virtual/mpi )
+	nnpack? ( sci-libs/NNPACK )
+	numpy? ( $(python_gen_cond_dep '
+		dev-python/numpy[${PYTHON_USEDEP}]
+	') )
+	onednn? ( dev-libs/oneDNN )
+	opencl? ( virtual/opencl )
+	opencv? ( media-libs/opencv:= )
+	qnnpack? ( sci-libs/QNNPACK )
+	rocm? (
+		>=dev-util/hip-5.7
+		>=dev-libs/rccl-5.7[${ROCM_USEDEP}]
+		>=sci-libs/rocThrust-5.7[${ROCM_USEDEP}]
+		>=sci-libs/rocPRIM-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipBLAS-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipFFT-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipSPARSE-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipRAND-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipCUB-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipSOLVER-5.7[${ROCM_USEDEP}]
+		>=sci-libs/miopen-5.7[${ROCM_USEDEP}]
+		>=dev-util/roctracer-5.7[${ROCM_USEDEP}]
+	)
+	distributed? ( sci-libs/tensorpipe[cuda?] )
+	xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
+	mkl? ( sci-libs/mkl )
+	openblas? ( sci-libs/openblas )
+"
+DEPEND="
+	${RDEPEND}
+	cuda? ( >=dev-libs/cutlass-3.4.1 )
+	onednn? ( sci-libs/ideep )
+	dev-libs/psimd
+	dev-libs/FP16
+	dev-libs/FXdiv
+	dev-libs/pocketfft
+	dev-libs/flatbuffers
+	>=sci-libs/kineto-0.4.0_p20231031
+	$(python_gen_cond_dep '
+		dev-python/pyyaml[${PYTHON_USEDEP}]
+		dev-python/pybind11[${PYTHON_USEDEP}]
+		dev-python/typing-extensions[${PYTHON_USEDEP}]
+	')
+"
+
+PATCHES=(
+	"${FILESDIR}"/${PN}-2.2.1-gentoo.patch
+	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
+	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
+	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
+	"${FILESDIR}"/${P}-cudnn_include_fix.patch
+	"${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
+	"${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
+	"${FILESDIR}"/${P}-rocm-fix-std-cpp17.patch
+	"${FILESDIR}"/${PN}-2.2.2-musl.patch
+	"${FILESDIR}"/${P}-CMakeFix.patch
+)
+
+src_prepare() {
+	filter-lto #bug 862672
+	sed -i \
+		-e "/third_party\/gloo/d" \
+		cmake/Dependencies.cmake \
+		|| die
+	cmake_src_prepare
+	pushd torch/csrc/jit/serialization || die
+	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+	popd
+	# prefixify the hardcoded paths, after all patches are applied
+	hprefixify \
+		aten/CMakeLists.txt \
+		caffe2/CMakeLists.txt \
+		cmake/Metal.cmake \
+		cmake/Modules/*.cmake \
+		cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+		cmake/public/LoadHIP.cmake \
+		cmake/public/cuda.cmake \
+		cmake/Dependencies.cmake \
+		torch/CMakeLists.txt \
+		CMakeLists.txt
+
+	if use rocm; then
+		sed -e "s:/opt/rocm:/usr:" \
+			-e "s:lib/cmake:$(get_libdir)/cmake:g" \
+			-e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
+			-i cmake/public/LoadHIP.cmake || die
+
+		ebegin "HIPifying cuda sources"
+		${EPYTHON} tools/amd_build/build_amd.py || die
+		eend $?
+	fi
+}
+
+src_configure() {
+	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+		ewarn "These may not be optimal for your GPU."
+		ewarn ""
+		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5"
+		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
+		ewarn ""
+		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
+		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
+	fi
+
+	local mycmakeargs=(
+		-DBUILD_CUSTOM_PROTOBUF=OFF
+		-DBUILD_SHARED_LIBS=ON
+
+		-DUSE_CCACHE=OFF
+		-DUSE_CUDA=$(usex cuda)
+		-DUSE_DISTRIBUTED=$(usex distributed)
+		-DUSE_MPI=$(usex mpi)
+		-DUSE_FAKELOWP=OFF
+		-DUSE_FBGEMM=$(usex fbgemm)
+		-DUSE_FFMPEG=$(usex ffmpeg)
+		-DUSE_FLASH_ATTENTION=$(usex flash)
+		-DUSE_GFLAGS=ON
+		-DUSE_GLOG=ON
+		-DUSE_GLOO=$(usex gloo)
+		-DUSE_KINETO=OFF # TODO
+		-DUSE_LEVELDB=OFF
+		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+		-DUSE_MKLDNN=$(usex onednn)
+		-DUSE_NNPACK=$(usex nnpack)
+		-DUSE_QNNPACK=$(usex qnnpack)
+		-DUSE_XNNPACK=$(usex xnnpack)
+		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+		-DUSE_TENSORPIPE=$(usex distributed)
+		-DUSE_PYTORCH_QNNPACK=OFF
+		-DUSE_NUMPY=$(usex numpy)
+		-DUSE_OPENCL=$(usex opencl)
+		-DUSE_OPENCV=$(usex opencv)
+		-DUSE_OPENMP=$(usex openmp)
+		-DUSE_ROCM=$(usex rocm)
+		-DUSE_SYSTEM_CPUINFO=ON
+		-DUSE_SYSTEM_PYBIND11=ON
+		-DUSE_UCC=OFF
+		-DUSE_VALGRIND=OFF
+		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
+		-DPYTHON_EXECUTABLE="${PYTHON}"
+		-DUSE_ITT=OFF
+		-DUSE_SYSTEM_PTHREADPOOL=ON
+		-DUSE_SYSTEM_FXDIV=ON
+		-DUSE_SYSTEM_FP16=ON
+		-DUSE_SYSTEM_GLOO=ON
+		-DUSE_SYSTEM_ONNX=ON
+		-DUSE_SYSTEM_SLEEF=ON
+		-DUSE_METAL=OFF
+
+		-Wno-dev
+		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+	)
+
+	if use mkl; then
+		mycmakeargs+=(-DBLAS=MKL)
+	elif use openblas; then
+		mycmakeargs+=(-DBLAS=OpenBLAS)
+	else
+		mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
+	fi
+
+	if use cuda; then
+		addpredict "/dev/nvidiactl" # bug 867706
+		addpredict "/dev/char"
+		addpredict "/proc/self/task" # bug 926116
+
+		mycmakeargs+=(
+			-DUSE_CUDNN=ON
+			-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+			-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+			-DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+		)
+	elif use rocm; then
+		export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"

+		mycmakeargs+=(
+			-DUSE_NCCL=ON
+			-DUSE_SYSTEM_NCCL=ON
+		)
+	fi
+
+	if use onednn; then
+		mycmakeargs+=(
+			-DUSE_MKLDNN=ON
+			-DMKLDNN_FOUND=ON
+			-DMKLDNN_LIBRARIES=dnnl
+			-DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
+		)
+	fi
+
+	cmake_src_configure
+
+	# do not rerun cmake and the build process in src_install
+	sed '/RERUN/,+1d' -i "${BUILD_DIR}"/build.ninja || die
+}
+
+src_install() {
+	cmake_src_install
+
+	insinto "/var/lib/${PN}"
+	doins "${BUILD_DIR}"/CMakeCache.txt
+
+	rm -rf python
+	mkdir -p python/torch/include || die
+	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
+	cp torch/version.py python/torch/ || die
+	python_domodule python/caffe2
+	python_domodule python/torch
+	ln -s ../../../../../include/torch \
+		"${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269
+}
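
As the ewarn block in src_configure above notes, a USE=cuda build falls back to compute
capabilities 3.5 and 7.0 when TORCH_CUDA_ARCH_LIST is unset. A minimal sketch of the
suggested make.conf tweak, reusing the "7.5 3.5" example value from the ebuild (the right
value depends on your GPU, see https://developer.nvidia.com/cuda-gpus):

    # /etc/portage/make.conf -- illustrative snippet, adjust to your hardware
    TORCH_CUDA_ARCH_LIST="7.5 3.5"

    # then re-emerge the package, e.g.:
    # emerge --ask --oneshot sci-libs/caffe2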