Diffstat (limited to 'sci-libs')
-rw-r--r--  sci-libs/Manifest.gz | bin 42988 -> 42983 bytes
-rw-r--r--  sci-libs/ViSP/Manifest | 2
-rw-r--r--  sci-libs/ViSP/ViSP-3.5.0-r1.ebuild (renamed from sci-libs/ViSP/ViSP-3.5.0.ebuild) | 4
-rw-r--r--  sci-libs/caffe2/Manifest | 32
-rw-r--r--  sci-libs/caffe2/caffe2-2.2.2-r1.ebuild | 23
-rw-r--r--  sci-libs/caffe2/caffe2-2.3.0-r3.ebuild | 33
-rw-r--r--  sci-libs/caffe2/caffe2-2.3.1.ebuild | 33
-rw-r--r--  sci-libs/caffe2/caffe2-2.4.0.ebuild | 27
-rw-r--r--  sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch | 29
-rw-r--r--  sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch | 121
-rw-r--r--  sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch | 10
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch | 12
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch | 41
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch | 15
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch | 12
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch | 68
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch | 195
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.2.2-musl.patch | 13
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch | 11
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch | 11
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch | 35
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch | 17
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch | 24
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch | 18
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch | 235
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch | 68
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.4.0-exclude-aotriton.patch | 65
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.4.0-fix-openmp-link.patch | 14
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.4.0-gentoo.patch | 211
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.4.0-install-dirs.patch | 70
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.4.0-rocm-fix-std-cpp17.patch | 50
31 files changed, 70 insertions, 1429 deletions
diff --git a/sci-libs/Manifest.gz b/sci-libs/Manifest.gz
index f9de58feb5ff..0204b5d7e6b4 100644
--- a/sci-libs/Manifest.gz
+++ b/sci-libs/Manifest.gz
Binary files differ
diff --git a/sci-libs/ViSP/Manifest b/sci-libs/ViSP/Manifest
index d72cc9b2d475..e57bd4f5ff84 100644
--- a/sci-libs/ViSP/Manifest
+++ b/sci-libs/ViSP/Manifest
@@ -1,5 +1,5 @@
AUX ViSP-3.0.1-opencv.patch 403 BLAKE2B 0bc3da5cd6e73cdd47dac7b525f4f3fbe6e683aa79f9185a3e92cecaf36fdc30c018fe93d1e9f7ac69890b7807e16663e21e65025184b950a8f384c1ec2748d8 SHA512 3a6a84ac6ad2bce4fdf7228b4a296589239c2b4b019b3ad8e4654af71b00eca6102444f38c32604013ec62d60d067deb2902ea8cffffff6d97e0762602a013f4
AUX ViSP-3.2.0-ocv.patch 1065 BLAKE2B 830cecf11df2d9c5c909ab5e2b5e7430e953355feeea77e40b77d53f24d2f1799a529c8dfda061ca6a276a1e150d10fee475f63e798511ce0296e8fde1fd75a8 SHA512 35fbb672da815fd404bd483800bf3f27b4f62d41b1173b10292a964d23129f4ec8afd530f8de913500214633a425a1a7567341b6da317b9e74720cf9414f3eff
DIST visp-3.5.0.tar.gz 52005297 BLAKE2B a429c07c8d80e8790bdd72bf27b777c79784173cc8be2539656c1cfbc111e35b3f80207a866fe3aaca536033af9e9d5006bf7e10a56cef5655aa9615a674c5ac SHA512 5a968a78c8ed2c48da71fa3def482b84fd73961f012e82ea1d7dd9b7b14336be5596ac9fc1bdf16414793d2399431bbc4306710d31b62946e6b2c03f692751b8
-EBUILD ViSP-3.5.0.ebuild 2879 BLAKE2B 5c9eb22ec47648131fa455005669eb4ed1358effa398b33c73a88b48fca7f7fb2f5638e4a89518480d49f552a0fb43d777899a1fb8832f768fd4a1dff95b4d87 SHA512 ee6ae9e188f1f7fa60cef6048da6eeff7c70b931cba47fbbe49068c7dbbccdc0d5dfc4c3cfabc593d950ded8ca5e6ec531798ed5f09095954fa180f4c2ea5a57
+EBUILD ViSP-3.5.0-r1.ebuild 2972 BLAKE2B 58cf789eac70ba221b6c9acdec74278aebd12eb3e78be19d0961f17a8e09d63539644123bdfe7efed46cb835574e2fe00bbb18857e70c321a6bccad3f07bf2e4 SHA512 8ae7dfda52eaa227185382e5a0808c19a7334ff18a11eb3ef3cadadf583d6c9853c71cf87a03d228268864b97947e161c423bb286f59f72403515fe44b4b1aca
MISC metadata.xml 1384 BLAKE2B 5146b30499cd534556ef2b6f9048535296f38e5ae2dd2dca9670799de61cf666b74ec2ed602a35c56df839317221200129ef71fcf6c6026fe7d74a14ac70dd9c SHA512 4dbd648e138eea6bdb7a97d2eb32f1df909afa7a4922b4ffff356e511a4d73fa0095b65c3b70100c81436b1f9636f9190fedc98ced52c13db0f1d86ca86d9f66
diff --git a/sci-libs/ViSP/ViSP-3.5.0.ebuild b/sci-libs/ViSP/ViSP-3.5.0-r1.ebuild
index 5a7052834f6b..a8833954122d 100644
--- a/sci-libs/ViSP/ViSP-3.5.0.ebuild
+++ b/sci-libs/ViSP/ViSP-3.5.0-r1.ebuild
@@ -21,6 +21,7 @@ REQUIRED_USE="motif? ( coin )"
RESTRICT="!test? ( test )"
COMMON_DEPEND="
+ virtual/lapack
coin? (
>=media-libs/coin-4
virtual/opengl
@@ -76,6 +77,9 @@ src_configure() {
"-DUSE_GSL=$(usex gsl ON OFF)"
"-DUSE_JPEG=$(usex jpeg ON OFF)"
"-DUSE_PNG=$(usex png ON OFF)"
+ "-DUSE_LAPACK=ON"
+ # disable using the builtin copy
+ "-DWITH_LAPACK=OFF"
"-DUSE_LIBUSB_1=$(usex usb ON OFF)"
"-DUSE_OGRE=$(usex ogre ON OFF)"
"-DUSE_OIS=$(usex ogre ON OFF)"
diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index cee0d49092c1..f94847f10d56 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,32 +1,10 @@
-AUX caffe2-1.12.0-glog-0.6.0.patch 959 BLAKE2B eb0c18a34a2c5a1b68a420b9001eac5c8ff65ef9ca9ea1e5ad93ecef3cbaa595808105bbf6c8d3dcb9047559d23bc0398cda5922fb41e29a7588644c90c90fd8 SHA512 5873f11930ae5947c7dfbb19d0377d5535e395fcc1ab213e3c65c77b2e3bd911efc11cde2ca3e7134e9e7798ca7a483910113f331c0a23b73a5546aaaded40c5
-AUX caffe2-1.13.0-install-dirs.patch 5737 BLAKE2B 10b10e3a29976344c7596e1c4e7a604856c66d91cb7518308422f7ffac82ffe183dc81becab27866814c06e321e97458ab6a6908aa01379299c7fdf8fdccf153 SHA512 1e2e4f646cec24b5373872865522a909d1c154623e232c06e3da361e4d29b8d7520c446f53fbf42aab5f00bab05d1f278ce9425e506a9a5ac73cbb10570bd325
-AUX caffe2-1.13.1-tensorpipe.patch 374 BLAKE2B 46d45aae88f99c213f0da039bad5aa0c470a6c505ed1d28088ecb8dd445547e52a6d2c087c52c15b20da31808cd9213c1e672880e9bf4afe9c487358b4d4cd9d SHA512 d7315408b3ad4630715c18fff853b07e589ae78f5fcc8eef7f8791202fca459a3b23481856a50a5d432aec26482eb33adf37ba0a37d45f719ceb29081449345d
-AUX caffe2-2.0.0-cudnn_include_fix.patch 526 BLAKE2B 63959783a94bfa2b02f78e36e754ef6c54d9f3dfb0bfdedd0fb9ef6553204b54b2dfb1676312689bbc3eace897d5f0441faf9af376f27c3c831c3ffcece31e5d SHA512 ff2ba61a4c69620bb1078aa7235b015a6165bc0df65e5e4ec04d4f3515f80dc865cc904d279fefbc90784b571f41d82441ce8045a27947ae7d3d52505e813b3c
-AUX caffe2-2.0.0-gcc13.patch 1203 BLAKE2B df55608c6019802da32809cb210b60cf0debf38e8d7a20135e92465be28d3537f2d0b52c67a61b264e241539b64fa02aa99eefb856686a9f9e3c45291b8d6a70 SHA512 59e933c60abef3ac97a1e3b2cd7a9935f9fdde4bc3ed1c2c53c729af6d7ed735ffd38cbdac08d32c668cd2751f3f871cc833883efd19732c04a0f67fbb49c41d
-AUX caffe2-2.1.2-fix-openmp-link.patch 483 BLAKE2B dfe9ae0978b71449486d12f769c3c962001c138bc53b0605ed6f2215fe732356da90c5084488bc4d9f54c88aab2a426b8c935543f7ba0890783bb58da30a5cb4 SHA512 a9cee8ae2a8477a21e92bd4ef286af68efd90283593db7ed4641a826b19b4266bcab9c131c93be2b6c2f13d5cd545c766612791cf2e23cff9a731a03f3ad7648
-AUX caffe2-2.1.2-fix-rpath.patch 510 BLAKE2B 6585e8089acc6e7fe69713ad04a64263ab2e9d2d5cf6d3a844dbbc917383e4b7a0688ebdfb3142e3c42108c8c14a6435f48ae4ce3adee8db338286d60ed7b503 SHA512 ede770559b487fff736aded0cf4b077d8308c2b85c5cd139150b04bcb8b72a0d78af6a2b74fbb153b75acff71df0832d8f139231d1c18558f5b5479af348e6a0
-AUX caffe2-2.1.2-rocm-fix-std-cpp17.patch 3485 BLAKE2B 38f07fa876e1b628f1709dc35669ee62bbd5b00e23880800a0c430c72365799da86213ba76d344e9b46559ec1a4c98e1be6b46466bee8ebbf484976c597bb5ba SHA512 8dffee073c7744c3eba62f22b5f11d1f5db980fd7e9ac593ae44b489fe1eded532739db397f4599b8cda1b75c26e493dff81c2862056f62823570ab94a8f3389
-AUX caffe2-2.2.1-gentoo.patch 6765 BLAKE2B 924338e5823825d18220c33e9168f96b5987350cf47ad26824c660dfe953f54c415a4a709d9d7bab6901687f41f8477c0615ab76773e0e689ecf91e9a7f2ef64 SHA512 e9a20bc83a1c0349927205fc3675b1ad832cb86acca3a8d2f68a3dd33f2c1fa39fb53616b603023dce217d0a29fef34e6abb6315201690a8568d2419bcb9d895
-AUX caffe2-2.2.2-musl.patch 363 BLAKE2B 9c62b8d93b430cec9d0e739802d5938933109369c003eda74fe1242d5bb61c50c70dd1cc52aa56d6b79c08f55328b991c8fafce60fa94d9377e84ddf14ab8d4a SHA512 2568001959399d76ce8a45e08dd54c0b297fc02a856b4d8a4003aa4dc12f5ded3e821022214df4997d4bd9de7515e0d2ebe2e465dca574b773155b8f9f5080f8
-AUX caffe2-2.3.0-CMakeFix.patch 519 BLAKE2B 0249b7c31cea647a0c82d94cd99fac3415cbd357aacde4d0cc0e5b936c27dc96afcbecdb9ae55ee464080603c9c71f6c995166ae7045e19e9d874dcf688b4a0a SHA512 a8c5200abf4f059a2c43a2fa0b2a639dde60cbf48f6bb461c133e02b7986e8bc9bb98c4a1c51478308ac13b886691c89debf0b762fc821cd52500f5648c3bd39
-AUX caffe2-2.3.0-cudnn_include_fix.patch 587 BLAKE2B 8ffeb9080ee77b953e7a77c9bea9af8c078adf147c314e07cf40d6f9ce1c988ac201e282ffd67a29703ced5885329d44be013c54cf3ba870c009aed40e65fefc SHA512 b37427e35d8147e603331eb344a3542ed31d0b133df3c7ce10ebecec93b1d09d040f77c33f23f70e9835db10fa209c0318b89b70b3b91263655d160ed737d6f3
-AUX caffe2-2.3.0-exclude-aotriton.patch 1380 BLAKE2B 603b6be7d093726a4ebb2f23b3413f6bdc360232614fddb2a1bf73d3f311d5476127340b084796213f1091eed7b8733dbc129df213f7e0eb9d1bb04e0541995a SHA512 173297bba5287ba7ffd0e6f61364f747fcf98a66990b69f1d234e41dc23ae4f645d4c00759a92bf624a691a0d16cbd1e52c45aeace1c19c06ff7f8e676d67df7
-AUX caffe2-2.3.0-fix-gcc-clang-abi-compat.patch 822 BLAKE2B ca2ad94c4293e120fa93cc535f295dee07da9cd3d98c10af57688a04daede8bb2e17dc7f91e88c937215866e1c65ea1f0a6c20fac02c00626a6ad3d2255089b7 SHA512 149ae161f0224d3500ae928a4077ae625bbc76853f1049c441ea12bf44ae95a18ba57aeba9b879df0a944fc88151290b49e7323e954d32eaf0ec0e520b77ad47
-AUX caffe2-2.3.0-fix-libcpp.patch 1281 BLAKE2B 67943ec6e79327c854ebcd3538f68dd2866530f8799399605a288b21477a3ea9673ad85469d04510347736d656135f7cec90a3254ea0e7572dde3e5be948b6a2 SHA512 b92b80b341dadd43a6a8d75a14a2e5325a2bbbacbb55c960f1c55b50a87e0a5f9298e9cf4faeb3b2684985d6fb439a5f8b908c0227cbf2baed9c9f5b29aa3d48
-AUX caffe2-2.3.0-fix-rocm-gcc14-clamp.patch 1009 BLAKE2B ad27422dfc7be2720b972e1bbc417874a1060d2cbd5edb0acf166a7437963702b830766950db33381d6f32c6f95a001d8424ece73f70603fae8bf5f50b2ba255 SHA512 67f26127632cfca91389fdc60cea4d31a9b259e5547ccd6778bfc98a19bdd7b632ec17abbe888842ff613430c49aac37e18aca441335a947f973fd9d978ac3fe
-AUX caffe2-2.3.0-optional-hipblaslt.patch 10001 BLAKE2B a73913a9d82acfb780ad95fc1aabef1dfbd20243a8caef7136dbdad72a120c8778348d82e25d9171453e159580caf1ec5fdaa9bdbef4a09981721579e50f6b21 SHA512 222d33a3253d35c64dc151d12a42a9a0d4edca7fe60e1bb9b0c43df07292a9a061b2259a5696b4d568db265b0df4f065b3ee54672481c788ce1e2b0ae01b8488
-AUX caffe2-2.3.0-rocm-fix-std-cpp17.patch 3378 BLAKE2B 9e88fa1bf68c397c8122ea5b3504a22b3f6ef92c77dad8bd84ee03b4f75792b0e1281d8b1aa981ad1bf65060179fa08ef14e776e82abdec9147dfbb3bf37a7ae SHA512 7797a140abf736f2a4628cd727cf0c58ed39c9764b9ce3b67d17fc0c9b9965e647266c815e5322f96f807680120e25ccdbbc66b66c7c6cf84edb811330ad452c
-AUX caffe2-2.4.0-exclude-aotriton.patch 2832 BLAKE2B 319e9516b2b5e9d4fba622d7b618085528103f15c1db87185a2c1cae61ec4636cb69e0e1792dca549c42dd55527c46079be34b28c8b74ec1407ef7b5010e51cd SHA512 26612e973085e225f391c125dc265a45e8b4fe556a1b041d9b687fdc7354a07e89dc29c5e243881acb22027e1ba93b4520e7d809d74f83dea2c1d07fd15eb804
-AUX caffe2-2.4.0-fix-openmp-link.patch 633 BLAKE2B 067d970b062e9e5b67925b0f592368869c02a76395e8b36453835c45ab34911823936ce5810a2e15d050915a191cca5f415314189053be8ccae0a5749bf70f94 SHA512 1354a30f45c6cf0238c98dc7aaea734af680cdfa7cfefb2f09f7053ff8acded2c936a6ef24ac6e369715ebc197d5a3cd8e3b8fc30dc3ea092f403cbd019cb00e
-AUX caffe2-2.4.0-gentoo.patch 8283 BLAKE2B c74bb8b2e5c6ff9f9e5fbf23acc6554c0163017a25ec3aa2657007099d6cec64869cbaf0e5a43ad1e0fc9b09a5940e289133ac8824244fda9d86211aa4a55df2 SHA512 792f9cbb6ed13ef3b0e7b3f869489ec2c9d21fb6364feb27e57c23722df6a54bf307e631d0c955221cc5ea6081f7132c1d44f541d34c8791d1792da9820d35c9
-AUX caffe2-2.4.0-install-dirs.patch 2908 BLAKE2B 64bc9adcf377e13c2289ea034bceaa8b370b32ad0024c16ccde0c6adb7ba2b7c929d6b04d26c4c77c502127d9ab8e74305ab661b500b4c862c7673ec3422467d SHA512 c02f2e62b68a59a8948ee55ab170c0d5a89ca2cc7afccc77ece5e0539c050d45d5b0fb14ea4954f85a138c004bfcad580b76b357a1b2595916e13d79387b15e0
-AUX caffe2-2.4.0-rocm-fix-std-cpp17.patch 2431 BLAKE2B 649ff824f454a7b6fe2e95e487c09b51dbc882f23e2910598741a4f3b4c7a5d55257a0ba188bc386a81bf676f7872e60e71657ae82197772ed44cc236ec2d2f4 SHA512 b2e96f0d039a7e29063829b7efb49fbc7ebc04428ee0f8d4197f6048cf7166f04e3c980791cecabe685f280f9ae5b94ce388ee5fc00d6aec19ae1d1a32a43331
+DIST caffe2-patches-20240809.tar.gz 15242 BLAKE2B 77503c61487e7d85cca5afcab9a6e638f9833a70861845638cf1b62bc492d7b6650e6db81d53ebb2f39c6313509250d339f725f04d03ec6dd23dd0cf70843d8c SHA512 74b3b0b6671b655ecac93f7436c4ed7cb0157a83aafbf6afcc0811e11cef341cd8f638db1a111bcbb01e1a6dd4daf3a36b96d7a8ce90f04c2fa091bd6e3a142b
DIST pytorch-2.2.2.tar.gz 116367503 BLAKE2B 0be22f2ec4b9aac6f5e976664cae01facf07929a32565cd57d7cc5b2d9888e9ae71ca301853752fe8f31d174d04c9974eb9ed2f3d452360a50ccf024f200726a SHA512 7990e0f9484038c3458c0bda2c863bf2b19e56edab81fc5938c6e0f08b17558287f853bb67350e8cca8f42bec0f1d4ba0e94e50a145db8da44bdd4bd703d91d0
DIST pytorch-2.3.0.tar.gz 117029829 BLAKE2B 8f9c0d71ee0a9219b495eddccdcc65107f7ad537c43c68100b229f3d27b0e6c01ccb1659c7fffc356a48d80f2adc0a10361305dc8f1df20446de837d380f89f6 SHA512 67f7e9a096c3ffb952206ebf9105bedebb68c24ad82456083adf1d1d210437fcaa9dd52b68484cfc97d408c9eebc9541c76868c34a7c9982494dc3f424cfb07c
DIST pytorch-2.3.1.tar.gz 117035696 BLAKE2B d419d7fa1342f1fb317ffce09ec9dc1447414627cc83d36578fe60f68c283c620b2b4d49f414cd206d537b90b16432a06cd1941662720db05d5e2b6c493325f5 SHA512 e1bcae44f9939fc7ccb1360a9b1970d92426f25e5de73e36964df3dd15ad5d8d9f5bd2f9a7dda6b8f64e2bba3674005bd869f542489cc442ad0125a02676f587
DIST pytorch-2.4.0.tar.gz 115031093 BLAKE2B d206477963977011627df284efa01482fbf57e9fcb5f58f51d679c742b8e5dde6aa6affd8745ab817fcd09477d129a81e74e07be576b5d3585eaca1c735b8e01 SHA512 804d25944035f33de6591fd942fbda44d3de037717a4397d38a97474b01775d30eaf93d16dd708a832c0119050d24d73b90990fd3e3773be79d26ada25244d22
-EBUILD caffe2-2.2.2-r1.ebuild 7452 BLAKE2B 31ef525960d7c3866580985f9ba9736e5419f17c2a63251b8c4fe961a6789a33dee746b14b12fa331c8c20a6c821565b92dd2cb7c6cdd61678bba7ddf5fc7400 SHA512 f232d901c08e3fefd0d6260d07915786747df8f83269c3980c16112c968114c3cdb7daccd8132a6a3850a266a04b38f6df6d2b1f1bc35f473fb9a58d5fff3452
-EBUILD caffe2-2.3.0-r3.ebuild 8441 BLAKE2B 7b7c44a04e072fbf4bcc4fabf470b5256b85b36bb5035a56ceef65843dcaa1eec1eb3cdf9246cf3c672b6d3f4885816a037ef875f77bbf38f9d5a0fafda7981c SHA512 34306a2af160fef5ad902d116da044e96d4eaafd46b8ea511f7b04ef590ccdace7deabeb8905a5b69d9e1a21fc58ab038356d4542dd378c21078a75d08c188ca
-EBUILD caffe2-2.3.1.ebuild 8462 BLAKE2B 1c079bd3119ad0acd224a99cc029ba1e8d557065bbd2cfa30670f6f3720072520a072d0abed1e21f41781581f0b6711e809581fe01e363b613e2b82e38635d94 SHA512 591bee9c3a6beaa9f1687881381692bd7de32a6203d924d36ab89c2fc7de754fd7a5a4056e0e19f293aa836975d1d9321ee5997939270ff10df41fd31e5c9157
-EBUILD caffe2-2.4.0.ebuild 8158 BLAKE2B ae881185ee8e4e317316c4f0bfc9282685a1632aede2907213f4b7e063b55f4bf90886b741183c30aa30b5936e2d14dded4bb57de3ade8f87dd19e3a47403a6e SHA512 f660100f1d8ba8f6ecb9acff1c419457c5b879a005a08773ebd73fc3d14a516e0005a605f9b318254e268bab4456345fbbd6cc7b6993a73b291e509b3c9fa2d7
+EBUILD caffe2-2.2.2-r1.ebuild 7494 BLAKE2B f09d96b6a6ab71366a3b30ac7f8e6d89a20bf6034fb0d055a59f7f1d31f2dd08742307790982ed1f2768305ccea094e5d1655a2ac7d6e91e24b5620c1ed3f0cd SHA512 a10afef393275946a0fa51b2303a1ceaf567e7df30ff96bdf3343d929c575038945f404cd75118d7f40f49ed8d03ef66a2d8a3aef10ede1b4b411f0efee2e56e
+EBUILD caffe2-2.3.0-r3.ebuild 8468 BLAKE2B b3e5d3bdd08fb7c2beabb0de3244b9de125b283ebd2a1d8dbccdc863b912555726eb40649d67af8593eb64dc9e23f9d021174a0868751fb3a5daf26d98884740 SHA512 6123ad17b0b1de6bd8d8ae3a52a7f7205385e3d54faa5f2fdf4006a9255ac69413caf08f4ba2c79679edadbe5c94cb7c187d5b5f2dc3ed6682d7ae601881e86b
+EBUILD caffe2-2.3.1.ebuild 8489 BLAKE2B 64b0ff8a65f46be8d3a80b4b4c47fb513eaaf78208e2d831ced3fcfa27f39cfa89d8648a64dea73a8b516476867f10cd04a4cb1873149659abe6114f88657ef4 SHA512 4bcb6fe1c3ab47879e57a453649866f8e5bf09da7cce6778de909447d6deb0ae0671512a1d182f5e275d246bb05c4553ff95bed490d4fd3f723653feae95b47e
+EBUILD caffe2-2.4.0.ebuild 8194 BLAKE2B 4d5c12021cda76df3c8dab72ad3924b8e1d32f470e9b58df878a7c73f7fb64cb18993445babbbfeda83aabbedcfbf8a34bc08dc85a5766321847adba729584d2 SHA512 7d1fbe08b8077051ed359538a4a726e745fbaf4f4b4323bef566aefa246253b569710d4b87a09d18da50d333a0eff57dc28eea4583b8b17f7ac4a7c622b9587e
MISC metadata.xml 1225 BLAKE2B ab7fb0bf8b2d37ddaa1a9ecc815eb094e85465d20d3a30af081b42e0b60ade9858d0053b101ba0e7750a90cb48b5b79db9bdc2729bf66d0420732489da62fe54 SHA512 dfb58597fb4bcdd7df0fcc3f2514518e118e8fc9b1cd24868aab60c32a62ff419b8b72a7c294925eff4c8871cc8df606af7fa60bfa99901091d8195101ee1153
diff --git a/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild b/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild
index 6649975ddf2d..773808bc4f76 100644
--- a/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild
+++ b/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
DESCRIPTION="A deep learning framework"
HOMEPAGE="https://pytorch.org/"
SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
- -> ${MYP}.tar.gz"
+ -> ${MYP}.tar.gz
+ https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
S="${WORKDIR}"/${MYP}
@@ -97,16 +98,16 @@ DEPEND="
"
PATCHES=(
- "${FILESDIR}"/${PN}-2.2.1-gentoo.patch
- "${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
- "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
- "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
- "${FILESDIR}"/${PN}-2.0.0-gcc13.patch
- "${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
- "${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
- "${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
- "${FILESDIR}"/${PN}-2.1.2-rocm-fix-std-cpp17.patch
- "${FILESDIR}"/${P}-musl.patch
+ ../patches/${PN}-2.2.1-gentoo.patch
+ ../patches/${PN}-1.13.0-install-dirs.patch
+ ../patches/${PN}-1.12.0-glog-0.6.0.patch
+ ../patches/${PN}-1.13.1-tensorpipe.patch
+ ../patches/${PN}-2.0.0-gcc13.patch
+ ../patches/${PN}-2.0.0-cudnn_include_fix.patch
+ ../patches/${PN}-2.1.2-fix-rpath.patch
+ ../patches/${PN}-2.1.2-fix-openmp-link.patch
+ ../patches/${PN}-2.1.2-rocm-fix-std-cpp17.patch
+ ../patches/${P}-musl.patch
)
src_prepare() {
diff --git a/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild b/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
index 666800d8f4b6..7fe4818311cb 100644
--- a/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
+++ b/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
DESCRIPTION="A deep learning framework"
HOMEPAGE="https://pytorch.org/"
SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
- -> ${MYP}.tar.gz"
+ -> ${MYP}.tar.gz
+ https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
S="${WORKDIR}"/${MYP}
@@ -106,21 +107,21 @@ DEPEND="
"
PATCHES=(
- "${FILESDIR}"/${PN}-2.2.1-gentoo.patch
- "${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
- "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
- "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
- "${FILESDIR}"/${P}-cudnn_include_fix.patch
- "${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
- "${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
- "${FILESDIR}"/${P}-rocm-fix-std-cpp17.patch
- "${FILESDIR}"/${PN}-2.2.2-musl.patch
- "${FILESDIR}"/${P}-CMakeFix.patch
- "${FILESDIR}"/${PN}-2.3.0-exclude-aotriton.patch
- "${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
- "${FILESDIR}"/${PN}-2.3.0-optional-hipblaslt.patch
- "${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
- "${FILESDIR}"/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
+ ../patches/${PN}-2.2.1-gentoo.patch
+ ../patches/${PN}-1.13.0-install-dirs.patch
+ ../patches/${PN}-1.12.0-glog-0.6.0.patch
+ ../patches/${PN}-1.13.1-tensorpipe.patch
+ ../patches/${P}-cudnn_include_fix.patch
+ ../patches/${PN}-2.1.2-fix-rpath.patch
+ ../patches/${PN}-2.1.2-fix-openmp-link.patch
+ ../patches/${P}-rocm-fix-std-cpp17.patch
+ ../patches/${PN}-2.2.2-musl.patch
+ ../patches/${P}-CMakeFix.patch
+ ../patches/${PN}-2.3.0-exclude-aotriton.patch
+ ../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+ ../patches/${PN}-2.3.0-optional-hipblaslt.patch
+ ../patches/${PN}-2.3.0-fix-libcpp.patch
+ ../patches/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
)
src_prepare() {
diff --git a/sci-libs/caffe2/caffe2-2.3.1.ebuild b/sci-libs/caffe2/caffe2-2.3.1.ebuild
index ee1da28aa12f..ff2a9caebd59 100644
--- a/sci-libs/caffe2/caffe2-2.3.1.ebuild
+++ b/sci-libs/caffe2/caffe2-2.3.1.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
DESCRIPTION="A deep learning framework"
HOMEPAGE="https://pytorch.org/"
SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
- -> ${MYP}.tar.gz"
+ -> ${MYP}.tar.gz
+ https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
S="${WORKDIR}"/${MYP}
@@ -106,21 +107,21 @@ DEPEND="
"
PATCHES=(
- "${FILESDIR}"/${PN}-2.2.1-gentoo.patch
- "${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
- "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
- "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
- "${FILESDIR}"/${PN}-2.3.0-cudnn_include_fix.patch
- "${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
- "${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
- "${FILESDIR}"/${PN}-2.3.0-rocm-fix-std-cpp17.patch
- "${FILESDIR}"/${PN}-2.2.2-musl.patch
- "${FILESDIR}"/${PN}-2.3.0-CMakeFix.patch
- "${FILESDIR}"/${PN}-2.3.0-exclude-aotriton.patch
- "${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
- "${FILESDIR}"/${PN}-2.3.0-optional-hipblaslt.patch
- "${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
- "${FILESDIR}"/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
+ ../patches/${PN}-2.2.1-gentoo.patch
+ ../patches/${PN}-1.13.0-install-dirs.patch
+ ../patches/${PN}-1.12.0-glog-0.6.0.patch
+ ../patches/${PN}-1.13.1-tensorpipe.patch
+ ../patches/${PN}-2.3.0-cudnn_include_fix.patch
+ ../patches/${PN}-2.1.2-fix-rpath.patch
+ ../patches/${PN}-2.1.2-fix-openmp-link.patch
+ ../patches/${PN}-2.3.0-rocm-fix-std-cpp17.patch
+ ../patches/${PN}-2.2.2-musl.patch
+ ../patches/${PN}-2.3.0-CMakeFix.patch
+ ../patches/${PN}-2.3.0-exclude-aotriton.patch
+ ../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+ ../patches/${PN}-2.3.0-optional-hipblaslt.patch
+ ../patches/${PN}-2.3.0-fix-libcpp.patch
+ ../patches/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
)
src_prepare() {
diff --git a/sci-libs/caffe2/caffe2-2.4.0.ebuild b/sci-libs/caffe2/caffe2-2.4.0.ebuild
index b4384eb7df11..524dafcaacef 100644
--- a/sci-libs/caffe2/caffe2-2.4.0.ebuild
+++ b/sci-libs/caffe2/caffe2-2.4.0.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
DESCRIPTION="A deep learning framework"
HOMEPAGE="https://pytorch.org/"
SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
- -> ${MYP}.tar.gz"
+ -> ${MYP}.tar.gz
+ https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
S="${WORKDIR}"/${MYP}
@@ -108,18 +109,18 @@ DEPEND="
"
PATCHES=(
- "${FILESDIR}"/${PN}-2.4.0-gentoo.patch
- "${FILESDIR}"/${PN}-2.4.0-install-dirs.patch
- "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
- "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
- "${FILESDIR}"/${PN}-2.3.0-cudnn_include_fix.patch
- "${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
- "${FILESDIR}"/${PN}-2.4.0-fix-openmp-link.patch
- "${FILESDIR}"/${PN}-2.4.0-rocm-fix-std-cpp17.patch
- "${FILESDIR}"/${PN}-2.2.2-musl.patch
- "${FILESDIR}"/${PN}-2.4.0-exclude-aotriton.patch
- "${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
- "${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
+ ../patches/${PN}-2.4.0-gentoo.patch
+ ../patches/${PN}-2.4.0-install-dirs.patch
+ ../patches/${PN}-1.12.0-glog-0.6.0.patch
+ ../patches/${PN}-1.13.1-tensorpipe.patch
+ ../patches/${PN}-2.3.0-cudnn_include_fix.patch
+ ../patches/${PN}-2.1.2-fix-rpath.patch
+ ../patches/${PN}-2.4.0-fix-openmp-link.patch
+ ../patches/${PN}-2.4.0-rocm-fix-std-cpp17.patch
+ ../patches/${PN}-2.2.2-musl.patch
+ ../patches/${PN}-2.4.0-exclude-aotriton.patch
+ ../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+ ../patches/${PN}-2.3.0-fix-libcpp.patch
)
src_prepare() {
diff --git a/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch b/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch
deleted file mode 100644
index 6c06d2cca654..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-https://github.com/pytorch/pytorch/issues/58054
-
---- a/c10/util/Logging.cpp
-+++ b/c10/util/Logging.cpp
-@@ -192,23 +192,13 @@
- google::GLOG_WARNING,
- "The minimum log level that caffe2 will output.");
-
--// Google glog's api does not have an external function that allows one to check
--// if glog is initialized or not. It does have an internal function - so we are
--// declaring it here. This is a hack but has been used by a bunch of others too
--// (e.g. Torch).
--namespace google {
--namespace glog_internal_namespace_ {
--bool IsGoogleLoggingInitialized();
--} // namespace glog_internal_namespace_
--} // namespace google
--
- namespace c10 {
- namespace {
-
- void initGoogleLogging(char const* name) {
- #if !defined(_MSC_VER)
- // This trick can only be used on UNIX platforms
-- if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
-+ if (!::google::IsGoogleLoggingInitialized())
- #endif
- {
- ::google::InitGoogleLogging(name);
diff --git a/sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch b/sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch
deleted file mode 100644
index 299c9f88a173..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch
+++ /dev/null
@@ -1,121 +0,0 @@
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -112,7 +112,7 @@
- # Note: for now, we will put all export path into one single Caffe2Targets group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still self-contained.
--install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
- DESTINATION include
- FILES_MATCHING PATTERN "*.h")
---- a/c10/cuda/CMakeLists.txt
-+++ b/c10/cuda/CMakeLists.txt
-@@ -64,7 +64,7 @@ add_subdirectory(test)
- # Note: for now, we will put all export path into one single Caffe2Targets group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still self-contained.
--install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- foreach(file ${C10_CUDA_HEADERS})
- get_filename_component( dir ${file} DIRECTORY )
- install( FILES ${file} DESTINATION include/c10/cuda/${dir} )
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -55,7 +55,7 @@ target_include_directories(
- add_subdirectory(test)
-
- # ---[ Installation
--install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
- DESTINATION include
- FILES_MATCHING PATTERN "*.h")
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -20,7 +20,7 @@
- if(USE_MKLDNN)
- target_link_libraries(caffe2_detectron_ops_gpu PRIVATE caffe2::mkldnn)
- endif()
-- install(TARGETS caffe2_detectron_ops_gpu DESTINATION lib)
-+ install(TARGETS caffe2_detectron_ops_gpu DESTINATION ${CMAKE_INSTALL_LIBDIR})
- if(MSVC)
- install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops_gpu> DESTINATION lib OPTIONAL)
- endif()
-@@ -37,7 +37,7 @@
- target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
- endif()
- target_link_libraries(caffe2_detectron_ops_hip PRIVATE torch)
-- install(TARGETS caffe2_detectron_ops_hip DESTINATION lib)
-+ install(TARGETS caffe2_detectron_ops_hip DESTINATION ${CMAKE_INSTALL_LIBDIR})
- elseif(NOT IOS_PLATFORM)
- add_library(caffe2_detectron_ops SHARED ${Detectron_CPU_SRCS})
- if(HAVE_SOVERSION)
-@@ -49,7 +49,7 @@
- if(USE_MKLDNN)
- target_link_libraries(caffe2_detectron_ops PRIVATE caffe2::mkldnn)
- endif()
-- install(TARGETS caffe2_detectron_ops DESTINATION lib)
-+ install(TARGETS caffe2_detectron_ops DESTINATION ${CMAKE_INSTALL_LIBDIR})
- if(MSVC)
- install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops> DESTINATION lib OPTIONAL)
- endif()
---- a/modules/module_test/CMakeLists.txt
-+++ b/modules/module_test/CMakeLists.txt
-@@ -16,7 +16,7 @@ if(BUILD_TEST AND NOT BUILD_LITE_INTERPRETER)
- VERSION ${TORCH_VERSION} SOVERSION ${TORCH_SOVERSION})
- endif()
- target_link_libraries(caffe2_module_test_dynamic torch_library)
-- install(TARGETS caffe2_module_test_dynamic DESTINATION lib)
-+ install(TARGETS caffe2_module_test_dynamic DESTINATION ${CMAKE_INSTALL_LIBDIR})
- if(MSVC AND BUILD_SHARED_LIBS)
- install(FILES $<TARGET_PDB_FILE:caffe2_module_test_dynamic> DESTINATION lib OPTIONAL)
- endif()
---- a/modules/observers/CMakeLists.txt
-+++ b/modules/observers/CMakeLists.txt
-@@ -21,7 +21,7 @@ endif()
- target_link_libraries(caffe2_observers PUBLIC torch_library)
- target_include_directories(caffe2_observers PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/..)
- target_compile_options(caffe2_observers PRIVATE "-DCAFFE2_BUILD_OBSERVER_LIB")
--install(TARGETS caffe2_observers DESTINATION lib)
-+install(TARGETS caffe2_observers DESTINATION ${CMAKE_INSTALL_LIBDIR})
- caffe2_interface_library(caffe2_observers caffe2_observers_library)
- if(MSVC AND BUILD_SHARED_LIBS)
- install(FILES $<TARGET_PDB_FILE:caffe2_observers> DESTINATION lib OPTIONAL)
---- a/modules/rocksdb/CMakeLists.txt
-+++ b/modules/rocksdb/CMakeLists.txt
-@@ -63,7 +63,7 @@ add_library(caffe2_rocksdb ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb.cc)
- target_link_libraries(caffe2_rocksdb PUBLIC torch_library)
- target_link_libraries(caffe2_rocksdb PRIVATE ${RocksDB_LIBRARIES})
- target_include_directories(caffe2_rocksdb PRIVATE ${RocksDB_INCLUDE_DIR})
--install(TARGETS caffe2_rocksdb DESTINATION lib)
-+install(TARGETS caffe2_rocksdb DESTINATION ${CMAKE_INSTALL_LIBDIR})
-
- # ---[ Last, Append the library to Caffe2_MODULES, if we are building with
- # the main repo.
---- a/test/cpp/c10d/CMakeLists.txt
-+++ b/test/cpp/c10d/CMakeLists.txt
-@@ -51,7 +51,7 @@ if(USE_CUDA)
- if(INSTALL_TEST)
- install(TARGETS ProcessGroupNCCLTest DESTINATION bin)
- install(TARGETS ProcessGroupNCCLErrorsTest DESTINATION bin)
-- install(TARGETS c10d_cuda_test DESTINATION lib)
-+ install(TARGETS c10d_cuda_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
- endif()
- endif()
- if(USE_UCC AND USE_C10D_UCC)
---- a/test/cpp/jit/CMakeLists.txt
-+++ b/test/cpp/jit/CMakeLists.txt
-@@ -32,9 +32,9 @@ endif()
- target_link_libraries(backend_with_compiler torch)
-
- if(INSTALL_TEST)
-- install(TARGETS torchbind_test DESTINATION lib)
-- install(TARGETS jitbackend_test DESTINATION lib)
-- install(TARGETS backend_with_compiler DESTINATION lib)
-+ install(TARGETS torchbind_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+ install(TARGETS jitbackend_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+ install(TARGETS backend_with_compiler DESTINATION ${CMAKE_INSTALL_LIBDIR})
- endif()
-
- # Build the cpp gtest binary containing the cpp-only tests.
diff --git a/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch b/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch
deleted file mode 100644
index ae0cac9fb947..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch
+++ /dev/null
@@ -1,10 +0,0 @@
---- a/cmake/Dependencies.cmake 2023-02-28 14:14:49.099057348 +0100
-+++ b/cmake/Dependencies.cmake 2023-02-28 14:15:05.326790806 +0100
-@@ -1404,7 +1404,6 @@
-
- # Tensorpipe uses cuda_add_library
- torch_update_find_cuda_flags()
-- add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)
-
- list(APPEND Caffe2_DEPENDENCY_LIBS tensorpipe)
- if(USE_CUDA)
diff --git a/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch b/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch
deleted file mode 100644
index ff64e4108087..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff -uar pytorch-2.0.0/cmake/Dependencies.cmake pytorch-2.0.0orig/cmake/Dependencies.cmake
---- a/cmake/Dependencies.cmake 2023-04-23 09:43:20.767566047 -0400
-+++ b/cmake/Dependencies.cmake 2023-03-09 17:42:00.000000000 -0500
-@@ -1235,7 +1235,7 @@
-
- # ---[ cuDNN
- if(USE_CUDNN)
-- set(CUDNN_FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/../third_party/cudnn_frontend/include)
-+ set(CUDNN_FRONTEND_INCLUDE_DIR /opt/cuda/include)
- target_include_directories(torch::cudnn INTERFACE ${CUDNN_FRONTEND_INCLUDE_DIR})
- endif()
-
diff --git a/sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch b/sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch
deleted file mode 100644
index acbcebad0a5d..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch
+++ /dev/null
@@ -1,41 +0,0 @@
---- a/c10/util/Registry.h 2023-03-09 17:42:00.000000000 -0500
-+++ b/c10/util/Registry.h 2023-04-09 20:38:33.108135511 -0400
-@@ -16,6 +16,7 @@
- #include <memory>
- #include <mutex>
- #include <string>
-+#include <stdexcept>
- #include <unordered_map>
- #include <vector>
-
---- a/torch/csrc/jit/passes/quantization/quantization_type.h 2023-03-09 17:42:00.000000000 -0500
-+++ b/torch/csrc/jit/passes/quantization/quantization_type.h 2023-04-09 20:43:43.124806308 -0400
-@@ -1,5 +1,6 @@
- #pragma once
- #include <ostream>
-+#include <cstdint>
-
- namespace torch {
- namespace jit {
-
---- a/torch/csrc/jit/runtime/logging.cpp 2023-03-09 17:42:00.000000000 -0500
-+++ b/torch/csrc/jit/runtime/logging.cpp 2023-04-09 20:47:49.758142941 -0400
-@@ -1,6 +1,7 @@
- #include <torch/csrc/jit/runtime/logging.h>
-
- #include <atomic>
-+#include <stdexcept>
- #include <mutex>
- #include <unordered_map>
-
-
---- a/torch/csrc/lazy/core/multi_wait.cpp 2023-03-09 17:42:00.000000000 -0500
-+++ b/torch/csrc/lazy/core/multi_wait.cpp 2023-04-09 20:50:36.608145172 -0400
-@@ -1,6 +1,7 @@
- #include <torch/csrc/lazy/core/multi_wait.h>
-
- #include <chrono>
-+#include <stdexcept>
- #include <exception>
-
- namespace torch {
diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch b/sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch
deleted file mode 100644
index 3f2d0ae3c30a..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch
+++ /dev/null
@@ -1,15 +0,0 @@
-Fix "undefined symbol: omp_get_max_active_levels" in mkl + <nothing else> builds
-https://github.com/pytorch/pytorch/issues/116576
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1575,6 +1575,10 @@ if(BUILD_SHARED_LIBS)
- target_link_libraries(torch_global_deps TBB::tbb)
- endif()
-
-+ if(USE_OPENMP)
-+ target_link_libraries(torch_global_deps OpenMP::OpenMP_CXX)
-+ endif()
-+
- install(TARGETS torch_global_deps DESTINATION "${TORCH_INSTALL_LIB_DIR}")
- endif()
-
diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch b/sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch
deleted file mode 100644
index 731227fa25ee..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-Unset rpath to support blas-lapack-switch
-Bug: https://bugs.gentoo.org/921129
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -10,7 +10,6 @@ endif(APPLE)
- set(CMAKE_SKIP_BUILD_RPATH FALSE)
- # Don't use the install-rpath during the build phase
- set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
--set(CMAKE_INSTALL_RPATH "${_rpath_portable_origin}")
- # Automatically add all linked folders that are NOT in the build directory to
- # the rpath (per library?)
- set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch b/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch
deleted file mode 100644
index cb0fa0c48e80..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-Fix for error: invalid argument '-std=c++17' not allowed with 'C'
-https://github.com/pytorch/pytorch/issues/103222
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -30,6 +30,7 @@ hip_add_library(c10_hip ${C10_HIP_SRCS} ${C10_HIP_HEADERS})
-
- # Propagate HIP_CXX_FLAGS that were set from Dependencies.cmake
- target_compile_options(c10_hip PRIVATE ${HIP_CXX_FLAGS})
-+set_target_properties(c10_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-
- # caffe2_hip adds a bunch of dependencies like rocsparse, but c10/hip is supposed to be
- # minimal. I'm not sure if we need hip_hcc or not; for now leave it out
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1598,6 +1598,7 @@ if(USE_ROCM)
-
- # Since PyTorch files contain HIP headers, these flags are required for the necessary definitions to be added.
- target_compile_options(torch_hip PUBLIC ${HIP_CXX_FLAGS}) # experiment
-+ set_target_properties(torch_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- target_link_libraries(torch_hip PUBLIC c10_hip)
-
- if(NOT INTERN_BUILD_MOBILE)
-@@ -1774,6 +1775,7 @@ if(BUILD_TEST)
- target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
- target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE} ${Caffe2_HIP_INCLUDE})
- target_compile_options(${test_name} PRIVATE ${HIP_CXX_FLAGS})
-+ set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
- if(INSTALL_TEST)
- install(TARGETS ${test_name} DESTINATION test)
-@@ -1955,6 +1957,7 @@ if(BUILD_PYTHON)
- endif()
- if(NOT MSVC)
- target_compile_options(caffe2_pybind11_state_hip PRIVATE ${HIP_CXX_FLAGS} -fvisibility=hidden)
-+ set_target_properties(caffe2_pybind11_state_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- endif()
- set_target_properties(caffe2_pybind11_state_hip PROPERTIES PREFIX "")
- set_target_properties(caffe2_pybind11_state_hip PROPERTIES SUFFIX ${PY_EXT_SUFFIX})
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1287,7 +1287,6 @@ if(USE_ROCM)
- list(APPEND HIP_CXX_FLAGS -Wno-duplicate-decl-specifier)
- list(APPEND HIP_CXX_FLAGS -DCAFFE2_USE_MIOPEN)
- list(APPEND HIP_CXX_FLAGS -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP)
-- list(APPEND HIP_CXX_FLAGS -std=c++17)
- add_definitions(-DROCM_VERSION=${ROCM_VERSION_DEV_INT})
- add_definitions(-DTORCH_HIP_VERSION=${TORCH_HIP_VERSION})
- message("TORCH_HIP_VERSION=${TORCH_HIP_VERSION} is added as a compiler defines")
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -335,6 +335,7 @@ function(caffe2_hip_binary_target target_name_or_src)
- caffe2_binary_target(${target_name_or_src})
-
- target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
-+ set_target_properties(${__target} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
- endfunction()
-
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -31,6 +31,7 @@ if(BUILD_CAFFE2_OPS)
- ${Detectron_CPU_SRCS}
- ${Detectron_HIP_SRCS})
- target_compile_options(caffe2_detectron_ops_hip PRIVATE ${HIP_CXX_FLAGS})
-+ set_target_properties(caffe2_detectron_ops_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- if(USE_MKLDNN)
- target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
- endif()
diff --git a/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
deleted file mode 100644
index 5472a2c41836..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
+++ /dev/null
@@ -1,195 +0,0 @@
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -474,7 +474,7 @@
- endif()
-
- # ---[ QNNPACK
--if(USE_QNNPACK)
-+if(FALSE)
- set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
-
- if(NOT DEFINED QNNPACK_SOURCE_DIR)
-@@ -530,7 +530,7 @@
- endif()
-
- # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and neon2sse headers
--if(USE_QNNPACK)
-+if(FALSE)
- set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
- include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
- include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")
-@@ -780,7 +780,7 @@
- endif()
-
- # ---[ FBGEMM
--if(USE_FBGEMM)
-+if(FALSE)
- set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
- if(NOT DEFINED FBGEMM_SOURCE_DIR)
- set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
-@@ -828,6 +828,7 @@
- endif()
-
- if(USE_FBGEMM)
-+ list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
- caffe2_update_option(USE_FBGEMM ON)
- else()
- caffe2_update_option(USE_FBGEMM OFF)
-@@ -1529,7 +1530,6 @@
- set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
- endif()
- endif()
-- add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
-
- add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
- if(NOT USE_SYSTEM_ONNX)
-@@ -1796,7 +1796,6 @@
- #
- set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
- set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
--add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
-
- # Disable compiler feature checks for `fmt`.
- #
-@@ -1805,9 +1804,7 @@
- # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
- # `fmt` is compatible with a superset of the compilers that PyTorch is, it
- # shouldn't be too bad to just disable the checks.
--set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
-
--list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
- set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
-
- # ---[ Kineto
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -89,7 +89,7 @@
- if(C10_USE_GLOG)
- target_link_libraries(c10 PUBLIC glog::glog)
- endif()
--target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
-+target_link_libraries(c10 PRIVATE fmt)
-
- if(C10_USE_NUMA)
- target_include_directories(c10 PRIVATE ${Numa_INCLUDE_DIR})
---- a/torch/CMakeLists.txt
-+++ b/torch/CMakeLists.txt
-@@ -59,15 +59,9 @@
- ${CMAKE_BINARY_DIR}
- ${CMAKE_BINARY_DIR}/aten/src
- ${CMAKE_BINARY_DIR}/caffe2/aten/src
-- ${CMAKE_BINARY_DIR}/third_party
-- ${CMAKE_BINARY_DIR}/third_party/onnx
-
-- ${TORCH_ROOT}/third_party/valgrind-headers
-
-- ${TORCH_ROOT}/third_party/gloo
-- ${TORCH_ROOT}/third_party/onnx
-- ${TORCH_ROOT}/third_party/flatbuffers/include
-- ${TORCH_ROOT}/third_party/kineto/libkineto/include
-+ /usr/include/kineto
-
- ${TORCH_SRC_DIR}/csrc
- ${TORCH_SRC_DIR}/csrc/api/include
-@@ -80,7 +74,6 @@
- python::python
- pybind::pybind11
- shm
-- fmt::fmt-header-only
- ATEN_CPU_FILES_GEN_LIB)
-
- if(USE_ASAN AND TARGET Sanitizer::address)
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -835,12 +835,11 @@
- # Re-include to override append_cxx_flag_if_supported from third_party/FBGEMM
- include(cmake/public/utils.cmake)
- if(NOT MSVC)
-- string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
-+ string(APPEND CMAKE_CXX_FLAGS " -O2")
- # Eigen fails to build with some versions, so convert this to a warning
- # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
- string(APPEND CMAKE_CXX_FLAGS " -Wall")
- string(APPEND CMAKE_CXX_FLAGS " -Wextra")
-- append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
-@@ -930,7 +930,6 @@
- string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
- append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
-- append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
- else()
- # skip unwanted includes from windows.h
- add_compile_definitions(WIN32_LEAN_AND_MEAN)
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -486,8 +486,6 @@
- endif()
-
- # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
-- target_compile_options(${libname} PRIVATE
-- $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
-
- endfunction()
-
---- a/cmake/Codegen.cmake
-+++ b/cmake/Codegen.cmake
-@@ -57,7 +57,7 @@
- if(MSVC)
- set(OPT_FLAG "/fp:strict ")
- else(MSVC)
-- set(OPT_FLAG "-O3 ")
-+ set(OPT_FLAG " ")
- if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
- set(OPT_FLAG " ")
- endif()
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -107,7 +107,7 @@
- # Note: the folders that are being commented out have not been properly
- # addressed yet.
-
--if(NOT MSVC AND USE_XNNPACK)
-+if(FALSE)
- if(NOT TARGET fxdiv)
- set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
- set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
-@@ -1055,7 +1055,6 @@
- endif()
-
- if(NOT MSVC AND USE_XNNPACK)
-- TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
- endif()
-
- # ==========================================================
-@@ -1175,8 +1174,7 @@
- target_include_directories(torch_cpu PRIVATE
- ${TORCH_ROOT}/third_party/miniz-2.1.0)
-
--target_include_directories(torch_cpu PRIVATE
-- ${TORCH_ROOT}/third_party/kineto/libkineto/include)
-+target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
-
- if(USE_KINETO)
- target_include_directories(torch_cpu PRIVATE
---- a/cmake/External/nnpack.cmake
-+++ b/cmake/External/nnpack.cmake
-@@ -56,7 +56,7 @@
- set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
- set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
-
-- if(NOT TARGET nnpack)
-+ if(FALSE)
- if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
- set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
- endif()
---- a/functorch/CMakeLists.txt 2023-11-30 20:30:45.805209036 +0100
-+++ b/functorch/CMakeLists.txt 2023-11-30 20:31:13.284766157 +0100
-@@ -35,4 +35,4 @@
- if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
- set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
- endif()
--install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
-+install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")
diff --git a/sci-libs/caffe2/files/caffe2-2.2.2-musl.patch b/sci-libs/caffe2/files/caffe2-2.2.2-musl.patch
deleted file mode 100644
index f63e9f1df332..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.2.2-musl.patch
+++ /dev/null
@@ -1,13 +0,0 @@
---- a/torch/csrc/profiler/unwind/unwind.cpp 2024-04-29 12:05:40.895667482 +0200
-+++ b/torch/csrc/profiler/unwind/unwind.cpp 2024-04-29 12:05:53.099524760 +0200
-@@ -112,8 +112,8 @@
- }
-
- struct Version {
-- uint64_t adds_ = LONG_LONG_MAX;
-- uint64_t subs_ = LONG_LONG_MAX;
-+ uint64_t adds_ = LLONG_MAX;
-+ uint64_t subs_ = LLONG_MAX;
- };
-
- struct UnwindCache {
diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch b/sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch
deleted file mode 100644
index eba37d933cac..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/CMakeLists.txt 2024-04-29 20:32:26.259716769 +0200
-+++ b/CMakeLists.txt 2024-04-29 20:32:35.886384618 +0200
-@@ -50,7 +50,7 @@
-
- # This define is needed to preserve behavior given anticpated changes to cccl/thrust
- # https://nvidia.github.io/libcudacxx/standard_api/numerics_library/complex.html
--string(APPEND CMAKE_CUDA_FLAGS "-DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS")
-+string(APPEND CMAKE_CUDA_FLAGS " -DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS")
-
- if(LINUX)
- include(cmake/CheckAbi.cmake)
diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch b/sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch
deleted file mode 100644
index 77905dbd1ac8..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/cmake/Dependencies.cmake 2024-04-29 18:37:34.005639858 +0200
-+++ b/cmake/Dependencies.cmake 2024-04-29 18:39:29.126587738 +0200
-@@ -1235,7 +1235,7 @@
- if(CUDNN_VERSION VERSION_LESS 8.5)
- message(FATAL_ERROR "PyTorch needs CuDNN-8.5 or above, but found ${CUDNN_VERSION}. Builds are still possible with `USE_CUDNN=0`")
- endif()
-- set(CUDNN_FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/../third_party/cudnn_frontend/include)
-+ set(CUDNN_FRONTEND_INCLUDE_DIR /opt/cuda/include)
- target_include_directories(torch::cudnn INTERFACE ${CUDNN_FRONTEND_INCLUDE_DIR})
- endif()
-
diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch b/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch
deleted file mode 100644
index 2c65987acd85..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-Disables aotriton download when both USE_FLASH_ATTENTION and USE_MEM_EFF_ATTENTION cmake flags are OFF
-Backports upstream PR to 2.3.0: https://github.com/pytorch/pytorch/pull/130197
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1334,7 +1334,9 @@ if(USE_ROCM)
- message(STATUS "Disabling Kernel Assert for ROCm")
- endif()
-
-- include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
-+ if(USE_FLASH_ATTENTION)
-+ include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
-+ endif()
- if(USE_CUDA)
- caffe2_update_option(USE_MEM_EFF_ATTENTION OFF)
- endif()
---- a/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
-+++ b/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
-@@ -21,7 +21,7 @@
- #include <cmath>
- #include <functional>
-
--#if USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
- #include <aotriton/flash.h>
- #endif
-
-@@ -186,7 +186,7 @@ bool check_flash_attention_hardware_support(sdp_params const& params, bool debug
- // Check that the gpu is capable of running flash attention
- using sm80 = SMVersion<8, 0>;
- using sm90 = SMVersion<9, 0>;
--#if USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
- auto stream = at::cuda::getCurrentCUDAStream().stream();
- if (hipSuccess != aotriton::v2::flash::check_gpu(stream)) {
- auto dprops = at::cuda::getCurrentDeviceProperties();
diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch
deleted file mode 100644
index a6f981b7e054..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-
-When gcc builds libtorch_cpu.so and hipcc (clang-18) build libtorch_hip.so,
-resulting binary fails in runtime due to different mangling.
-Related issue in LLVM: https://github.com/llvm/llvm-project/issues/85656
-Fixed in pytorch-2.4.0 in https://github.com/pytorch/pytorch/commit/a89f442f0b103fa6f38103784a2dfedbd147f863
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1314,6 +1314,9 @@ if(USE_ROCM)
- list(APPEND HIP_HIPCC_FLAGS -fdebug-info-for-profiling)
- endif(CMAKE_BUILD_TYPE MATCHES Debug)
-
-+ # needed for compat with newer versions of hip-clang that introduced C++20 mangling rules
-+ list(APPEND HIP_HIPCC_FLAGS -fclang-abi-compat=17)
-+
- set(HIP_CLANG_FLAGS ${HIP_CXX_FLAGS})
- # Ask hcc to generate device code during compilation so we can use
- # host linker to link.
diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch
deleted file mode 100644
index 75808fd7ec50..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-Workaround for libc++ issue https://github.com/llvm/llvm-project/issues/100802
-"reference to __host__ function 'memcpy' in __device__ function"
---- a/c10/util/Half.h
-+++ b/c10/util/Half.h
-@@ -227,7 +227,7 @@ C10_HOST_DEVICE inline float fp16_ieee_to_fp32_value(uint16_t h) {
- // const float exp_scale = 0x1.0p-112f;
- constexpr uint32_t scale_bits = (uint32_t)15 << 23;
- float exp_scale_val = 0;
-- std::memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
-+ memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
- const float exp_scale = exp_scale_val;
- const float normalized_value =
- fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
-@@ -298,8 +298,8 @@ inline uint16_t fp16_ieee_from_fp32_value(float f) {
- constexpr uint32_t scale_to_inf_bits = (uint32_t)239 << 23;
- constexpr uint32_t scale_to_zero_bits = (uint32_t)17 << 23;
- float scale_to_inf_val = 0, scale_to_zero_val = 0;
-- std::memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
-- std::memcpy(
-+ memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
-+ memcpy(
- &scale_to_zero_val, &scale_to_zero_bits, sizeof(scale_to_zero_val));
- const float scale_to_inf = scale_to_inf_val;
- const float scale_to_zero = scale_to_zero_val;
diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch
deleted file mode 100644
index 81ae075c67cc..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-Fix hip compilation with gcc-14
-Upstream commit: https://github.com/pytorch/pytorch/commit/8c2c3a03fb87c3568a22362d83b00d82b9fb3db2
---- a/aten/src/ATen/native/cuda/IndexKernel.cu
-+++ b/aten/src/ATen/native/cuda/IndexKernel.cu
-@@ -259,7 +259,13 @@ void index_put_kernel_quantized_cuda(TensorIterator& iter, const IntArrayRef ind
-
- gpu_index_kernel(iter, index_size, index_stride, [inv_scale, zero_point, qmin, qmax]C10_DEVICE(char* const out_data, const char* const in_data, const int64_t offset) {
- int64_t qvalue = static_cast<int64_t>(zero_point + nearbyintf(*(float*)in_data * inv_scale));
-+ // See https://github.com/pytorch/pytorch/issues/127666
-+ // hip-clang std::clamp __glibcxx_assert_fail host function when building on Fedora40/gcc14
-+#ifndef USE_ROCM
- qvalue = std::clamp(qvalue, qmin, qmax);
-+#else
-+ qvalue = (qvalue < qmin) ? qmin : (qmax < qvalue) ? qmax : qvalue;
-+#endif
- *(scalar_t*)(out_data + offset) = static_cast<scalar_t>(qvalue);
- });
- });
diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch b/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch
deleted file mode 100644
index dc544255c2bd..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch
+++ /dev/null
@@ -1,235 +0,0 @@
-Makes hipblaslt optional to simplify build for non-datacenter GPUs.
-Based on https://github.com/pytorch/pytorch/pull/120551 with added USE_HIPBLASLT cmake option.
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -225,6 +225,9 @@ option(USE_FAKELOWP "Use FakeLowp operators" OFF)
- option(USE_FFMPEG "Use ffmpeg" OFF)
- option(USE_GFLAGS "Use GFLAGS" OFF)
- option(USE_GLOG "Use GLOG" OFF)
-+cmake_dependent_option(
-+ USE_HIPBLASLT "Use hipBLASLt" ON
-+ "USE_ROCM" OFF)
- option(USE_LEVELDB "Use LEVELDB" OFF)
- option(USE_LITE_PROTO "Use lite protobuf instead of full." OFF)
- option(USE_LMDB "Use LMDB" OFF)
---- a/aten/src/ATen/cuda/CUDABlas.cpp
-+++ b/aten/src/ATen/cuda/CUDABlas.cpp
-@@ -14,7 +14,7 @@
- #include <c10/util/irange.h>
-
- #ifdef USE_ROCM
--#if ROCM_VERSION >= 60000
-+#ifdef USE_HIPBLASLT
- #include <hipblaslt/hipblaslt-ext.hpp>
- #endif
- // until hipblas has an API to accept flags, we must use rocblas here
-@@ -781,7 +781,7 @@ void gemm<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)) {
- }
- }
-
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
-
- #if defined(USE_ROCM) && ROCM_VERSION >= 50700 && ROCM_VERSION < 60000
- // only for rocm 5.7 where we first supported hipblaslt, it was difficult
-@@ -912,6 +912,7 @@ class CuBlasLtMatmulPreference : public CuBlasLtDescriptor<
- };
- } // namespace
-
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- template <typename Dtype>
- void gemm_and_bias(
- bool transpose_mat1,
-@@ -1124,7 +1125,7 @@ template void gemm_and_bias(
- at::BFloat16* result_ptr,
- int64_t result_ld,
- GEMMAndBiasActivationEpilogue activation);
--
-+#endif
- void scaled_gemm(
- char transa,
- char transb,
---- a/aten/src/ATen/cuda/CUDABlas.h
-+++ b/aten/src/ATen/cuda/CUDABlas.h
-@@ -82,7 +82,7 @@ void gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
- template <>
- void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
-
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- enum GEMMAndBiasActivationEpilogue {
- None,
- RELU,
---- a/aten/src/ATen/cuda/CUDAContextLight.h
-+++ b/aten/src/ATen/cuda/CUDAContextLight.h
-@@ -9,7 +9,7 @@
-
- // cublasLT was introduced in CUDA 10.1 but we enable only for 11.1 that also
- // added bf16 support
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- #include <cublasLt.h>
- #endif
-
-@@ -82,7 +82,7 @@ TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator();
- /* Handles */
- TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle();
- TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle();
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle();
- #endif
-
---- a/aten/src/ATen/cuda/CublasHandlePool.cpp
-+++ b/aten/src/ATen/cuda/CublasHandlePool.cpp
-@@ -29,7 +29,7 @@ namespace at::cuda {
-
- namespace {
-
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
- void createCublasLtHandle(cublasLtHandle_t *handle) {
- TORCH_CUDABLAS_CHECK(cublasLtCreate(handle));
- }
-@@ -190,7 +190,7 @@ cublasHandle_t getCurrentCUDABlasHandle() {
- return handle;
- }
-
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- cublasLtHandle_t getCurrentCUDABlasLtHandle() {
- #ifdef USE_ROCM
- c10::DeviceIndex device = 0;
---- a/aten/src/ATen/cuda/tunable/TunableGemm.h
-+++ b/aten/src/ATen/cuda/tunable/TunableGemm.h
-@@ -11,7 +11,7 @@
-
- #include <ATen/cuda/tunable/GemmCommon.h>
- #ifdef USE_ROCM
--#if ROCM_VERSION >= 50700
-+#ifdef USE_HIPBLASLT
- #include <ATen/cuda/tunable/GemmHipblaslt.h>
- #endif
- #include <ATen/cuda/tunable/GemmRocblas.h>
-@@ -166,7 +166,7 @@ class GemmTunableOp : public TunableOp<GemmParams<T>, StreamTimer> {
- }
- #endif
-
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
- static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
- if (env == nullptr || strcmp(env, "1") == 0) {
- // disallow tuning of hipblaslt with c10::complex
-@@ -240,7 +240,7 @@ class GemmStridedBatchedTunableOp : public TunableOp<GemmStridedBatchedParams<T>
- }
- #endif
-
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
- static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
- if (env == nullptr || strcmp(env, "1") == 0) {
- // disallow tuning of hipblaslt with c10::complex
---- a/aten/src/ATen/native/cuda/Blas.cpp
-+++ b/aten/src/ATen/native/cuda/Blas.cpp
-@@ -155,7 +155,7 @@ enum class Activation {
- GELU,
- };
-
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- cuda::blas::GEMMAndBiasActivationEpilogue activation_to_gemm_and_blas_arg(Activation a) {
- switch (a) {
- case Activation::None:
-@@ -193,6 +193,7 @@ static bool getDisableAddmmCudaLt() {
-
- #ifdef USE_ROCM
- static bool isSupportedHipLtROCmArch(int index) {
-+#if defined(USE_HIPBLASLT)
- hipDeviceProp_t* prop = at::cuda::getDeviceProperties(index);
- std::string device_arch = prop->gcnArchName;
- static const std::vector<std::string> archs = {"gfx90a", "gfx940", "gfx941", "gfx942"};
-@@ -203,6 +204,7 @@ static bool isSupportedHipLtROCmArch(int index) {
- }
- }
- TORCH_CHECK(false, "Attempting to use hipBLASLt on a unsupported architecture!");
-+#endif
- return false;
- }
- #endif
-@@ -228,7 +230,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
- at::ScalarType scalar_type = self.scalar_type();
- c10::MaybeOwned<Tensor> self_;
- if (&result != &self) {
--#if (defined(CUDA_VERSION) && CUDA_VERSION >= 11040 && !defined(_MSC_VER)) || defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if (defined(CUDA_VERSION) && CUDA_VERSION >= 11040 && !defined(_MSC_VER)) || defined(USE_ROCM) && defined(USE_HIPBLASLT)
- // Strangely, if mat2 has only 1 row or column, we get
- // CUBLAS_STATUS_INVALID_VALUE error from cublasLtMatmulAlgoGetHeuristic.
- // self.dim() == 1 && result.dim() == 2 && self.sizes()[0] == mat2_sizes[1]
-@@ -271,7 +273,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
- }
- self__sizes = self_->sizes();
- } else {
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
- useLtInterface = !disable_addmm_cuda_lt &&
- result.dim() == 2 && result.is_contiguous() &&
- isSupportedHipLtROCmArch(self.device().index()) &&
-@@ -322,7 +324,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
-
- TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!args.result->is_conj());
-
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- if (useLtInterface) {
- AT_DISPATCH_FLOATING_TYPES_AND2(
- at::ScalarType::Half,
-@@ -876,7 +878,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
- at::native::resize_output(out, {mat1_sizes[0], mat2_sizes[1]});
- at::native::resize_output(amax, {});
-
--#if !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) && ROCM_VERSION >= 60000)
-+#if !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- cublasCommonArgs args(mat1, mat2, out);
- const auto out_dtype_ = args.result->scalar_type();
- TORCH_CHECK(args.transa == 't' && args.transb == 'n', "Only multiplication of row-major and column-major matrices is supported by cuBLASLt");
-@@ -906,7 +908,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
- TORCH_CHECK(false, "_scaled_mm_out_cuda is not compiled for this platform.");
- #endif
-
--#if defined(USE_ROCM) && ROCM_VERSION >= 60000
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
- // rocm's hipblaslt does not yet support amax, so calculate separately
- auto out_float32 = out.to(kFloat);
- out_float32.abs_();
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1282,6 +1282,9 @@ if(USE_ROCM)
- if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "6.0.0")
- list(APPEND HIP_CXX_FLAGS -DHIPBLAS_V2)
- endif()
-+ if(hipblast_FOUND)
-+ list(APPEND HIP_CXX_FLAGS -DHIPBLASLT)
-+ endif()
- if(HIPBLASLT_CUSTOM_DATA_TYPE)
- list(APPEND HIP_CXX_FLAGS -DHIPBLASLT_CUSTOM_DATA_TYPE)
- endif()
---- a/cmake/public/LoadHIP.cmake
-+++ b/cmake/public/LoadHIP.cmake
-@@ -155,7 +155,7 @@ if(HIP_FOUND)
- find_package_and_print_version(hiprand REQUIRED)
- find_package_and_print_version(rocblas REQUIRED)
- find_package_and_print_version(hipblas REQUIRED)
-- if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
-+ if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0" AND USE_HIPBLASLT)
- find_package_and_print_version(hipblaslt REQUIRED)
- endif()
- find_package_and_print_version(miopen REQUIRED)
-@@ -191,7 +191,7 @@ if(HIP_FOUND)
- # roctx is part of roctracer
- find_library(ROCM_ROCTX_LIB roctx64 HINTS ${ROCM_PATH}/lib)
-
-- if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
-+ if(hipblastlt_FOUND)
- # check whether hipblaslt is using its own datatype
- set(file "${PROJECT_BINARY_DIR}/hipblaslt_test_data_type.cc")
- file(WRITE ${file} ""
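
The deleted patch above rests on a single CMake idiom: a dependent option that only takes effect when USE_ROCM is enabled, which the sources then test through an ordinary compile definition rather than a ROCM_VERSION comparison. A minimal, hedged sketch of that idiom follows; the target name and source file are placeholders, not part of the PyTorch build.

    cmake_minimum_required(VERSION 3.18)
    project(hipblaslt_option_demo LANGUAGES CXX)

    include(CMakeDependentOption)

    option(USE_ROCM "Build with ROCm support" OFF)
    # USE_HIPBLASLT defaults to ON when USE_ROCM is ON and is forced
    # OFF (and hidden) whenever USE_ROCM is OFF.
    cmake_dependent_option(
      USE_HIPBLASLT "Use hipBLASLt" ON
      "USE_ROCM" OFF)

    add_library(demo demo.cpp)
    if(USE_HIPBLASLT)
      # C++ code can then guard hipBLASLt paths with #ifdef USE_HIPBLASLT
      # instead of comparing ROCM_VERSION against 50700/60000.
      target_compile_definitions(demo PRIVATE USE_HIPBLASLT)
    endif()
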
diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch b/sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch
deleted file mode 100644
index 127a31e4b225..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-Fix for error: invalid argument '-std=c++17' not allowed with 'C'
-https://github.com/pytorch/pytorch/issues/103222
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -30,6 +30,7 @@ hip_add_library(c10_hip ${C10_HIP_SRCS} ${C10_HIP_HEADERS})
-
- # Propagate HIP_CXX_FLAGS that were set from Dependencies.cmake
- target_compile_options(c10_hip PRIVATE ${HIP_CXX_FLAGS})
-+set_target_properties(c10_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-
- # caffe2_hip adds a bunch of dependencies like rocsparse, but c10/hip is supposed to be
- # minimal. I'm not sure if we need hip_hcc or not; for now leave it out
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1712,6 +1712,7 @@ if(USE_ROCM)
-
- # Since PyTorch files contain HIP headers, these flags are required for the necessary definitions to be added.
- target_compile_options(torch_hip PUBLIC ${HIP_CXX_FLAGS}) # experiment
-+ set_target_properties(torch_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- target_link_libraries(torch_hip PUBLIC c10_hip)
-
- if(NOT INTERN_BUILD_MOBILE)
-@@ -1908,6 +1909,7 @@ if(BUILD_TEST)
- target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
- target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE} ${Caffe2_HIP_INCLUDE})
- target_compile_options(${test_name} PRIVATE ${HIP_CXX_FLAGS})
-+ set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
- if(INSTALL_TEST)
- install(TARGETS ${test_name} DESTINATION test)
-@@ -2092,6 +2094,7 @@ if(BUILD_PYTHON)
- endif()
- if(NOT MSVC)
- target_compile_options(caffe2_pybind11_state_hip PRIVATE ${HIP_CXX_FLAGS} -fvisibility=hidden)
-+ set_target_properties(caffe2_pybind11_state_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- endif()
- set_target_properties(caffe2_pybind11_state_hip PROPERTIES PREFIX "")
- set_target_properties(caffe2_pybind11_state_hip PROPERTIES SUFFIX ${PY_EXT_SUFFIX})
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1278,7 +1278,6 @@
- list(APPEND HIP_CXX_FLAGS -Wno-duplicate-decl-specifier)
- list(APPEND HIP_CXX_FLAGS -DCAFFE2_USE_MIOPEN)
- list(APPEND HIP_CXX_FLAGS -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP)
-- list(APPEND HIP_CXX_FLAGS -std=c++17)
- if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "6.0.0")
- list(APPEND HIP_CXX_FLAGS -DHIPBLAS_V2)
- endif()
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -335,6 +335,7 @@ function(caffe2_hip_binary_target target_name_or_src)
- caffe2_binary_target(${target_name_or_src})
-
- target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
-+ set_target_properties(${__target} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
- endfunction()
-
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -31,6 +31,7 @@ if(BUILD_CAFFE2_OPS)
- ${Detectron_CPU_SRCS}
- ${Detectron_HIP_SRCS})
- target_compile_options(caffe2_detectron_ops_hip PRIVATE ${HIP_CXX_FLAGS})
-+ set_target_properties(caffe2_detectron_ops_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- if(USE_MKLDNN)
- target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
- endif()
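
The fix above amounts to removing the raw -std=c++17 entry from HIP_CXX_FLAGS, which hipcc also passes to C sources, and setting the per-target CXX_STANDARD property instead, which CMake applies only to C++ translation units. A stand-alone illustration of the difference, with made-up target and file names:

    cmake_minimum_required(VERSION 3.18)
    project(std_flag_demo LANGUAGES C CXX)

    add_library(mixed_lang lib.c lib.cpp)

    # Problematic: the flag is attached to every source, including lib.c,
    # where clang fails with "invalid argument '-std=c++17' not allowed with 'C'".
    # target_compile_options(mixed_lang PRIVATE -std=c++17)

    # Preferred: let CMake emit the standard flag only for C++ sources.
    set_target_properties(mixed_lang PROPERTIES
      CXX_STANDARD 17
      CXX_STANDARD_REQUIRED ON
      CXX_EXTENSIONS OFF)
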
diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-exclude-aotriton.patch b/sci-libs/caffe2/files/caffe2-2.4.0-exclude-aotriton.patch
deleted file mode 100644
index 72ab792b2278..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.4.0-exclude-aotriton.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-Disables aotriton download when both USE_FLASH_ATTENTION and USE_MEM_EFF_ATTENTION cmake flags are OFF
-Backports upstream PR to 2.3.0: https://github.com/pytorch/pytorch/pull/130197
---- a/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
-+++ b/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
-@@ -24,7 +24,7 @@
- #include <c10/core/SymInt.h>
- #include <c10/util/string_view.h>
-
--#if USE_ROCM
-+#if defined(USE_ROCM) && (defined(USE_MEM_EFF_ATTENTION) || defined(USE_FLASH_ATTENTION))
- #include <aotriton/flash.h>
- #endif
-
-@@ -207,7 +207,7 @@ bool check_flash_attention_hardware_support(sdp_params const& params, bool debug
- // Check that the gpu is capable of running flash attention
- using sm80 = SMVersion<8, 0>;
- using sm90 = SMVersion<9, 0>;
--#if USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
- auto stream = at::cuda::getCurrentCUDAStream().stream();
- if (hipSuccess != aotriton::v2::flash::check_gpu(stream)) {
- auto dprops = at::cuda::getCurrentDeviceProperties();
-@@ -238,7 +238,7 @@ bool check_mem_efficient_hardware_support(sdp_params const& params, bool debug)
- // Mem Efficient attention supports hardware in the range [sm_50, sm_90]
- using sm50 = SMVersion<5, 0>;
- using sm90 = SMVersion<9, 0>;
--#if USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_MEM_EFF_ATTENTION)
- auto stream = at::cuda::getCurrentCUDAStream().stream();
- if (hipSuccess != aotriton::v2::flash::check_gpu(stream)) {
- auto dprops = at::cuda::getCurrentDeviceProperties();
-@@ -623,7 +623,7 @@ bool can_use_mem_efficient_attention(sdp_params const& params, bool debug) {
- array_of<at::ScalarType>(at::kHalf, at::kFloat, at::kBFloat16);
- constexpr auto less_than_sm80_mem_efficient_dtypes =
- array_of<at::ScalarType>(at::kHalf, at::kFloat);
--#ifdef USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_MEM_EFF_ATTENTION)
- constexpr auto aotriton_mem_efficient_dtypes =
- array_of<at::ScalarType>(at::kHalf, at::kFloat, at::kBFloat16);
- #endif
-@@ -668,7 +668,7 @@ bool can_use_mem_efficient_attention(sdp_params const& params, bool debug) {
- }
- }
-
--#ifdef USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_MEM_EFF_ATTENTION)
- return check_tensor_dtype(params, aotriton_mem_efficient_dtypes, debug);
- #else
- auto dprop = at::cuda::getCurrentDeviceProperties();
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1095,10 +1095,12 @@ if(USE_ROCM)
- message(STATUS "Disabling Kernel Assert for ROCm")
- endif()
-
-- include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
- if(USE_CUDA)
- caffe2_update_option(USE_MEM_EFF_ATTENTION OFF)
- endif()
-+ if(USE_FLASH_ATTENTION OR USE_MEM_EFF_ATTENTION)
-+ include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
-+ endif()
- else()
- caffe2_update_option(USE_ROCM OFF)
- endif()
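
The key move in the deleted patch above is gating the include() of the external-project module behind the feature options that actually need it, so the aotriton download is skipped entirely when both attention backends are off. A rough sketch of the same gating; the option names mirror the patch, the path is a placeholder:

    cmake_minimum_required(VERSION 3.18)
    project(conditional_fetch_demo LANGUAGES CXX)

    option(USE_FLASH_ATTENTION "Enable flash attention kernels" OFF)
    option(USE_MEM_EFF_ATTENTION "Enable memory-efficient attention kernels" OFF)

    # Only pull in the module that fetches and builds the external dependency
    # when at least one consumer of it is enabled.
    if(USE_FLASH_ATTENTION OR USE_MEM_EFF_ATTENTION)
      include(${CMAKE_CURRENT_LIST_DIR}/cmake/External/aotriton.cmake)
    endif()
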
diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-fix-openmp-link.patch b/sci-libs/caffe2/files/caffe2-2.4.0-fix-openmp-link.patch
deleted file mode 100644
index 9b0fe0b97c0f..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.4.0-fix-openmp-link.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-Fix "undefined symbol: omp_get_max_active_levels" in mkl + <nothing else> builds
-https://github.com/pytorch/pytorch/issues/116576
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1643,6 +1643,9 @@ if(BUILD_SHARED_LIBS)
- if(CAFFE2_USE_MKL)
- target_link_libraries(torch_global_deps caffe2::mkl)
- endif()
-+ if(USE_OPENMP)
-+ target_link_libraries(torch_global_deps OpenMP::OpenMP_CXX)
-+ endif()
- # The CUDA libraries are linked here for a different reason: in some
- # cases we load these libraries with ctypes, and if they weren't opened
- # with RTLD_GLOBAL, we'll do the "normal" search process again (and
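
The one-hunk patch above simply links the OpenMP runtime explicitly when USE_OPENMP is set, so symbols such as omp_get_max_active_levels resolve even when no other dependency pulls the runtime in. A minimal version of that linkage; the library target and source name are illustrative:

    cmake_minimum_required(VERSION 3.18)
    project(openmp_link_demo LANGUAGES CXX)

    option(USE_OPENMP "Link against the OpenMP runtime" ON)

    add_library(global_deps SHARED deps.cpp)

    if(USE_OPENMP)
      find_package(OpenMP REQUIRED)
      # The imported target carries both the compile flags and the runtime
      # library (libgomp, libomp, ...) for the active compiler.
      target_link_libraries(global_deps PRIVATE OpenMP::OpenMP_CXX)
    endif()
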
diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.4.0-gentoo.patch
deleted file mode 100644
index d592a346386b..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.4.0-gentoo.patch
+++ /dev/null
@@ -1,211 +0,0 @@
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -966,12 +966,11 @@ endif()
- # third_party/FBGEMM
- include(cmake/public/utils.cmake)
- if(NOT MSVC)
-- string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
-+ string(APPEND CMAKE_CXX_FLAGS " -O2")
- # Eigen fails to build with some versions, so convert this to a warning
- # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
- string(APPEND CMAKE_CXX_FLAGS " -Wall")
- string(APPEND CMAKE_CXX_FLAGS " -Wextra")
-- append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
-@@ -1074,7 +1073,6 @@ if(NOT MSVC)
- string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
- append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
-- append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
- else()
- # skip unwanted includes from windows.h
- add_compile_definitions(WIN32_LEAN_AND_MEAN)
---- a/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
-+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
-@@ -324,16 +324,8 @@ set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/pytorch_q
- set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/qnnpack_func.h)
-
- # ---[ Configure clog
--if(NOT TARGET clog)
-- set(CLOG_BUILD_TESTS OFF CACHE BOOL "")
-- set(CLOG_RUNTIME_TYPE "${CPUINFO_RUNTIME_TYPE}" CACHE STRING "")
-- add_subdirectory(
-- "${CLOG_SOURCE_DIR}"
-- "${CONFU_DEPENDENCIES_BINARY_DIR}/clog")
-- # We build static version of clog but a dynamic library may indirectly depend on it
-- set_property(TARGET clog PROPERTY POSITION_INDEPENDENT_CODE ON)
--endif()
--target_link_libraries(pytorch_qnnpack PUBLIC clog)
-+find_library(CLOG_LIBRARY NAMES clog REQUIRED)
-+target_link_libraries(pytorch_qnnpack PUBLIC ${CLOG_LIBRARY})
-
- # ---[ Configure cpuinfo
- if(NOT TARGET cpuinfo AND USE_SYSTEM_CPUINFO)
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -94,7 +94,7 @@ if(NOT BUILD_LIBTORCHLESS)
- if(C10_USE_GLOG)
- target_link_libraries(c10 PUBLIC glog::glog)
- endif()
-- target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
-+ target_link_libraries(c10 PRIVATE fmt)
-
- if(C10_USE_NUMA)
- message(STATUS "NUMA paths:")
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -87,7 +87,7 @@ endif()
- # Note: the folders that are being commented out have not been properly
- # addressed yet.
-
--if(NOT MSVC AND USE_XNNPACK)
-+if(FALSE)
- if(NOT TARGET fxdiv)
- set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
- set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
-@@ -1075,7 +1075,6 @@ if(USE_XPU)
- endif()
-
- if(NOT MSVC AND USE_XNNPACK)
-- TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
- endif()
-
- # ==========================================================
-@@ -1178,8 +1177,7 @@ target_include_directories(torch_cpu PRIVATE
- target_include_directories(torch_cpu PRIVATE
- ${TORCH_ROOT}/third_party/miniz-2.1.0)
-
--target_include_directories(torch_cpu PRIVATE
-- ${TORCH_ROOT}/third_party/kineto/libkineto/include)
-+target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
-
- if(USE_KINETO)
- target_include_directories(torch_cpu PRIVATE
---- a/cmake/Codegen.cmake
-+++ b/cmake/Codegen.cmake
-@@ -57,7 +57,7 @@ if(INTERN_BUILD_ATEN_OPS)
- if(MSVC)
- set(OPT_FLAG "/fp:strict ")
- else(MSVC)
-- set(OPT_FLAG "-O3 ")
-+ set(OPT_FLAG " ")
- if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
- set(OPT_FLAG " ")
- endif()
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -466,7 +466,9 @@ if(USE_PYTORCH_QNNPACK)
- set_property(TARGET pytorch_qnnpack PROPERTY POSITION_INDEPENDENT_CODE ON)
- set_property(TARGET cpuinfo PROPERTY POSITION_INDEPENDENT_CODE ON)
- # QNNPACK depends on gemmlowp headers
-- target_include_directories(pytorch_qnnpack PRIVATE "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
-+ find_package(gemmlowp REQUIRED)
-+ get_target_property(GEMMLOWP_INCLUDE_DIRS gemmlowp::gemmlowp INTERFACE_INCLUDE_DIRECTORIES)
-+ target_include_directories(pytorch_qnnpack PRIVATE ${GEMMLOWP_INCLUDE_DIRS})
-
- if(PYTORCH_QNNPACK_CUSTOM_THREADPOOL)
- target_compile_definitions(
-@@ -705,7 +707,7 @@ if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK OR BUILD_MOBILE_TEST)
- endif()
-
- # ---[ FBGEMM
--if(USE_FBGEMM)
-+if(FALSE)
- set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
- if(NOT DEFINED FBGEMM_SOURCE_DIR)
- set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
-@@ -753,6 +755,7 @@ if(USE_FBGEMM)
- endif()
-
- if(USE_FBGEMM)
-+ list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
- caffe2_update_option(USE_FBGEMM ON)
- else()
- caffe2_update_option(USE_FBGEMM OFF)
-@@ -1288,7 +1291,6 @@ if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND NOT INTERN_DISABLE_ONNX)
- set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
- endif()
- endif()
-- add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
-
- add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
- if(NOT USE_SYSTEM_ONNX)
-@@ -1530,7 +1532,6 @@ endif()
- #
- set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
- set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
--add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
-
- # Disable compiler feature checks for `fmt`.
- #
-@@ -1539,9 +1540,7 @@ add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
- # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
- # `fmt` is compatible with a superset of the compilers that PyTorch is, it
- # shouldn't be too bad to just disable the checks.
--set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
-
--list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
- set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
-
- # ---[ Kineto
---- a/cmake/External/nnpack.cmake
-+++ b/cmake/External/nnpack.cmake
-@@ -56,7 +56,7 @@ if(ANDROID OR IOS OR ${CMAKE_SYSTEM_NAME} STREQUAL "Linux" OR ${CMAKE_SYSTEM_NAM
- set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
- set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
-
-- if(NOT TARGET nnpack)
-+ if(FALSE)
- if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
- set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
- endif()
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -483,8 +483,6 @@ function(torch_compile_options libname)
- endif()
-
- # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
-- target_compile_options(${libname} PRIVATE
-- $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
-
- endfunction()
-
---- a/functorch/CMakeLists.txt
-+++ b/functorch/CMakeLists.txt
-@@ -42,4 +42,4 @@ endif()
- if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
- set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
- endif()
--install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
-+install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")
---- a/torch/CMakeLists.txt
-+++ b/torch/CMakeLists.txt
-@@ -59,16 +59,8 @@ set(TORCH_PYTHON_INCLUDE_DIRECTORIES
- ${CMAKE_BINARY_DIR}
- ${CMAKE_BINARY_DIR}/aten/src
- ${CMAKE_BINARY_DIR}/caffe2/aten/src
-- ${CMAKE_BINARY_DIR}/third_party
-- ${CMAKE_BINARY_DIR}/third_party/onnx
--
-- ${TORCH_ROOT}/third_party/valgrind-headers
--
-- ${TORCH_ROOT}/third_party/gloo
-- ${TORCH_ROOT}/third_party/onnx
-- ${TORCH_ROOT}/third_party/flatbuffers/include
-- ${TORCH_ROOT}/third_party/kineto/libkineto/include
-- ${TORCH_ROOT}/third_party/cpp-httplib
-+
-+ /usr/include/kineto
-
- ${TORCH_SRC_DIR}/csrc
- ${TORCH_SRC_DIR}/csrc/api/include
-@@ -83,7 +75,6 @@ set(TORCH_PYTHON_LINK_LIBRARIES
- opentelemetry::api
- httplib
- shm
-- fmt::fmt-header-only
- ATEN_CPU_FILES_GEN_LIB)
-
- if(USE_ASAN AND TARGET Sanitizer::address)
diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-install-dirs.patch b/sci-libs/caffe2/files/caffe2-2.4.0-install-dirs.patch
deleted file mode 100644
index ee6e8fb91562..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.4.0-install-dirs.patch
+++ /dev/null
@@ -1,70 +0,0 @@
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -157,7 +157,7 @@ if(NOT BUILD_LIBTORCHLESS)
- # Note: for now, we will put all export path into one single Caffe2Targets group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still self-contained.
-- install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
-+ install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- endif()
-
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
---- a/c10/cuda/CMakeLists.txt
-+++ b/c10/cuda/CMakeLists.txt
-@@ -82,7 +82,7 @@ if(NOT BUILD_LIBTORCHLESS)
- # Note: for now, we will put all export path into one single Caffe2Targets group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still self-contained.
--install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
-
- endif()
-
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -57,7 +57,7 @@ if(NOT BUILD_LIBTORCHLESS)
- $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../..>
- $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}>
- $<INSTALL_INTERFACE:include>)
-- install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION lib)
-+ install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- set(C10_HIP_LIB c10_hip)
- endif()
-
---- a/c10/xpu/CMakeLists.txt
-+++ b/c10/xpu/CMakeLists.txt
-@@ -45,7 +45,7 @@ target_include_directories(
- $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}>
- $<INSTALL_INTERFACE:include>
- )
-- install(TARGETS c10_xpu EXPORT Caffe2Targets DESTINATION lib)
-+ install(TARGETS c10_xpu EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- set(C10_XPU_LIB c10_xpu)
- add_subdirectory(test)
- endif()
---- a/test/cpp/c10d/CMakeLists.txt
-+++ b/test/cpp/c10d/CMakeLists.txt
-@@ -64,7 +64,7 @@ if(USE_CUDA)
- torch_cpu c10d_cuda_test gtest_main __caffe2_ucc)
- if(INSTALL_TEST)
- install(TARGETS ProcessGroupUCCTest DESTINATION bin)
-- install(TARGETS c10d_cuda_test DESTINATION lib)
-+ install(TARGETS c10d_cuda_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
- endif()
- endif()
- else()
---- a/test/cpp/jit/CMakeLists.txt
-+++ b/test/cpp/jit/CMakeLists.txt
-@@ -32,9 +32,9 @@ endif()
- target_link_libraries(backend_with_compiler torch)
-
- if(INSTALL_TEST)
-- install(TARGETS torchbind_test DESTINATION lib)
-- install(TARGETS jitbackend_test DESTINATION lib)
-- install(TARGETS backend_with_compiler DESTINATION lib)
-+ install(TARGETS torchbind_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+ install(TARGETS jitbackend_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+ install(TARGETS backend_with_compiler DESTINATION ${CMAKE_INSTALL_LIBDIR})
- endif()
-
- # Build the cpp gtest binary containing the cpp-only tests.
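
The pattern in the deleted patch above is the standard GNUInstallDirs replacement for a hard-coded "DESTINATION lib", so that multilib layouts such as lib64 are respected at install time. A small hedged example with a placeholder target:

    cmake_minimum_required(VERSION 3.18)
    project(install_dirs_demo LANGUAGES CXX)

    # Defines CMAKE_INSTALL_LIBDIR (lib, lib64, ...) for the host platform.
    include(GNUInstallDirs)

    add_library(mylib SHARED mylib.cpp)

    # Instead of: install(TARGETS mylib DESTINATION lib)
    install(TARGETS mylib
            LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
            ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
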
diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-rocm-fix-std-cpp17.patch b/sci-libs/caffe2/files/caffe2-2.4.0-rocm-fix-std-cpp17.patch
deleted file mode 100644
index 3612c3db1a0b..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.4.0-rocm-fix-std-cpp17.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-Fix for error: invalid argument '-std=c++17' not allowed with 'C'
-https://github.com/pytorch/pytorch/issues/103222
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -36,6 +36,7 @@ if(NOT BUILD_LIBTORCHLESS)
-
- # Propagate HIP_CXX_FLAGS that were set from Dependencies.cmake
- target_compile_options(c10_hip PRIVATE ${HIP_CXX_FLAGS})
-+ set_target_properties(c10_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-
- # caffe2_hip adds a bunch of dependencies like rocsparse, but c10/hip is supposed to be
- # minimal. I'm not sure if we need hip_hcc or not; for now leave it out
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1670,6 +1670,7 @@ if(USE_ROCM)
-
- # Since PyTorch files contain HIP headers, these flags are required for the necessary definitions to be added.
- target_compile_options(torch_hip PUBLIC ${HIP_CXX_FLAGS}) # experiment
-+ set_target_properties(torch_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-
- target_link_libraries(torch_hip PUBLIC c10_hip)
-
-@@ -1867,6 +1868,7 @@ if(BUILD_TEST)
- target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
- target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE} ${Caffe2_HIP_INCLUDE})
- target_compile_options(${test_name} PRIVATE ${HIP_CXX_FLAGS})
-+ set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
- if(INSTALL_TEST)
- install(TARGETS ${test_name} DESTINATION test)
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1050,7 +1050,6 @@ if(USE_ROCM)
- list(APPEND HIP_CXX_FLAGS -Wno-duplicate-decl-specifier)
- list(APPEND HIP_CXX_FLAGS -DCAFFE2_USE_MIOPEN)
- list(APPEND HIP_CXX_FLAGS -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP)
-- list(APPEND HIP_CXX_FLAGS -std=c++17)
- list(APPEND HIP_CXX_FLAGS -DHIPBLAS_V2)
- if(HIP_NEW_TYPE_ENUMS)
- list(APPEND HIP_CXX_FLAGS -DHIP_NEW_TYPE_ENUMS)
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -332,6 +332,7 @@ function(caffe2_hip_binary_target target_name_or_src)
- caffe2_binary_target(${target_name_or_src})
-
- target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
-+ set_target_properties(${__target} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
- endfunction()
-