From 403ab7c9007c3a5ff376f2a5d8963c5db58813fb Mon Sep 17 00:00:00 2001
From: V3n3RiX
Date: Tue, 25 Apr 2023 12:12:32 +0100
Subject: gentoo auto-resync : 25:04:2023 - 12:12:32

---
 sci-chemistry/Manifest.gz                          | Bin 9531 -> 9532 bytes
 sci-chemistry/gromacs/Manifest                     |   7 +-
 .../files/gromacs-2021-cuda-detection.patch        | 339 +++++++++++++++++++++
 sci-chemistry/gromacs/gromacs-2021.7.ebuild        |   5 +-
 sci-chemistry/gromacs/gromacs-2023.1.ebuild        | 331 ++++++++++++++++++++
 5 files changed, 680 insertions(+), 2 deletions(-)
 create mode 100644 sci-chemistry/gromacs/files/gromacs-2021-cuda-detection.patch
 create mode 100644 sci-chemistry/gromacs/gromacs-2023.1.ebuild

(limited to 'sci-chemistry')

diff --git a/sci-chemistry/Manifest.gz b/sci-chemistry/Manifest.gz
index 76a8770dc981..31cef5bc3b6d 100644
Binary files a/sci-chemistry/Manifest.gz and b/sci-chemistry/Manifest.gz differ
diff --git a/sci-chemistry/gromacs/Manifest b/sci-chemistry/gromacs/Manifest
index 85834110c3d7..f1875f06cde3 100644
--- a/sci-chemistry/gromacs/Manifest
+++ b/sci-chemistry/gromacs/Manifest
@@ -1,5 +1,6 @@
 AUX gromacs-2019.6-missing-include.patch 1052 BLAKE2B 01d7e592caa1e975a1fcea99bc37dd61ea7c039ed32243852f532c87d08911f9e83d0752d3a09e46643ca433339fe7c8d510c09e4af1070060735f9ee8a151d0 SHA512 8ed2e1de98cd35d87c0a0ea6867a99d4c1b33940040506feac09da929389afd1e19f34691c9032b1f59b156986b56c49aed1d0c8f8e7e64ad7a78e6dfd521912
 AUX gromacs-2020-pytest.patch 1346 BLAKE2B 4b0b34bf4f46ec1acc3d0088ac11323938ee44b700beecc32b083885727d31904649798e38b824a6cf04ea7f9129dbd40e66bdde6e5a88fa4734db51bdf4cc46 SHA512 2c05e243b0e7bb8f9d25051d7bc86463dffa722502586799f0a8ce53bebd24fd0e367b6840365ac4ebc30595ffb8bbccb10a88d0c81148ae198941966cebddb0
+AUX gromacs-2021-cuda-detection.patch 21849 BLAKE2B 4976321e5f2c5bc9b45feeaa29206d77d04d6148ca910652bd3f7923b6e74b7b10f039b5bcc4430826745f6d64b232c536a8ef75cce61556275942e94945acc0 SHA512 6a1d1f0467d27267d214bc4df95c4b15438fffcae6b84e03763425f869e2551490c0f5e2d28774f9dda2daa07da8b5555617721d46e8b4f067eba2b2b86b353d
 AUX gromacs-2021-musl-stdint.patch 601 BLAKE2B 2e871d22655950ee9b9951229e78bc022f74ce7a8aa8063bafa30e0fd97cd0451e0621ab48f985d813fd5d890c0f2d0c9ced6d4b26d86e9d1f6774c084f169af SHA512 d36497423beb86a26ad011432dd6da7169ae42dc00d15bc90ba14de6ecf5eb3217ac987260dce479e5f92d5f3a40fa28c5eb1b51816a92b6a37745f674581280
 DIST gromacs-2018.8.tar.gz 29913703 BLAKE2B 8780032305928067fbfe1559efe9eedc4d47e27ab2f6ab54d6dba39edfcbeefbee4673d8910009048a850620b5a08ba8feb622db652c439cfa50a149ec5f0015 SHA512 6717895942f2ab7b54019511ed2aebadadde8e96d3c4b7414bb9168ffe418432d2dee330192e199f7b9d2f665c71f5d4f1ef0911aa7243d525a2ca182ef5b122
 DIST gromacs-2019.6.tar.gz 33446147 BLAKE2B adc21fb6b841b06d499607f8c0166a673645ef5af0b40bd823d0fff5ea24397e6301c5e1e0070986ae1ce1deba8a42052b66da148b071c1e21f2fe3908fee275 SHA512 7c227a9539e5775d5d128ae9e872a24181d79cdcd2852b86453597987e8d7332ecec371da65d089f6e610c54aafbccc8824ce298743383dd6240719ebee692a8
@@ -8,6 +9,7 @@ DIST gromacs-2021.6.tar.gz 38012702 BLAKE2B 023ae37dc53643758afa050fd179cd66f3c6
 DIST gromacs-2021.7.tar.gz 38028032 BLAKE2B 2dc1b15ca8692ca5cf4ad1c7a266b9e2ced064d34a63cef61fea69e48231dd0310cfd8484d29c56cdb599f795d7655959f40038ac933b46ad2841d57bf2b3646 SHA512 ca478533da3bcb377fda30682660db8eb13beef6456fac6013a1a97ec3be60b5594567412e1e2a31392e484f9a40a22da1f12336f4738cf0422ff4f3e8814609
 DIST gromacs-2022.4.tar.gz 40352282 BLAKE2B 616ba05a70a483d7d8fadefb383ac9fc8825ac09ea92f55a0222c57ac91e4a045fb55405ec963303d8dbabb04b3d1f8c73062785b3812b3c6b7582e002dc5538 SHA512 15c642f2b63c1731a01a3b58c13d454f57c29607554a6693c2e62f38da574c6d596c5cb6f361e0feb5e1069b3b19bb49ceaa432c030da91f48e41654e3e6a62f
 DIST gromacs-2022.5.tar.gz 40362776 BLAKE2B e77c96cd995fb6c8284bcf9728e2f95e265911f643c69d00cc5910c6d22ada41293a1cb74033e708957776551b253453f64dba46497d68b2f73c675e94f30ddd SHA512 79310d6fd39a160e7bbc661a04540c9bcb89f40c133d1031ca16998ef0a67fc4a023f57950b22793f0c91b5fe47fd78cb2e7afedb50fe9251b1c920c9267344d
+DIST gromacs-2023.1.tar.gz 41934072 BLAKE2B 6d04642ba37a8563fafc10e0be83e33aab5ce25ef43ec2c9555e84328096bbe61426558a1433b086028f3c6637a5b3ae82241e67f206c0dd0b5254f5ec27ae4c SHA512 4bddbbc56f26487db9d35f371d65725289d1355dc4c295d32698499e05eb74e31d4db579fc1f665edb55a68b002bddacf37c0f05d3f970b1111c149990d434b1
 DIST gromacs-2023.tar.gz 41863526 BLAKE2B 81669eaf176e48855e168da81f410f949851a5b7d1fde43ee367f4bbaeef4a61b963e7c936c8a07d0eb51ad0e412e2826c6dd194c1df6d3ba39490decdf9fb21 SHA512 4593a0802d809ea797c3d3ba157851c279e9569e96c3aefa2dbcf97b5a1a74cd9840f0fcf0f032f49024691458941efb43e59e4e05c7ee44ee46d6b3621e52d5
 DIST manual-2018.8.pdf 10025023 BLAKE2B afb0a6e6a72d78df743fcb57e7c1716848589e571dd35167b957a9b407ca27978ccfb6cb9e0df9c9439b888f352501a00ba32281ed1e0b4193bd606f1d77152e SHA512 b1972f7ce965bfc9377542993c5943ea4868a8ed23a969d4203264746d6bea3a7a65c6379196ece37fd6d68ec7ec80f827bde87d1049284af7082759a124f1d5
 DIST manual-2019.6.pdf 12702376 BLAKE2B c350127bc06a9eb3dee73da39037c84daeb89500e23cb131bd19a150bf60602d4dde7611e0c6f0f344af4093e96a899303dac71b4df56fd0c44c3a48a56606b7 SHA512 dfdff67c2c1c9ce1f1c236b7686c5331b0fe86951da6c98eddd6a3e748815e0be0a9653ae4318469fcdd913c4e7c355d64a997cc80b9d9fbe6282ef6d98e61eb
@@ -16,6 +18,7 @@ DIST manual-2021.6.pdf 12256595 BLAKE2B 5d808e38571b38c1f6e0d115f6e604238206cc00
 DIST manual-2021.7.pdf 12256526 BLAKE2B 6f298d7dac142f66914bbc2ea7d6ca70c4aeafaf33ccf12336d5cec64b1f1097b5332c767bb411ccef706ec6184479a84bcf59bda21c4249d45d8ce3dea08e79 SHA512 197765accb8ab6c5f1cc94b2697ff1817e157d85fa231dfe36a85036076e04da2a8035de66aba4f46c6dd7f553176357b94c92e1da5d1dabc574c627deee5279
 DIST manual-2022.4.pdf 13080976 BLAKE2B 45685fa1d727aa30425e946ac72e0bd723528032e3fb45525d3c025316ee2682ea3c9cc2fd75b5cd4e900adbc6e7f577e3368d9e0681294fd1515df3d2e5d840 SHA512 a7ac3a80a90f14b6917e15633d77230d24cdcf19ade38d14b35c6451074848a7754b33c612e2a26a657f2edd3d6710e5a8130cacb107d4085d1bd934267ad623
 DIST manual-2022.5.pdf 13081119 BLAKE2B 7a0ffd086d98527d42da4a22984d8351ad7861e32951e8b65c88e7d0df1559303750886ae31a7c158b4e22f616f6dd5deed9a63b0ed114bb1e0adfda9350a487 SHA512 707869ee6052f1b7f75376b0f78fcc4859035234957cc740bd770ea3bea3a591f7fc1fbc0b8526baab48eebfa506d03927df94ddc9c412c75dc2f2864574dd82
+DIST manual-2023.1.pdf 13526498 BLAKE2B a9aabcb722954c9c83a8f5945c85e634b0b82ace8b9b077c3f2b9ff432ca71d3e2d5ca2dd52d6ce489d77d3d4ca7b41d75e5566205bb8855caa650fa2722e5ec SHA512 b6ca6c822ac5b3cbe3c84d8fd51d3ed465268c00bb5f96e2afa557d45f9c534b0de278f33a0fdf43080c9c619344b98cd71847e17b297a789eec4c8b42dea7b2
 DIST manual-2023.pdf 13516908 BLAKE2B 03a62190921c1a5ddf1ce6b0af5f8d83eaea26d31ac9b7e2f59f8b08e350d7147ee03ff8ec4e81e43777d04fa3d91c96d2a3dc8daa53a2565df02c39d6897d06 SHA512 2b16ca234941ede6415f89e0e31ee611ea1324454f5ddaf3cbcbdfbb5e864344faf012e5106131dcb9d3c1ffbbac358f1ea387166b1e45035e6427e344ff1964
 DIST regressiontests-2018.8.tar.gz 67855469 BLAKE2B 34c9b339f6229f483afbb5192ee6ba8b8f72d5c26907a853af9c53dfece0d88739e48f6b44b78d1c010f988f9385d077285300522164f533a5861e9dda879275 SHA512 3642389d27bd1942cd0f091c940ae97b197b94856a387fe581dc516b0d4169480f16551d4ba357f9282b3337d605c286d51dd38112ac87c826dda634904836bb
 DIST regressiontests-2019.6.tar.gz 67643195 BLAKE2B 1e054e24b187946f7ea28090d4f20cf8e1d79a26253f57ba07d130e0773d8541b8a1552a38023d31a68ce8bc62d8e0af1d98609234bb3e7d3e6d567307ebb386 SHA512 eacf1c55b982515a305c29459fe80a7cd558a8481e5689962aa956148af542568b1d1ce59c6784ecb0afd4768c2b664afa21e12af2d89ae2b06b1ba61ad72036
@@ -24,15 +27,17 @@ DIST regressiontests-2021.6.tar.gz 48537070 BLAKE2B 982fad6b23da5c1e6d32009f9f08
 DIST regressiontests-2021.7.tar.gz 48537500 BLAKE2B b0c97fa652cb9b972c50f29822f530fcaa2ec9acb6d6442114fc6446c54aade5eb1bfa153c69a7e8bf64b66e8b041d7f8e0d51e7f7b0516f9e03f3d9ad626193 SHA512 7cbead90aa6d62346dcf7c2249c3cfa179884b987a749624444d2f59d93e0322f7721143bf6eb44d26102635dc70a73546ef82d4647558b6320bfdd459074017
 DIST regressiontests-2022.4.tar.gz 48614599 BLAKE2B d41d59d4336455c87f3877be48ec49b2cdc989548c51ef9c8277b05bb6cf3dc05ebfa91a0248cc113c130a0ed19aae2c8316184caf37b72aa612b211fe41b144 SHA512 6495a286b201774ea2a922376ff5c0abaae7aaef1dfd28a1a61c817b5c6ac83db99300d43cc019745ca32a025e1f8a7f538342af539d9d753e494c550b429706
 DIST regressiontests-2022.5.tar.gz 48615858 BLAKE2B 6655946155291eb24f76832c76e21b5c606905e94426fd057ecdaa44d74844613ba288b651f94120553ef7d7f938de345ddaac7c3738d67bdad148359b7b4500 SHA512 8dd0c8a9b30f4c047fb756dee8eee1cb667803c231fd114717e3156c5694a195dec2b57d80b50b417312949bc4cb7da90027cc98102d84fa154a93862bc69790
+DIST regressiontests-2023.1.tar.gz 48613205 BLAKE2B b64716e08e5597676794209a0a951d2a5a46aab2b05bc1615d6f4ba666d5c81a7fe9247fc90ce8d9a5da1a9d52eea73d3d3cd4a4072a8bad66b8de0e06dfa599 SHA512 c6924ec75e456950cef0809ea9718be1bf9d85fe6e9ba5553b2df86ef604b0c7f54d41476f45cbc0ccf801ee0290a829bf4f9eb8d0773586bb8385efc5242d4d
 DIST regressiontests-2023.tar.gz 48620353 BLAKE2B 536550976dd761a59ff5684bc4928d534f7e45e69b0a86831e661887a3ad1ea72c1ddb72ab3a27237926083a711dd0f17ea60079fec201dd0992c1aee32ebd7e SHA512 f6b62fab39c77491698822b2f5b3f66c505e801266b31ea7ddc46378f69b524461187bba31b1b23d29f06bfc35630b0cae9a921aa92c0dfba46b2f1d904ce2b6
 EBUILD gromacs-2018.8-r1.ebuild 6675 BLAKE2B 46cd37f680b71e001586e88974f7a69bd8e41d07de2464ab0d8d80f7755a68da45b77ad28e01444cb48b0156e6f4332dc8fc67d21944c76768b9cbd18804cc46 SHA512 ece906b1c1d3b401ea85121b280f99fafe92f791918c47aab57f1094b53e95a5786812e414928ec765ef44fbecf6219e094dfb74d5a52f0b5fccbdf736a37968
 EBUILD gromacs-2019.6-r2.ebuild 7053 BLAKE2B b61fb0e08daec321b562e246275e1ff9768bf3e24428c127fbcc67ae36fbe6054b60f3cb0587fb711e9c050798086bcd7dece03a64f5bebd53db327db939acb5 SHA512 de3d96b9780667be98a519bb496eb72c2f02cb8aa9482d075d03fa1558ef2852c4daa6e8ef36879232604a7dc30722464e70886cb7673a371691bf93252655a2
 EBUILD gromacs-2020.7.ebuild 9847 BLAKE2B 5ac1c696c996e4d281d8e90b858982f9f6a475867ec94209b4a42a1018282752878f6d8a62827a06af247bd00b766be6bb52176ebbccb1c616190a38ed507627 SHA512 03044916378006f3d26e55d56b2fecf637680c80b3eef85f5427e3ac92406926ff9d22ad5b1a1450477e579cb41030d39c787566a2ccf9829c78cf795365f3b1
 EBUILD gromacs-2021.6.ebuild 10054 BLAKE2B 6a1a608a8996cff7b2d429cf4d7f8b2909271c6a83158671687619303904f5ead3cdce98d5ca968b05ac6fb8f16d764f08f111009a78d3132e88d64c65ca6c5a SHA512 43fe99bf2b2d901477675bb0fc30e244baeaf8b74b9736dc4e51d15c16544c4c8e801eaa2585335b32f92dd8a29d000aa1c3e511c4975570e573d28be401b324
-EBUILD gromacs-2021.7.ebuild 10057 BLAKE2B bed7c9d0cb7f7ac408070a4e115a172311a14031e221cde57a60a781419af95f6271aa523b1ade0fde41b49624926c52053c3a5b7fadb9f889d387c4fe93ed04 SHA512 b52de46729be145c4b762bd0b5b3bbcdeb63798f1658c32d8d2ae77f9b9fbe7843ad5b195e24ba3862057d11ecd3e464d93c2f28badc94e50e7e5458b76a5b3a
+EBUILD gromacs-2021.7.ebuild 10105 BLAKE2B a97146e3165e8ee528921d48375a7968ced93de69aad48a028227a8c418b03af044ec47c7674dd3e5adf89c85368cc53c18740ceccb9a4c59ec0631c23807bc7 SHA512 158aa5c265d3fd4a0b59a2c7abc467ef873599e12f07bf939eea56661b659b42d163e3c1363e772f86c112d41b1b8658df5499302f2584a5a69acde9cf57ee22
 EBUILD gromacs-2022.4.ebuild 9596 BLAKE2B b3e675f96621add5f81b07ca1ec6a6c4f4f3f61f69051ba6f6319bd5f5044bacb67554bd7e85877485163c4695cee7dc3246b024b904a0f97d2d858065a4a098 SHA512 f81d9732b050c4cd8d453031e3122e5c0beeacda352493e0acc0b8cade05b36fd4671f87a89036bcc7dddd098ebd825d3c17af3c06b391d7514d75520ca9368b
 EBUILD gromacs-2022.5.ebuild 9596 BLAKE2B b3e675f96621add5f81b07ca1ec6a6c4f4f3f61f69051ba6f6319bd5f5044bacb67554bd7e85877485163c4695cee7dc3246b024b904a0f97d2d858065a4a098 SHA512 f81d9732b050c4cd8d453031e3122e5c0beeacda352493e0acc0b8cade05b36fd4671f87a89036bcc7dddd098ebd825d3c17af3c06b391d7514d75520ca9368b
 EBUILD gromacs-2022.9999.ebuild 9596 BLAKE2B b3e675f96621add5f81b07ca1ec6a6c4f4f3f61f69051ba6f6319bd5f5044bacb67554bd7e85877485163c4695cee7dc3246b024b904a0f97d2d858065a4a098 SHA512 f81d9732b050c4cd8d453031e3122e5c0beeacda352493e0acc0b8cade05b36fd4671f87a89036bcc7dddd098ebd825d3c17af3c06b391d7514d75520ca9368b
+EBUILD gromacs-2023.1.ebuild 9544 BLAKE2B a03201533e4d713e62a7a93d70e2d621c9afb3af832b2aa711bb3840359f7f79b9529552c2cfa22a7ea1cd53d2d2524bd8fd382c932ceb233f7bbf769349de1c SHA512 1017e32229be36967c1e1a2c9c883d80f84bb8d2dc59ead2d34cf9299b981f27fe5d868df583c74af881c1916519af265cb6b126d01a5f337a926772a2e13dcb
 EBUILD gromacs-2023.9999.ebuild 9544 BLAKE2B a03201533e4d713e62a7a93d70e2d621c9afb3af832b2aa711bb3840359f7f79b9529552c2cfa22a7ea1cd53d2d2524bd8fd382c932ceb233f7bbf769349de1c SHA512 1017e32229be36967c1e1a2c9c883d80f84bb8d2dc59ead2d34cf9299b981f27fe5d868df583c74af881c1916519af265cb6b126d01a5f337a926772a2e13dcb
 EBUILD gromacs-2023.ebuild 9544 BLAKE2B a03201533e4d713e62a7a93d70e2d621c9afb3af832b2aa711bb3840359f7f79b9529552c2cfa22a7ea1cd53d2d2524bd8fd382c932ceb233f7bbf769349de1c SHA512 1017e32229be36967c1e1a2c9c883d80f84bb8d2dc59ead2d34cf9299b981f27fe5d868df583c74af881c1916519af265cb6b126d01a5f337a926772a2e13dcb
 EBUILD gromacs-9999.ebuild 9544 BLAKE2B a03201533e4d713e62a7a93d70e2d621c9afb3af832b2aa711bb3840359f7f79b9529552c2cfa22a7ea1cd53d2d2524bd8fd382c932ceb233f7bbf769349de1c SHA512 1017e32229be36967c1e1a2c9c883d80f84bb8d2dc59ead2d34cf9299b981f27fe5d868df583c74af881c1916519af265cb6b126d01a5f337a926772a2e13dcb
diff --git a/sci-chemistry/gromacs/files/gromacs-2021-cuda-detection.patch b/sci-chemistry/gromacs/files/gromacs-2021-cuda-detection.patch
new file mode 100644
index 000000000000..8b458e96cb53
--- /dev/null
+++ b/sci-chemistry/gromacs/files/gromacs-2021-cuda-detection.patch
@@ -0,0 +1,339 @@
+--- gromacs-2021.7/cmake/gmxManageNvccConfig.cmake	2023-01-31 12:45:45.000000000 +0100
++++ gromacs-2022.5/cmake/gmxManageNvccConfig.cmake	2023-02-03 12:53:34.000000000 +0100
+@@ -1,11 +1,9 @@
+ #
+ # This file is part of the GROMACS molecular simulation package.
+ #
+-# Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
+-# Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
+-# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
+-# and including many others, as listed in the AUTHORS file in the
+-# top-level source directory and at http://www.gromacs.org.
++# Copyright 2012- The GROMACS Authors
++# and the project initiators Erik Lindahl, Berk Hess and David van der Spoel.
++# Consult the AUTHORS/COPYING files and https://www.gromacs.org for details.
+ #
+ # GROMACS is free software; you can redistribute it and/or
+ # modify it under the terms of the GNU Lesser General Public License
+@@ -19,7 +17,7 @@
+ #
+ # You should have received a copy of the GNU Lesser General Public
+ # License along with GROMACS; if not, see
+-# http://www.gnu.org/licenses, or write to the Free Software Foundation,
++# https://www.gnu.org/licenses, or write to the Free Software Foundation,
+ # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ #
+ # If you want to redistribute modifications to GROMACS, please
+@@ -28,10 +26,10 @@
+ # consider code for inclusion in the official distribution, but
+ # derived work must not be called official GROMACS. Details are found
+ # in the README & COPYING files - if they are missing, get the
+-# official version at http://www.gromacs.org.
++# official version at https://www.gromacs.org.
+ #
+ # To help us fund GROMACS development, we humbly ask that you cite
+-# the research papers on the package. Check out http://www.gromacs.org.
++# the research papers on the package. Check out https://www.gromacs.org.
+ 
+ # Manage CUDA nvcc compilation configuration, try to be smart to ease the users'
+ # pain as much as possible:
+@@ -51,7 +49,7 @@
+ # glibc source shows that _FORCE_INLINES is only used in this string.h
+ # feature and performance of memcpy variants is unimportant for CUDA
+ # code in GROMACS. So this workaround is good enough to keep problems
+-# away from users installing GROMACS. See Issue #1942.
++# away from users installing GROMACS. See Issue #1982.
+ function(work_around_glibc_2_23)
+     try_compile(IS_GLIBC_2_23_OR_HIGHER ${CMAKE_BINARY_DIR} ${CMAKE_SOURCE_DIR}/cmake/TestGlibcVersion.cpp)
+     if(IS_GLIBC_2_23_OR_HIGHER)
+@@ -83,67 +81,158 @@
+     mark_as_advanced(CUDA_HOST_COMPILER CUDA_HOST_COMPILER_OPTIONS)
+ endif()
+ 
++# We would like to be helpful and reject the host compiler with a
++# clear error message at configure time, rather than let nvcc
++# later reject the host compiler as not supported when the first
++# CUDA source file is built. We've implemented that for current
++# nvcc running on Unix-like systems, but e.g. changes to nvcc
++# will further affect the limited portability of this checking
++# code. Set the CMake variable GMX_NVCC_WORKS on if you want to
++# bypass this check.
++if((_cuda_nvcc_executable_or_flags_changed OR CUDA_HOST_COMPILER_CHANGED OR NOT GMX_NVCC_WORKS) AND NOT WIN32)
++    message(STATUS "Check for working NVCC/C++ compiler combination with nvcc '${CUDA_NVCC_EXECUTABLE}'")
++    execute_process(COMMAND ${CUDA_NVCC_EXECUTABLE} --compiler-bindir=${CUDA_HOST_COMPILER} -c ${CUDA_NVCC_FLAGS} ${CUDA_NVCC_FLAGS_${_build_type}} ${CMAKE_SOURCE_DIR}/cmake/TestCUDA.cu
++        RESULT_VARIABLE _cuda_test_res
++        OUTPUT_VARIABLE _cuda_test_out
++        ERROR_VARIABLE _cuda_test_err
++        OUTPUT_STRIP_TRAILING_WHITESPACE)
++
++    if(${_cuda_test_res})
++        message(STATUS "Check for working NVCC/C compiler combination - broken")
++        message(STATUS "${CUDA_NVCC_EXECUTABLE} standard output: '${_cuda_test_out}'")
++        message(STATUS "${CUDA_NVCC_EXECUTABLE} standard error: '${_cuda_test_err}'")
++        if(${_cuda_test_err} MATCHES "nsupported")
++            message(FATAL_ERROR "NVCC/C++ compiler combination does not seem to be supported. CUDA frequently does not support the latest versions of the host compiler, so you might want to try an earlier C++ compiler version and make sure your CUDA compiler and driver are as recent as possible. Set the GMX_NVCC_WORKS CMake cache variable to bypass this check if you know what you are doing.")
++        elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 11.2 AND CUDA_VERSION VERSION_GREATER 11.4 AND CUDA_VERSION VERSION_LESS 11.7) # Issue #4574, #4641
++            # Above, we should be checking for VERSION_LESS 11.6.2, but CUDA_VERSION is only "major.minor"
++            message(FATAL_ERROR "CUDA versions 11.5-11.6.1 are known to be incompatible with some GCC 11.x. Use a different GCC or update your CUDA installation to at least CUDA 11.6.2")
++        else()
++            message(FATAL_ERROR "CUDA compiler does not seem to be functional or is not compatible with the host compiler. Set the GMX_NVCC_WORKS CMake cache variable to bypass this check if you know what you are doing.")
++        endif()
++    elseif(NOT GMX_CUDA_TEST_COMPILER_QUIETLY)
++        message(STATUS "Check for working NVCC/C++ compiler combination - works")
++        set(GMX_NVCC_WORKS TRUE CACHE INTERNAL "Nvcc can compile a trivial test program")
++    endif()
++endif() # GMX_CHECK_NVCC
++
++# Tests a single flag to use with nvcc.
++#
++# If the flags are accepted, they are appended to the variable named
++# in the first argument. The cache variable named in the second
++# argument is used to avoid rerunning the check in future invocations
++# of cmake. The list of flags to check follows these two required
++# arguments.
++#
++# Note that a space-separated string of flags, or a flag-value pair
++# separated by spaces will not work. Use the single-argument forms
++# accepted by nvcc, like "--arg=value".
++#
++# As this code is not yet tested on Windows, it always accepts the
++# flags in that case.
++function(gmx_add_nvcc_flag_if_supported _output_variable_name_to_append_to _flags_cache_variable_name)
++    # If the check has already been run, do not re-run it
++    if (NOT ${_flags_cache_variable_name} AND NOT WIN32)
++        message(STATUS "Checking if nvcc accepts flags ${ARGN}")
++        # See detailed comment about gcc 7 below
++        if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8)
++            set(_cache_variable_value TRUE)
++            message(STATUS "Checking if nvcc accepts flags ${ARGN} - Assuming success when using gcc 7")
++        else()
++            if(NOT(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11))
++                set(CCBIN "--compiler-bindir=${CUDA_HOST_COMPILER}")
++            endif()
++            execute_process(
++                COMMAND ${CUDA_NVCC_EXECUTABLE} ${ARGN} ${CCBIN} "${CMAKE_SOURCE_DIR}/cmake/TestCUDA.cu"
++                RESULT_VARIABLE _cuda_success
++                OUTPUT_QUIET
++                ERROR_QUIET
++                )
++            # Convert the success value to a boolean and report status
++            if (_cuda_success EQUAL 0)
++                set(_cache_variable_value TRUE)
++                message(STATUS "Checking if nvcc accepts flags ${ARGN} - Success")
++            else()
++                set(_cache_variable_value FALSE)
++                message(STATUS "Checking if nvcc accepts flags ${ARGN} - Failed")
++            endif()
++        endif()
++        set(${_flags_cache_variable_name} ${_cache_variable_value} CACHE BOOL "Whether NVCC supports flag(s) ${ARGN}")
++    endif()
++    # Append the flags to the output variable if they have been tested to work
++    if (${_flags_cache_variable_name} OR WIN32)
++        list(APPEND ${_output_variable_name_to_append_to} ${ARGN})
++        set(${_output_variable_name_to_append_to} ${${_output_variable_name_to_append_to}} PARENT_SCOPE)
++    endif()
++endfunction()
++
++# Versions of gcc 7 have differing behavior when executing
++#
++#     nvcc $args --compiler-bindir=gcc TestCUDA.cu
++#
++# and would need e.g. adding -lstdc++ to the command line so that
++# linking of a C++/CUDA object by the C-compiler flavor of gcc works.
++# This means we can't reliably test compiler flags in this case
++# without risking creating other problems. Instead we assume (above)
++# that all compiler flags will work, and issue this warning.
++# We also want to skip this warning during GROMACS CI testing.
++if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8
++        AND NOT DEFINED ENV{GITLAB_CI})
++    message(WARNING "You are using gcc version 7 with the CUDA compiler nvcc. GROMACS cannot reliably test compiler arguments for this combination, so if you later experience errors in building GROMACS, please use a more recent version of gcc.")
++endif()
++
+ # If any of these manual override variables for target CUDA GPU architectures
+ # or virtual architecture is set, parse the values and assemble the nvcc
+ # command line for these. Otherwise use our defaults.
+ # Note that the manual override variables require a semicolon separating
+ # architecture codes.
++set(GMX_CUDA_NVCC_GENCODE_FLAGS)
+ if (GMX_CUDA_TARGET_SM OR GMX_CUDA_TARGET_COMPUTE)
+-    set(GMX_CUDA_NVCC_GENCODE_FLAGS)
+     set(_target_sm_list ${GMX_CUDA_TARGET_SM})
+     foreach(_target ${_target_sm_list})
+-        list(APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_${_target},code=sm_${_target}")
++        gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_${_target} "--generate-code=arch=compute_${_target},code=sm_${_target}")
++        if (NOT NVCC_HAS_GENCODE_COMPUTE_AND_SM_${_target} AND NOT WIN32)
++            message(FATAL_ERROR "Your choice of ${_target} in GMX_CUDA_TARGET_SM was not accepted by nvcc, please choose a target that it accepts")
++        endif()
+     endforeach()
+     set(_target_compute_list ${GMX_CUDA_TARGET_COMPUTE})
+     foreach(_target ${_target_compute_list})
+-        list(APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_${_target},code=compute_${_target}")
++        gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_${_target} --generate-code=arch=compute_${_target},code=compute_${_target})
++        if (NOT NVCC_HAS_GENCODE_COMPUTE_${_target} AND NOT WIN32)
++            message(FATAL_ERROR "Your choice of ${_target} in GMX_CUDA_TARGET_COMPUTE was not accepted by nvcc, please choose a target that it accepts")
++        endif()
+     endforeach()
+ else()
+     # Set the CUDA GPU architectures to compile for:
+-    # - with CUDA >=9.0 CC 7.0 is supported and CC 2.0 is no longer supported
+-    #     => compile sm_30, sm_35, sm_37, sm_50, sm_52, sm_60, sm_61, sm_70 SASS, and compute_35, compute_70 PTX
+-    # - with CUDA >=10.0 CC 7.5 is supported
+-    #     => compile sm_30, sm_35, sm_37, sm_50, sm_52, sm_60, sm_61, sm_70, sm_75 SASS, and compute_35, compute_75 PTX
+     # - with CUDA >=11.0 CC 8.0 is supported
+     #     => compile sm_35, sm_37, sm_50, sm_52, sm_60, sm_61, sm_70, sm_75, sm_80 SASS, and compute_35, compute_80 PTX
+ 
+     # First add flags that trigger SASS (binary) code generation for physical arch
+-    if(CUDA_VERSION VERSION_LESS "11.0")
+-        list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_30,code=sm_30")
+-    endif()
+-    list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_35,code=sm_35")
+-    list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_37,code=sm_37")
+-    list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_50,code=sm_50")
+-    list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_52,code=sm_52")
+-    list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_60,code=sm_60")
+-    list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_61,code=sm_61")
+-    list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_70,code=sm_70")
+-    if(NOT CUDA_VERSION VERSION_LESS "10.0")
+-        list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_75,code=sm_75")
+-    endif()
+-    if(NOT CUDA_VERSION VERSION_LESS "11.0")
+-        list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_80,code=sm_80")
+-        # Requesting sm or compute 35, 37, or 50 triggers deprecation messages with
+-        # nvcc 11.0, which we need to suppress for use in CI
+-        list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-Wno-deprecated-gpu-targets")
+-    endif()
+-    if(NOT CUDA_VERSION VERSION_LESS "11.1")
+-        list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_86,code=sm_86")
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_35 --generate-code=arch=compute_35,code=sm_35)
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_37 --generate-code=arch=compute_37,code=sm_37)
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_50 --generate-code=arch=compute_50,code=sm_50)
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_52 --generate-code=arch=compute_52,code=sm_52)
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_60 --generate-code=arch=compute_60,code=sm_60)
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_61 --generate-code=arch=compute_61,code=sm_61)
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_70 --generate-code=arch=compute_70,code=sm_70)
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_75 --generate-code=arch=compute_75,code=sm_75)
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_80 --generate-code=arch=compute_80,code=sm_80)
++    # Don't attempt to add newest architectures with old GNU compiler, to avoid issues in CI
++    # related to being unable to test which flags are supported
++    if (NOT (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
++        gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_86 --generate-code=arch=compute_86,code=sm_86)
++        gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_89 --generate-code=arch=compute_89,code=sm_89)
++        gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_AND_SM_90 --generate-code=arch=compute_90,code=sm_90)
+     endif()
++    # Requesting sm or compute 35, 37, or 50 triggers deprecation messages with
++    # nvcc 11.0, which we need to suppress for use in CI
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_WARNING_NO_DEPRECATED_GPU_TARGETS -Wno-deprecated-gpu-targets)
+ 
+     # Next add flags that trigger PTX code generation for the
+     # newest supported virtual arch that's useful to JIT to future architectures
+     # as well as an older one suitable for JIT-ing to any rare intermediate arch
+     # (like that of Jetson / Drive PX devices)
+-    list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_35,code=compute_35")
+-    if(CUDA_VERSION VERSION_LESS "11.0")
+-        list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_32,code=compute_32")
+-    else()
+-        list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_53,code=compute_53")
+-    endif()
+-    if(NOT CUDA_VERSION VERSION_LESS "11.0")
+-        list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_80,code=compute_80")
+-    endif()
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_53 --generate-code=arch=compute_53,code=sm_53)
++    gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_GENCODE_FLAGS NVCC_HAS_GENCODE_COMPUTE_80 --generate-code=arch=compute_80,code=sm_80)
+ endif()
+ 
+ if (GMX_CUDA_TARGET_SM)
+@@ -158,27 +247,20 @@
+ # FindCUDA.cmake is unaware of the mechanism used by cmake to embed
+ # the compiler flag for the required C++ standard in the generated
+ # build files, so we have to pass it ourselves
+-if (CUDA_VERSION VERSION_LESS 11.0)
+-    # CUDA doesn't formally support C++17 until version 11.0, so for
+-    # now host-side code that compiles with CUDA is restricted to
+-    # C++14. This needs to be expressed formally for older CUDA
+-    # version.
++
++# gcc-7 pre-dated C++17, so uses the -std=c++1z compiler flag for it,
++# which modern nvcc does not recognize. So we work around that by
++# compiling in C++14 mode. Clang doesn't have this problem because nvcc
++# only supports version of clang that already understood -std=c++17
++if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8)
+     list(APPEND GMX_CUDA_NVCC_FLAGS "${CMAKE_CXX14_STANDARD_COMPILE_OPTION}")
+ else()
+-    # gcc-7 pre-dated C++17, so uses the -std=c++1z compiler flag for it,
+-    # which modern nvcc does not recognize. So we work around that by
+-    # compiling in C++14 mode. Clang doesn't have this problem because nvcc
+-    # only supports version of clang that already understood -std=c++17
+-    if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8)
+-        list(APPEND GMX_CUDA_NVCC_FLAGS "${CMAKE_CXX14_STANDARD_COMPILE_OPTION}")
+-    else()
+-        list(APPEND GMX_CUDA_NVCC_FLAGS "${CMAKE_CXX17_STANDARD_COMPILE_OPTION}")
+-    endif()
++    list(APPEND GMX_CUDA_NVCC_FLAGS "${CMAKE_CXX17_STANDARD_COMPILE_OPTION}")
+ endif()
+ 
+ # assemble the CUDA flags
+ list(APPEND GMX_CUDA_NVCC_FLAGS "${GMX_CUDA_NVCC_GENCODE_FLAGS}")
+-list(APPEND GMX_CUDA_NVCC_FLAGS "-use_fast_math")
++gmx_add_nvcc_flag_if_supported(GMX_CUDA_NVCC_FLAGS NVCC_HAS_USE_FAST_MATH -use_fast_math)
+ 
+ # assemble the CUDA host compiler flags
+ list(APPEND GMX_CUDA_NVCC_FLAGS "${CUDA_HOST_COMPILER_OPTIONS}")
+@@ -187,12 +269,18 @@
+     # CUDA header cuda_runtime_api.h in at least CUDA 10.1 uses 0
+     # where nullptr would be preferable. GROMACS can't fix these, so
+     # must suppress them.
+-    GMX_TEST_CXXFLAG(CXXFLAGS_NO_ZERO_AS_NULL_POINTER_CONSTANT "-Wno-zero-as-null-pointer-constant" NVCC_CLANG_SUPPRESSIONS_CXXFLAGS)
++    GMX_TEST_CXXFLAG(HAS_WARNING_NO_ZERO_AS_NULL_POINTER_CONSTANT "-Wno-zero-as-null-pointer-constant" NVCC_CLANG_SUPPRESSIONS_CXXFLAGS)
+ 
+     # CUDA header crt/math_functions.h in at least CUDA 10.x and 11.1
+     # used throw() specifications that are deprecated in more recent
+     # C++ versions. GROMACS can't fix these, so must suppress them.
+-    GMX_TEST_CXXFLAG(CXXFLAGS_NO_DEPRECATED_DYNAMIC_EXCEPTION_SPEC "-Wno-deprecated-dynamic-exception-spec" NVCC_CLANG_SUPPRESSIONS_CXXFLAGS)
++    GMX_TEST_CXXFLAG(HAS_WARNING_NO_DEPRECATED_DYNAMIC_EXCEPTION_SPEC "-Wno-deprecated-dynamic-exception-spec" NVCC_CLANG_SUPPRESSIONS_CXXFLAGS)
++
++    # CUDA headers cuda_runtime.h and channel_descriptor.h in at least
++    # CUDA 11.0 uses many C-style casts, which are ncessary for this
++    # header to work for C. GROMACS can't fix these, so must suppress
++    # the warnings they generate
++    GMX_TEST_CXXFLAG(HAS_WARNING_NO_OLD_STYLE_CAST "-Wno-old-style-cast" NVCC_CLANG_SUPPRESSIONS_CXXFLAGS)
+ 
+     # Add these flags to those used for the host compiler. The
+     # "-Xcompiler" prefix directs nvcc to only use them for host
+@@ -205,37 +293,6 @@
+ string(TOUPPER "${CMAKE_BUILD_TYPE}" _build_type)
+ gmx_check_if_changed(_cuda_nvcc_executable_or_flags_changed CUDA_NVCC_EXECUTABLE CUDA_NVCC_FLAGS CUDA_NVCC_FLAGS_${_build_type})
+ 
+-# We would like to be helpful and reject the host compiler with a
+-# clear error message at configure time, rather than let nvcc
+-# later reject the host compiler as not supported when the first
+-# CUDA source file is built. We've implemented that for current
+-# nvcc running on Unix-like systems, but e.g. changes to nvcc
+-# will further affect the limited portability of this checking
+-# code. Set the CMake variable GMX_NVCC_WORKS on if you want to
+-# bypass this check.
+-if((_cuda_nvcc_executable_or_flags_changed OR CUDA_HOST_COMPILER_CHANGED OR NOT GMX_NVCC_WORKS) AND NOT WIN32)
+-    message(STATUS "Check for working NVCC/C++ compiler combination with nvcc '${CUDA_NVCC_EXECUTABLE}'")
+-    execute_process(COMMAND ${CUDA_NVCC_EXECUTABLE} -ccbin ${CUDA_HOST_COMPILER} -c ${CUDA_NVCC_FLAGS} ${CUDA_NVCC_FLAGS_${_build_type}} ${CMAKE_SOURCE_DIR}/cmake/TestCUDA.cu
+-        RESULT_VARIABLE _cuda_test_res
+-        OUTPUT_VARIABLE _cuda_test_out
+-        ERROR_VARIABLE _cuda_test_err
+-        OUTPUT_STRIP_TRAILING_WHITESPACE)
+-
+-    if(${_cuda_test_res})
+-        message(STATUS "Check for working NVCC/C compiler combination - broken")
+-        message(STATUS "${CUDA_NVCC_EXECUTABLE} standard output: '${_cuda_test_out}'")
+-        message(STATUS "${CUDA_NVCC_EXECUTABLE} standard error: '${_cuda_test_err}'")
+-        if(${_cuda_test_err} MATCHES "nsupported")
+-            message(FATAL_ERROR "NVCC/C++ compiler combination does not seem to be supported. CUDA frequently does not support the latest versions of the host compiler, so you might want to try an earlier C++ compiler version and make sure your CUDA compiler and driver are as recent as possible.")
+-        else()
+-            message(FATAL_ERROR "CUDA compiler does not seem to be functional.")
+-        endif()
+-    elseif(NOT GMX_CUDA_TEST_COMPILER_QUIETLY)
+-        message(STATUS "Check for working NVCC/C++ compiler combination - works")
+-        set(GMX_NVCC_WORKS TRUE CACHE INTERNAL "Nvcc can compile a trivial test program")
+-    endif()
+-endif() # GMX_CHECK_NVCC
+-
+ 
+ # The flags are set as local variables which shadow the cache variables. The cache variables
+ # (can be set by the user) are appended. This is done in a macro to set the flags when all
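Note on the patch above: instead of hard-coding -gencode flags per CUDA version, the backported gmx_add_nvcc_flag_if_supported helper probes nvcc with each candidate flag and caches the result. The shell sketch below mirrors that probe outside CMake; the scratch file paths and the sm_86 target are illustrative assumptions, not part of this commit.

# Best-effort stand-alone equivalent of what gmx_add_nvcc_flag_if_supported does:
# compile a trivial CUDA source with one candidate flag and keep the flag only
# if nvcc exits with status 0. Assumes nvcc is on PATH; file names are placeholders.
cat > /tmp/TestCUDA.cu <<'EOF'
__global__ void kernel() {}
int main() { return 0; }
EOF

flag="--generate-code=arch=compute_86,code=sm_86"
if nvcc "${flag}" -c /tmp/TestCUDA.cu -o /tmp/TestCUDA.o 2>/dev/null; then
	echo "nvcc accepts ${flag}"   # CMake caches TRUE and appends the flag to GMX_CUDA_NVCC_GENCODE_FLAGS
else
	echo "nvcc rejects ${flag}"   # CMake caches FALSE and the flag is skipped
fi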
diff --git a/sci-chemistry/gromacs/gromacs-2021.7.ebuild b/sci-chemistry/gromacs/gromacs-2021.7.ebuild
index 8cb244317617..1a6dbcab47f4 100644
--- a/sci-chemistry/gromacs/gromacs-2021.7.ebuild
+++ b/sci-chemistry/gromacs/gromacs-2021.7.ebuild
@@ -85,7 +85,10 @@ DOCS=( AUTHORS README )
 
 RESTRICT="!test? ( test )"
 
-PATCHES=( "${FILESDIR}/${PN}-2021-musl-stdint.patch" )
+PATCHES=(
+	"${FILESDIR}/${PN}-2021-musl-stdint.patch"
+	"${FILESDIR}/${PN}-2021-cuda-detection.patch"
+)
 
 if [[ ${PV} != *9999 ]]; then
 	S="${WORKDIR}/${PN}-${PV/_/-}"
diff --git a/sci-chemistry/gromacs/gromacs-2023.1.ebuild b/sci-chemistry/gromacs/gromacs-2023.1.ebuild
new file mode 100644
index 000000000000..98c909cdab48
--- /dev/null
+++ b/sci-chemistry/gromacs/gromacs-2023.1.ebuild
@@ -0,0 +1,331 @@
+# Copyright 1999-2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+CMAKE_MAKEFILE_GENERATOR="ninja"
+
+PYTHON_COMPAT=( python3_{9..11} )
+
+DISTUTILS_OPTIONAL=1
+DISTUTILS_USE_PEP517=no
+DISTUTILS_SINGLE_IMPL=1
+
+inherit bash-completion-r1 cmake cuda distutils-r1 flag-o-matic readme.gentoo-r1 toolchain-funcs xdg-utils
+
+if [[ ${PV} = *9999* ]]; then
+	EGIT_REPO_URI="
+		https://gitlab.com/gromacs/gromacs.git
+		https://github.com/gromacs/gromacs.git
+		git://git.gromacs.org/gromacs.git"
+	[[ ${PV} = 9999 ]] && EGIT_BRANCH="master" || EGIT_BRANCH="release-${PV:0:4}"
+	inherit git-r3
+else
+	SRC_URI="
+		https://ftp.gromacs.org/gromacs/${PN}-${PV/_/-}.tar.gz
+		doc? ( https://ftp.gromacs.org/manual/manual-${PV/_/-}.pdf )
+		test? ( https://ftp.gromacs.org/regressiontests/regressiontests-${PV/_/-}.tar.gz )"
+	# since 2022 arm support was dropped (but not arm64)
+	KEYWORDS="~amd64 -arm ~arm64 ~x86 ~amd64-linux ~x86-linux ~x64-macos"
+fi
+
+ACCE_IUSE="cpu_flags_x86_sse2 cpu_flags_x86_sse4_1 cpu_flags_x86_fma4 cpu_flags_x86_avx cpu_flags_x86_avx2 cpu_flags_x86_avx512f cpu_flags_arm_neon"
+
+DESCRIPTION="The ultimate molecular dynamics simulation package"
+HOMEPAGE="https://www.gromacs.org/"
+
+# see COPYING for details
+# https://repo.or.cz/w/gromacs.git/blob/HEAD:/COPYING
+# base, vmd plugins, fftpack from numpy, blas/lapck from netlib, memtestG80 library, mpi_thread lib
+LICENSE="LGPL-2.1 UoI-NCSA !mkl? ( !fftw? ( BSD ) !blas? ( BSD ) !lapack? ( BSD ) ) cuda? ( LGPL-3 ) threads? ( BSD )"
+SLOT="0/${PV}"
+IUSE="blas clang clang-cuda cuda +custom-cflags +doc build-manual double-precision +fftw +gmxapi +gmxapi-legacy +hwloc lapack mkl mpi +offensive opencl openmp +python +single-precision test +threads +tng ${ACCE_IUSE}"
+
+CDEPEND="
+	blas? ( virtual/blas )
+	cuda? ( >=dev-util/nvidia-cuda-toolkit-11[profiler] )
+	opencl? ( virtual/opencl )
+	fftw? ( sci-libs/fftw:3.0= )
+	hwloc? ( sys-apps/hwloc:= )
+	lapack? ( virtual/lapack )
+	mkl? ( sci-libs/mkl )
+	mpi? ( virtual/mpi[cxx] )
+	sci-libs/lmfit:=
+	>=dev-cpp/muParser-2.3:=
+	${PYTHON_DEPS}
+	"
+BDEPEND="${CDEPEND}
+	virtual/pkgconfig
+	clang? ( >=sys-devel/clang-6:* )
+	build-manual? (
+		app-doc/doxygen
+		$(python_gen_cond_dep '
+			dev-python/sphinx[${PYTHON_USEDEP}]
+		')
+		media-gfx/mscgen
+		media-gfx/graphviz
+		dev-texlive/texlive-latex
+		dev-texlive/texlive-latexextra
+		media-gfx/imagemagick
+	)"
+RDEPEND="${CDEPEND}"
+
+REQUIRED_USE="
+	|| ( single-precision double-precision )
+	doc? ( !build-manual )
+	cuda? ( single-precision )
+	opencl? ( single-precision )
+	cuda? ( !opencl )
+	clang-cuda? ( clang cuda )
+	mkl? ( !blas !fftw !lapack )
+	${PYTHON_REQUIRED_USE}"
+
+DOCS=( AUTHORS README )
+
+RESTRICT="!test? ( test )"
+
+if [[ ${PV} != *9999 ]]; then
+	S="${WORKDIR}/${PN}-${PV/_/-}"
+fi
+
+pkg_pretend() {
+	[[ ${MERGE_TYPE} != binary ]] && use openmp && tc-check-openmp
+}
+
+pkg_setup() {
+	[[ ${MERGE_TYPE} != binary ]] && use openmp && tc-check-openmp
+	python-single-r1_pkg_setup
+}
+
+src_unpack() {
+	if [[ ${PV} != *9999 ]]; then
+		default
+	else
+		git-r3_src_unpack
+		if use test; then
+			EGIT_REPO_URI="git://git.gromacs.org/regressiontests.git" \
+				EGIT_BRANCH="${EGIT_BRANCH}" \
+				EGIT_CHECKOUT_DIR="${WORKDIR}/regressiontests"\
+				git-r3_src_unpack
+		fi
+	fi
+}
+
+src_prepare() {
+	#notes/todos
+	# -on apple: there is framework support
+
+	xdg_environment_reset #591952
+
+	# we can use clang as default
+	if use clang && ! tc-is-clang ; then
+		export CC=${CHOST}-clang
+		export CXX=${CHOST}-clang++
+	else
+		tc-export CXX CC
+	fi
+	# clang-cuda need to filter mfpmath
+	if use clang-cuda ; then
+		filter-mfpmath sse
+		filter-mfpmath i386
+	fi
+
+	cmake_src_prepare
+
+	use cuda && cuda_src_prepare
+
+	GMX_DIRS=""
+	use single-precision && GMX_DIRS+=" float"
+	use double-precision && GMX_DIRS+=" double"
+
+	if use test; then
+		for x in ${GMX_DIRS}; do
+			mkdir -p "${WORKDIR}/${P}_${x}" || die
+			cp -al "${WORKDIR}/regressiontests"* "${WORKDIR}/${P}_${x}/tests" || die
+		done
+	fi
+
+	DOC_CONTENTS="Gromacs can use sci-chemistry/vmd to read additional file formats"
+	if use build-manual; then
+		# try to create policy for imagemagik
+		mkdir -p ${HOME}/.config/ImageMagick
+		cat >> ${HOME}/.config/ImageMagick/policy.xml <<- EOF
+			<?xml version="1.0" encoding="UTF-8"?>
+			<!DOCTYPE policymap [
+			<!ELEMENT policymap (policy)+>
+			!ATTLIST policymap xmlns CDATA #FIXED ''>
+			<!ELEMENT policy EMPTY>
+			<!ATTLIST policy xmlns CDATA #FIXED '' domain NMTOKEN #REQUIRED
+				name NMTOKEN #IMPLIED pattern CDATA #IMPLIED rights NMTOKEN #IMPLIED
+				stealth NMTOKEN #IMPLIED value CDATA #IMPLIED>
+			]>
+			<policymap>
+				<policy domain="coder" rights="read | write" pattern="PS" />
+				<policy domain="coder" rights="read | write" pattern="PS2" />
+				<policy domain="coder" rights="read | write" pattern="PS3" />
+				<policy domain="coder" rights="read | write" pattern="EPS" />
+				<policy domain="coder" rights="read | write" pattern="PDF" />
+				<policy domain="coder" rights="read | write" pattern="XPS" />
+			</policymap>
+		EOF
+	fi
+}
+
+src_configure() {
+	local mycmakeargs_pre=( ) extra fft_opts=( )
+	local acce="AUTO"
+
+	if use custom-cflags; then
+		#go from slowest to fastest acceleration
+		acce="None"
+		if (use amd64 || use x86); then
+			use cpu_flags_x86_sse2 && acce="SSE2"
+			use cpu_flags_x86_sse4_1 && acce="SSE4.1"
+			use cpu_flags_x86_fma4 && acce="AVX_128_FMA"
+			use cpu_flags_x86_avx && acce="AVX_256"
+			use cpu_flags_x86_avx2 && acce="AVX2_256"
+			use cpu_flags_x86_avx512f && acce="AVX_512"
+		elif (use arm); then
+			use cpu_flags_arm_neon && acce="ARM_NEON"
+		elif (use arm64); then
+			use cpu_flags_arm_neon && acce="ARM_NEON_ASIMD"
+		fi
+	else
+		strip-flags
+	fi
+
+	#to create man pages, build tree binaries are executed (bug #398437)
+	[[ ${CHOST} = *-darwin* ]] && \
+		extra+=" -DCMAKE_BUILD_WITH_INSTALL_RPATH=OFF"
+
+	if use fftw; then
+		fft_opts=( -DGMX_FFT_LIBRARY=fftw3 )
+	elif use mkl; then
+		local bits=$(get_libdir)
+		fft_opts=( -DGMX_FFT_LIBRARY=mkl
+			-DMKL_INCLUDE_DIR="$(echo /opt/intel/*/mkl/include)"
+			-DMKL_LIBRARIES="$(echo /opt/intel/*/mkl/lib/*${bits/lib}/libmkl_rt.so)"
+		)
+	else
+		fft_opts=( -DGMX_FFT_LIBRARY=fftpack )
+	fi
+
+	mycmakeargs_pre+=(
+		"${fft_opts[@]}"
+		"${lmfit_opts[@]}"
+		-DGMX_USE_LMFIT=EXTERNAL
+		-DGMX_USE_MUPARSER=EXTERNAL
+		-DGMX_EXTERNAL_BLAS=$(usex blas)
+		-DGMX_EXTERNAL_LAPACK=$(usex lapack)
+		-DGMX_OPENMP=$(usex openmp)
+		-DGMX_COOL_QUOTES=$(usex offensive)
+		-DGMX_USE_TNG=$(usex tng)
+		-DGMX_BUILD_MANUAL=$(usex build-manual)
+		-DGMX_HWLOC=$(usex hwloc)
+		-DGMX_DEFAULT_SUFFIX=off
+		-DGMX_SIMD="$acce"
+		-DGMX_VMD_PLUGIN_PATH="${EPREFIX}/usr/$(get_libdir)/vmd/plugins/*/molfile/"
+		-DBUILD_TESTING=$(usex test)
+		-DGMX_BUILD_UNITTESTS=$(usex test)
+		-DPYTHON_EXECUTABLE="${EPREFIX}/usr/bin/${EPYTHON}"
+		${extra}
+	)
+
+	for x in ${GMX_DIRS}; do
+		einfo "Configuring for ${x} precision"
+		local suffix=""
+		#if we build single and double - double is suffixed
+		use double-precision && use single-precision && \
+			[[ ${x} = "double" ]] && suffix="_d"
+		local p
+		[[ ${x} = "double" ]] && p="-DGMX_DOUBLE=ON" || p="-DGMX_DOUBLE=OFF"
+		local gpu=( "-DGMX_GPU=OFF" )
+		[[ ${x} = "float" ]] && use cuda && gpu=( "-DGMX_GPU=CUDA" )
+		[[ ${x} = "float" ]] && use clang-cuda && gpu=( "-DGMX_GPU=CUDA" "-DGMX_CLANG_CUDA=ON" )
+		use opencl && gpu=( "-DGMX_GPU=OPENCL" )
+		local mycmakeargs=(
+			${mycmakeargs_pre[@]} ${p}
+			-DGMX_MPI=$(usex mpi)
+			-DGMX_THREAD_MPI=$(usex threads)
+			-DGMXAPI=$(usex gmxapi)
+			-DGMX_INSTALL_LEGACY_API=$(usex gmxapi-legacy)
+			"${gpu[@]}"
+			"$(use test && echo -DREGRESSIONTEST_PATH="${WORKDIR}/${P}_${x}/tests")"
+			-DGMX_BINARY_SUFFIX="${suffix}"
+			-DGMX_LIBS_SUFFIX="${suffix}"
+			-DGMX_PYTHON_PACKAGE=$(usex python)
+		)
+		BUILD_DIR="${WORKDIR}/${P}_${x}" cmake_src_configure
+		[[ ${CHOST} != *-darwin* ]] || \
+			sed -i '/SET(CMAKE_INSTALL_NAME_DIR/s/^/#/' "${WORKDIR}/${P}_${x}/gentoo_rules.cmake" || die
+	done
+}
+
+src_compile() {
+	for x in ${GMX_DIRS}; do
+		einfo "Compiling for ${x} precision"
+		BUILD_DIR="${WORKDIR}/${P}_${x}"\
+			cmake_src_compile
+		if use python; then
+			BUILD_DIR="${WORKDIR}/${P}_${x}"\
+				cmake_src_compile python_packaging/all
+			BUILD_DIR="${WORKDIR}/${P}" \
+				distutils-r1_src_compile
+		fi
+		# not 100% necessary for rel ebuilds as available from website
+		if use build-manual; then
+			BUILD_DIR="${WORKDIR}/${P}_${x}"\
+				cmake_src_compile manual
+		fi
+	done
+}
+
+src_test() {
+	for x in ${GMX_DIRS}; do
+		BUILD_DIR="${WORKDIR}/${P}_${x}"\
+			cmake_src_compile check
+	done
+}
+
+src_install() {
+	for x in ${GMX_DIRS}; do
+		BUILD_DIR="${WORKDIR}/${P}_${x}" \
+			cmake_src_install
+		if use python; then
+			BUILD_DIR="${WORKDIR}/${P}_${x}" \
+				cmake_src_install python_packaging/install
+		fi
+		if use build-manual; then
+			newdoc "${WORKDIR}/${P}_${x}"/docs/manual/gromacs.pdf "${PN}-manual-${PV}.pdf"
+		fi
+
+		if use doc; then
+			if [[ ${PV} != *9999* ]]; then
+				newdoc "${DISTDIR}/manual-${PV/_/-}.pdf" "${PN}-manual-${PV}.pdf"
+			fi
+		fi
+	done
+
+	if use tng; then
+		insinto /usr/include/tng
+		doins src/external/tng_io/include/tng/*h
+	fi
+	# drop unneeded stuff
+	rm "${ED}"/usr/bin/GMXRC* || die
+	for x in "${ED}"/usr/bin/gmx-completion-*.bash ; do
+		local n=${x##*/gmx-completion-}
+		n="${n%.bash}"
+		cat "${ED}"/usr/bin/gmx-completion.bash "$x" > "${T}/${n}" || die
+		newbashcomp "${T}"/"${n}" "${n}"
+	done
+	rm "${ED}"/usr/bin/gmx-completion*.bash || die
+	readme.gentoo_create_doc
+}
+
+pkg_postinst() {
+	einfo
+	einfo "Please read and cite gromacs related papers from list:"
+	einfo "https://www.gromacs.org/Gromacs_papers"
+	einfo
+	readme.gentoo_print_elog
+}
--
cgit v1.2.3
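For reference, one way a user might exercise the new gromacs-2023.1 ebuild with the CUDA path touched by this commit is sketched below. The USE selection and the package.use file name are illustrative assumptions only (note that REQUIRED_USE ties cuda to single-precision).

# Hypothetical USE configuration, e.g. in /etc/portage/package.use/gromacs:
#   sci-chemistry/gromacs cuda fftw openmp threads single-precision
# then build the freshly added version:
emerge --ask =sci-chemistry/gromacs-2023.1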