author    V3n3RiX <venerix@koprulu.sector>    2024-01-17 08:06:37 +0000
committer V3n3RiX <venerix@koprulu.sector>    2024-01-17 08:06:37 +0000
commit    e74bddc195533f6d160ee96220f5cfcc205bda1c (patch)
tree      147bf202fd80bf604f6d289e985630f30b5a8438 /dev-libs/librdkafka
parent    20c23c152d2e7b44a6f93ec9476b808659e5ff1e (diff)
gentoo auto-resync : 17:01:2024 - 08:06:37
Diffstat (limited to 'dev-libs/librdkafka')
-rw-r--r--  dev-libs/librdkafka/Manifest                                          2
-rw-r--r--  dev-libs/librdkafka/files/librdkafka-2.2.0-backport-pr4449.patch    285
-rw-r--r--  dev-libs/librdkafka/librdkafka-2.2.0-r1.ebuild                      110
3 files changed, 397 insertions, 0 deletions
diff --git a/dev-libs/librdkafka/Manifest b/dev-libs/librdkafka/Manifest
index ea424dc77719..61d861ad56d2 100644
--- a/dev-libs/librdkafka/Manifest
+++ b/dev-libs/librdkafka/Manifest
@@ -1,7 +1,9 @@
+AUX librdkafka-2.2.0-backport-pr4449.patch 12334 BLAKE2B 1787951e628e64890e1a84203f25609d0488ebf8508e16214a9c9c97f23b853cbd86d1b5ceeb3f00d9118245bf9771ad5db82f5db60c37f1c3d2ba493bf3fa4a SHA512 4e8e2de48025ec7b17c070d1e830a8bead9470d283e26ed0b4a9c7779cb2963cd129e59283da07d44f5a46917c783b026367ce575c6a3d103759e7b64855d937
DIST librdkafka-1.8.2.tar.gz 4097028 BLAKE2B 37a3190417e973ea4629012e358ff61cda45eb0134448dd0054fd1f1cac57f3543b133331b0b0d518f219f30f79f7665a214c75084d6a8474687b7e686395724 SHA512 8c8ae291129b78e3b8367307ad1b1715af1438cd76d7160d64d13a58adf84c7c9f51efeba4656f55e101c25e4cb744db0d8bb5c01a2decb229e4567d16bdcb22
DIST librdkafka-2.1.1.tar.gz 4281061 BLAKE2B 8200c2aee0d04109cb78f13b186cf907f3260ee6a17a2fffc4f7706ed9cbea9436eb31d17167e9af783495ef2365f7401bbdc671d5a8d7e7f2ecb9b5c7d57fbb SHA512 6bf1761e7ed1820b587fda24277f6606ec046da281064df13c4380f49a92f3e2b165614b9c622d46b27078ec024a4dc211610e500e597265e8219f8869c4d203
DIST librdkafka-2.2.0.tar.gz 4340164 BLAKE2B 394c9fd25db0a3a02b16fec3d5d5acc4808b4c8a6c8e025e71f15a91c6311206bf4d7c863860be36483c150bb10955cdc98ba0f088fa493b20ae52154e2a83d0 SHA512 1a85b5864efdeece1327e461b62a378f24b73eb7174168f630aeff01d4a5074d6b2f15744bc267665fcfc6384e804df00c20b7271ecd85a98dca84746b86b4d9
EBUILD librdkafka-1.8.2.ebuild 2377 BLAKE2B 1ac13b1f1df3dcd58db7ecd0ac0afef58c431aa796ebddce48ae1ae8f0b06293623ae7731a04a044022bb4767444d8e680759738c9ee9cccbd0c5b33671bdf5c SHA512 fa963b50fe52ed39e1e6c206e0c6a9729d134e6001cb5534a4f0cc0cfe58fbf8438122e0dc9925c45939ce0ff3576116f6a7aea3f7df7ed95e89d4d64ccbf10d
EBUILD librdkafka-2.1.1.ebuild 2377 BLAKE2B 1ac13b1f1df3dcd58db7ecd0ac0afef58c431aa796ebddce48ae1ae8f0b06293623ae7731a04a044022bb4767444d8e680759738c9ee9cccbd0c5b33671bdf5c SHA512 fa963b50fe52ed39e1e6c206e0c6a9729d134e6001cb5534a4f0cc0cfe58fbf8438122e0dc9925c45939ce0ff3576116f6a7aea3f7df7ed95e89d4d64ccbf10d
+EBUILD librdkafka-2.2.0-r1.ebuild 2457 BLAKE2B b0e1d8922ff5fdea718f0da84ceaac2d42501adb41cbe9ff2e32f721236576047972482ef6e01e204c9015577f8020ae55218a4a33ee375f7c4c314ab0b655cc SHA512 4f48b2b6f0b74bef92621fd3b8f0706482ddc978e5c7d709542ac9426ee56be85f5ce0435dddfc8f4774a256038fb46d3245c6c66d0ef9dd34a9254fea770679
EBUILD librdkafka-2.2.0.ebuild 2392 BLAKE2B 66094f8e29286889e87e1334aefc5e8613c3b3a425941ba1ebb6f06d583ae1c185541c236cc3054244e7c544f781d4edeb65634b9bbe8f68754a7780feaa086e SHA512 aa4c3d4aca88411f478412e16104b725c42f5b43c3aa4af117e03ea06e20ef352079e16a97c71c9eff04c25c7147010613120186c8d48da6b472ab371f2a97c8
MISC metadata.xml 458 BLAKE2B 9fe3256ed33cb5691e327d86d5276cfa37e898e938c623f0643bf6cb583e4ee6bd34922880185bdbabc638e2e85ffe157554ff95063db7a348fc9e6f425411e6 SHA512 84279a77f53332007509a0912059a5fb71fed16f0976dbdad200f7a0ff54cff43e17d94afa05f8169181c3fd1aa18e93f6c2e4e279e01cbe9fefdaba46042e7a
diff --git a/dev-libs/librdkafka/files/librdkafka-2.2.0-backport-pr4449.patch b/dev-libs/librdkafka/files/librdkafka-2.2.0-backport-pr4449.patch
new file mode 100644
index 000000000000..cc6b57ea72d2
--- /dev/null
+++ b/dev-libs/librdkafka/files/librdkafka-2.2.0-backport-pr4449.patch
@@ -0,0 +1,285 @@
+https://bugs.gentoo.org/915433
+https://github.com/confluentinc/librdkafka/pull/4449
+
+From 8b311b8a850805f4ec9bb068c0edb31492ad03fe Mon Sep 17 00:00:00 2001
+From: Emanuele Sabellico <esabellico@confluent.io>
+Date: Wed, 27 Sep 2023 11:08:33 +0200
+Subject: [PATCH 1/3] tmpabuf refactor and fix for insufficient buffer
+ allocation
+
+---
+ CHANGELOG.md | 10 ++++++
+ src/rdkafka_buf.h | 25 ++++++++++++---
+ src/rdkafka_metadata.c | 59 +++++++++++++++++++-----------------
+ src/rdkafka_metadata_cache.c | 38 +++++++++++------------
+ src/rdkafka_topic.c | 36 +++++++++++++---------
+ 5 files changed, 100 insertions(+), 68 deletions(-)
+
+diff --git a/src/rdkafka_buf.h b/src/rdkafka_buf.h
+index ccd563cc6..623ec49ae 100644
+--- a/src/rdkafka_buf.h
++++ b/src/rdkafka_buf.h
+@@ -49,21 +49,36 @@ typedef struct rd_tmpabuf_s {
+ size_t of;
+ char *buf;
+ int failed;
+- int assert_on_fail;
++ rd_bool_t assert_on_fail;
+ } rd_tmpabuf_t;
+
+ /**
+- * @brief Allocate new tmpabuf with \p size bytes pre-allocated.
++ * @brief Initialize new tmpabuf of non-final \p size bytes.
+ */
+ static RD_UNUSED void
+-rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, int assert_on_fail) {
+- tab->buf = rd_malloc(size);
+- tab->size = size;
++rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, rd_bool_t assert_on_fail) {
++ tab->buf = NULL;
++ tab->size = RD_ROUNDUP(size, 8);
+ tab->of = 0;
+ tab->failed = 0;
+ tab->assert_on_fail = assert_on_fail;
+ }
+
++/**
++ * @brief Add a new allocation of \p _size bytes,
++ * rounded up to maximum word size,
++ * for \p _times times.
++ */
++#define rd_tmpabuf_add_alloc_times(_tab, _size, _times) \
++ (_tab)->size += RD_ROUNDUP(_size, 8) * _times
++
++#define rd_tmpabuf_add_alloc(_tab, _size) \
++ rd_tmpabuf_add_alloc_times(_tab, _size, 1)
++/**
++ * @brief Finalize tmpabuf pre-allocating tab->size bytes.
++ */
++#define rd_tmpabuf_finalize(_tab) (_tab)->buf = rd_malloc((_tab)->size)
++
+ /**
+ * @brief Free memory allocated by tmpabuf
+ */
+diff --git a/src/rdkafka_metadata.c b/src/rdkafka_metadata.c
+index f96edf658..6c2f60ae3 100644
+--- a/src/rdkafka_metadata.c
++++ b/src/rdkafka_metadata.c
+@@ -164,7 +164,8 @@ static rd_kafka_metadata_internal_t *rd_kafka_metadata_copy_internal(
+ * Because of this we copy all the structs verbatim but
+ * any pointer fields needs to be copied explicitly to update
+ * the pointer address. */
+- rd_tmpabuf_new(&tbuf, size, 1 /*assert on fail*/);
++ rd_tmpabuf_new(&tbuf, size, rd_true /*assert on fail*/);
++ rd_tmpabuf_finalize(&tbuf);
+ mdi = rd_tmpabuf_write(&tbuf, src, sizeof(*mdi));
+ md = &mdi->metadata;
+
+@@ -506,11 +507,13 @@ rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb,
+ * no more than 4 times larger than the wire representation.
+ * This is increased to 5 times in case if we want to compute partition
+ * to rack mapping. */
+- rd_tmpabuf_new(&tbuf,
+- sizeof(*mdi) + rkb_namelen +
+- (rkbuf->rkbuf_totlen * 4 +
+- (compute_racks ? rkbuf->rkbuf_totlen : 0)),
+- 0 /*dont assert on fail*/);
++ rd_tmpabuf_new(&tbuf, 0, rd_false /*dont assert on fail*/);
++ rd_tmpabuf_add_alloc(&tbuf, sizeof(*mdi));
++ rd_tmpabuf_add_alloc(&tbuf, rkb_namelen);
++ rd_tmpabuf_add_alloc(&tbuf, rkbuf->rkbuf_totlen *
++ (4 + (compute_racks ? 1 : 0)));
++
++ rd_tmpabuf_finalize(&tbuf);
+
+ if (!(mdi = rd_tmpabuf_alloc(&tbuf, sizeof(*mdi)))) {
+ rd_kafka_broker_unlock(rkb);
+@@ -1603,35 +1606,37 @@ rd_kafka_metadata_new_topic_mock(const rd_kafka_metadata_topic_t *topics,
+ rd_kafka_metadata_internal_t *mdi;
+ rd_kafka_metadata_t *md;
+ rd_tmpabuf_t tbuf;
+- size_t topic_names_size = 0;
+- int total_partition_cnt = 0;
+ size_t i;
+ int curr_broker = 0;
+
+- /* Calculate total partition count and topic names size before
+- * allocating memory. */
+- for (i = 0; i < topic_cnt; i++) {
+- topic_names_size += 1 + strlen(topics[i].topic);
+- total_partition_cnt += topics[i].partition_cnt;
+- }
+-
+ /* If the replication factor is given, num_brokers must also be given */
+ rd_assert(replication_factor <= 0 || num_brokers > 0);
+
+ /* Allocate contiguous buffer which will back all the memory
+ * needed by the final metadata_t object */
+- rd_tmpabuf_new(
+- &tbuf,
+- sizeof(*mdi) + (sizeof(*md->topics) * topic_cnt) +
+- topic_names_size + (64 /*topic name size..*/ * topic_cnt) +
+- (sizeof(*md->topics[0].partitions) * total_partition_cnt) +
+- (sizeof(*mdi->topics) * topic_cnt) +
+- (sizeof(*mdi->topics[0].partitions) * total_partition_cnt) +
+- (sizeof(*mdi->brokers) * RD_ROUNDUP(num_brokers, 8)) +
+- (replication_factor > 0 ? RD_ROUNDUP(replication_factor, 8) *
+- total_partition_cnt * sizeof(int)
+- : 0),
+- 1 /*assert on fail*/);
++ rd_tmpabuf_new(&tbuf, sizeof(*mdi), rd_true /*assert on fail*/);
++
++ rd_tmpabuf_add_alloc(&tbuf, topic_cnt * sizeof(*md->topics));
++ rd_tmpabuf_add_alloc(&tbuf, topic_cnt * sizeof(*mdi->topics));
++ rd_tmpabuf_add_alloc(&tbuf, num_brokers * sizeof(*md->brokers));
++
++ /* Calculate total partition count and topic names size before
++ * allocating memory. */
++ for (i = 0; i < topic_cnt; i++) {
++ rd_tmpabuf_add_alloc(&tbuf, 1 + strlen(topics[i].topic));
++ rd_tmpabuf_add_alloc(&tbuf,
++ topics[i].partition_cnt *
++ sizeof(*md->topics[i].partitions));
++ rd_tmpabuf_add_alloc(&tbuf,
++ topics[i].partition_cnt *
++ sizeof(*mdi->topics[i].partitions));
++ if (replication_factor > 0)
++ rd_tmpabuf_add_alloc_times(
++ &tbuf, replication_factor * sizeof(int),
++ topics[i].partition_cnt);
++ }
++
++ rd_tmpabuf_finalize(&tbuf);
+
+ mdi = rd_tmpabuf_alloc(&tbuf, sizeof(*mdi));
+ memset(mdi, 0, sizeof(*mdi));
+diff --git a/src/rdkafka_metadata_cache.c b/src/rdkafka_metadata_cache.c
+index 18f19a4d0..1530e699e 100644
+--- a/src/rdkafka_metadata_cache.c
++++ b/src/rdkafka_metadata_cache.c
+@@ -249,8 +249,6 @@ static struct rd_kafka_metadata_cache_entry *rd_kafka_metadata_cache_insert(
+ rd_kafka_metadata_broker_internal_t *brokers_internal,
+ size_t broker_cnt) {
+ struct rd_kafka_metadata_cache_entry *rkmce, *old;
+- size_t topic_len;
+- size_t racks_size = 0;
+ rd_tmpabuf_t tbuf;
+ int i;
+
+@@ -261,34 +259,32 @@ static struct rd_kafka_metadata_cache_entry *rd_kafka_metadata_cache_insert(
+ * any pointer fields needs to be copied explicitly to update
+ * the pointer address.
+ * See also rd_kafka_metadata_cache_delete which frees this. */
+- topic_len = strlen(mtopic->topic) + 1;
++ rd_tmpabuf_new(&tbuf, 0, rd_true /*assert on fail*/);
++
++ rd_tmpabuf_add_alloc(&tbuf, sizeof(*rkmce));
++ rd_tmpabuf_add_alloc(&tbuf, strlen(mtopic->topic) + 1);
++ rd_tmpabuf_add_alloc(&tbuf, mtopic->partition_cnt *
++ sizeof(*mtopic->partitions));
++ rd_tmpabuf_add_alloc(&tbuf,
++ mtopic->partition_cnt *
++ sizeof(*metadata_internal_topic->partitions));
+
+ for (i = 0; include_racks && i < mtopic->partition_cnt; i++) {
+ size_t j;
+- racks_size += RD_ROUNDUP(
+- metadata_internal_topic->partitions[i].racks_cnt *
+- sizeof(char *),
+- 8);
++ rd_tmpabuf_add_alloc(
++ &tbuf, metadata_internal_topic->partitions[i].racks_cnt *
++ sizeof(char *));
+ for (j = 0;
+ j < metadata_internal_topic->partitions[i].racks_cnt;
+ j++) {
+- racks_size += RD_ROUNDUP(
+- strlen(metadata_internal_topic->partitions[i]
+- .racks[j]) +
+- 1,
+- 8);
++ rd_tmpabuf_add_alloc(
++ &tbuf, strlen(metadata_internal_topic->partitions[i]
++ .racks[j]) +
++ 1);
+ }
+ }
+
+- rd_tmpabuf_new(
+- &tbuf,
+- RD_ROUNDUP(sizeof(*rkmce), 8) + RD_ROUNDUP(topic_len, 8) +
+- (mtopic->partition_cnt *
+- RD_ROUNDUP(sizeof(*mtopic->partitions), 8)) +
+- (mtopic->partition_cnt *
+- RD_ROUNDUP(sizeof(*metadata_internal_topic->partitions), 8)) +
+- racks_size,
+- 1 /*assert on fail*/);
++ rd_tmpabuf_finalize(&tbuf);
+
+ rkmce = rd_tmpabuf_alloc(&tbuf, sizeof(*rkmce));
+
+diff --git a/src/rdkafka_topic.c b/src/rdkafka_topic.c
+index 3b3986d43..b63a0bbea 100644
+--- a/src/rdkafka_topic.c
++++ b/src/rdkafka_topic.c
+@@ -1831,38 +1831,44 @@ rd_kafka_topic_info_t *rd_kafka_topic_info_new_with_rack(
+ const rd_kafka_metadata_partition_internal_t *mdpi) {
+ rd_kafka_topic_info_t *ti;
+ rd_tmpabuf_t tbuf;
+- size_t tlen = RD_ROUNDUP(strlen(topic) + 1, 8);
+- size_t total_racks_size = 0;
+ int i;
++ rd_bool_t has_racks = rd_false;
+
++ rd_tmpabuf_new(&tbuf, 0, rd_true /* assert on fail */);
++
++ rd_tmpabuf_add_alloc(&tbuf, sizeof(*ti));
++ rd_tmpabuf_add_alloc(&tbuf, strlen(topic) + 1);
+ for (i = 0; i < partition_cnt; i++) {
+ size_t j;
+ if (!mdpi[i].racks)
+ continue;
+
++ if (unlikely(!has_racks))
++ has_racks = rd_true;
++
+ for (j = 0; j < mdpi[i].racks_cnt; j++) {
+- total_racks_size +=
+- RD_ROUNDUP(strlen(mdpi[i].racks[j]) + 1, 8);
++ rd_tmpabuf_add_alloc(&tbuf,
++ strlen(mdpi[i].racks[j]) + 1);
+ }
+- total_racks_size +=
+- RD_ROUNDUP(sizeof(char *) * mdpi[i].racks_cnt, 8);
++ rd_tmpabuf_add_alloc(&tbuf, sizeof(char *) * mdpi[i].racks_cnt);
++ }
++
++ /* Only bother allocating this if at least one
++ * rack is there. */
++ if (has_racks) {
++ rd_tmpabuf_add_alloc(
++ &tbuf, sizeof(rd_kafka_metadata_partition_internal_t) *
++ partition_cnt);
+ }
+
+- if (total_racks_size) /* Only bother allocating this if at least one
+- rack is there. */
+- total_racks_size +=
+- RD_ROUNDUP(sizeof(rd_kafka_metadata_partition_internal_t) *
+- partition_cnt,
+- 8);
++ rd_tmpabuf_finalize(&tbuf);
+
+- rd_tmpabuf_new(&tbuf, sizeof(*ti) + tlen + total_racks_size,
+- 1 /* assert on fail */);
+ ti = rd_tmpabuf_alloc(&tbuf, sizeof(*ti));
+ ti->topic = rd_tmpabuf_write_str(&tbuf, topic);
+ ti->partition_cnt = partition_cnt;
+ ti->partitions_internal = NULL;
+
+- if (total_racks_size) {
++ if (has_racks) {
+ ti->partitions_internal = rd_tmpabuf_alloc(
+ &tbuf, sizeof(*ti->partitions_internal) * partition_cnt);
+
+
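
The hunks above replace librdkafka's single up-front size arithmetic with a three-step flow: plan every chunk with rd_tmpabuf_add_alloc()/rd_tmpabuf_add_alloc_times(), allocate the backing buffer once with rd_tmpabuf_finalize(), then carve the chunks out with rd_tmpabuf_alloc(). The self-contained C sketch below illustrates only that pattern; the simplified struct and lowercase names are stand-ins, not the real rd_tmpabuf_t API from src/rdkafka_buf.h, and the failed/assert_on_fail handling from the patch is left out.

/*
 * Standalone sketch of the two-phase tmpabuf pattern introduced above:
 *   1. plan every chunk, rounding each one up to 8 bytes,
 *   2. allocate the backing buffer once,
 *   3. carve the chunks out at the same rounded offsets.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ROUNDUP(x, n) (((x) + (n) - 1) / (n) * (n))

typedef struct tmpabuf_s {
        size_t size; /* bytes planned, then allocated */
        size_t of;   /* current carve offset          */
        char *buf;   /* single backing allocation     */
} tmpabuf_t;

/* Planning phase: mirrors rd_tmpabuf_new() + rd_tmpabuf_add_alloc(). */
static void tmpabuf_new(tmpabuf_t *tab, size_t size) {
        tab->buf  = NULL;
        tab->size = ROUNDUP(size, 8);
        tab->of   = 0;
}

static void tmpabuf_add_alloc(tmpabuf_t *tab, size_t size) {
        tab->size += ROUNDUP(size, 8);
}

/* One backing allocation: mirrors rd_tmpabuf_finalize(). */
static int tmpabuf_finalize(tmpabuf_t *tab) {
        tab->buf = malloc(tab->size);
        return tab->buf != NULL;
}

/* Carving phase: rounds exactly like the planning phase, so the total
 * can no longer come up short. */
static void *tmpabuf_alloc(tmpabuf_t *tab, size_t size) {
        void *p;
        size = ROUNDUP(size, 8);
        if (tab->of + size > tab->size)
                return NULL; /* out of planned space */
        p = tab->buf + tab->of;
        tab->of += size;
        return p;
}

int main(void) {
        const char *topics[] = {"transactions", "clicks"}; /* sample data */
        size_t i, n = sizeof(topics) / sizeof(topics[0]);
        tmpabuf_t tab;

        /* Plan all allocations before touching the heap. */
        tmpabuf_new(&tab, 0);
        for (i = 0; i < n; i++)
                tmpabuf_add_alloc(&tab, strlen(topics[i]) + 1);
        if (!tmpabuf_finalize(&tab))
                return 1;

        /* Carve and use the planned chunks. */
        for (i = 0; i < n; i++) {
                char *dst = tmpabuf_alloc(&tab, strlen(topics[i]) + 1);
                if (!dst)
                        return 1;
                strcpy(dst, topics[i]);
                printf("%zu: %s\n", i, dst);
        }
        free(tab.buf);
        return 0;
}

Because the planning pass applies the same 8-byte round-up that the carving pass uses, the single buffer can no longer end up smaller than the sum of its carved chunks, which appears to be the "insufficient buffer allocation" the patch subject refers to.
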
diff --git a/dev-libs/librdkafka/librdkafka-2.2.0-r1.ebuild b/dev-libs/librdkafka/librdkafka-2.2.0-r1.ebuild
new file mode 100644
index 000000000000..c3f8341d2d7d
--- /dev/null
+++ b/dev-libs/librdkafka/librdkafka-2.2.0-r1.ebuild
@@ -0,0 +1,110 @@
+# Copyright 1999-2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI="8"
+
+PYTHON_COMPAT=( python3_{9..12} )
+
+inherit python-any-r1 toolchain-funcs
+
+DESCRIPTION="Apache Kafka C/C++ client library"
+HOMEPAGE="https://github.com/confluentinc/librdkafka"
+
+if [[ ${PV} == "9999" ]]; then
+ EGIT_REPO_URI="https://github.com/confluentinc/${PN}.git"
+
+ inherit git-r3
+else
+ SRC_URI="https://github.com/confluentinc/${PN}/archive/v${PV}.tar.gz -> ${P}.tar.gz"
+ KEYWORDS="~amd64 ~arm ~arm64 ~hppa ~ia64 ~loong ~ppc ~ppc64 ~riscv ~sparc ~x86"
+fi
+
+LICENSE="BSD-2"
+
+# subslot = soname version
+SLOT="0/1"
+
+IUSE="lz4 sasl ssl static-libs zstd"
+
+LIB_DEPEND="
+ lz4? ( app-arch/lz4:=[static-libs(+)] )
+ sasl? ( dev-libs/cyrus-sasl:=[static-libs(+)] )
+ ssl? ( dev-libs/openssl:0=[static-libs(+)] )
+ zstd? ( app-arch/zstd:=[static-libs(+)] )
+ sys-libs/zlib:=[static-libs(+)]
+"
+# which: https://github.com/confluentinc/librdkafka/pull/4353
+BDEPEND="
+ sys-apps/which
+ virtual/pkgconfig
+ ${PYTHON_DEPS}
+"
+
+RDEPEND="net-misc/curl
+ !static-libs? ( ${LIB_DEPEND//\[static-libs(+)]} )"
+
+DEPEND="
+ ${RDEPEND}
+ static-libs? ( ${LIB_DEPEND} )
+"
+
+PATCHES=( "${FILESDIR}/${PN}-2.2.0-backport-pr4449.patch" )
+
+pkg_setup() {
+ python-any-r1_pkg_setup
+}
+
+src_prepare() {
+ default
+
+ if [[ ${PV} != "9999" ]]; then
+ sed -i \
+ -e "s/^\(export RDKAFKA_GITVER=\).*/\1\"${PV}@release\"/" \
+ tests/run-test.sh || die
+ fi
+}
+
+src_configure() {
+ tc-export AR CC CXX LD NM OBJDUMP PKG_CONFIG STRIP
+
+ local myeconf=(
+ --prefix="${EPREFIX}/usr"
+ --build="${CBUILD}"
+ --host="${CHOST}"
+ --mandir="${EPREFIX}/usr/share/man"
+ --infodir="${EPREFIX}/usr/share/info"
+ --datadir="${EPREFIX}/usr/share"
+ --sysconfdir="${EPREFIX}/etc"
+ --localstatedir="${EPREFIX}/var"
+ --libdir="${EPREFIX}/usr/$(get_libdir)"
+ --no-cache
+ --no-download
+ --disable-debug-symbols
+ $(use_enable lz4)
+ $(use_enable sasl)
+ $(usex static-libs '--enable-static' '')
+ $(use_enable ssl)
+ $(use_enable zstd)
+ )
+
+ ./configure ${myeconf[@]} || die
+}
+
+src_test() {
+ # Simulate CI so we do not fail when tests are running longer than expected,
+ # https://github.com/confluentinc/librdkafka/blob/v1.6.1/tests/0062-stats_event.c#L101-L116
+ local -x CI=true
+
+ emake -C tests run_local
+}
+
+src_install() {
+ emake -j1 \
+ DESTDIR="${D}" \
+ docdir="/usr/share/doc/${PF}" \
+ install
+
+ if ! use static-libs; then
+ find "${ED}" -type f \( -name "*.a" -o -name "*.la" \) -delete || die
+ fi
+}