200 Commits (0.2...master)

Author SHA1 Message Date
Stephane Bourque
f128c1764b Merge branch 'mfontanini:master' into master 2022-08-01 21:58:24 -07:00
stephb9959
03b12561af Merge 2022-08-01 21:55:17 -07:00
stephb9959
14f5261806 Merge 2022-08-01 21:54:29 -07:00
stephb9959
d7e08fa69c Merge 2022-08-01 21:54:17 -07:00
Matias Fontanini
76d175e354 Merge pull request #293 from SpaceIm/fix/cppstd
CMake: set min required C++ standard to C++11
2022-04-08 09:02:52 -07:00
Matias Fontanini
93893e1386 Merge pull request #292 from SpaceIm/fix/export-template
Do not try to export template declaration in shared lib
2022-04-08 08:59:20 -07:00
SpaceIm
fc97759d93 set min C++ standard to C++11
Do not hardcode -std=c++11; let CMake set cppkafka's minimum required C++ standard when the consuming project does not force CMAKE_CXX_STANDARD
2022-04-07 19:44:15 +02:00
SpaceIm
025d8ed7e1 do not try to export template declaration
it doesn't make sense to export a template declaration
2022-04-07 19:37:08 +02:00
Matias Fontanini
5a119f689f Bump version to 0.4.0 2021-06-02 16:41:09 -07:00
Matias Fontanini
dd3966fb45 Merge pull request #281 from ClickHouse-Extras/fix_failover
Fix for failover issue.
2021-06-02 16:04:02 -07:00
Mikhail Filimonov
dabb2d3aa8 Fix for failover issue.
When the consumer enters the group and gets no assignment (for example, there are not enough partitions in the topic),
librdkafka waits for the rebalancing sequence to be finished by a call to assign with the empty list of partitions
(just as it was passed by librdkafka to the rebalance callback).

But cppkafka passes nullptr instead of an empty list (which means unassign). The consumer is then stuck forever in that
state, unable to pick up a partition during the next rebalance (failover), because the previous rebalance sequence
was never finished.

Fixes https://github.com/mfontanini/cppkafka/issues/273 , https://github.com/ClickHouse/ClickHouse/issues/21118 , etc.
2021-06-01 23:36:25 +02:00
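For context, here is a minimal sketch of the rebalance-callback contract that this fix restores. It is written against raw librdkafka for illustration only; it is not the actual cppkafka patch:

```cpp
// Sketch: complete the rebalance sequence even for an empty assignment.
#include <librdkafka/rdkafka.h>

static void rebalance_cb(rd_kafka_t* rk, rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t* partitions,
                         void* opaque) {
    if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
        // Pass the list through as-is, even when it is empty. Passing NULL
        // here means "unassign" and leaves the rebalance sequence unfinished,
        // which is exactly the bug described above.
        rd_kafka_assign(rk, partitions);
    }
    else if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) {
        // On revocation, NULL is the correct argument: drop the assignment.
        rd_kafka_assign(rk, NULL);
    }
}
```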
Matias Fontanini
57a599d99c Merge pull request #277 from oxidecomputer/master
Adds support for building on Solaris-based systems
2021-05-02 14:30:37 -07:00
Benjamin Naecker
544972e48f Adds support for building on Solaris-based systems 2021-04-28 14:13:56 -07:00
Matias Fontanini
847f530e6e Merge pull request #227 from neatlife/fix-macos-rt
Remove rt lib on macOS
2021-04-28 08:26:25 -07:00
stephb9959
2c24b6e6b9 Changing to compile under OSX and Linux 2021-04-19 20:34:32 -07:00
Matias Fontanini
01bd066d57 Merge pull request #267 from jlcordeiro/feature/boost_cleanup
remove unused boost includes
2021-04-16 09:22:36 -07:00
Matias Fontanini
8fd0ef8fc5 Merge pull request #272 from psigen/bugfix_readonly
Add fix for in-source build of cppkafka.h.
2021-04-03 09:14:34 -07:00
Pras Velagapudi
85f1c0fcb1 Add fix for in-source build of cppkafka.h.
This fixes an issue where a cppkafka.h is being generated directly
within the source directory, which causes issues on sandboxed build
environments where the source directory is mounted readonly.

This PR changes the configure_file() directive to point to the
binary directory, and uses the install directives to move the
generated file into the build output.
2021-03-04 03:47:32 -05:00
Joao Cordeiro
62ec1d82c1 remove unused boost includes 2020-10-18 12:30:27 +01:00
Matias Fontanini
5e4b350806 Merge pull request #265 from accelerated/find-package
Fix CMAKE warning
2020-09-05 09:45:20 -07:00
Alexander Damian
e932d9567f Fix CMAKE warning 2020-09-04 18:20:38 -04:00
Matias Fontanini
a2056c36bf Merge pull request #263 from accelerated/sync-produce
Call flush termination callbacks from sync_produce
2020-08-31 19:59:37 -07:00
Alexander Damian
8cfd4595f6 Call flush termination callbacks from sync_produce 2020-08-31 21:56:10 -04:00
Matias Fontanini
f117720f66 Merge pull request #248 from filimonov/patch-1
Try to fix travis
2020-08-31 18:46:44 -07:00
Matias Fontanini
e5dfd5085c Merge pull request #261 from hanickadot/master
Ability to block producer in case the underlying queue is full.
2020-08-15 18:03:15 -07:00
Hana Dusíková
a032f9a1e8 Ability to block producer in case the underlying queue is full. 2020-08-14 19:29:55 +02:00
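Before this change, callers had to deal with a full queue themselves. A hedged sketch of that manual pattern (produce_blocking is an illustrative helper name, not cppkafka API):

```cpp
// Sketch: retry produce() while the underlying queue is full.
#include <chrono>
#include <cppkafka/exceptions.h>
#include <cppkafka/message_builder.h>
#include <cppkafka/producer.h>

void produce_blocking(cppkafka::Producer& producer,
                      const cppkafka::MessageBuilder& builder) {
    while (true) {
        try {
            producer.produce(builder);
            return;
        }
        catch (const cppkafka::HandleException& ex) {
            if (ex.get_error() == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
                // Serve delivery report callbacks so the queue drains, then retry
                producer.poll(std::chrono::milliseconds(50));
            }
            else {
                throw;
            }
        }
    }
}
```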
Matias Fontanini
b06e64ef5b Merge pull request #257 from accelerated/resume-scope
Bring base class resume into scope
2020-07-19 11:23:53 -07:00
Alexander Damian
a48bf89292 Bring base class into scope 2020-07-19 12:40:52 -04:00
filimonov
31a58d433e Update .travis.yml 2020-05-24 19:54:11 +02:00
filimonov
4467743340 Update .travis.yml 2020-05-24 19:43:30 +02:00
filimonov
e8b4f5a8e9 Update .travis.yml 2020-05-24 19:29:06 +02:00
filimonov
eb1105d839 Update .travis.yml 2020-05-24 17:32:35 +02:00
filimonov
098735413b Update .travis.yml 2020-05-24 15:47:53 +02:00
filimonov
91a3be0a8f Update .travis.yml 2020-05-24 10:52:15 +02:00
Matias Fontanini
e2000b0741 Merge pull request #246 from LesnyRumcajs/patch-2
fixed typo in example (log message)
2020-05-23 09:34:41 -07:00
Matias Fontanini
ca3a1321ec Merge pull request #247 from filimonov/kafka_destroy_flags3
Add support for rd_kafka_destroy_flags.
2020-05-23 09:24:38 -07:00
Mikhail Filimonov
244726c251 Style changes 2020-05-22 17:24:19 +02:00
filimonov
7aa60a1409 Add latest rdkafka version to build matrix 2020-05-22 00:16:12 +02:00
filimonov
487585fd17 Try to fix travis
Old URL returns 404, picked new from official webpage: https://kafka.apache.org/downloads
2020-05-22 00:10:49 +02:00
Mikhail Filimonov
3b67ba072a Add support for rd_kafka_destroy_flags. 2020-05-21 23:38:17 +02:00
LesnyRumcajs
14423bba40 Fixed typo in log message 2020-05-12 10:49:08 +02:00
Matias Fontanini
006642cdb2 Merge pull request #237 from accelerated/buff_prod_comments
Added clarifications and comments to the BufferedProducer class
2020-04-18 10:56:51 -07:00
Matias Fontanini
679f58dee3 Merge pull request #241 from accelerated/timeout-overloads
Added timeout overloads for consumer and handle classes
2020-04-08 08:28:26 -07:00
Docker RHEL
b2b0d16fee Added timeout overloads for consumer and handle classes 2020-04-08 15:23:05 +00:00
Matias Fontanini
2ce0ae4a62 Merge pull request #238 from accelerated/poll_strategy
Added member functions for static consumers
2020-04-08 07:14:58 -07:00
Alexander Damian
935a34238b Added implementation for thread-aware ack monitoring 2020-03-05 14:56:36 -05:00
Alexander Damian
5a057e4c99 Wait until the ack is received without timing out.
wait_for_acks() should default to infinite timeout since the
original implementation was never timing out.
2020-02-16 21:06:14 -05:00
Alexander Damian
ffcf8956bd Allow to pass-in via cmake all the kafka config options for testing: broker, partitions and topics 2020-02-16 20:11:33 -05:00
Alexander Damian
2287e0994b Express async_flush in terms of flush since the logic is identical except for the timeout 2020-02-16 20:11:33 -05:00
Alexander Damian
92e46aa6cb Proper implementation of flush() with timeout 2020-02-16 20:11:20 -05:00
Alexander Damian
a4532ed336 Use erase directly
Added revoke() member function
2020-02-09 21:25:36 -05:00
Alexander Damian
68ae525eba Added member functions for static consumers 2020-02-08 22:34:45 -05:00
Alexander Damian
e401e97b40 Added clarifications and comments to the BufferedProducer class 2020-02-08 21:24:12 -05:00
Matias Fontanini
7d097df34d Merge pull request #235 from accelerated/promise_bug
Fix tracker promise from throwing when set multiple times
2020-02-04 06:23:39 -08:00
Alexander Damian
fbbd5bc5a6 Changed int to size_t 2020-02-03 22:04:50 -05:00
Alexander Damian
bda2f4156d Fix tracker promise from throwing when set multiple times 2020-02-03 16:46:28 -05:00
Matias Fontanini
f1de729d4e Merge pull request #234 from accelerated/null_topic
Fix crash when message handle is valid but topic is null
2020-01-27 09:09:44 -08:00
Alexander Damian
81ce56a1bd Fix case when message handle is valid but topic is null 2020-01-27 10:30:12 -05:00
suxiaolin
a2a46f0ec8 Remove rt lib on macOS 2019-11-03 16:12:57 +08:00
Matias Fontanini
c3b4580fef Merge pull request #210 from ych/pc_config
Remove boost dependency from pkg-config template file
2019-09-05 09:39:52 -07:00
ych
de06b7ad4e Add boost include dir to pkg-config template file 2019-09-05 11:49:01 +08:00
ych
9a0f196d34 Remove boost dependency from pkg-config template file
Boost does not provide a pkg-config file, so running the
 'pkg-config --exists cppkafka' command with a boost dependency
 always returns non-zero, and PKG_SEARCH_MODULE in CMake uses
 that command to check the status of cppkafka.

The boost dependency should be removed so that general usage works.
2019-09-05 11:49:01 +08:00
Matias Fontanini
e5aec82ddf Merge pull request #219 from accelerated/master
Fix RdKafka_LIBRARY_DIR-NOTFOUND
2019-09-04 08:44:05 -07:00
Alexander Damian
58111bdf62 Removed RdKafka_LIBRARY_DIR as per code review 2019-09-03 14:56:46 -04:00
Alex Damian
fd19648d5a Fix RdKafka_LIBRARY_DIR-NOTFOUND 2019-08-31 23:54:44 -04:00
Matias Fontanini
4a3ec91f87 Merge pull request #215 from accelerated/partition
Added method to set the partition
2019-07-29 09:11:01 -07:00
Alexander Damian
a85a87bb9b Added method to set the partition 2019-07-26 10:35:20 -04:00
Matias Fontanini
a357529cc0 Merge pull request #204 from accelerated/cmake
Add CMake configuration file and export installed targets
2019-07-17 12:55:25 -07:00
Alexander Damian
dd6ec44c27 Updated pkg_config file name 2019-07-17 10:20:43 -04:00
Alexander Damian
20b806037b Added options to conditionally disable installation of configuration files 2019-07-03 18:01:28 -04:00
Alexander Damian
ad800a5765 Added RdKafka hex version so that FindRdKafka.cmake can compile the test code. Changed find_dependency to find_package for the RdKafka config so that the script is not automatically exited on failure 2019-07-02 15:30:33 -04:00
Alexander Damian
4bddb2241c Added INSTALL_RPATH and INSTALL_RPATH_USE_LINK_PATH to CppKafka target to allow discoverability of the RdKafka.so w/o having to update LD_LIBRARY_PATH 2019-07-02 14:52:21 -04:00
Alexander Damian
097184c648 Added COMPONENT tags to the install targets. Also when installing TARGETS, the COMPONENT cannot appear after INCLUDES DESTINATION as it will be considered part of the destination. 2019-07-01 17:58:38 -04:00
Alexander Damian
bbc78f8dbb Fixed conflicts 2019-07-01 17:24:26 -04:00
Matias Fontanini
18d0b0c00b TEMP: Always put lib under "lib" when installing
This is a temporary workaround until the issue is properly fixed.
2019-07-01 13:46:57 -07:00
Alexander Damian
591e8abe4f Changed include_directories to taget_include_directories and removed Boost path since it's automatically pulled in via Boost::headers 2019-07-01 15:36:10 -04:00
Alexander Damian
ee30fabc2a Added INTERFACE_LINK_DIRECTORIES to the RdKafka::rdkafka properties 2019-07-01 14:57:19 -04:00
Alexander Damian
0d2356f7dd Changed link libraries to PUBLIC so they get exported in the CppKafkaTargets.cmake 2019-07-01 11:52:38 -04:00
Alexander Damian
11a6e4213b Remove comment from config file relating to not finding the RdKafka config file. 2019-06-30 19:39:31 -04:00
Alexander Damian
c4b6a95438 Fixes per code review 2019-06-30 19:30:13 -04:00
Alexander Damian
40e8559158 Remove warning for cmake policy CMP0074 2019-06-29 12:38:13 -04:00
Alexander Damian
d20cab69f3 Add CMake configuration file and export installed targets 2019-06-29 01:50:29 -04:00
Matias Fontanini
c733e0b8d8 Merge pull request #199 from accelerated/pc_config
Removed dependency from rdkafka since it has its own pkg_config file.…
2019-05-21 08:43:49 -07:00
Alexander Damian
07b3c4957d Changed method to determine bitness 2019-05-21 08:52:03 -04:00
Alexander Damian
107cff7ed7 Removed dependency from rdkafka since it has its own pkg_config file. Also added BITNESS detection 2019-05-20 15:27:07 -04:00
Matias Fontanini
1a981f2674 Merge pull request #196 from mfontanini/travis-fix
Fix kafka URL in travis build
2019-05-19 10:21:47 -07:00
Matias Fontanini
8eb7751ff3 Fix kafka URL in travis build 2019-05-19 09:38:25 -07:00
Matias Fontanini
4b25f928a1 Merge pull request #194 from accelerated/master
Fixed pkg_config file
2019-05-17 11:25:12 -07:00
Alexander Damian
6adf1e82c9 Fixed pkg_config file template by adding boost dependency and correcting rdkafka library name 2019-05-17 14:19:28 -04:00
Matias Fontanini
bb0beb6db6 Define constructor for new exception
Fixes #193
2019-05-17 09:14:35 -07:00
Matias Fontanini
bbc3af67d9 Merge pull request #189 from accelerated/fix_versioning
Fixed version macros
2019-05-16 09:21:23 -07:00
Matias Fontanini
a0530d79a9 Merge pull request #190 from accelerated/pc_config
Support for generating pkg-config file
2019-05-16 09:19:03 -07:00
Alexander Damian
d148fe18d5 Added config option for PKGCONFIG install location 2019-05-15 13:44:07 -04:00
Alexander Damian
6499ef9869 Merge branch 'master' of https://github.com/mfontanini/cppkafka into pc_config 2019-05-10 16:10:37 -04:00
Alexander Damian
24e94fbfbc Added boost include dirs to pkg config file 2019-05-08 10:28:49 -04:00
Matias Fontanini
b91350d6a4 Merge pull request #187 from accelerated/master
Added commit termination callback functionality in BackoffCommitter
2019-05-02 08:36:17 -07:00
Alexander Damian
bd43d3c767 Support for generating pkg-config file 2019-04-26 16:19:54 -04:00
Alexander Damian
40d0221052 Fixed version macros 2019-04-26 14:36:03 -04:00
Alexander Damian
6e076810a0 Added ActionTerminatedException to BackoffPerformer 2019-04-26 11:12:52 -04:00
Alexander Damian
81a131ff16 Return true when RD_KAFKA_RESP_ERR__NO_OFFSET is received 2019-04-24 17:39:59 -04:00
Alexander Damian
effdf7fb95 Removed ReturnType. Throw on error from inside do_commit() as well as from perform() 2019-04-24 16:42:56 -04:00
Alexander Damian
d84b75ca9d Merge branch 'master' of https://github.com/mfontanini/cppkafka 2019-04-24 10:52:53 -04:00
Alexander Damian
0c1119727b Replaced termination callback with throwing exception 2019-04-24 10:40:29 -04:00
proller
e8c4397b66 Fix build on some libcxx implementations (#175) 2019-04-24 09:16:14 -04:00
accelerated
470a5b6857 Set CMAKE_CXX_FLAGS only when not set 2019-04-24 09:16:14 -04:00
proller
df4eaa0735 Fix build on some libcxx implementations (#175) 2019-03-07 10:13:15 -08:00
Matias Fontanini
de85a329cb Merge pull request #174 from accelerated/cxxflags
Set CMAKE_CXX_FLAGS only when not set
2019-03-04 20:19:57 -08:00
Matias Fontanini
a17a6f3b55 Merge pull request #172 from snar/freebsd-fix
Fix FreeBSD builds
2019-03-04 20:19:09 -08:00
accelerated
a935d1cb2e Set CMAKE_CXX_FLAGS only when not set 2019-03-02 18:51:25 -05:00
Alexandre Snarskii
ca729ef6f0 Fix FreeBSD builds 2019-03-01 13:47:40 +03:00
Matias Fontanini
c9c46d7a1f Merge pull request #171 from accelerated/async_flush
Added wait_for_acks(0) when calling async_flush
2019-02-23 11:27:13 -08:00
accelerated
ace18d5d7b Added wait_for_acks(0) when calling async_flush 2019-02-21 15:21:16 -05:00
Pavel Pimenov
5bfc047263 Fixes detected by PVS-Studio (#167)
* Fix PVS-Studio warnings:
V591: Non-void function should return a value.
V519: The 'new_item' variable is assigned values twice successively.
2019-02-20 18:30:48 -08:00
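For readers unfamiliar with these diagnostics, a hypothetical reproduction of each (not the cppkafka code that was actually fixed):

```cpp
#include <cstdio>

// V591: a non-void function where one path fell off the end without a return
int sign(int x) {
    if (x > 0) {
        return 1;
    }
    if (x < 0) {
        return -1;
    }
    return 0; // the fix: this path previously had no return statement
}

// V519: the same variable assigned twice in succession; the first store is dead
void v519_example() {
    int new_item = 1;  // dead store, immediately overwritten below
    new_item = 2;
    std::printf("%d %d\n", sign(-5), new_item);
}
```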
Matias Fontanini
4a887607b3 Merge pull request #164 from accelerated/offset_store
Added consumer legacy offset store API
2019-02-07 09:00:17 -08:00
accelerated
9bf535ac49 Simplify round-robin test due to intermittent errors 2019-02-06 22:45:12 -05:00
accelerated
8ae5e9d573 Fixed buffer test array initialization warning for clang 2019-02-06 18:47:36 -05:00
accelerated
e19d84b839 Added compile time check for store_offsets() api 2019-02-06 17:47:02 -05:00
accelerated
4f4c9e9c91 Changes per code review 2019-02-06 13:01:57 -05:00
accelerated
284e1c57a9 Changed store_offsets() to use the actual position from the assignment 2019-02-04 14:23:59 -05:00
accelerated
7bc03185a8 Added legacy offset store API 2019-02-04 12:26:04 -05:00
Matias Fontanini
872ee0442b Merge pull request #163 from accelerated/status_version_fix
Fix message status version
2019-01-31 09:03:40 -08:00
accelerated
63327461bd Fix message status version 2019-01-31 09:29:48 -05:00
Matias Fontanini
efa4e95a18 Merge pull request #157 from demin80/buffer-size-comparison-fix2
added type conversion to avoid signed-vs-unsigned-comparison warning
2019-01-19 08:12:55 -08:00
demin80
755e9f10c2 added missing return 2019-01-18 16:58:03 -05:00
Matias Fontanini
fb4c5edc8e Merge pull request #159 from accelerated/check_error_fix
Fix error check in partition list
2019-01-16 12:27:57 -08:00
accelerated
dc732445f7 Fixes #158 2019-01-16 15:09:27 -05:00
demin80
5a34955fae added type conversion to avoid signed-vs-unsigned-comparison warning 2019-01-16 10:52:14 -05:00
Matias Fontanini
05cc8304df Merge pull request #153 from demin80/hi-priority-queue-fix
Added a high-priority queue to BufferedProducer to avoid message re-ordering
2019-01-14 20:08:47 -08:00
Matias Fontanini
2c6a47d68d Merge pull request #156 from psigen/patch-1
Use CMAKE_STATIC_LIBRARY_* macros for FindRdKafka
2019-01-14 09:18:50 -08:00
Pras Velagapudi
85b7e579e2 Use CMAKE_STATIC_LIBRARY_* macros for FindRdKafka
In the current implementation, library suffixes are hard-coded in a hand-maintained list. Instead of writing this list, we can use the CMake macros for platform-specific library prefixes/suffixes.

E.g. https://cmake.org/cmake/help/v3.0/variable/CMAKE_STATIC_LIBRARY_SUFFIX.html

This also fixes library resolution on Mac OSX, which currently does not work with the native `.dylib` suffix for shared libraries.
2019-01-12 15:51:11 -05:00
demin80
93c2edf6ba refactored by adding retry_mutex_ and replacing bools with enums; fixed formatting issues 2019-01-10 14:37:46 -05:00
demin80
71c4e02143 Revised the implementation based on the reviewers' response 2019-01-08 13:48:26 -05:00
demin80
00370c981d Fixed spacing issues 2019-01-07 14:42:32 -05:00
demin80
97229ebfd9 Added a high-priority queue to BufferedProducer to avoid message re-ordering 2019-01-07 14:39:09 -05:00
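The idea behind the high-priority queue, as a minimal sketch (assumed shape for illustration; not BufferedProducer's actual internals): messages that fail and must be retried are re-queued at the front of the buffer, so they are resent before any newer messages and the original ordering is preserved.

```cpp
#include <deque>
#include <utility>

// Sketch: a buffer where retried messages jump ahead of newly added ones.
template <typename Msg>
class RetryBuffer {
public:
    void add(Msg msg) { queue_.push_back(std::move(msg)); }        // new message
    void add_retry(Msg msg) { queue_.push_front(std::move(msg)); } // failed message
    bool empty() const { return queue_.empty(); }
    Msg next() {
        Msg msg = std::move(queue_.front());
        queue_.pop_front();
        return msg;
    }
private:
    std::deque<Msg> queue_;
};
```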
Matias Fontanini
4ba6b38b6e Merge pull request #149 from accelerated/queue_full
Added queue full notification
2019-01-06 16:52:12 -08:00
accelerated
4a6b6779ad Updated callback description 2019-01-06 17:40:39 -05:00
accelerated
97d1bb9434 Added queue full notify callback 2019-01-06 17:35:55 -05:00
accelerated
ed81ce446d Added queue full notification 2019-01-06 16:59:00 -05:00
Matias Fontanini
520465510e Revert "Add support for Buffer construction via raw arrays"
This reverts commit 74acf65fa6.
2018-12-17 09:18:28 -08:00
Matias Fontanini
40ee64c5c1 Merge pull request #140 from tgaldes/master
Add support for Buffer construction via raw arrays
2018-12-16 11:42:29 -08:00
Matias Fontanini
3ffb0f1fa8 Merge pull request #151 from mfontanini/travis-fix
Fix travis build
2018-12-16 10:15:33 -08:00
Matias Fontanini
7c5616da07 Use sudo: required in travis file 2018-12-16 10:02:06 -08:00
Matias Fontanini
f14a4b9e8c Merge pull request #150 from accelerated/flush_bug
Bug with message leak in BufferedProducer::flush(timeout)
2018-12-16 09:45:35 -08:00
Matias Fontanini
ccc6738265 Merge pull request #148 from accelerated/flush_failure
Added flush/produce termination callbacks
2018-12-16 09:32:36 -08:00
accelerated
8b431c5421 changed rbegin to begin 2018-12-14 16:33:20 -05:00
accelerated
4a24971d3f Fixed bug with message leak in BufferedProducer::flush(timeout) 2018-12-14 16:08:57 -05:00
accelerated
8dd5428c49 Added similar logic for ProduceTerminationCallback 2018-12-13 15:04:12 -05:00
accelerated
0b9b7bab11 Added flush termination callback 2018-12-13 10:43:29 -05:00
Matias Fontanini
ab002fe119 Merge pull request #147 from accelerated/raw_arrays
Support for raw array Buffer constructor
2018-12-10 18:15:37 -08:00
accelerated
06ddd79a29 Support for raw array Buffer constructor 2018-12-10 15:42:04 -05:00
Matias Fontanini
d89840b5f0 Merge pull request #144 from accelerated/header_fix
Header fixes and header copy considerations
2018-12-09 21:47:35 -08:00
accelerated
25c2eaa998 Changed iterator logic to capture header list by reference 2018-12-06 10:37:02 -05:00
accelerated
1c80af9b68 Added constructor from another HeaderList type 2018-12-05 20:04:10 -05:00
accelerated
fe0c7e7dd5 Fixed end() iterator and also applied default copy-constructor instead of passing null handle in BasicMessageBuilder 2018-12-05 12:15:25 -05:00
accelerated
93e066a1c1 * Added asserts when building a HeaderList and removed checks for handle
validity.
* Removed explicit move semantic when cloning a MessageBuilder.
* Renamed clone() to try_clone() in ClonablePtr class.
2018-12-04 11:12:28 -05:00
accelerated
6bbddcd5d5 Fixed Message::set_header_list as per review comments. Changed ClonablePtr to use clone() internally 2018-12-03 09:48:32 -05:00
accelerated
e96dc6d1fc Added comments 2018-12-02 15:00:07 -05:00
accelerated
0b7931bfb8 Added Buffer::Buffer(iter, iter) constructor overload 2018-12-02 14:42:02 -05:00
accelerated
57bddabfd0 Removed clone_handle method and made ClonablePtr::clone private 2018-12-02 14:15:20 -05:00
accelerated
c7ba478582 Header fixes 2018-11-30 09:55:26 -05:00
Matias Fontanini
a9a0693e2a Merge pull request #143 from mfontanini/fix-travis
Fix kafka download URL in travis file
2018-11-26 20:03:07 -08:00
Matias Fontanini
5aa4bc08a3 Fix kafka download URL in travis file 2018-11-26 19:47:58 -08:00
Matias Fontanini
5a4481dc28 Merge pull request #142 from farnazj/master
Add <array> header
2018-11-26 19:06:45 -08:00
Farnaz Jahanbakhsh
d06cd222fe include <array> 2018-11-26 00:26:04 +00:00
Tyler Galdes
74acf65fa6 Add support for Buffer construction via raw arrays 2018-11-19 19:59:08 -05:00
Matias Fontanini
4ad2685d61 Merge pull request #138 from tgaldes/master
Add support for constructing Buffer from std::array
2018-11-19 16:58:34 -08:00
Tyler Galdes
248d1b0638 Delete construction of buffer with rvalue arrays 2018-11-19 19:48:02 -05:00
Tyler Galdes
b48036fe62 use std::array functions for pointer and size of data 2018-11-19 11:49:17 -05:00
Tyler Galdes
757d2b623f Add support for constructing Buffer from std::array 2018-11-16 19:49:52 -05:00
Matias Fontanini
4b7a10ec90 Merge pull request #136 from accelerated/master
Remove setting log level in the constructor
2018-11-13 14:50:15 -08:00
accelerated
b366cf4bf6 Remove setting log level in the constructor 2018-11-13 15:28:06 -05:00
Matias Fontanini
7b4c3e163f Merge pull request #135 from accelerated/events
Added API description for Message::get_status
2018-11-12 10:26:30 -08:00
accelerated
70aef6681d Added API description for Message::get_status 2018-11-12 12:14:20 -05:00
Matias Fontanini
29cb02b756 Merge pull request #134 from accelerated/events
Added support for message status and setting the event mask
2018-11-12 09:05:27 -08:00
accelerated
9859e54522 Added support for message status and setting the event mask 2018-11-12 10:30:54 -05:00
Matias Fontanini
9f6556da0c Merge pull request #125 from mfontanini/events
Event implementation
2018-11-10 10:43:05 -08:00
Matias Fontanini
46481d879f Use BufferedProducer in round robin consumer test
Hopefully this will get rid of the sporadic failures
2018-11-10 10:26:03 -08:00
Matias Fontanini
25e3aacf4a Add compile guard for rd_kafka_event_stats 2018-11-10 10:26:03 -08:00
Matias Fontanini
1f1f1c253b Fix build issue when using rd_kafka_message_latency in old rdkafka 2018-11-10 10:26:03 -08:00
Matias Fontanini
24960c0a49 Build library on travis using rdkafka 0.9.4 as well 2018-11-10 10:26:03 -08:00
Matias Fontanini
4ac837d831 Disable event consumption test 2018-11-10 10:26:03 -08:00
Matias Fontanini
b242e2c35c Allow setting background event callback on configuration handles 2018-11-10 10:26:03 -08:00
Matias Fontanini
19baa03cea Allow getting background queue out of kafka handle base 2018-11-10 10:26:03 -08:00
Matias Fontanini
8dc94869fd Move get_queue behavior into Queue class 2018-11-10 10:25:31 -08:00
Matias Fontanini
71fb76b8e1 Add dumb test that extracts event from queue 2018-11-10 10:25:31 -08:00
Matias Fontanini
c7e1dcb60a Allow checking if an Event is valid 2018-11-10 10:25:31 -08:00
Matias Fontanini
e73c997a0c Allow getting Events out of Queues 2018-11-10 10:25:31 -08:00
Matias Fontanini
b46991db7e Add Event class 2018-11-10 10:25:31 -08:00
Alex Damian
b0ddceda1f Message timestamp refactoring and log level changes (#133)
* Message timestamp refactoring and log level changes

* Changes per code review
2018-11-07 08:36:57 -08:00
Matias Fontanini
451d60295a Remove MessageTimestamp constructor from time_point (#129) 2018-10-26 18:57:28 -07:00
Alex Damian
57268e666c Added time_point overloads for creating timestamps. (#128)
* Added time_point overloads for creating timestamps.

* aliased std::chrono types
2018-10-25 07:39:22 -07:00
Alex Damian
ad9a1e4a49 If timeout is 0, the function should at least run once (#123) 2018-10-22 07:55:29 -07:00
Matias Fontanini
416a7d43ce Minor documentation fixes 2018-10-21 10:17:10 -07:00
Matias Fontanini
a2d17a6f45 Test suite fixes (#124)
* Move polling strategy adapter definition into test_utils.cpp

* Use a random consumer group id in every test
2018-10-20 20:32:32 -07:00
Matias Fontanini
0d54acbc64 Flush producer in example 2018-10-19 08:42:10 -07:00
Matias Fontanini
b2ba4cbfa3 Add comment regarding flushing producer 2018-10-19 08:41:36 -07:00
multiprogramm
2b66fd3a22 Fix windows linker errors (#120) 2018-10-17 10:43:33 -07:00
Alex Damian
fbe3759fed Header support implementation (#115)
* header support implementation

* Fixed issue when ptr is null and doesn't have a cloner function

* Code complete with test cases

updated travis file with v0.11.5

* Added compile time check for rdkafka header support version

* Changes per last code review

* Using brace list initializers
2018-10-16 10:58:05 -07:00
Alex Damian
9af4330c6d Allocators (#118)
* Added allocator support for consumers and buffered producer

* Changed MessageList back to std::vector<Message> for consistency with the allocator API
2018-10-16 08:57:11 -07:00
Alex Damian
d77e7466b8 changed assert with if statement (#116) 2018-10-06 09:28:45 -07:00
68 changed files with 3820 additions and 778 deletions


@@ -1,13 +1,14 @@
language: cpp
sudo: false
sudo: required
compiler:
- gcc
- clang
env:
- RDKAFKA_VERSION=v0.11.0
- RDKAFKA_VERSION=v0.9.4
- RDKAFKA_VERSION=v0.11.6
os:
- linux
@@ -21,9 +22,8 @@ addons:
- zookeeperd
before_script:
- service zookeeper start
- KAFKA_VERSION=2.11-1.0.0
- wget http://apache.cs.utah.edu/kafka/1.0.0/kafka_$KAFKA_VERSION.tgz
- KAFKA_VERSION=2.11-2.2.0
- wget https://archive.apache.org/dist/kafka/2.2.0/kafka_$KAFKA_VERSION.tgz
- tar xvzf kafka_$KAFKA_VERSION.tgz
- ./kafka_$KAFKA_VERSION/bin/kafka-server-start.sh ./kafka_$KAFKA_VERSION/config/server.properties > /dev/null 2> /dev/null &
- git clone https://github.com/edenhill/librdkafka.git
@@ -37,7 +37,7 @@ script:
- ./configure --prefix=./install && make libs && make install
- cd ..
- mkdir build && cd build
- cmake .. -DRDKAFKA_ROOT_DIR=../librdkafka/install/ -DKAFKA_TEST_INSTANCE=localhost:9092
- cmake .. -DCPPKAFKA_CMAKE_VERBOSE=ON -DRDKAFKA_ROOT=./librdkafka/install -DKAFKA_TEST_INSTANCE=localhost:9092
- make examples
- make tests
- ./tests/cppkafka_tests


@@ -1,15 +1,26 @@
cmake_minimum_required(VERSION 2.8.1)
project(cppkafka)
cmake_minimum_required(VERSION 3.9.2)
project(CppKafka)
if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0")
# Use <package>_ROOT variable to find configuration files
cmake_policy(SET CMP0074 NEW)
endif()
include(GNUInstallDirs)
include(CMakePackageConfigHelpers)
# Set the version number.
set(CPPKAFKA_VERSION_MAJOR 0)
set(CPPKAFKA_VERSION_MINOR 2)
set(CPPKAFKA_VERSION "${CPPKAFKA_VERSION_MAJOR}.${CPPKAFKA_VERSION_MINOR}")
set(RDKAFKA_MIN_VERSION 0x00090400)
set(CPPKAFKA_VERSION_MINOR 4)
set(CPPKAFKA_VERSION_REVISION 0)
set(CPPKAFKA_VERSION "${CPPKAFKA_VERSION_MAJOR}.${CPPKAFKA_VERSION_MINOR}.${CPPKAFKA_VERSION_REVISION}")
set(RDKAFKA_MIN_VERSION "0.9.4")
set(RDKAFKA_MIN_VERSION_HEX 0x00090400)
if (NOT CMAKE_CXX_FLAGS)
# Set default compile flags for the project
if(MSVC)
# Don't always use Wall, since VC's /Wall is ridiculously verbose.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W3")
set(CMAKE_CXX_FLAGS "/W3")
# Disable VC secure checks, since these are not really issues
add_definitions("-D_CRT_SECURE_NO_WARNINGS=1")
@@ -17,9 +28,9 @@ if(MSVC)
add_definitions("-DNOGDI=1")
add_definitions("-DNOMINMAX=1")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall")
set(CMAKE_CXX_FLAGS "-Wall")
endif()
endif()
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
# Set output directories
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
@@ -33,6 +44,35 @@ option(CPPKAFKA_DISABLE_EXAMPLES "Disable build of cppkafka examples." OFF)
option(CPPKAFKA_BOOST_STATIC_LIBS "Link with Boost static libraries." ON)
option(CPPKAFKA_BOOST_USE_MULTITHREADED "Use Boost multithreaded libraries." ON)
option(CPPKAFKA_RDKAFKA_STATIC_LIB "Link with Rdkafka static library." OFF)
option(CPPKAFKA_EXPORT_PKGCONFIG "Generate 'cppkafka.pc' file" ON)
option(CPPKAFKA_EXPORT_CMAKE_CONFIG "Generate CMake config, target and version files." ON)
# Add FindRdKafka.cmake
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
if (NOT CPPKAFKA_CONFIG_DIR)
set(CPPKAFKA_CONFIG_DIR lib/cmake/${PROJECT_NAME})
endif()
# Maintain previous compatibility
if (RDKAFKA_ROOT_DIR)
set(RdKafka_ROOT ${RDKAFKA_ROOT_DIR})
elseif (RDKAFKA_ROOT)
set(RdKafka_ROOT ${RDKAFKA_ROOT})
endif()
if (RdKafka_ROOT)
if (NOT IS_ABSOLUTE ${RdKafka_ROOT})
set(RdKafka_ROOT "${CMAKE_SOURCE_DIR}/${RdKafka_ROOT}")
endif()
endif()
if (RDKAFKA_DIR)
set(RdKafka_DIR ${RDKAFKA_DIR}) # For older versions of find_package
if (NOT IS_ABSOLUTE ${RdKafka_ROOT})
set(RdKafka_DIR "${CMAKE_SOURCE_DIR}/${RdKafka_DIR}")
endif()
endif()
# Disable output from find_package macro
if (NOT CPPKAFKA_CMAKE_VERBOSE)
@@ -47,15 +87,23 @@ else()
message(STATUS "Build will generate a static library.")
set(CPPKAFKA_LIBRARY_TYPE STATIC)
add_definitions("-DCPPKAFKA_STATIC=1")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
if (CPPKAFKA_RDKAFKA_STATIC_LIB)
add_definitions("-DLIBRDKAFKA_STATICLIB")
endif()
if (NOT CPPKAFKA_CONFIG_DIR)
set(CPPKAFKA_CONFIG_DIR lib/cmake/${PROJECT_NAME})
endif()
if (NOT CPPKAFKA_PKGCONFIG_DIR)
set(CPPKAFKA_PKGCONFIG_DIR share/pkgconfig)
endif()
# Look for Boost (just need boost.optional headers here)
find_package(Boost REQUIRED ${FIND_PACKAGE_QUIET})
find_package(RdKafka REQUIRED ${FIND_PACKAGE_QUIET})
if (Boost_FOUND)
find_package(Boost COMPONENTS program_options ${FIND_PACKAGE_QUIET})
@@ -72,8 +120,24 @@ if (Boost_FOUND)
endif()
endif()
# Try to find the RdKafka configuration file if present.
# This will search default system locations as well as RdKafka_ROOT and RdKafka_Dir paths if specified.
find_package(RdKafka ${FIND_PACKAGE_QUIET} CONFIG)
set(RDKAFKA_TARGET_IMPORTS ${RdKafka_FOUND})
if (NOT RdKafka_FOUND)
message(STATUS "RdKafkaConfig.cmake not found. Attempting to find module instead...")
find_package(RdKafka REQUIRED ${FIND_PACKAGE_QUIET} MODULE)
if (NOT RdKafka_FOUND)
message(FATAL_ERROR "RdKafka module not found. Please set RDKAFKA_ROOT to the install path or RDKAFKA_DIR pointing to the RdKafka configuration file location.")
else()
message(STATUS "RdKafka module found.")
endif()
else()
message(STATUS "RdKafka configuration file found: ${RdKafka_CONFIG}")
endif()
add_subdirectory(src)
add_subdirectory(include)
add_subdirectory(include/cppkafka)
# Examples target
if (NOT CPPKAFKA_DISABLE_EXAMPLES AND Boost_PROGRAM_OPTIONS_FOUND)


@@ -17,6 +17,8 @@ only supported via the high level consumer API. _cppkafka_ requires **rdkafka >=
order to use it. Other wrapped functionalities are also provided, like fetching metadata,
offsets, etc.
* _cppkafka_ provides message header support. This feature requires **rdkafka >= 0.11.4**.
* _cppkafka_ tries to add minimal overhead over _librdkafka_. A very thin wrapper for _librdkafka_
messages is used for consumption so there's virtually no overhead at all.
@@ -52,10 +54,9 @@ int main() {
In order to compile _cppkafka_ you need:
* _librdkafka >= 0.9.4_
* _CMake_
* A compiler with good C++11 support (e.g. gcc >= 4.8). This was tested successfully on
_g++ 4.8.3_.
* The boost library.
* _CMake >= 3.9.2_
* A compiler with good C++11 support (e.g. gcc >= 4.8). This was tested successfully on _g++ 4.8.3_.
* The boost library (for boost::optional)
Now, in order to build, just run:
@@ -64,12 +65,14 @@ mkdir build
cd build
cmake <OPTIONS> ..
make
make install
```
## CMake options
The following cmake options can be specified:
* `RDKAFKA_ROOT_DIR` : Specify a different librdkafka install directory.
* `RDKAFKA_ROOT` : Specify a different librdkafka install directory.
* `RDKAFKA_DIR` : Specify a different directory where the RdKafkaConfig.cmake is installed.
* `BOOST_ROOT` : Specify a different Boost install directory.
* `CPPKAFKA_CMAKE_VERBOSE` : Generate verbose output. Default is `OFF`.
* `CPPKAFKA_BUILD_SHARED` : Build cppkafka as a shared library. Default is `ON`.
@@ -78,24 +81,14 @@ The following cmake options can be specified:
* `CPPKAFKA_BOOST_STATIC_LIBS` : Link with Boost static libraries. Default is `ON`.
* `CPPKAFKA_BOOST_USE_MULTITHREADED` : Use Boost multi-threaded libraries. Default is `ON`.
* `CPPKAFKA_RDKAFKA_STATIC_LIB` : Link to Rdkafka static library. Default is `OFF`.
* `CPPKAFKA_CONFIG_DIR` : Install location of the cmake configuration files. Default is `lib/cmake/cppkafka`.
* `CPPKAFKA_PKGCONFIG_DIR` : Install location of the .pc file. Default is `share/pkgconfig`.
* `CPPKAFKA_EXPORT_PKGCONFIG` : Generate `cppkafka.pc` file. Default is `ON`.
* `CPPKAFKA_EXPORT_CMAKE_CONFIG` : Generate CMake config, target and version files. Default is `ON`.
Example:
```Shell
cmake -DRDKAFKA_ROOT_DIR=/some/other/dir -DCPPKAFKA_BUILD_SHARED=OFF ...
```
The `RDKAFKA_ROOT_DIR` must contain the following structure. If the system
architecture is 64-bit and both `lib` and `lib64` folders are available, the `lib64`
folder location will be selected by cmake.
```Shell
${RDKAFKA_ROOT_DIR}/
|
+ include/librdkafka/rdkafka.h
|
+ lib/librdkafka.a
|
+ lib64/librdkafka.a (optional)
cmake -DRDKAFKA_ROOT=/some/other/dir -DCPPKAFKA_BUILD_SHARED=OFF ...
```
# Using
@@ -105,6 +98,13 @@ If you want to use _cppkafka_, you'll need to link your application with:
* _cppkafka_
* _rdkafka_
If using CMake, this is simplified by doing:
```cmake
find_package(CppKafka REQUIRED)
target_link_libraries(<YourLibrary> CppKafka::cppkafka)
```
# Documentation
You can generate the documentation by running `make docs` inside the build directory. This requires


@@ -1,60 +1,75 @@
# Override default CMAKE_FIND_LIBRARY_SUFFIXES
# This find module helps find the RdKafka module. It exports the following variables:
# - RdKafka_INCLUDE_DIR : The directory where rdkafka.h is located.
# - RdKafka_LIBNAME : The name of the library, i.e. librdkafka.a, librdkafka.so, etc.
# - RdKafka_LIBRARY_PATH : The full library path i.e. <path_to_binaries>/${RdKafka_LIBNAME}
# - RdKafka::rdkafka : Imported library containing all above properties set.
if (CPPKAFKA_RDKAFKA_STATIC_LIB)
if (MSVC)
set(RDKAFKA_SUFFIX lib)
set(RDKAFKA_PREFIX ${CMAKE_STATIC_LIBRARY_PREFIX})
set(RDKAFKA_SUFFIX ${CMAKE_STATIC_LIBRARY_SUFFIX})
set(RDKAFKA_LIBRARY_TYPE STATIC)
else()
set(RDKAFKA_SUFFIX a)
endif()
else()
if (MSVC)
set(RDKAFKA_SUFFIX dll)
else()
set(RDKAFKA_SUFFIX so)
endif()
set(RDKAFKA_PREFIX ${CMAKE_SHARED_LIBRARY_PREFIX})
set(RDKAFKA_SUFFIX ${CMAKE_SHARED_LIBRARY_SUFFIX})
set(RDKAFKA_LIBRARY_TYPE SHARED)
endif()
find_path(RDKAFKA_ROOT_DIR
NAMES include/librdkafka/rdkafka.h
set(RdKafka_LIBNAME ${RDKAFKA_PREFIX}rdkafka${RDKAFKA_SUFFIX})
find_path(RdKafka_INCLUDE_DIR
NAMES librdkafka/rdkafka.h
HINTS ${RdKafka_ROOT}/include
)
find_path(RDKAFKA_INCLUDE_DIR
NAMES librdkafka/rdkafka.h
HINTS ${RDKAFKA_ROOT_DIR}/include
find_library(RdKafka_LIBRARY_PATH
NAMES ${RdKafka_LIBNAME} rdkafka
HINTS ${RdKafka_ROOT}/lib ${RdKafka_ROOT}/lib64
)
# Check lib paths
if (CPPKAFKA_CMAKE_VERBOSE)
get_property(FIND_LIBRARY_32 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB32_PATHS)
get_property(FIND_LIBRARY_64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
MESSAGE(STATUS "RDKAFKA search 32-bit library paths: ${FIND_LIBRARY_32}")
MESSAGE(STATUS "RDKAFKA search 64-bit library paths: ${FIND_LIBRARY_64}")
message(STATUS "RDKAFKA search 32-bit library paths: ${FIND_LIBRARY_32}")
message(STATUS "RDKAFKA search 64-bit library paths: ${FIND_LIBRARY_64}")
message(STATUS "RdKafka_ROOT = ${RdKafka_ROOT}")
message(STATUS "RdKafka_INCLUDE_DIR = ${RdKafka_INCLUDE_DIR}")
message(STATUS "RdKafka_LIBNAME = ${RdKafka_LIBNAME}")
message(STATUS "RdKafka_LIBRARY_PATH = ${RdKafka_LIBRARY_PATH}")
endif()
find_library(RDKAFKA_LIBRARY
NAMES rdkafka.${RDKAFKA_SUFFIX} librdkafka.${RDKAFKA_SUFFIX} rdkafka
HINTS ${RDKAFKA_ROOT_DIR}/lib
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(RDKAFKA DEFAULT_MSG
RDKAFKA_LIBRARY
RDKAFKA_INCLUDE_DIR
find_package_handle_standard_args(RdKafka DEFAULT_MSG
RdKafka_LIBNAME
RdKafka_LIBRARY_PATH
RdKafka_INCLUDE_DIR
)
set(CONTENTS "#include <librdkafka/rdkafka.h>\n #if RD_KAFKA_VERSION >= ${RDKAFKA_MIN_VERSION}\n int main() { }\n #endif")
set(FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/rdkafka_version_test.c)
set(CONTENTS "#include <librdkafka/rdkafka.h>\n #if RD_KAFKA_VERSION >= ${RDKAFKA_MIN_VERSION_HEX}\n int main() { }\n #endif")
set(FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/rdkafka_version_test.cpp)
file(WRITE ${FILE_NAME} ${CONTENTS})
try_compile(HAVE_VALID_KAFKA_VERSION ${CMAKE_CURRENT_BINARY_DIR}
try_compile(RdKafka_FOUND ${CMAKE_CURRENT_BINARY_DIR}
SOURCES ${FILE_NAME}
CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${RDKAFKA_INCLUDE_DIR}")
CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${RdKafka_INCLUDE_DIR}")
if (HAVE_VALID_KAFKA_VERSION)
if (RdKafka_FOUND)
add_library(RdKafka::rdkafka ${RDKAFKA_LIBRARY_TYPE} IMPORTED GLOBAL)
if (UNIX AND NOT APPLE)
set(RDKAFKA_DEPENDENCIES pthread rt ssl crypto dl z)
else()
set(RDKAFKA_DEPENDENCIES pthread ssl crypto dl z)
endif()
set_target_properties(RdKafka::rdkafka PROPERTIES
IMPORTED_NAME RdKafka
IMPORTED_LOCATION "${RdKafka_LIBRARY_PATH}"
INTERFACE_INCLUDE_DIRECTORIES "${RdKafka_INCLUDE_DIR}"
INTERFACE_LINK_LIBRARIES "${RDKAFKA_DEPENDENCIES}")
message(STATUS "Found valid rdkafka version")
mark_as_advanced(
RDKAFKA_ROOT_DIR
RDKAFKA_INCLUDE_DIR
RDKAFKA_LIBRARY
RdKafka_INCLUDE_DIR
RdKafka_LIBRARY_PATH
)
else()
message(FATAL_ERROR "Failed to find valid rdkafka version")

cmake/config.cmake.in (new file)

@@ -0,0 +1,33 @@
@PACKAGE_INIT@
include(CMakeFindDependencyMacro)
# Add FindRdKafka.cmake
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_LIST_DIR}")
set(RDKAFKA_MIN_VERSION_HEX "@RDKAFKA_MIN_VERSION_HEX@")
# Find boost optional
find_dependency(Boost REQUIRED)
# Try to find the RdKafka configuration file if present.
# This will search default system locations as well as RdKafka_ROOT and RdKafka_DIR paths if specified.
find_package(RdKafka QUIET CONFIG)
set(RDKAFKA_TARGET_IMPORTS ${RdKafka_FOUND})
if (NOT RdKafka_FOUND)
find_dependency(RdKafka REQUIRED MODULE)
endif()
include("${CMAKE_CURRENT_LIST_DIR}/@TARGET_EXPORT_NAME@.cmake")
# Export 'CppKafka_ROOT'
set_and_check(@PROJECT_NAME@_ROOT "@PACKAGE_CMAKE_INSTALL_PREFIX@")
# Export 'CppKafka_INSTALL_INCLUDE_DIR'
set_and_check(@PROJECT_NAME@_INSTALL_INCLUDE_DIR "@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@")
# Export 'CppKafka_INSTALL_LIB_DIR'
set_and_check(@PROJECT_NAME@_INSTALL_LIB_DIR "@PACKAGE_CMAKE_INSTALL_LIBDIR@")
# Validate installed components
check_required_components("@PROJECT_NAME@")

cmake/cppkafka.pc.in (new file)

@@ -0,0 +1,14 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
sharedlibdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
includedir=${prefix}/include
Name: cppkafka
Url: https://github.com/mfontanini/cppkafka
Description: C++ wrapper library on top of RdKafka
Version: @CPPKAFKA_VERSION@
Requires:
Requires.private: rdkafka >= 0.9.4
Libs: -L${libdir} -L${sharedlibdir} -lcppkafka
Cflags: -I${includedir} -I${includedir}/cppkafka -I@Boost_INCLUDE_DIRS@


@@ -1,11 +1,10 @@
link_libraries(cppkafka ${RDKAFKA_LIBRARY} ${Boost_LIBRARIES} pthread rt ssl crypto dl z)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
include_directories(SYSTEM ${RDKAFKA_INCLUDE_DIR})
add_custom_target(examples)
macro(create_example example_name)
string(REPLACE "_" "-" sanitized_name ${example_name})
add_executable(${sanitized_name} EXCLUDE_FROM_ALL "${example_name}_example.cpp")
target_link_libraries(${sanitized_name} cppkafka RdKafka::rdkafka Boost::boost Boost::program_options)
add_dependencies(examples ${sanitized_name})
endmacro()


@@ -115,7 +115,7 @@ int main(int argc, char* argv[]) {
},
// Whenever EOF is reached on a partition, print this
[](ConsumerDispatcher::EndOfFile, const TopicPartition& topic_partition) {
cout << "Reched EOF on partition " << topic_partition << endl;
cout << "Reached EOF on partition " << topic_partition << endl;
}
);
}


@@ -75,4 +75,7 @@ int main(int argc, char* argv[]) {
// Actually produce the message we've built
producer.produce(builder);
}
// Flush all produced messages
producer.flush();
}


@@ -1 +0,0 @@
add_subdirectory(cppkafka)


@@ -1,7 +1,8 @@
set(CPPKAFKA_HEADER "${CMAKE_CURRENT_BINARY_DIR}/cppkafka.h")
# Local function to auto-generate main cppkafka.h header file
function(make_cppkafka_header)
set(CPPKAFKA_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/cppkafka.h)
file(GLOB INCLUDE_HEADERS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.h" "utils/*.h")
file(GLOB INCLUDE_HEADERS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h" "utils/*.h")
list(SORT INCLUDE_HEADERS)
foreach(header ${INCLUDE_HEADERS})
if (NOT ${header} MATCHES "cppkafka.h")
@@ -10,7 +11,7 @@ function(make_cppkafka_header)
endforeach()
#create file from template
configure_file(${PROJECT_SOURCE_DIR}/cppkafka.h.in ${CPPKAFKA_HEADER})
configure_file("${PROJECT_SOURCE_DIR}/cmake/cppkafka.h.in" "${CPPKAFKA_HEADER}" @ONLY)
endfunction()
# Run file generation function
@@ -22,7 +23,7 @@ file(GLOB UTILS_INCLUDE_FILES "utils/*.h")
file(GLOB DETAIL_INCLUDE_FILES "detail/*.h")
install(
FILES ${INCLUDE_FILES}
DESTINATION include/cppkafka
DESTINATION include/cppkafka/
COMPONENT Headers
)
install(
@@ -35,3 +36,8 @@ install(
DESTINATION include/cppkafka/detail/
COMPONENT Headers
)
install(
FILES "${CPPKAFKA_HEADER}"
DESTINATION include/cppkafka/
COMPONENT Headers
)


@@ -31,6 +31,7 @@
#define CPPKAFKA_BUFFER_H
#include <cstddef>
#include <array>
#include <vector>
#include <iosfwd>
#include <algorithm>
@@ -81,6 +82,17 @@ public:
}
}
/**
* Constructs a buffer from two iterators in the range [first,last)
*
* \param first An iterator to the start of data
* \param last An iterator to the end of data (not included)
*/
template <typename Iter>
Buffer(const Iter first, const Iter last)
: Buffer(&*first, std::distance(first, last)) {
}
/**
* Constructs a buffer from a vector
*
@@ -92,10 +104,43 @@ public:
static_assert(sizeof(T) == sizeof(DataType), "sizeof(T) != sizeof(DataType)");
}
// Don't allow construction from temporary vectors
/**
* Don't allow construction from temporary vectors
*/
template <typename T>
Buffer(std::vector<T>&& data) = delete;
/**
* Constructs a buffer from an array
*
* \param data The array to be used as input
*/
template <typename T, size_t N>
Buffer(const std::array<T, N>& data)
: data_(reinterpret_cast<const DataType*>(data.data())), size_(data.size()) {
static_assert(sizeof(T) == sizeof(DataType), "sizeof(T) != sizeof(DataType)");
}
/**
* Don't allow construction from temporary arrays
*/
template <typename T, size_t N>
Buffer(std::array<T, N>&& data) = delete;
/**
* Constructs a buffer from a raw array
*
* \param data The array to be used as input
*/
template <typename T, size_t N>
Buffer(const T(&data)[N])
: Buffer(data, N) {
}
// Don't allow construction from temporary raw arrays
template <typename T, size_t N>
Buffer(T(&&data)[N]) = delete;
/**
* \brief Construct a buffer from a const string ref
*
@@ -104,7 +149,9 @@ public:
*/
Buffer(const std::string& data);
// Don't allow construction from temporary strings
/**
* Don't allow construction from temporary strings
*/
Buffer(std::string&&) = delete;
Buffer(const Buffer&) = delete;
@@ -172,6 +219,14 @@ CPPKAFKA_API bool operator==(const Buffer& lhs, const Buffer& rhs);
*/
CPPKAFKA_API bool operator!=(const Buffer& lhs, const Buffer& rhs);
/**
* Compares Buffer objects lexicographically
*/
CPPKAFKA_API bool operator<(const Buffer& lhs, const Buffer& rhs);
CPPKAFKA_API bool operator<=(const Buffer& lhs, const Buffer& rhs);
CPPKAFKA_API bool operator>(const Buffer& lhs, const Buffer& rhs);
CPPKAFKA_API bool operator>=(const Buffer& lhs, const Buffer& rhs);
} // cppkafka
#endif // CPPKAFKA_BUFFER_H
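A short usage sketch of the constructors added above (illustrative; assumes the header is installed as <cppkafka/buffer.h>):

```cpp
#include <array>
#include <vector>
#include <cppkafka/buffer.h>

int main() {
    const std::array<char, 4> arr{{'a', 'b', 'c', 'd'}};
    const char raw[3] = {'x', 'y', 'z'};
    const std::vector<char> vec{'1', '2', '3'};

    cppkafka::Buffer from_array(arr);                    // std::array overload
    cppkafka::Buffer from_raw(raw);                      // raw array overload
    cppkafka::Buffer from_iters(vec.begin(), vec.end()); // iterator-pair overload

    // Temporaries are rejected at compile time by the deleted overloads, e.g.:
    // cppkafka::Buffer bad(std::array<char, 2>{{'a', 'b'}}); // error: deleted
}
```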


@@ -41,7 +41,7 @@ template <typename T, typename Deleter, typename Cloner>
class ClonablePtr {
public:
/**
* Creates an instance
* \brief Creates an instance
*
* \param ptr The pointer to be wrapped
* \param deleter The deleter functor
@@ -60,17 +60,21 @@ public:
* \param rhs The pointer to be copied
*/
ClonablePtr(const ClonablePtr& rhs)
: handle_(rhs.cloner_(rhs.handle_.get()), rhs.handle_.get_deleter()), cloner_(rhs.cloner_) {
: handle_(std::unique_ptr<T, Deleter>(rhs.try_clone(), rhs.get_deleter())),
cloner_(rhs.get_cloner()) {
}
/**
* Copies and assigns the given pointer
* \brief Copies and assigns the given pointer
*
* \param rhs The pointer to be copied
*/
ClonablePtr& operator=(const ClonablePtr& rhs) {
handle_.reset(cloner_(rhs.handle_.get()));
if (this != &rhs) {
handle_ = std::unique_ptr<T, Deleter>(rhs.try_clone(), rhs.get_deleter());
cloner_ = rhs.get_cloner();
}
return *this;
}
@@ -79,12 +83,51 @@ public:
~ClonablePtr() = default;
/**
* Getter for the internal pointer
* \brief Getter for the internal pointer
*/
T* get() const {
return handle_.get();
}
/**
* \brief Releases ownership of the internal pointer
*/
T* release() {
return handle_.release();
}
/**
* \brief Reset the internal pointer to a new one
*/
void reset(T* ptr) {
handle_.reset(ptr);
}
/**
* \brief Get the deleter
*/
const Deleter& get_deleter() const {
return handle_.get_deleter();
}
/**
* \brief Get the cloner
*/
const Cloner& get_cloner() const {
return cloner_;
}
/**
* \brief Indicates whether this ClonablePtr instance is valid (not null)
*/
explicit operator bool() const {
return static_cast<bool>(handle_);
}
private:
T* try_clone() const {
return cloner_ ? cloner_(get()) : get();
}
std::unique_ptr<T, Deleter> handle_;
Cloner cloner_;
};
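A hedged usage sketch of ClonablePtr with a librdkafka handle that has both a copy and a destroy function (this mirrors how cppkafka wraps such handles, but is written here purely for illustration):

```cpp
#include <cppkafka/clonable_ptr.h>
#include <librdkafka/rdkafka.h>

using TopicListPtr =
    cppkafka::ClonablePtr<rd_kafka_topic_partition_list_t,
                          decltype(&rd_kafka_topic_partition_list_destroy),
                          decltype(&rd_kafka_topic_partition_list_copy)>;

int main() {
    TopicListPtr list(rd_kafka_topic_partition_list_new(1),
                      &rd_kafka_topic_partition_list_destroy,
                      &rd_kafka_topic_partition_list_copy);
    TopicListPtr copy = list; // invokes the cloner: each object owns a deep copy
}
```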


@@ -42,6 +42,7 @@
#include "clonable_ptr.h"
#include "configuration_base.h"
#include "macros.h"
#include "event.h"
namespace cppkafka {
@@ -78,6 +79,7 @@ public:
const std::string& message)>;
using StatsCallback = std::function<void(KafkaHandleBase& handle, const std::string& json)>;
using SocketCallback = std::function<int(int domain, int type, int protocol)>;
using BackgroundEventCallback = std::function<void(KafkaHandleBase& handle, Event)>;
using ConfigurationBase<Configuration>::set;
using ConfigurationBase<Configuration>::get;
@@ -142,6 +144,18 @@ public:
*/
Configuration& set_socket_callback(SocketCallback callback);
#if RD_KAFKA_VERSION >= RD_KAFKA_ADMIN_API_SUPPORT_VERSION
/**
* Sets the background event callback (invokes rd_kafka_conf_set_background_event_cb)
*/
Configuration& set_background_event_callback(BackgroundEventCallback callback);
/**
* Sets the event mask (invokes rd_kafka_conf_set_events)
*/
Configuration& set_events(int events);
#endif
/**
* Sets the default topic configuration
*/
@@ -204,6 +218,11 @@ public:
*/
const SocketCallback& get_socket_callback() const;
/**
* Gets the background event callback
*/
const BackgroundEventCallback& get_background_event_callback() const;
/**
* Gets the default topic configuration
*/
@@ -229,6 +248,7 @@ private:
LogCallback log_callback_;
StatsCallback stats_callback_;
SocketCallback socket_callback_;
BackgroundEventCallback background_event_callback_;
};
} // cppkafka


@@ -102,6 +102,7 @@ public:
using RevocationCallback = std::function<void(const TopicPartitionList&)>;
using RebalanceErrorCallback = std::function<void(Error)>;
using KafkaHandleBase::pause;
using KafkaHandleBase::resume;
/**
* \brief Creates an instance of a consumer.
@@ -281,6 +282,20 @@ public:
*/
TopicPartitionList get_offsets_committed(const TopicPartitionList& topic_partitions) const;
/**
* \brief Gets the offsets committed for the given topic/partition list with a timeout
*
* This translates into a call to rd_kafka_committed
*
* \param topic_partitions The topic/partition list to be queried
*
* \param timeout The timeout for this operation. Supersedes the default consumer timeout.
*
* \return The topic partition list
*/
TopicPartitionList get_offsets_committed(const TopicPartitionList& topic_partitions,
std::chrono::milliseconds timeout) const;
/**
* \brief Gets the offset positions for the given topic/partition list
*
@@ -291,6 +306,38 @@ public:
* \return The topic partition list
*/
TopicPartitionList get_offsets_position(const TopicPartitionList& topic_partitions) const;
#if (RD_KAFKA_VERSION >= RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION)
/**
* \brief Stores the offsets on the currently assigned topic/partitions (legacy).
*
* This translates into a call to rd_kafka_offsets_store with the offsets prior to the current assignment positions.
* It is equivalent to calling rd_kafka_offsets_store(get_offsets_position(get_assignment())).
*
* \note When using this API it's recommended to set enable.auto.offset.store=false and enable.auto.commit=true.
*/
void store_consumed_offsets() const;
/**
* \brief Stores the offsets on the given topic/partitions (legacy).
*
* This translates into a call to rd_kafka_offsets_store.
*
* \param topic_partitions The topic/partition list to be stored.
*
* \note When using this API it's recommended to set enable.auto.offset.store=false and enable.auto.commit=true.
*/
void store_offsets(const TopicPartitionList& topic_partitions) const;
#endif
/**
* \brief Stores the offset for this message (legacy).
*
* This translates into a call to rd_kafka_offset_store.
*
* \param msg The message whose offset will be stored.
*
* \note When using this API it's recommended to set enable.auto.offset.store=false and enable.auto.commit=true.
*/
void store_offset(const Message& msg) const;
/**
* \brief Gets the current topic subscription
@@ -376,13 +423,43 @@ public:
/**
* \brief Polls for a batch of messages
*
* This can return one or more messages
* This can return zero or more messages
*
* \param max_batch_size The maximum amount of messages expected
* \param alloc The optionally supplied allocator for allocating messages
*
* \return A list of messages
*/
template <typename Allocator>
std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
const Allocator& alloc);
/**
* \brief Polls for a batch of messages
*
* This can return zero or more messages
*
* \param max_batch_size The maximum amount of messages expected
*
* \return A list of messages
*/
MessageList poll_batch(size_t max_batch_size);
std::vector<Message> poll_batch(size_t max_batch_size);
/**
* \brief Polls for a batch of messages
*
* This can return zero or more messages
*
* \param max_batch_size The maximum amount of messages expected
* \param timeout The timeout for this operation
* \param alloc The optionally supplied allocator for allocating messages
*
* \return A list of messages
*/
template <typename Allocator>
std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc);
/**
* \brief Polls for a batch of messages
@@ -394,7 +471,8 @@ public:
*
* \return A list of messages
*/
MessageList poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout);
std::vector<Message> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout);
/**
* \brief Get the global event queue servicing this consumer corresponding to
@@ -440,6 +518,34 @@ private:
RebalanceErrorCallback rebalance_error_callback_;
};
// Implementations
template <typename Allocator>
std::vector<Message, Allocator> Consumer::poll_batch(size_t max_batch_size,
const Allocator& alloc) {
return poll_batch(max_batch_size, get_timeout(), alloc);
}
template <typename Allocator>
std::vector<Message, Allocator> Consumer::poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc) {
std::vector<rd_kafka_message_t*> raw_messages(max_batch_size);
// Note that this will leak the queue when using rdkafka < 0.11.5 (see get_queue comment)
Queue queue = Queue::make_queue(rd_kafka_queue_get_consumer(get_handle()));
ssize_t result = rd_kafka_consume_batch_queue(queue.get_handle(),
timeout.count(),
raw_messages.data(),
raw_messages.size());
if (result == -1) {
check_error(rd_kafka_last_error());
// on the off-chance that check_error() does not throw an error
return std::vector<Message, Allocator>(alloc);
}
return std::vector<Message, Allocator>(raw_messages.begin(),
raw_messages.begin() + result,
alloc);
}
} // cppkafka
#endif // CPP_KAFKA_CONSUMER_H
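A usage sketch combining the overloads above (assumes an already-configured Consumer with enable.auto.offset.store=false, as the store_offsets() notes recommend):

```cpp
#include <chrono>
#include <vector>
#include <cppkafka/consumer.h>

void consume_some(cppkafka::Consumer& consumer) {
    std::vector<cppkafka::Message> batch =
        consumer.poll_batch(100, std::chrono::milliseconds(500));
    for (const cppkafka::Message& msg : batch) {
        if (msg && !msg.get_error()) {
            // ... process the message ...
            consumer.store_offset(msg); // legacy per-message offset store
        }
    }
}
```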


@@ -37,14 +37,19 @@
#include <cppkafka/configuration_option.h>
#include <cppkafka/consumer.h>
#include <cppkafka/error.h>
#include <cppkafka/event.h>
#include <cppkafka/exceptions.h>
#include <cppkafka/group_information.h>
#include <cppkafka/header.h>
#include <cppkafka/header_list.h>
#include <cppkafka/header_list_iterator.h>
#include <cppkafka/kafka_handle_base.h>
#include <cppkafka/logging.h>
#include <cppkafka/macros.h>
#include <cppkafka/message.h>
#include <cppkafka/message_builder.h>
#include <cppkafka/message_internal.h>
#include <cppkafka/message_timestamp.h>
#include <cppkafka/metadata.h>
#include <cppkafka/producer.h>
#include <cppkafka/queue.h>


@@ -14,7 +14,7 @@
#endif
#if defined(__linux__) || defined(__CYGWIN__)
#if defined(__linux__) || defined(__CYGWIN__) || defined(__sun)
# include <endian.h>
@@ -42,11 +42,11 @@
# define __LITTLE_ENDIAN LITTLE_ENDIAN
# define __PDP_ENDIAN PDP_ENDIAN
#elif defined(__OpenBSD__)
#elif defined(__OpenBSD__) || defined(__FreeBSD__)
# include <sys/endian.h>
#elif defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
#elif defined(__NetBSD__) || defined(__DragonFly__)
# include <sys/endian.h>


@@ -42,6 +42,10 @@ namespace cppkafka {
*/
class CPPKAFKA_API Error {
public:
/**
* @brief Constructs an error object with RD_KAFKA_RESP_ERR_NO_ERROR
*/
Error() = default;
/**
* Constructs an error object
*/
@@ -77,7 +81,7 @@ public:
*/
CPPKAFKA_API friend std::ostream& operator<<(std::ostream& output, const Error& rhs);
private:
rd_kafka_resp_err_t error_;
rd_kafka_resp_err_t error_{RD_KAFKA_RESP_ERR_NO_ERROR};
};
} // cppkafka

include/cppkafka/event.h (new file)

@@ -0,0 +1,180 @@
/*
* Copyright (c) 2018, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_EVENT_H
#define CPPKAFKA_EVENT_H
#include <memory>
#include <string>
#include <vector>
#include "error.h"
#include "message.h"
#include "topic_partition.h"
#include "topic_partition_list.h"
namespace cppkafka {
class Event {
public:
/**
* Construct an Event from a rdkafka event handle and take ownership of it
*
* \param handle The handle to construct this event from
*/
Event(rd_kafka_event_t* handle);
/**
* Returns the name of this event
*/
std::string get_name() const;
/**
* Returns the type of this event
*/
rd_kafka_event_type_t get_type() const;
/**
* \brief Gets the next message contained in this event.
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_FETCH
* * RD_KAFKA_EVENT_DR
*
* \note The returned message's lifetime *is tied to this Event*. That is, if the event
* is freed, so are the contents of the message.
*/
Message get_next_message() const;
/**
* \brief Gets all messages in this event (if any)
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_FETCH
* * RD_KAFKA_EVENT_DR
*
* \note The returned messages' lifetimes *are tied to this Event*. That is, once the event
* is freed, the contents of the messages are freed as well.
*
* \return A vector containing 0 or more messages
*/
std::vector<Message> get_messages();
/**
* \brief Gets all messages in this event (if any)
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_FETCH
* * RD_KAFKA_EVENT_DR
*
* \param allocator The allocator to use on the output vector
*
* \note The returned messages' lifetimes *are tied to this Event*. That is, once the event
* is freed, the contents of the messages are freed as well.
*
* \return A vector containing 0 or more messages
*/
template <typename Allocator>
std::vector<Message, Allocator> get_messages(const Allocator allocator);
/**
* \brief Gets the number of messages contained in this event
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_FETCH
* * RD_KAFKA_EVENT_DR
*/
size_t get_message_count() const;
/**
* \brief Returns the error in this event
*/
Error get_error() const;
/**
* Gets the opaque pointer in this event
*/
void* get_opaque() const;
#if RD_KAFKA_VERSION >= RD_KAFKA_EVENT_STATS_SUPPORT_VERSION
/**
* \brief Gets the stats in this event
*
* This call is only valid if the event type is RD_KAFKA_EVENT_STATS
*/
std::string get_stats() const {
return rd_kafka_event_stats(handle_.get());
}
#endif
/**
* \brief Gets the topic/partition for this event
*
* This call is only valid if the event type is RD_KAFKA_EVENT_ERROR
*/
TopicPartition get_topic_partition() const;
/**
* \brief Gets the list of topic/partitions in this event
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_REBALANCE
* * RD_KAFKA_EVENT_OFFSET_COMMIT
*/
TopicPartitionList get_topic_partition_list() const;
/**
* Check whether this event is valid
*
* \return true if and only if this event holds a valid (non-null) handle
*/
operator bool() const;
private:
using HandlePtr = std::unique_ptr<rd_kafka_event_t, decltype(&rd_kafka_event_destroy)>;
HandlePtr handle_;
};
template <typename Allocator>
std::vector<Message, Allocator> Event::get_messages(const Allocator allocator) {
const size_t total_messages = get_message_count();
std::vector<const rd_kafka_message_t*> raw_messages(total_messages);
const auto messages_read = rd_kafka_event_message_array(handle_.get(),
raw_messages.data(),
total_messages);
std::vector<Message, Allocator> output(allocator);
output.reserve(messages_read);
for (size_t i = 0; i < messages_read; ++i) {
output.emplace_back(Message::make_non_owning(const_cast<rd_kafka_message_t*>(raw_messages[i])));
}
return output;
}
} // cppkafka
#endif // CPPKAFKA_EVENT_H
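
A short sketch of the new Event wrapper in use, assuming a valid Queue (for example the background queue) to pull from; the function name is illustrative:

#include <cppkafka/event.h>
#include <cppkafka/queue.h>
#include <chrono>
#include <iostream>

void drain_one_event(const cppkafka::Queue& queue) {
    // Wait up to 100ms for the next event; the Event is falsy on timeout.
    cppkafka::Event event = queue.next_event(std::chrono::milliseconds(100));
    if (!event) {
        return;
    }
    std::cout << "event: " << event.get_name() << "\n";
    if (event.get_type() == RD_KAFKA_EVENT_DR) {
        // The messages' lifetime is tied to 'event'; use them before it dies.
        for (const cppkafka::Message& message : event.get_messages()) {
            std::cout << "delivered offset " << message.get_offset() << "\n";
        }
    }
}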

View File

@@ -134,6 +134,14 @@ private:
Error error_;
};
/**
* Backoff performer has no more retries left for a specific action.
*/
class CPPKAFKA_API ActionTerminatedException : public Exception {
public:
ActionTerminatedException(const std::string& error);
};
} // cppkafka
#endif // CPPKAFKA_EXCEPTIONS_H

195  include/cppkafka/header.h  Normal file
View File

@@ -0,0 +1,195 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_HEADER_H
#define CPPKAFKA_HEADER_H
#include "macros.h"
#include "buffer.h"
#include <string>
#include <assert.h>
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
namespace cppkafka {
/**
* \brief Class representing a rdkafka header.
*
* The template parameter 'BufferType' can represent a cppkafka::Buffer, std::string, std::vector, etc.
* A valid header may contain an empty name as well as null data.
*/
template <typename BufferType>
class Header {
public:
using ValueType = BufferType;
/**
* \brief Build an empty header with no data
*/
Header() = default;
/**
* \brief Build a header instance
* \param name The header name
* \param value The non-modifiable header data
*/
Header(std::string name,
const BufferType& value);
/**
* \brief Build a header instance
* \param name The header name
* \param value The header data to be moved
*/
Header(std::string name,
BufferType&& value);
/**
* \brief Get the header name
* \return A reference to the name
*/
const std::string& get_name() const;
/**
* \brief Get the header value
* \return A const reference to the underlying buffer
*/
const BufferType& get_value() const;
/**
* \brief Get the header value
* \return A non-const reference to the underlying buffer
*/
BufferType& get_value();
/**
* \brief Check if this header contains data
* \return True if the header value is non-empty, false otherwise.
*/
operator bool() const;
private:
template <typename T>
T make_value(const T& other);
Buffer make_value(const Buffer& other);
std::string name_;
BufferType value_;
};
// Comparison operators for Header type
template <typename BufferType>
bool operator==(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return std::tie(lhs.get_name(), lhs.get_value()) == std::tie(rhs.get_name(), rhs.get_value());
}
template <typename BufferType>
bool operator!=(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return !(lhs == rhs);
}
template <typename BufferType>
bool operator<(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return std::tie(lhs.get_name(), lhs.get_value()) < std::tie(rhs.get_name(), rhs.get_value());
}
template <typename BufferType>
bool operator>(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return std::tie(lhs.get_name(), lhs.get_value()) > std::tie(rhs.get_name(), rhs.get_value());
}
template <typename BufferType>
bool operator<=(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return !(lhs > rhs);
}
template <typename BufferType>
bool operator>=(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return !(lhs < rhs);
}
// Implementation
template <typename BufferType>
Header<BufferType>::Header(std::string name,
const BufferType& value)
: name_(std::move(name)),
value_(make_value(value)) {
}
template <typename BufferType>
Header<BufferType>::Header(std::string name,
BufferType&& value)
: name_(std::move(name)),
value_(std::move(value)) {
}
template <typename BufferType>
const std::string& Header<BufferType>::get_name() const {
return name_;
}
template <typename BufferType>
const BufferType& Header<BufferType>::get_value() const {
return value_;
}
template <typename BufferType>
BufferType& Header<BufferType>::get_value() {
return value_;
}
template <typename BufferType>
Header<BufferType>::operator bool() const {
return !value_.empty();
}
template <>
inline
Header<Buffer>::operator bool() const {
return value_.get_size() > 0;
}
template <typename BufferType>
template <typename T>
T Header<BufferType>::make_value(const T& other) {
return other;
}
template <typename BufferType>
Buffer Header<BufferType>::make_value(const Buffer& other) {
return Buffer(other.get_data(), other.get_size());
}
} //namespace cppkafka
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
#endif //CPPKAFKA_HEADER_H
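
A minimal sketch of the new Header type and its comparison operators, assuming rdkafka is recent enough for header support to be compiled in:

#include <cppkafka/header.h>
#include <cassert>
#include <string>

int main() {
    using StringHeader = cppkafka::Header<std::string>;
    // "abc123" binds to the rvalue constructor and is moved in.
    StringHeader trace_id("trace-id", std::string("abc123"));
    StringHeader copy = trace_id;
    assert(copy == trace_id);          // name and value compare equal
    assert(trace_id);                  // non-empty value => true
    assert(trace_id.get_name() == "trace-id");
}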

View File

@@ -0,0 +1,337 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_HEADER_LIST_H
#define CPPKAFKA_HEADER_LIST_H
#include <librdkafka/rdkafka.h>
#include "clonable_ptr.h"
#include "header.h"
#include "header_list_iterator.h"
#include "exceptions.h"
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
namespace cppkafka {
/**
* \brief Thin wrapper over a rd_kafka_headers_t handle which optionally controls its lifetime.
* \tparam HeaderType The header type
*
* This is a copyable and movable class that wraps a rd_kafka_header_t*. When copying this class,
* all associated headers are also copied via rd_kafka_headers_copy(). If this list owns the underlying handle,
* its destructor will call rd_kafka_headers_destroy().
*/
template <typename HeaderType>
class HeaderList {
public:
template <typename OtherHeaderType>
friend class HeaderList;
using BufferType = typename HeaderType::ValueType;
using Iterator = HeaderIterator<HeaderType>;
/**
* Constructs a header list that won't take ownership of the given handle.
*/
static HeaderList<HeaderType> make_non_owning(rd_kafka_headers_t* handle);
/**
* \brief Create an empty header list with no handle.
*/
HeaderList();
/**
* \brief Create an empty header list. This call translates to rd_kafka_headers_new().
* \param reserve The number of headers to reserve space for.
*/
explicit HeaderList(size_t reserve);
/**
* \brief Create a header list and assume ownership of the handle.
* \param handle The header list handle.
*/
explicit HeaderList(rd_kafka_headers_t* handle);
/**
* \brief Create a header list from another header list type
* \param other The other list
*/
template <typename OtherHeaderType>
HeaderList(const HeaderList<OtherHeaderType>& other);
template <typename OtherHeaderType>
HeaderList(HeaderList<OtherHeaderType>&& other);
/**
* \brief Add a header to the list. This translates to rd_kafka_header_add().
* \param header The header.
* \return An Error indicating if the operation was successful or not.
* \warning This operation shall invalidate all iterators.
*/
Error add(const HeaderType& header);
/**
* \brief Remove all headers with 'name'. This translates to rd_kafka_header_remove().
* \param name The name of the header(s) to remove.
* \return An Error indicating if the operation was successful or not.
* \warning This operation shall invalidate all iterators.
*/
Error remove(const std::string& name);
/**
* \brief Return the header present at position 'index'. Throws on error.
* This translates to rd_kafka_header_get(index)
* \param index The header index in the list (0-based).
* \return The header at that position.
*/
HeaderType at(size_t index) const; //throws
/**
* \brief Return the first header in the list. Throws if the list is empty.
* This translates to rd_kafka_header_get(0).
* \return The first header.
*/
HeaderType front() const; //throws
/**
* \brief Return the last header in the list. Throws if the list is empty.
* This translates to rd_kafka_header_get(size-1).
* \return The last header.
*/
HeaderType back() const; //throws
/**
* \brief Returns the number of headers in the list. This translates to rd_kafka_header_cnt().
* \return The number of headers.
*/
size_t size() const;
/**
* \brief Indicates if this list is empty.
* \return True if empty, false otherwise.
*/
bool empty() const;
/**
* \brief Returns a HeaderIterator pointing to the first position if the list is not empty
* or pointing to end() otherwise.
* \return An iterator.
* \warning This iterator will be invalid if add() or remove() is called.
*/
Iterator begin() const;
/**
* \brief Returns a HeaderIterator pointing to one element past the end of the list.
* \return An iterator.
* \remark This iterator cannot be de-referenced.
*/
Iterator end() const;
/**
* \brief Get the underlying header list handle.
* \return The handle.
*/
rd_kafka_headers_t* get_handle() const;
/**
* \brief Get the underlying header list handle and release its ownership.
* \return The handle.
* \warning After this call, the HeaderList becomes invalid.
*/
rd_kafka_headers_t* release_handle();
/**
* \brief Indicates if this list is valid (contains a non-null handle) or not.
* \return True if valid, false otherwise.
*/
explicit operator bool() const;
private:
struct NonOwningTag { };
static void dummy_deleter(rd_kafka_headers_t*) {}
using HandlePtr = ClonablePtr<rd_kafka_headers_t, decltype(&rd_kafka_headers_destroy),
decltype(&rd_kafka_headers_copy)>;
HeaderList(rd_kafka_headers_t* handle, NonOwningTag);
HandlePtr handle_;
};
template <typename HeaderType>
bool operator==(const HeaderList<HeaderType>& lhs, const HeaderList<HeaderType>& rhs) {
if (!lhs && !rhs) {
return true;
}
if (!lhs || !rhs) {
return false;
}
if (lhs.size() != rhs.size()) {
return false;
}
return std::equal(lhs.begin(), lhs.end(), rhs.begin());
}
template <typename HeaderType>
bool operator!=(const HeaderList<HeaderType>& lhs, const HeaderList<HeaderType>& rhs) {
return !(lhs == rhs);
}
template <typename HeaderType>
HeaderList<HeaderType> HeaderList<HeaderType>::make_non_owning(rd_kafka_headers_t* handle) {
return HeaderList(handle, NonOwningTag());
}
template <typename HeaderType>
HeaderList<HeaderType>::HeaderList()
: handle_(nullptr, nullptr, nullptr) {
}
template <typename HeaderType>
HeaderList<HeaderType>::HeaderList(size_t reserve)
: handle_(rd_kafka_headers_new(reserve), &rd_kafka_headers_destroy, &rd_kafka_headers_copy) {
assert(reserve);
}
template <typename HeaderType>
HeaderList<HeaderType>::HeaderList(rd_kafka_headers_t* handle)
: handle_(handle, &rd_kafka_headers_destroy, &rd_kafka_headers_copy) { //if we own the header list, we clone it on copy
assert(handle);
}
template <typename HeaderType>
HeaderList<HeaderType>::HeaderList(rd_kafka_headers_t* handle, NonOwningTag)
: handle_(handle, &dummy_deleter, nullptr) { //if we don't own the header list, we forward the handle on copy.
assert(handle);
}
template <typename HeaderType>
template <typename OtherHeaderType>
HeaderList<HeaderType>::HeaderList(const HeaderList<OtherHeaderType>& other)
: handle_(other.handle_) {
}
template <typename HeaderType>
template <typename OtherHeaderType>
HeaderList<HeaderType>::HeaderList(HeaderList<OtherHeaderType>&& other)
: handle_(std::move(other.handle_)) {
}
// Methods
template <typename HeaderType>
Error HeaderList<HeaderType>::add(const HeaderType& header) {
assert(handle_);
return rd_kafka_header_add(handle_.get(),
header.get_name().data(), header.get_name().size(),
header.get_value().data(), header.get_value().size());
}
template <>
inline
Error HeaderList<Header<Buffer>>::add(const Header<Buffer>& header) {
assert(handle_);
return rd_kafka_header_add(handle_.get(),
header.get_name().data(), header.get_name().size(),
header.get_value().get_data(), header.get_value().get_size());
}
template <typename HeaderType>
Error HeaderList<HeaderType>::remove(const std::string& name) {
assert(handle_);
return rd_kafka_header_remove(handle_.get(), name.data());
}
template <typename HeaderType>
HeaderType HeaderList<HeaderType>::at(size_t index) const {
assert(handle_);
const char *name, *value;
size_t size;
Error error = rd_kafka_header_get_all(handle_.get(), index, &name, reinterpret_cast<const void**>(&value), &size);
if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
throw Exception(error.to_string());
}
return HeaderType(name, BufferType(value, value + size));
}
template <typename HeaderType>
HeaderType HeaderList<HeaderType>::front() const {
return at(0);
}
template <typename HeaderType>
HeaderType HeaderList<HeaderType>::back() const {
return at(size()-1);
}
template <typename HeaderType>
size_t HeaderList<HeaderType>::size() const {
return handle_ ? rd_kafka_header_cnt(handle_.get()) : 0;
}
template <typename HeaderType>
bool HeaderList<HeaderType>::empty() const {
return size() == 0;
}
template <typename HeaderType>
typename HeaderList<HeaderType>::Iterator
HeaderList<HeaderType>::begin() const {
return Iterator(*this, 0);
}
template <typename HeaderType>
typename HeaderList<HeaderType>::Iterator
HeaderList<HeaderType>::end() const {
return Iterator(*this, size());
}
template <typename HeaderType>
rd_kafka_headers_t* HeaderList<HeaderType>::get_handle() const {
return handle_.get();
}
template <typename HeaderType>
rd_kafka_headers_t* HeaderList<HeaderType>::release_handle() {
return handle_.release();
}
template <typename HeaderType>
HeaderList<HeaderType>::operator bool() const {
return static_cast<bool>(handle_);
}
} //namespace cppkafka
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
#endif //CPPKAFKA_HEADER_LIST_H
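
A small usage sketch for HeaderList, again assuming header support is compiled in; note the warning above that add() and remove() invalidate iterators, so iteration happens between mutations:

#include <cppkafka/header_list.h>
#include <iostream>
#include <string>

int main() {
    using StringHeader = cppkafka::Header<std::string>;
    // Reserve space for two headers (translates to rd_kafka_headers_new).
    cppkafka::HeaderList<StringHeader> headers(2);
    headers.add(StringHeader("content-type", std::string("application/json")));
    headers.add(StringHeader("trace-id", std::string("abc123")));
    for (const StringHeader& header : headers) {
        std::cout << header.get_name() << " = " << header.get_value() << "\n";
    }
    headers.remove("trace-id");
    std::cout << headers.size() << " header(s) left\n"; // prints 1
}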

View File

@@ -0,0 +1,193 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_HEADER_LIST_ITERATOR_H
#define CPPKAFKA_HEADER_LIST_ITERATOR_H
#include <cstddef>
#include <utility>
#include <iterator>
#include "header.h"
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
namespace cppkafka {
template <typename HeaderType>
class HeaderList;
template <typename HeaderType>
class HeaderIterator;
template <typename HeaderType>
bool operator==(const HeaderIterator<HeaderType>& lhs, const HeaderIterator<HeaderType>& rhs);
/**
* \brief Iterator over a HeaderList object.
* \tparam HeaderType The type of header this iterator points to.
*/
template <typename HeaderType>
class HeaderIterator {
public:
friend HeaderList<HeaderType>;
using HeaderListType = HeaderList<HeaderType>;
using BufferType = typename HeaderType::ValueType;
//std::iterator_traits
using difference_type = std::ptrdiff_t;
using value_type = HeaderType;
using pointer = value_type*;
using reference = value_type&;
using iterator_category = std::bidirectional_iterator_tag;
friend bool operator==<HeaderType>(const HeaderIterator<HeaderType>& lhs,
const HeaderIterator<HeaderType>& rhs);
HeaderIterator(const HeaderIterator& other)
: header_list_(other.header_list_),
header_(make_header(other.header_)),
index_(other.index_) {
}
HeaderIterator& operator=(const HeaderIterator& other) {
if (this == &other) return *this;
header_list_ = other.header_list_;
header_ = make_header(other.header_);
index_ = other.index_;
return *this;
}
HeaderIterator(HeaderIterator&&) = default;
HeaderIterator& operator=(HeaderIterator&&) = default;
/**
* \brief Prefix increment of the iterator.
* \return Itself after being incremented.
*/
HeaderIterator& operator++() {
assert(index_ < header_list_.size());
++index_;
return *this;
}
/**
* \brief Postfix increment of the iterator.
* \return A copy of the iterator taken before the increment.
*/
HeaderIterator operator++(int) {
HeaderIterator tmp(*this);
operator++();
return tmp;
}
/**
* \brief Prefix decrement of the iterator.
* \return Itself after being decremented.
*/
HeaderIterator& operator--() {
assert(index_ > 0);
--index_;
return *this;
}
/**
* \brief Postfix decrement of the iterator.
* \return A copy of the iterator taken before the decrement.
*/
HeaderIterator operator--(int) {
HeaderIterator tmp(*this);
operator--();
return tmp;
}
/**
* \brief Dereferences this iterator.
* \return A reference to the header the iterator points to.
* \warning Throws if invalid or if *this == end().
*/
const HeaderType& operator*() const {
header_ = header_list_.at(index_);
return header_;
}
HeaderType& operator*() {
header_ = header_list_.at(index_);
return header_;
}
/**
* \brief Dereferences this iterator.
* \return The address to the header the iterator points to.
* \warning Throws if invalid or if *this == end().
*/
const HeaderType* operator->() const {
header_ = header_list_.at(index_);
return &header_;
}
HeaderType* operator->() {
header_ = header_list_.at(index_);
return &header_;
}
private:
HeaderIterator(const HeaderListType& headers,
size_t index)
: header_list_(headers),
index_(index) {
}
template <typename T>
T make_header(const T& other) {
return other;
}
Header<Buffer> make_header(const Header<Buffer>& other) {
return Header<Buffer>(other.get_name(),
Buffer(other.get_value().get_data(),
other.get_value().get_size()));
}
const HeaderListType& header_list_;
HeaderType header_;
size_t index_;
};
// Equality comparison operators
template <typename HeaderType>
bool operator==(const HeaderIterator<HeaderType>& lhs, const HeaderIterator<HeaderType>& rhs) {
return (lhs.header_list_.get_handle() == rhs.header_list_.get_handle()) && (lhs.index_ == rhs.index_);
}
template <typename HeaderType>
bool operator!=(const HeaderIterator<HeaderType>& lhs, const HeaderIterator<HeaderType>& rhs) {
return !(lhs == rhs);
}
} //namespace cppkafka
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
#endif //CPPKAFKA_HEADER_LIST_ITERATOR_H
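
Since HeaderIterator models a bidirectional iterator with the traits above, standard algorithms work on a HeaderList directly. A sketch looking a header up by name:

#include <cppkafka/header_list.h>
#include <algorithm>
#include <string>

template <typename HeaderType>
bool has_header(const cppkafka::HeaderList<HeaderType>& headers,
                const std::string& name) {
    // find_if copies the iterator and dereferences it; both are supported.
    return std::find_if(headers.begin(), headers.end(),
                        [&](const HeaderType& header) {
                            return header.get_name() == name;
                        }) != headers.end();
}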

View File

@@ -45,6 +45,8 @@
#include "topic_configuration.h"
#include "configuration.h"
#include "macros.h"
#include "logging.h"
#include "queue.h"
namespace cppkafka {
@@ -108,6 +110,11 @@ public:
*/
void set_timeout(std::chrono::milliseconds timeout);
/**
* \brief Sets the log level
*/
void set_log_level(LogLevel level);
/**
* \brief Adds one or more brokers to this handle's broker list
*
@@ -128,6 +135,20 @@ public:
*/
OffsetTuple query_offsets(const TopicPartition& topic_partition) const;
/**
* \brief Queries the offset for the given topic/partition with a given timeout
*
* This translates into a call to rd_kafka_query_watermark_offsets
*
* \param topic_partition The topic/partition to be queried
*
* \param timeout The timeout for this operation. This supersedes the default handle timeout.
*
* \return A pair of watermark offsets {low, high}
*/
OffsetTuple query_offsets(const TopicPartition& topic_partition,
std::chrono::milliseconds timeout) const;
/**
* \brief Gets the rdkafka handle
*
@@ -171,6 +192,20 @@ public:
*/
Metadata get_metadata(bool all_topics = true) const;
/**
* \brief Gets metadata for brokers, topics, partitions, etc with a timeout
*
* This translates into a call to rd_kafka_metadata
*
* \param all_topics Whether to fetch metadata about all topics or only locally known ones
*
* \param timeout The timeout for this operation. Supersedes the default handle timeout.
*
* \return The metadata
*/
Metadata get_metadata(bool all_topics,
std::chrono::milliseconds timeout) const;
/**
* \brief Gets general metadata but only fetches metadata for the given topic rather than
* all of them
@@ -183,6 +218,21 @@ public:
*/
TopicMetadata get_metadata(const Topic& topic) const;
/**
* \brief Gets general metadata but only fetches metadata for the given topic rather than
* all of them. Uses a timeout to limit the operation execution time.
*
* This translates into a call to rd_kafka_metadata
*
* \param topic The topic to fetch information for
*
* \param timeout The timeout for this operation. Supersedes the default handle timeout.
*
* \return The topic metadata
*/
TopicMetadata get_metadata(const Topic& topic,
std::chrono::milliseconds timeout) const;
/**
* \brief Gets the consumer group information
*
@@ -192,6 +242,18 @@ public:
*/
GroupInformation get_consumer_group(const std::string& name);
/**
* \brief Gets the consumer group information with a timeout
*
* \param name The name of the consumer group to look up
*
* \param timeout The timeout for this operation. Supersedes the default handle timeout.
*
* \return The group information
*/
GroupInformation get_consumer_group(const std::string& name,
std::chrono::milliseconds timeout);
/**
* \brief Gets all consumer groups
*
@@ -199,6 +261,15 @@ public:
*/
GroupInformationList get_consumer_groups();
/**
* \brief Gets all consumer groups with a timeout
*
* \param timeout The timeout for this operation. Supersedes the default handle timeout.
*
* \return A list of consumer groups
*/
GroupInformationList get_consumer_groups(std::chrono::milliseconds timeout);
/**
* \brief Gets topic/partition offsets based on timestamps
*
@@ -210,6 +281,20 @@ public:
*/
TopicPartitionList get_offsets_for_times(const TopicPartitionsTimestampsMap& queries) const;
/**
* \brief Gets topic/partition offsets based on timestamps with a timeout
*
* This translates into a call to rd_kafka_offsets_for_times
*
* \param queries A map from topic/partition to the timestamp to be used
*
* \param timeout The timeout for this operation. This supersedes the default handle timeout.
*
* \return A topic partition list
*/
TopicPartitionList get_offsets_for_times(const TopicPartitionsTimestampsMap& queries,
std::chrono::milliseconds timeout) const;
/**
* \brief Get the kafka handle name
*
@@ -233,6 +318,19 @@ public:
*/
const Configuration& get_configuration() const;
#if RD_KAFKA_VERSION >= RD_KAFKA_ADMIN_API_SUPPORT_VERSION
/**
* \brief Gets the background queue
*
* This translates into a call to rd_kafka_queue_get_background
*
* \return The background queue
*/
Queue get_background_queue() const {
return Queue::make_queue(rd_kafka_queue_get_background(handle_.get()));
}
#endif
/**
* \brief Gets the length of the out queue
*
@@ -242,6 +340,18 @@ public:
*/
int get_out_queue_length() const;
#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
/**
* \brief Sets flags for rd_kafka_destroy_flags()
*/
void set_destroy_flags(int destroy_flags);
/**
* \brief Returns flags for rd_kafka_destroy_flags()
*/
int get_destroy_flags() const;
#endif
/**
* \brief Cancels the current callback dispatcher
*
@@ -259,12 +369,22 @@ protected:
private:
static const std::chrono::milliseconds DEFAULT_TIMEOUT;
using HandlePtr = std::unique_ptr<rd_kafka_t, decltype(&rd_kafka_destroy)>;
struct HandleDeleter {
explicit HandleDeleter(const KafkaHandleBase* handle_base_ptr) : handle_base_ptr_{handle_base_ptr} {}
void operator()(rd_kafka_t* handle);
private:
const KafkaHandleBase * handle_base_ptr_;
};
using HandlePtr = std::unique_ptr<rd_kafka_t, HandleDeleter>;
using TopicConfigurationMap = std::unordered_map<std::string, TopicConfiguration>;
Topic get_topic(const std::string& name, rd_kafka_topic_conf_t* conf);
Metadata get_metadata(bool all_topics, rd_kafka_topic_t* topic_ptr) const;
GroupInformationList fetch_consumer_groups(const char* name);
Metadata get_metadata(bool all_topics,
rd_kafka_topic_t* topic_ptr,
std::chrono::milliseconds timeout) const;
GroupInformationList fetch_consumer_groups(const char* name,
std::chrono::milliseconds timeout);
void save_topic_config(const std::string& topic_name, TopicConfiguration config);
std::chrono::milliseconds timeout_ms_;
@@ -272,6 +392,7 @@ private:
TopicConfigurationMap topic_configurations_;
std::mutex topic_configurations_mutex_;
HandlePtr handle_;
int destroy_flags_;
};
} // cppkafka
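
A minimal sketch of the new per-call timeout overloads, which supersede the handle's default timeout for a single operation. The consumer is assumed to be built from a valid Configuration and the topic name is a placeholder:

#include <cppkafka/consumer.h>
#include <chrono>
#include <iostream>
#include <tuple>

void probe(cppkafka::Consumer& consumer) {
    using std::chrono::milliseconds;
    cppkafka::TopicPartition partition("some_topic", 0);
    // Watermark offsets {low, high}, bounded to 500ms for this call only.
    auto offsets = consumer.query_offsets(partition, milliseconds(500));
    std::cout << "low=" << std::get<0>(offsets)
              << " high=" << std::get<1>(offsets) << "\n";
    // Metadata lookup with the same per-call bound.
    cppkafka::Metadata metadata = consumer.get_metadata(false, milliseconds(500));
    std::cout << metadata.get_topics().size() << " topics\n";
}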

View File

@@ -43,4 +43,14 @@
#define CPPKAFKA_API
#endif // _WIN32 && !CPPKAFKA_STATIC
// See: https://github.com/edenhill/librdkafka/issues/1792
#define RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION 0x000b0500 //v0.11.5.00
#define RD_KAFKA_HEADERS_SUPPORT_VERSION 0x000b0402 //v0.11.4.02
#define RD_KAFKA_ADMIN_API_SUPPORT_VERSION 0x000b0500 //v0.11.5.00
#define RD_KAFKA_MESSAGE_LATENCY_SUPPORT_VERSION 0x000b0000 //v0.11.0.00
#define RD_KAFKA_EVENT_STATS_SUPPORT_VERSION 0x000b0000 //v0.11.0.00
#define RD_KAFKA_MESSAGE_STATUS_SUPPORT_VERSION 0x01000002 //v1.0.0.02
#define RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION 0x00090501 //v0.9.5.01
#define RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION 0x000b0600 //v0.11.6
#endif // CPPKAFKA_MACROS_H
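
These constants are meant for compile-time feature checks against the rdkafka version, mirroring how the library gates its own code; a trivial sketch:

#include <cppkafka/macros.h>
#include <librdkafka/rdkafka.h>

// True when the linked rdkafka is new enough to carry message headers.
#if RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION
static constexpr bool has_header_support = true;
#else
static constexpr bool has_header_support = false;
#endif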

View File

@@ -39,10 +39,11 @@
#include "buffer.h"
#include "macros.h"
#include "error.h"
#include "header_list.h"
#include "message_timestamp.h"
namespace cppkafka {
class MessageTimestamp;
class Internal;
/**
@@ -59,6 +60,10 @@ class CPPKAFKA_API Message {
public:
friend class MessageInternal;
using InternalPtr = std::shared_ptr<Internal>;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using HeaderType = Header<Buffer>;
using HeaderListType = HeaderList<HeaderType>;
#endif
/**
* Constructs a message that won't take ownership of the given pointer
*/
@@ -84,7 +89,7 @@ public:
Message& operator=(Message&& rhs) = default;
/**
* Gets the error attribute
* \brief Gets the error attribute
*/
Error get_error() const {
assert(handle_);
@@ -92,22 +97,22 @@ public:
}
/**
* Utility function to check for get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF
* \brief Utility function to check for get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF
*/
bool is_eof() const {
return get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF;
}
/**
* Gets the topic that this message belongs to
* \brief Gets the topic that this message belongs to
*/
std::string get_topic() const {
assert(handle_);
return rd_kafka_topic_name(handle_->rkt);
return handle_->rkt ? rd_kafka_topic_name(handle_->rkt) : std::string{};
}
/**
* Gets the partition that this message belongs to
* \brief Gets the partition that this message belongs to
*/
int get_partition() const {
assert(handle_);
@@ -115,21 +120,54 @@ public:
}
/**
* Gets the message's payload
* \brief Gets the message's payload
*/
const Buffer& get_payload() const {
return payload_;
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
/**
* Gets the message's key
* \brief Sets the message's header list.
* \note This calls rd_kafka_message_set_headers.
*/
void set_header_list(const HeaderListType& headers) {
assert(handle_);
if (!headers) {
return; //nothing to set
}
rd_kafka_headers_t* handle_copy = rd_kafka_headers_copy(headers.get_handle());
rd_kafka_message_set_headers(handle_.get(), handle_copy);
header_list_ = HeaderListType::make_non_owning(handle_copy);
}
/**
* \brief Gets the message's header list
*/
const HeaderListType& get_header_list() const {
return header_list_;
}
/**
* \brief Detaches the message's header list
*/
template <typename HeaderType>
HeaderList<HeaderType> detach_header_list() {
rd_kafka_headers_t* headers_handle;
Error error = rd_kafka_message_detach_headers(handle_.get(), &headers_handle);
return error ? HeaderList<HeaderType>() : HeaderList<HeaderType>(headers_handle);
}
#endif
/**
* \brief Gets the message's key
*/
const Buffer& get_key() const {
return key_;
}
/**
* Gets the message offset
* \brief Gets the message offset
*/
int64_t get_offset() const {
assert(handle_);
@@ -151,24 +189,44 @@ public:
*
* If calling rd_kafka_message_timestamp returns -1, then boost::none will be returned.
*/
inline boost::optional<MessageTimestamp> get_timestamp() const;
boost::optional<MessageTimestamp> get_timestamp() const;
#if RD_KAFKA_VERSION >= RD_KAFKA_MESSAGE_LATENCY_SUPPORT_VERSION
/**
* \brief Gets the message latency in microseconds as measured from the produce() call.
*/
std::chrono::microseconds get_latency() const {
assert(handle_);
return std::chrono::microseconds(rd_kafka_message_latency(handle_.get()));
}
#endif
#if (RD_KAFKA_VERSION >= RD_KAFKA_MESSAGE_STATUS_SUPPORT_VERSION)
/**
* \brief Gets the message persistence status
*/
rd_kafka_msg_status_t get_status() const {
assert(handle_);
return rd_kafka_message_status(handle_.get());
}
#endif
/**
* Indicates whether this message is valid (not null)
* \brief Indicates whether this message is valid (not null)
*/
explicit operator bool() const {
return handle_ != nullptr;
}
/**
* Gets the rdkafka message handle
* \brief Gets the rdkafka message handle
*/
rd_kafka_message_t* get_handle() const {
return handle_.get();
}
/**
* Internal private const data accessor (internal use only)
* \brief Internal private const data accessor (internal use only)
*/
InternalPtr internal() const {
return internal_;
@@ -185,54 +243,15 @@ private:
HandlePtr handle_;
Buffer payload_;
Buffer key_;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
HeaderListType header_list_;
#endif
void* user_data_;
InternalPtr internal_;
};
using MessageList = std::vector<Message>;
/**
* Represents a message's timestamp
*/
class CPPKAFKA_API MessageTimestamp {
public:
/**
* The timestamp type
*/
enum TimestampType {
CREATE_TIME = RD_KAFKA_TIMESTAMP_CREATE_TIME,
LOG_APPEND_TIME = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME
};
/**
* Constructs a timestamp object
*/
MessageTimestamp(std::chrono::milliseconds timestamp, TimestampType type);
/**
* Gets the timestamp value
*/
std::chrono::milliseconds get_timestamp() const;
/**
* Gets the timestamp type
*/
TimestampType get_type() const;
private:
std::chrono::milliseconds timestamp_;
TimestampType type_;
};
boost::optional<MessageTimestamp> Message::get_timestamp() const {
rd_kafka_timestamp_type_t type = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
int64_t timestamp = rd_kafka_message_timestamp(handle_.get(), &type);
if (timestamp == -1 || type == RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
return {};
}
return MessageTimestamp(std::chrono::milliseconds(timestamp),
static_cast<MessageTimestamp::TimestampType>(type));
}
} // cppkafka
#endif // CPPKAFKA_MESSAGE_H
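
A sketch reading the new header list off a consumed message; 'message' is assumed valid and Buffer is streamable via the library's operator<< for Buffer:

#include <cppkafka/message.h>
#include <iostream>

void print_headers(const cppkafka::Message& message) {
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
    const cppkafka::Message::HeaderListType& headers = message.get_header_list();
    if (!headers) {
        return; // the message carried no headers
    }
    // The list is non-owning; it is only valid while 'message' is alive.
    for (const auto& header : headers) {
        std::cout << header.get_name() << ": " << header.get_value() << "\n";
    }
#endif
}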

View File

@@ -35,6 +35,7 @@
#include "topic.h"
#include "macros.h"
#include "message.h"
#include "header_list.h"
namespace cppkafka {
@@ -44,6 +45,10 @@ namespace cppkafka {
template <typename BufferType, typename Concrete>
class BasicMessageBuilder {
public:
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using HeaderType = Header<BufferType>;
using HeaderListType = HeaderList<HeaderType>;
#endif
/**
* Construct a BasicMessageBuilder
*
@@ -65,7 +70,12 @@ public:
*/
template <typename OtherBufferType, typename OtherConcrete>
BasicMessageBuilder(const BasicMessageBuilder<OtherBufferType, OtherConcrete>& rhs);
template <typename OtherBufferType, typename OtherConcrete>
BasicMessageBuilder(BasicMessageBuilder<OtherBufferType, OtherConcrete>&& rhs);
/**
* Default copy and move constructors and assignment operators
*/
BasicMessageBuilder(BasicMessageBuilder&&) = default;
BasicMessageBuilder(const BasicMessageBuilder&) = default;
BasicMessageBuilder& operator=(BasicMessageBuilder&&) = default;
@@ -99,6 +109,17 @@ public:
*/
Concrete& key(BufferType&& value);
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
/**
* Adds a single header, or a whole header list, to the message
*
* \param header The header (or header list) to be used
*/
Concrete& header(const HeaderType& header);
Concrete& headers(const HeaderListType& headers);
Concrete& headers(HeaderListType&& headers);
#endif
/**
* Sets the message's payload
*
@@ -114,12 +135,20 @@ public:
Concrete& payload(BufferType&& value);
/**
* Sets the message's timestamp
* Sets the message's timestamp with a 'duration'
*
* \param value The timestamp to be used
*/
Concrete& timestamp(std::chrono::milliseconds value);
/**
* Sets the message's timestamp with a 'time_point'.
*
* \param value The timestamp to be used
*/
template <typename Clock, typename Duration = typename Clock::duration>
Concrete& timestamp(std::chrono::time_point<Clock, Duration> value);
/**
* Sets the message's user data pointer
*
@@ -147,6 +176,18 @@ public:
*/
BufferType& key();
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
/**
* Gets the list of headers
*/
const HeaderListType& header_list() const;
/**
* Gets the list of headers
*/
HeaderListType& header_list();
#endif
/**
* Gets the message's payload
*/
@@ -158,7 +199,8 @@ public:
BufferType& payload();
/**
* Gets the message's timestamp
* Gets the message's timestamp as a duration. If the timestamp was created with a 'time_point',
* the duration represents the number of milliseconds since epoch.
*/
std::chrono::milliseconds timestamp() const;
@@ -173,13 +215,18 @@ public:
Message::InternalPtr internal() const;
Concrete& internal(Message::InternalPtr internal);
private:
protected:
void construct_buffer(BufferType& lhs, const BufferType& rhs);
private:
Concrete& get_concrete();
std::string topic_;
int partition_{-1};
BufferType key_;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
HeaderListType header_list_;
#endif
BufferType payload_;
std::chrono::milliseconds timestamp_{0};
void* user_data_;
@@ -196,23 +243,51 @@ template <typename T, typename C>
BasicMessageBuilder<T, C>::BasicMessageBuilder(const Message& message)
: topic_(message.get_topic()),
key_(Buffer(message.get_key().get_data(), message.get_key().get_size())),
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
//Here we must explicitly copy the Message headers, since they are non-owning and this class
//assumes full ownership. Otherwise we would be left holding an invalid handle once the Message
//goes out of scope and rdkafka frees its resources.
header_list_(message.get_header_list() ?
HeaderListType(rd_kafka_headers_copy(message.get_header_list().get_handle())) : HeaderListType()), //copy headers
#endif
payload_(Buffer(message.get_payload().get_data(), message.get_payload().get_size())),
timestamp_(message.get_timestamp() ? message.get_timestamp().get().get_timestamp() :
std::chrono::milliseconds(0)),
user_data_(message.get_user_data()),
internal_(message.internal()) {
}
template <typename T, typename C>
template <typename U, typename V>
BasicMessageBuilder<T, C>::BasicMessageBuilder(const BasicMessageBuilder<U, V>& rhs)
: topic_(rhs.topic()), partition_(rhs.partition()), timestamp_(rhs.timestamp()),
: topic_(rhs.topic()),
partition_(rhs.partition()),
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
header_list_(rhs.header_list()), //copy headers
#endif
timestamp_(rhs.timestamp()),
user_data_(rhs.user_data()),
internal_(rhs.internal()) {
get_concrete().construct_buffer(key_, rhs.key());
get_concrete().construct_buffer(payload_, rhs.payload());
}
template <typename T, typename C>
template <typename U, typename V>
BasicMessageBuilder<T, C>::BasicMessageBuilder(BasicMessageBuilder<U, V>&& rhs)
: topic_(rhs.topic()),
partition_(rhs.partition()),
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
header_list_(std::move(rhs.header_list())), //assume header ownership
#endif
timestamp_(rhs.timestamp()),
user_data_(rhs.user_data()),
internal_(rhs.internal()) {
get_concrete().construct_buffer(key_, std::move(rhs.key()));
get_concrete().construct_buffer(payload_, std::move(rhs.payload()));
}
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::topic(std::string value) {
topic_ = std::move(value);
@@ -237,6 +312,29 @@ C& BasicMessageBuilder<T, C>::key(T&& value) {
return get_concrete();
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::header(const HeaderType& header) {
if (!header_list_) {
header_list_ = HeaderListType(5);
}
header_list_.add(header);
return get_concrete();
}
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::headers(const HeaderListType& headers) {
header_list_ = headers;
return get_concrete();
}
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::headers(HeaderListType&& headers) {
header_list_ = std::move(headers);
return get_concrete();
}
#endif
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::payload(const T& value) {
get_concrete().construct_buffer(payload_, value);
@@ -255,6 +353,14 @@ C& BasicMessageBuilder<T, C>::timestamp(std::chrono::milliseconds value) {
return get_concrete();
}
template <typename T, typename C>
template <typename Clock, typename Duration>
C& BasicMessageBuilder<T, C>::timestamp(std::chrono::time_point<Clock, Duration> value)
{
timestamp_ = std::chrono::duration_cast<std::chrono::milliseconds>(value.time_since_epoch());
return get_concrete();
}
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::user_data(void* value) {
user_data_ = value;
@@ -281,6 +387,20 @@ T& BasicMessageBuilder<T, C>::key() {
return key_;
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
template <typename T, typename C>
const typename BasicMessageBuilder<T, C>::HeaderListType&
BasicMessageBuilder<T, C>::header_list() const {
return header_list_;
}
template <typename T, typename C>
typename BasicMessageBuilder<T, C>::HeaderListType&
BasicMessageBuilder<T, C>::header_list() {
return header_list_;
}
#endif
template <typename T, typename C>
const T& BasicMessageBuilder<T, C>::payload() const {
return payload_;
@@ -338,24 +458,34 @@ C& BasicMessageBuilder<T, C>::get_concrete() {
*/
class MessageBuilder : public BasicMessageBuilder<Buffer, MessageBuilder> {
public:
using BasicMessageBuilder::BasicMessageBuilder;
using Base = BasicMessageBuilder<Buffer, MessageBuilder>;
using BasicMessageBuilder<Buffer, MessageBuilder>::BasicMessageBuilder;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using HeaderType = Base::HeaderType;
using HeaderListType = Base::HeaderListType;
#endif
void construct_buffer(Buffer& lhs, const Buffer& rhs) {
lhs = Buffer(rhs.get_data(), rhs.get_size());
}
template <typename T>
void construct_buffer(Buffer& lhs, const T& rhs) {
lhs = Buffer(rhs);
void construct_buffer(Buffer& lhs, T&& rhs) {
lhs = Buffer(std::forward<T>(rhs));
}
MessageBuilder clone() const {
return std::move(MessageBuilder(topic()).
key(Buffer(key().get_data(), key().get_size())).
MessageBuilder builder(topic());
builder.key(Buffer(key().get_data(), key().get_size())).
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
headers(header_list()).
#endif
payload(Buffer(payload().get_data(), payload().get_size())).
timestamp(timestamp()).
user_data(user_data()).
internal(internal()));
internal(internal());
return builder;
}
};
@@ -365,7 +495,12 @@ public:
template <typename T>
class ConcreteMessageBuilder : public BasicMessageBuilder<T, ConcreteMessageBuilder<T>> {
public:
using Base = BasicMessageBuilder<T, ConcreteMessageBuilder<T>>;
using BasicMessageBuilder<T, ConcreteMessageBuilder<T>>::BasicMessageBuilder;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using HeaderType = typename Base::HeaderType;
using HeaderListType = typename Base::HeaderListType;
#endif
};
} // cppkafka
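
A sketch tying the builder additions together: a per-message header plus a time_point timestamp, which is converted to milliseconds since epoch internally. cppkafka::Buffer is non-owning, so the data it points at must outlive the builder; the header values are illustrative:

#include <cppkafka/message_builder.h>
#include <chrono>
#include <string>

void fill_builder(cppkafka::MessageBuilder& builder, const std::string& payload) {
    builder.partition(0)
           .payload(cppkafka::Buffer(payload.data(), payload.size()))
           .timestamp(std::chrono::system_clock::now());
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
    // String literals have static storage, so this non-owning Buffer is safe.
    builder.header(cppkafka::MessageBuilder::HeaderType(
            "trace-id", cppkafka::Buffer("abc123", 6)));
#endif
}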

View File

@@ -31,6 +31,7 @@
#define CPPKAFKA_MESSAGE_INTERNAL_H
#include <memory>
#include "macros.h"
namespace cppkafka {
@@ -45,7 +46,7 @@ using InternalPtr = std::shared_ptr<Internal>;
/**
* \brief Private message data structure
*/
class MessageInternal {
class CPPKAFKA_API MessageInternal {
public:
MessageInternal(void* user_data, std::shared_ptr<Internal> internal);
static std::unique_ptr<MessageInternal> load(Message& message);

View File

@@ -0,0 +1,72 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_MESSAGE_TIMESTAMP_H
#define CPPKAFKA_MESSAGE_TIMESTAMP_H
#include <chrono>
#include <librdkafka/rdkafka.h>
#include "macros.h"
namespace cppkafka {
/**
* Represents a message's timestamp
*/
class CPPKAFKA_API MessageTimestamp {
friend class Message;
public:
/**
* The timestamp type
*/
enum TimestampType {
CREATE_TIME = RD_KAFKA_TIMESTAMP_CREATE_TIME,
LOG_APPEND_TIME = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME
};
/**
* Gets the timestamp value. If the timestamp was created with a 'time_point',
* the duration represents the number of milliseconds since epoch.
*/
std::chrono::milliseconds get_timestamp() const;
/**
* Gets the timestamp type
*/
TimestampType get_type() const;
private:
MessageTimestamp(std::chrono::milliseconds timestamp, TimestampType type);
std::chrono::milliseconds timestamp_;
TimestampType type_;
};
} // cppkafka
#endif //CPPKAFKA_MESSAGE_TIMESTAMP_H

View File

@@ -74,6 +74,9 @@ class Message;
* // Write using a key on a fixed partition (42)
* producer.produce(MessageBuilder("some_topic").partition(42).key(key).payload(payload));
*
* // Flush the produced messages
* producer.flush();
*
* \endcode
*/
class CPPKAFKA_API Producer : public KafkaHandleBase {
@@ -85,7 +88,8 @@ public:
enum class PayloadPolicy {
PASSTHROUGH_PAYLOAD = 0, ///< Rdkafka will not copy nor free the payload.
COPY_PAYLOAD = RD_KAFKA_MSG_F_COPY, ///< Means RD_KAFKA_MSG_F_COPY
FREE_PAYLOAD = RD_KAFKA_MSG_F_FREE ///< Means RD_KAFKA_MSG_F_FREE
FREE_PAYLOAD = RD_KAFKA_MSG_F_FREE, ///< Means RD_KAFKA_MSG_F_FREE
BLOCK_ON_FULL_QUEUE = RD_KAFKA_MSG_F_BLOCK ///< Producer will block if the underlying queue is full
};
/**
@@ -113,6 +117,7 @@ public:
* \param builder The builder class used to compose a message
*/
void produce(const MessageBuilder& builder);
void produce(MessageBuilder&& builder);
/**
* \brief Produces a message
@@ -120,6 +125,7 @@ public:
* \param message The message to be produced
*/
void produce(const Message& message);
void produce(Message&& message);
/**
* \brief Polls on this handle
@@ -157,6 +163,15 @@ public:
*/
void flush(std::chrono::milliseconds timeout);
private:
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
void do_produce(const MessageBuilder& builder, MessageBuilder::HeaderListType&& headers);
void do_produce(const Message& message, MessageBuilder::HeaderListType&& headers);
#else
void do_produce(const MessageBuilder& builder);
void do_produce(const Message& message);
#endif
// Members
PayloadPolicy message_payload_policy_;
};
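
A minimal end-to-end sketch of the produce/flush flow documented above; the broker address is a placeholder and the default payload policy (copying) is assumed:

#include <cppkafka/producer.h>
#include <cppkafka/message_builder.h>
#include <string>

int main() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" } // placeholder broker
    };
    cppkafka::Producer producer(config);
    std::string payload = "hello";
    producer.produce(cppkafka::MessageBuilder("some_topic")
                         .partition(0)
                         .payload(cppkafka::Buffer(payload.data(), payload.size())));
    // Block until all queued messages are delivered or fail.
    producer.flush();
}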

View File

@@ -29,8 +29,8 @@
#include <vector>
#include <memory>
#include <boost/optional.hpp>
#include <librdkafka/rdkafka.h>
#include "event.h"
#include "macros.h"
#include "message.h"
@@ -52,6 +52,17 @@ public:
*/
static Queue make_non_owning(rd_kafka_queue_t* handle);
/**
* \brief Creates a Queue object out of a handle.
*
* This will check what the rdkafka version is and will return either an owned
* queue handle or a non owned one, depending on whether the current version
* is >= RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION (see macros.h)
*
* \param handle The handle to be used
*/
static Queue make_queue(rd_kafka_queue_t* handle);
/**
* \brief Constructs an empty queue
*
@@ -134,26 +145,76 @@ public:
/**
* \brief Consumes a batch of messages from this queue
*
* This translates to a call to rd_kafka_consume_batch_queue using the configured timeout for this object
* This translates to a call to rd_kafka_consume_batch_queue using the configured timeout
* for this object
*
* \param max_batch_size The max number of messages to consume if available
* \param alloc The optionally supplied allocator for the message list
*
* \return A list of messages. Could be empty if there's nothing to consume
*/
MessageList consume_batch(size_t max_batch_size) const;
template <typename Allocator>
std::vector<Message, Allocator> consume_batch(size_t max_batch_size,
const Allocator& alloc) const;
/**
* \brief Consumes a batch of messages from this queue
*
* Same as Queue::consume_batch(size_t) but the specified timeout will be used instead of the configured one
* This translates to a call to rd_kafka_consume_batch_queue using the configured timeout
* for this object
*
* \param max_batch_size The max number of messages to consume if available
*
* \return A list of messages. Could be empty if there's nothing to consume
*/
std::vector<Message> consume_batch(size_t max_batch_size) const;
/**
* \brief Consumes a batch of messages from this queue
*
* Same as Queue::consume_batch(size_t) but the specified timeout will be used instead of the
* configured one
*
* \param max_batch_size The max number of messages to consume if available
* \param timeout The timeout to be used on this call
* \param alloc The optionally supplied allocator for the message list
*
* \return A list of messages. Could be empty if there's nothing to consume
*/
template <typename Allocator>
std::vector<Message, Allocator> consume_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc) const;
/**
* \brief Consumes a batch of messages from this queue
*
* Same as Queue::consume_batch(size_t) but the specified timeout will be used instead of the
* configured one
*
* \param max_batch_size The max number of messages to consume if available
* \param timeout The timeout to be used on this call
*
* \return A list of messages. Could be empty if there's nothing to consume
*/
MessageList consume_batch(size_t max_batch_size, std::chrono::milliseconds timeout) const;
std::vector<Message> consume_batch(size_t max_batch_size,
std::chrono::milliseconds timeout) const;
/**
* \brief Extracts the next event in this Queue
*
* \return The next event, if any
*/
Event next_event() const;
/**
* \brief Extracts the next event in this Queue
*
* \param timeout The amount of time to wait for this operation to complete
*
* \return The next event, if any
*/
Event next_event(std::chrono::milliseconds timeout) const;
/**
* Indicates whether this queue is valid (not null)
@@ -178,6 +239,32 @@ private:
using QueueList = std::vector<Queue>;
template <typename Allocator>
std::vector<Message, Allocator> Queue::consume_batch(size_t max_batch_size,
const Allocator& alloc) const {
return consume_batch(max_batch_size, timeout_ms_, alloc);
}
template <typename Allocator>
std::vector<Message, Allocator> Queue::consume_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc) const {
std::vector<rd_kafka_message_t*> raw_messages(max_batch_size);
ssize_t result = rd_kafka_consume_batch_queue(handle_.get(),
static_cast<int>(timeout.count()),
raw_messages.data(),
raw_messages.size());
if (result == -1) {
rd_kafka_resp_err_t error = rd_kafka_last_error();
if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
throw QueueException(error);
}
return std::vector<Message, Allocator>(alloc);
}
// Build message list
return std::vector<Message, Allocator>(raw_messages.begin(), raw_messages.begin() + result, alloc);
}
} // cppkafka
#endif //CPPKAFKA_QUEUE_H
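
A sketch of the allocator-aware batch overload added above; 'queue' is assumed to be a valid Queue obtained from a consumer, and the default allocator stands in for a custom one:

#include <cppkafka/queue.h>
#include <chrono>
#include <memory>
#include <vector>

std::vector<cppkafka::Message> drain(const cppkafka::Queue& queue) {
    // Consume up to 64 messages, waiting at most 100ms; throws
    // QueueException if rdkafka reports an error.
    return queue.consume_batch(64, std::chrono::milliseconds(100),
                               std::allocator<cppkafka::Message>());
}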

View File

@@ -32,7 +32,6 @@
#include <string>
#include <memory>
#include <boost/optional.hpp>
#include <librdkafka/rdkafka.h>
#include "macros.h"

View File

@@ -107,6 +107,11 @@ public:
*/
int64_t get_offset() const;
/**
* @brief Sets the partition
*/
void set_partition(int partition);
/**
* Sets the offset
*/

View File

@@ -37,6 +37,7 @@
#include "../consumer.h"
#include "backoff_performer.h"
#include "../detail/callback_invoker.h"
#include "../macros.h"
namespace cppkafka {
@@ -71,7 +72,7 @@ namespace cppkafka {
* committer.commit(some_message);
* \endcode
*/
class BackoffCommitter : public BackoffPerformer {
class CPPKAFKA_API BackoffCommitter : public BackoffPerformer {
public:
/**
* \brief The error callback.
@@ -99,10 +100,18 @@ public:
*/
void set_error_callback(ErrorCallback callback);
/**
* \brief Commits the current partition assignment synchronously
*
* This will call Consumer::commit() until either the message is successfully
* committed or the error callback returns false (if any is set).
*/
void commit();
/**
* \brief Commits the given message synchronously
*
* This will call Consumer::commit until either the message is successfully
* This will call Consumer::commit(msg) until either the message is successfully
* committed or the error callback returns false (if any is set).
*
* \param msg The message to be committed
@@ -112,7 +121,7 @@ public:
/**
* \brief Commits the offsets on the given topic/partitions synchronously
*
* This will call Consumer::commit until either the offsets are successfully
* This will call Consumer::commit(topic_partitions) until either the offsets are successfully
* committed or the error callback returns false (if any is set).
*
* \param topic_partitions The topic/partition list to be committed
@@ -126,26 +135,31 @@ public:
*/
Consumer& get_consumer();
private:
// Return true to abort and false to continue committing
template <typename T>
bool do_commit(const T& object) {
// Returns true once committing is finished (success, or nothing to commit).
// Returns false to keep retrying; rethrows if the error callback asks to abort.
template <typename...Args>
bool do_commit(Args&&...args) {
try {
consumer_.commit(object);
// If the commit succeeds, we're done
consumer_.commit(std::forward<Args>(args)...);
// If the commit succeeds, we're done.
return true;
}
catch (const HandleException& ex) {
Error error = ex.get_error();
// If there were actually no offsets to commit, return. Retrying won't solve
// anything here
if (ex.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET) {
return true;
// anything here.
if (error == RD_KAFKA_RESP_ERR__NO_OFFSET) {
return true; //not considered an error.
}
// If there's a callback and it returns false for this message, abort.
// Otherwise keep committing.
CallbackInvoker<ErrorCallback> callback("backoff committer", callback_, &consumer_);
return callback && !callback(ex.get_error());
if (callback && !callback(error)) {
throw ex; //abort
}
}
return false; //continue
}
Consumer& consumer_;
ErrorCallback callback_;
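
A sketch of the callback semantics described above, assuming the error callback takes an Error and returns bool: returning true keeps retrying, returning false aborts (which now surfaces as a thrown exception rather than a silent return):

#include <cppkafka/utils/backoff_committer.h>

void commit_with_retries(cppkafka::Consumer& consumer,
                         const cppkafka::Message& message) {
    cppkafka::BackoffCommitter committer(consumer);
    committer.set_error_callback([](cppkafka::Error error) {
        // Keep retrying only on timeouts; abort on anything else.
        return error.get_error() == RD_KAFKA_RESP_ERR__TIMED_OUT;
    });
    committer.commit(message);
}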

View File

@@ -34,6 +34,7 @@
#include <functional>
#include <thread>
#include "../consumer.h"
#include "../exceptions.h"
namespace cppkafka {
@@ -123,7 +124,7 @@ public:
auto start = std::chrono::steady_clock::now();
// If the callback returns true, we're done
if (callback()) {
return;
return; //success
}
auto end = std::chrono::steady_clock::now();
auto time_elapsed = end - start;
@@ -134,6 +135,8 @@ public:
// Increase our backoff depending on the policy being used
backoff = increase_backoff(backoff);
}
// No more retries left or we have a terminal error.
throw ActionTerminatedException("Action failed: no more retries left.");
}
private:
TimeUnit increase_backoff(TimeUnit backoff);
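
Since perform() now throws once retries run out, callers can tell "gave up" apart from "succeeded". A sketch built on the committer above:

#include <cppkafka/exceptions.h>
#include <cppkafka/utils/backoff_committer.h>

bool try_commit(cppkafka::BackoffCommitter& committer,
                const cppkafka::Message& message) {
    try {
        committer.commit(message);
        return true;
    }
    catch (const cppkafka::ActionTerminatedException&) {
        return false; // no more retries left
    }
}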

File diff suppressed because it is too large

View File

@@ -37,13 +37,14 @@
#include <boost/optional.hpp>
#include "../buffer.h"
#include "../consumer.h"
#include "../macros.h"
namespace cppkafka {
/**
* \brief Events generated by a CompactedTopicProcessor
*/
template <typename Key, typename Value>
class CPPKAFKA_API CompactedTopicEvent {
class CompactedTopicEvent {
public:
/**
* \brief Event type enum


@@ -70,7 +70,7 @@ namespace cppkafka {
* * EOF: void(BasicConsumerDispatcher::EndOfFile, TopicPartition)
*/
template <typename ConsumerType>
class CPPKAFKA_API BasicConsumerDispatcher {
class BasicConsumerDispatcher {
public:
/**
* Tag to indicate a timeout occurred


@@ -108,7 +108,7 @@ struct PollInterface {
* otherwise the broker will think this consumer is down and will trigger a rebalance
* (if using dynamic subscription)
*/
virtual MessageList poll_batch(size_t max_batch_size) = 0;
virtual std::vector<Message> poll_batch(size_t max_batch_size) = 0;
/**
* \brief Polls all assigned partitions for a batch of new messages in round-robin fashion
@@ -122,7 +122,7 @@ struct PollInterface {
*
* \return A list of messages
*/
virtual MessageList poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout) = 0;
virtual std::vector<Message> poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout) = 0;
};
} //cppkafka
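This tightening from MessageList to std::vector<Message> should be source-compatible for most callers, since MessageList is, as far as the library defines it, a plain alias for std::vector<Message>; for example:

// Sketch only: batch consumption through the PollInterface.
std::vector<Message> batch = consumer.poll_batch(100, std::chrono::milliseconds(500));
for (const Message& msg : batch) {
    // ... handle msg ...
}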


@@ -35,6 +35,7 @@
#include "../queue.h"
#include "../topic_partition_list.h"
#include "poll_interface.h"
#include "../macros.h"
namespace cppkafka {
@@ -52,7 +53,7 @@ struct QueueData {
*
* \brief Base implementation of the PollInterface
*/
class PollStrategyBase : public PollInterface {
class CPPKAFKA_API PollStrategyBase : public PollInterface {
public:
using QueueMap = std::map<TopicPartition, QueueData>;
@@ -83,6 +84,36 @@ public:
*/
Consumer& get_consumer() final;
/**
* \brief Creates partition queues associated with the supplied partitions.
*
* This method contains a default implementation. It adds all the new queues belonging
* to the provided partition list and calls reset_state().
* To be used with static consumers.
*
* \param partitions Assigned topic partitions.
*/
virtual void assign(TopicPartitionList& partitions);
/**
* \brief Removes partition queues associated with the supplied partitions.
*
* This method contains a default implementation. It removes all the queues
* belonging to the provided partition list and calls reset_state().
* To be used with static consumers.
*
* \param partitions Revoked topic partitions.
*/
virtual void revoke(const TopicPartitionList& partitions);
/**
* \brief Removes all currently assigned partition queues.
*
* This method contains a default implementation. It removes all the queues
* currently assigned and calls reset_state(). To be used with static consumers.
*/
virtual void revoke();
protected:
/**
* \brief Get the queues from all assigned partitions
@@ -110,8 +141,8 @@ protected:
/**
* \brief Function to be called when a new partition assignment takes place
*
* This method contains a default implementation. It adds all the new queues belonging
* to the provided partition list and calls reset_state().
* This method contains a default implementation. It calls assign()
* and invokes the user assignment callback.
*
* \param partitions Assigned topic partitions
*/
@@ -120,8 +151,8 @@ protected:
/**
* \brief Function to be called when an old partition assignment gets revoked
*
* This method contains a default implementation. It removes all the queues
* belonging to the provided partition list and calls reset_state().
* This method contains a default implementation. It calls revoke()
* and invokes the user revocation callback.
*
* \param partitions Revoked topic partitions
*/
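A hedged sketch of the static-consumer flow these new virtuals enable (topic name and partitions are made up):

// Sketch only: with a static assignment there is no rebalance callback,
// so the strategy must be told about the assignment explicitly.
RoundRobinPollStrategy strategy(consumer);
TopicPartitionList partitions = { {"some_topic", 0}, {"some_topic", 1} };
consumer.assign(partitions);
strategy.assign(partitions); // builds the per-partition queues
// ... poll through the strategy ...
strategy.revoke();           // drops all queues again
consumer.unassign();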


@@ -102,12 +102,19 @@ public:
/**
* \sa PollInterface::poll_batch
*/
MessageList poll_batch(size_t max_batch_size) override;
template <typename Allocator>
std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
const Allocator& alloc);
std::vector<Message> poll_batch(size_t max_batch_size) override;
/**
* \sa PollInterface::poll_batch
*/
MessageList poll_batch(size_t max_batch_size,
template <typename Allocator>
std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc);
std::vector<Message> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout) override;
protected:
@@ -119,10 +126,12 @@ protected:
QueueData& get_next_queue();
private:
template <typename Allocator>
void consume_batch(Queue& queue,
MessageList& messages,
std::vector<Message, Allocator>& messages,
ssize_t& count,
std::chrono::milliseconds timeout);
std::chrono::milliseconds timeout,
const Allocator& alloc);
void restore_forwarding();
@@ -130,6 +139,53 @@ private:
QueueMap::iterator queue_iter_;
};
// Implementations
template <typename Allocator>
std::vector<Message, Allocator> RoundRobinPollStrategy::poll_batch(size_t max_batch_size,
const Allocator& alloc) {
return poll_batch(max_batch_size, get_consumer().get_timeout(), alloc);
}
template <typename Allocator>
std::vector<Message, Allocator> RoundRobinPollStrategy::poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc) {
std::vector<Message, Allocator> messages(alloc);
ssize_t count = max_batch_size;
// batch from the group event queue first (non-blocking)
consume_batch(get_consumer_queue().queue, messages, count, std::chrono::milliseconds(0), alloc);
size_t num_queues = get_partition_queues().size();
while ((count > 0) && (num_queues--)) {
// batch from the next partition (non-blocking)
consume_batch(get_next_queue().queue, messages, count, std::chrono::milliseconds(0), alloc);
}
// we still have space left in the buffer
if (count > 0) {
// wait on the event queue until timeout
consume_batch(get_consumer_queue().queue, messages, count, timeout, alloc);
}
return messages;
}
template <typename Allocator>
void RoundRobinPollStrategy::consume_batch(Queue& queue,
std::vector<Message, Allocator>& messages,
ssize_t& count,
std::chrono::milliseconds timeout,
const Allocator& alloc) {
std::vector<Message, Allocator> queue_messages = queue.consume_batch(count, timeout, alloc);
if (queue_messages.empty()) {
return;
}
// concatenate both lists
messages.insert(messages.end(),
make_move_iterator(queue_messages.begin()),
make_move_iterator(queue_messages.end()));
// reduce total batch count
count -= queue_messages.size();
}
} //cppkafka
#endif //CPPKAFKA_ROUNDROBIN_POLL_STRATEGY_H
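The allocator-aware overloads let callers control where message batches are stored. A minimal sketch, assuming a constructed RoundRobinPollStrategy named strategy; any allocator whose value_type is Message works, std::allocator being the trivial case:

// Sketch only: supply an allocator explicitly to the batch API.
std::allocator<Message> alloc;
std::vector<Message> batch = strategy.poll_batch(100, std::chrono::milliseconds(250), alloc);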


@@ -7,12 +7,14 @@ set(SOURCES
buffer.cpp
queue.cpp
message.cpp
message_timestamp.cpp
message_internal.cpp
topic_partition.cpp
topic_partition_list.cpp
metadata.cpp
group_information.cpp
error.cpp
event.cpp
kafka_handle_base.cpp
producer.cpp
@@ -24,24 +26,83 @@ set(SOURCES
utils/roundrobin_poll_strategy.cpp
)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/cppkafka)
include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${RDKAFKA_INCLUDE_DIR})
set(TARGET_NAME cppkafka)
set(PKG_DIR "${CMAKE_BINARY_DIR}/package")
set(PKG_CONFIG_FILE "${PKG_DIR}/${TARGET_NAME}.pc")
set(CONFIG_FILE "${PKG_DIR}/${PROJECT_NAME}Config.cmake")
set(VERSION_FILE "${PKG_DIR}/${PROJECT_NAME}ConfigVersion.cmake")
set(FIND_RDKAFKA_FILE "${PROJECT_SOURCE_DIR}/cmake/FindRdKafka.cmake")
set(NAMESPACE "${PROJECT_NAME}::")
set(TARGET_EXPORT_NAME ${PROJECT_NAME}Targets)
add_library(cppkafka ${CPPKAFKA_LIBRARY_TYPE} ${SOURCES})
set_target_properties(cppkafka PROPERTIES VERSION ${CPPKAFKA_VERSION}
add_library(${TARGET_NAME} ${CPPKAFKA_LIBRARY_TYPE} ${SOURCES})
target_compile_features(${TARGET_NAME} PUBLIC cxx_std_11)
target_include_directories(${TARGET_NAME} PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include/cppkafka>)
set_target_properties(${TARGET_NAME} PROPERTIES
ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_INSTALL_LIBDIR}"
ARCHIVE_OUTPUT_NAME "${TARGET_NAME}"
LIBRARY_OUTPUT_DIRECTORY "${CMAKE_INSTALL_LIBDIR}"
LIBRARY_OUTPUT_NAME "${TARGET_NAME}"
INSTALL_RPATH "${CMAKE_INSTALL_LIBDIR}"
INSTALL_RPATH_USE_LINK_PATH TRUE
VERSION ${CPPKAFKA_VERSION}
SOVERSION ${CPPKAFKA_VERSION})
set(DEPENDENCIES ${RDKAFKA_LIBRARY})
# In CMake >= 3.15 Boost::boost == Boost::headers
target_link_libraries(${TARGET_NAME} PUBLIC RdKafka::rdkafka Boost::boost)
if (WIN32)
# On Windows, ntohs and related functions are in ws2_32
set(DEPENDENCIES ${DEPENDENCIES} ws2_32.lib)
target_link_libraries(${TARGET_NAME} PUBLIC ws2_32.lib)
endif()
target_link_libraries(cppkafka ${DEPENDENCIES})
target_include_directories(cppkafka PUBLIC ${PROJECT_SOURCE_DIR}/include)
# Install cppkafka target and specify all properties needed for the exported file
install(
TARGETS ${TARGET_NAME}
EXPORT ${TARGET_EXPORT_NAME}
COMPONENT binaries
LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
)
if (CPPKAFKA_EXPORT_PKGCONFIG)
# Generate and install pkgconfig file
configure_file(${PROJECT_SOURCE_DIR}/cmake/cppkafka.pc.in ${PKG_CONFIG_FILE} @ONLY)
install(
TARGETS cppkafka
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
COMPONENT dev
)
install(
FILES ${PKG_CONFIG_FILE}
DESTINATION "${CPPKAFKA_PKGCONFIG_DIR}"
COMPONENT pkgconfig
)
endif()
if (CPPKAFKA_EXPORT_CMAKE_CONFIG)
# Install the exported file
install(
EXPORT "${TARGET_EXPORT_NAME}"
NAMESPACE "${NAMESPACE}"
COMPONENT config
DESTINATION "${CPPKAFKA_CONFIG_DIR}"
)
# Generate CMAKE configuration file and exported targets
configure_package_config_file(
"${PROJECT_SOURCE_DIR}/cmake/config.cmake.in"
"${CONFIG_FILE}"
INSTALL_DESTINATION "${CPPKAFKA_CONFIG_DIR}"
PATH_VARS RDKAFKA_MIN_VERSION_HEX CMAKE_INSTALL_PREFIX CMAKE_INSTALL_INCLUDEDIR CMAKE_INSTALL_LIBDIR
)
# Generate version file
write_basic_package_version_file(
"${VERSION_FILE}"
VERSION ${CPPKAFKA_VERSION}
COMPATIBILITY AnyNewerVersion
)
install(
FILES "${CONFIG_FILE}" "${VERSION_FILE}" "${FIND_RDKAFKA_FILE}"
DESTINATION "${CPPKAFKA_CONFIG_DIR}"
COMPONENT config
)
endif()


@@ -34,6 +34,7 @@
using std::string;
using std::equal;
using std::lexicographical_compare;
using std::ostream;
using std::hex;
using std::dec;
@@ -101,4 +102,22 @@ bool operator!=(const Buffer& lhs, const Buffer& rhs) {
return !(lhs == rhs);
}
bool operator<(const Buffer& lhs, const Buffer& rhs) {
return lexicographical_compare(lhs.get_data(), lhs.get_data() + lhs.get_size(),
rhs.get_data(), rhs.get_data() + rhs.get_size());
}
bool operator>(const Buffer& lhs, const Buffer& rhs) {
return lexicographical_compare(rhs.get_data(), rhs.get_data() + rhs.get_size(),
lhs.get_data(), lhs.get_data() + lhs.get_size());
}
bool operator<=(const Buffer& lhs, const Buffer& rhs) {
return !(lhs > rhs);
}
bool operator>=(const Buffer& lhs, const Buffer& rhs) {
return !(lhs < rhs);
}
} // cppkafka
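The new operators give Buffer the usual byte-wise lexicographic ordering, e.g.:

// Sketch only: Buffer now orders like std::string over raw bytes.
#include <cassert>
const std::string a = "abc", b = "abd";
assert(Buffer(a) < Buffer(b));
assert(Buffer(b) >= Buffer(a));
assert(Buffer(a) <= Buffer(a));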


@@ -102,6 +102,13 @@ int socket_callback_proxy(int domain, int type, int protocol, void* opaque) {
(domain, type, protocol);
}
void background_event_callback_proxy(rd_kafka_t*, rd_kafka_event_t* event_ptr, void *opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
CallbackInvoker<Configuration::BackgroundEventCallback>
("background_event", handle->get_configuration().get_background_event_callback(), handle)
(*handle, Event{event_ptr});
}
// Configuration
Configuration::Configuration()
@@ -177,6 +184,19 @@ Configuration& Configuration::set_socket_callback(SocketCallback callback) {
return *this;
}
#if RD_KAFKA_VERSION >= RD_KAFKA_ADMIN_API_SUPPORT_VERSION
Configuration& Configuration::set_background_event_callback(BackgroundEventCallback callback) {
background_event_callback_ = move(callback);
rd_kafka_conf_set_background_event_cb(handle_.get(), &background_event_callback_proxy);
return *this;
}
Configuration& Configuration::set_events(int events) {
rd_kafka_conf_set_events(handle_.get(), events);
return *this;
}
#endif
Configuration&
Configuration::set_default_topic_configuration(TopicConfiguration config) {
default_topic_config_ = std::move(config);
@@ -239,6 +259,11 @@ const Configuration::SocketCallback& Configuration::get_socket_callback() const
return socket_callback_;
}
const Configuration::BackgroundEventCallback&
Configuration::get_background_event_callback() const {
return background_event_callback_;
}
const optional<TopicConfiguration>& Configuration::get_default_topic_configuration() const {
return default_topic_config_;
}
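A hedged sketch of wiring the new background-event support together; the broker address reuses the test default and the event mask is illustrative:

// Sketch only: request stats/error events and handle them on
// librdkafka's background thread.
Configuration config = {
    { "metadata.broker.list", "kafka-vm:9092" }
};
config.set_events(RD_KAFKA_EVENT_STATS | RD_KAFKA_EVENT_ERROR);
config.set_background_event_callback([](KafkaHandleBase& handle, Event event) {
    // inspect event.get_type(), event.get_error(), ...
});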


@@ -29,6 +29,7 @@
#include <sstream>
#include <algorithm>
#include <cctype>
#include "macros.h"
#include "consumer.h"
#include "exceptions.h"
#include "logging.h"
@@ -44,20 +45,10 @@ using std::ostringstream;
using std::chrono::milliseconds;
using std::toupper;
using std::equal;
using std::allocator;
namespace cppkafka {
// See: https://github.com/edenhill/librdkafka/issues/1792
const int rd_kafka_queue_refcount_bug_version = 0x000b0500;
Queue get_queue(rd_kafka_queue_t* handle) {
if (rd_kafka_version() <= rd_kafka_queue_refcount_bug_version) {
return Queue::make_non_owning(handle);
}
else {
return Queue(handle);
}
}
void Consumer::rebalance_proxy(rd_kafka_t*, rd_kafka_resp_err_t error,
rd_kafka_topic_partition_list_t *partitions, void *opaque) {
TopicPartitionList list = convert(partitions);
@@ -133,16 +124,10 @@ void Consumer::unsubscribe() {
void Consumer::assign(const TopicPartitionList& topic_partitions) {
rd_kafka_resp_err_t error;
if (topic_partitions.empty()) {
error = rd_kafka_assign(get_handle(), nullptr);
check_error(error);
}
else {
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
error = rd_kafka_assign(get_handle(), topic_list_handle.get());
check_error(error, topic_list_handle.get());
}
}
void Consumer::unassign() {
rd_kafka_resp_err_t error = rd_kafka_assign(get_handle(), nullptr);
@@ -194,9 +179,15 @@ KafkaHandleBase::OffsetTuple Consumer::get_offsets(const TopicPartition& topic_p
TopicPartitionList
Consumer::get_offsets_committed(const TopicPartitionList& topic_partitions) const {
return get_offsets_committed(topic_partitions, get_timeout());
}
TopicPartitionList
Consumer::get_offsets_committed(const TopicPartitionList& topic_partitions,
milliseconds timeout) const {
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
rd_kafka_resp_err_t error = rd_kafka_committed(get_handle(), topic_list_handle.get(),
static_cast<int>(get_timeout().count()));
static_cast<int>(timeout.count()));
check_error(error, topic_list_handle.get());
return convert(topic_list_handle);
}
@@ -209,6 +200,23 @@ Consumer::get_offsets_position(const TopicPartitionList& topic_partitions) const
return convert(topic_list_handle);
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION)
void Consumer::store_consumed_offsets() const {
store_offsets(get_offsets_position(get_assignment()));
}
void Consumer::store_offsets(const TopicPartitionList& topic_partitions) const {
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
rd_kafka_resp_err_t error = rd_kafka_offsets_store(get_handle(), topic_list_handle.get());
check_error(error, topic_list_handle.get());
}
#endif
void Consumer::store_offset(const Message& msg) const {
rd_kafka_resp_err_t error = rd_kafka_offset_store(msg.get_handle()->rkt, msg.get_partition(), msg.get_offset());
check_error(error);
}
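These helpers pair naturally with enable.auto.offset.store=false, storing an offset only once its message has actually been processed. A hedged sketch (broker and group names are placeholders):

// Sketch only: manual offset storage.
Configuration config = {
    { "metadata.broker.list", "kafka-vm:9092" },
    { "group.id", "example_group" },
    { "enable.auto.offset.store", false }
};
Consumer consumer(config);
Message msg = consumer.poll();
if (msg && !msg.get_error()) {
    // ... process msg ...
    consumer.store_offset(msg); // committed later by auto-commit or commit()
}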
vector<string> Consumer::get_subscription() const {
rd_kafka_resp_err_t error;
rd_kafka_topic_partition_list_t* list = nullptr;
@@ -255,38 +263,28 @@ Message Consumer::poll(milliseconds timeout) {
return rd_kafka_consumer_poll(get_handle(), static_cast<int>(timeout.count()));
}
MessageList Consumer::poll_batch(size_t max_batch_size) {
return poll_batch(max_batch_size, get_timeout());
std::vector<Message> Consumer::poll_batch(size_t max_batch_size) {
return poll_batch(max_batch_size, get_timeout(), allocator<Message>());
}
MessageList Consumer::poll_batch(size_t max_batch_size, milliseconds timeout) {
vector<rd_kafka_message_t*> raw_messages(max_batch_size);
// Note that this will leak the queue when using rdkafka < 0.11.5 (see get_queue comment)
Queue queue(get_queue(rd_kafka_queue_get_consumer(get_handle())));
ssize_t result = rd_kafka_consume_batch_queue(queue.get_handle() , timeout.count(), raw_messages.data(),
raw_messages.size());
if (result == -1) {
check_error(rd_kafka_last_error());
// on the off-chance that check_error() does not throw an error
return MessageList();
}
return MessageList(raw_messages.begin(), raw_messages.begin() + result);
std::vector<Message> Consumer::poll_batch(size_t max_batch_size, milliseconds timeout) {
return poll_batch(max_batch_size, timeout, allocator<Message>());
}
Queue Consumer::get_main_queue() const {
Queue queue(get_queue(rd_kafka_queue_get_main(get_handle())));
Queue queue = Queue::make_queue(rd_kafka_queue_get_main(get_handle()));
queue.disable_queue_forwarding();
return queue;
}
Queue Consumer::get_consumer_queue() const {
return get_queue(rd_kafka_queue_get_consumer(get_handle()));
return Queue::make_queue(rd_kafka_queue_get_consumer(get_handle()));
}
Queue Consumer::get_partition_queue(const TopicPartition& partition) const {
Queue queue(get_queue(rd_kafka_queue_get_partition(get_handle(),
Queue queue = Queue::make_queue(rd_kafka_queue_get_partition(get_handle(),
partition.get_topic().c_str(),
partition.get_partition())));
partition.get_partition()));
queue.disable_queue_forwarding();
return queue;
}

src/event.cpp (new file)

@@ -0,0 +1,93 @@
/*
* Copyright (c) 2018, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "event.h"
using std::allocator;
using std::string;
using std::unique_ptr;
using std::vector;
namespace cppkafka {
Event::Event(rd_kafka_event_t* handle)
: handle_(handle, &rd_kafka_event_destroy) {
}
string Event::get_name() const {
return rd_kafka_event_name(handle_.get());
}
rd_kafka_event_type_t Event::get_type() const {
return rd_kafka_event_type(handle_.get());
}
Message Event::get_next_message() const {
// Note: the const qualifier on rd_kafka_event_message_next's return value is unnecessary and it
// breaks Message's interface. Casting it away is dirty, but it should have no side effects.
const auto message =
const_cast<rd_kafka_message_t*>(rd_kafka_event_message_next(handle_.get()));
return Message::make_non_owning(message);
}
vector<Message> Event::get_messages() {
return get_messages(allocator<Message>());
}
size_t Event::get_message_count() const {
return rd_kafka_event_message_count(handle_.get());
}
Error Event::get_error() const {
return rd_kafka_event_error(handle_.get());
}
void* Event::get_opaque() const {
return rd_kafka_event_opaque(handle_.get());
}
TopicPartition Event::get_topic_partition() const {
using TopparHandle = unique_ptr<rd_kafka_topic_partition_t,
decltype(&rd_kafka_topic_partition_destroy)>;
TopparHandle toppar_handle{rd_kafka_event_topic_partition(handle_.get()),
&rd_kafka_topic_partition_destroy};
return TopicPartition(toppar_handle->topic, toppar_handle->partition, toppar_handle->offset);
}
TopicPartitionList Event::get_topic_partition_list() const {
auto toppars_handle = rd_kafka_event_topic_partition_list(handle_.get());
return convert(toppars_handle);
}
Event::operator bool() const {
return !!handle_;
}
} // cppkafka
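For reference, a hedged sketch of pulling events through the new class; it mirrors the hidden "Event consumption" test further down:

// Sketch only: poll the handle's main queue for events instead of messages.
Queue queue = consumer.get_main_queue();
Event event = queue.next_event(std::chrono::milliseconds(100));
if (event) {
    switch (event.get_type()) {
        case RD_KAFKA_EVENT_ERROR:
            // event.get_error() ...
            break;
        default:
            break;
    }
}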


@@ -119,4 +119,11 @@ Error QueueException::get_error() const {
return error_;
}
// ActionTerminatedException
ActionTerminatedException::ActionTerminatedException(const string& error)
: Exception(error) {
}
} // cppkafka


@@ -48,7 +48,7 @@ namespace cppkafka {
const milliseconds KafkaHandleBase::DEFAULT_TIMEOUT{1000};
KafkaHandleBase::KafkaHandleBase(Configuration config)
: timeout_ms_(DEFAULT_TIMEOUT), config_(move(config)), handle_(nullptr, nullptr) {
: timeout_ms_(DEFAULT_TIMEOUT), config_(move(config)), handle_(nullptr, HandleDeleter(this)), destroy_flags_(0) {
auto& maybe_config = config_.get_default_topic_configuration();
if (maybe_config) {
maybe_config->set_as_opaque();
@@ -83,6 +83,10 @@ void KafkaHandleBase::set_timeout(milliseconds timeout) {
timeout_ms_ = timeout;
}
void KafkaHandleBase::set_log_level(LogLevel level) {
rd_kafka_set_log_level(handle_.get(), static_cast<int>(level));
}
void KafkaHandleBase::add_brokers(const string& brokers) {
rd_kafka_brokers_add(handle_.get(), brokers.data());
}
@@ -104,24 +108,40 @@ Topic KafkaHandleBase::get_topic(const string& name, TopicConfiguration config)
KafkaHandleBase::OffsetTuple
KafkaHandleBase::query_offsets(const TopicPartition& topic_partition) const {
return query_offsets(topic_partition, timeout_ms_);
}
KafkaHandleBase::OffsetTuple
KafkaHandleBase::query_offsets(const TopicPartition& topic_partition,
milliseconds timeout) const {
int64_t low;
int64_t high;
const string& topic = topic_partition.get_topic();
const int partition = topic_partition.get_partition();
const int timeout = static_cast<int>(timeout_ms_.count());
const int timeout_ms = static_cast<int>(timeout.count());
rd_kafka_resp_err_t result = rd_kafka_query_watermark_offsets(handle_.get(), topic.data(),
partition, &low, &high,
timeout);
timeout_ms);
check_error(result);
return make_tuple(low, high);
}
Metadata KafkaHandleBase::get_metadata(bool all_topics) const {
return get_metadata(all_topics, nullptr);
return get_metadata(all_topics, nullptr, timeout_ms_);
}
Metadata KafkaHandleBase::get_metadata(bool all_topics,
milliseconds timeout) const {
return get_metadata(all_topics, nullptr, timeout);
}
TopicMetadata KafkaHandleBase::get_metadata(const Topic& topic) const {
Metadata md = get_metadata(false, topic.get_handle());
return get_metadata(topic, timeout_ms_);
}
TopicMetadata KafkaHandleBase::get_metadata(const Topic& topic,
milliseconds timeout) const {
Metadata md = get_metadata(false, topic.get_handle(), timeout);
auto topics = md.get_topics();
if (topics.empty()) {
throw ElementNotFound("topic metadata", topic.get_name());
@@ -130,7 +150,12 @@ TopicMetadata KafkaHandleBase::get_metadata(const Topic& topic) const {
}
GroupInformation KafkaHandleBase::get_consumer_group(const string& name) {
auto result = fetch_consumer_groups(name.c_str());
return get_consumer_group(name, timeout_ms_);
}
GroupInformation KafkaHandleBase::get_consumer_group(const string& name,
milliseconds timeout) {
auto result = fetch_consumer_groups(name.c_str(), timeout);
if (result.empty()) {
throw ElementNotFound("consumer group information", name);
}
@@ -138,11 +163,21 @@ GroupInformation KafkaHandleBase::get_consumer_group(const string& name) {
}
vector<GroupInformation> KafkaHandleBase::get_consumer_groups() {
return fetch_consumer_groups(nullptr);
return get_consumer_groups(timeout_ms_);
}
vector<GroupInformation> KafkaHandleBase::get_consumer_groups(milliseconds timeout) {
return fetch_consumer_groups(nullptr, timeout);
}
TopicPartitionList
KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queries) const {
return get_offsets_for_times(queries, timeout_ms_);
}
TopicPartitionList
KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queries,
milliseconds timeout) const {
TopicPartitionList topic_partitions;
for (const auto& query : queries) {
const TopicPartition& topic_partition = query.first;
@@ -150,9 +185,9 @@ KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queri
query.second.count());
}
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
const int timeout = static_cast<int>(timeout_ms_.count());
const int timeout_ms = static_cast<int>(timeout.count());
rd_kafka_resp_err_t result = rd_kafka_offsets_for_times(handle_.get(), topic_list_handle.get(),
timeout);
timeout_ms);
check_error(result, topic_list_handle.get());
return convert(topic_list_handle);
}
@@ -178,7 +213,7 @@ void KafkaHandleBase::yield() const {
}
void KafkaHandleBase::set_handle(rd_kafka_t* handle) {
handle_ = HandlePtr(handle, &rd_kafka_destroy);
handle_ = HandlePtr(handle, HandleDeleter(this));
}
Topic KafkaHandleBase::get_topic(const string& name, rd_kafka_topic_conf_t* conf) {
@@ -189,19 +224,22 @@ Topic KafkaHandleBase::get_topic(const string& name, rd_kafka_topic_conf_t* conf
return Topic(topic);
}
Metadata KafkaHandleBase::get_metadata(bool all_topics, rd_kafka_topic_t* topic_ptr) const {
Metadata KafkaHandleBase::get_metadata(bool all_topics,
rd_kafka_topic_t* topic_ptr,
milliseconds timeout) const {
const rd_kafka_metadata_t* metadata;
const int timeout = static_cast<int>(timeout_ms_.count());
const int timeout_ms = static_cast<int>(timeout.count());
rd_kafka_resp_err_t error = rd_kafka_metadata(get_handle(), !!all_topics,
topic_ptr, &metadata, timeout);
topic_ptr, &metadata, timeout_ms);
check_error(error);
return Metadata(metadata);
}
vector<GroupInformation> KafkaHandleBase::fetch_consumer_groups(const char* name) {
vector<GroupInformation> KafkaHandleBase::fetch_consumer_groups(const char* name,
milliseconds timeout) {
const rd_kafka_group_list* list = nullptr;
const int timeout = static_cast<int>(timeout_ms_.count());
auto result = rd_kafka_list_groups(get_handle(), name, &list, timeout);
const int timeout_ms = static_cast<int>(timeout.count());
auto result = rd_kafka_list_groups(get_handle(), name, &list, timeout_ms);
check_error(result);
// Wrap this in a unique_ptr so it gets deleted automatically
@@ -237,7 +275,7 @@ void KafkaHandleBase::check_error(rd_kafka_resp_err_t error,
// check if any partition has errors
for (int i = 0; i < list_ptr->cnt; ++i) {
if (list_ptr->elems[i].err != RD_KAFKA_RESP_ERR_NO_ERROR) {
throw HandleException(error);
throw HandleException(list_ptr->elems[i].err);
}
}
}
@@ -247,4 +285,25 @@ rd_kafka_conf_t* KafkaHandleBase::get_configuration_handle() {
return config_.get_handle();
}
#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
void KafkaHandleBase::set_destroy_flags(int destroy_flags) {
destroy_flags_ = destroy_flags;
}
int KafkaHandleBase::get_destroy_flags() const {
return destroy_flags_;
}
#endif
void KafkaHandleBase::HandleDeleter::operator()(rd_kafka_t* handle) {
#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
rd_kafka_destroy_flags(handle, handle_base_ptr_->get_destroy_flags());
#else
rd_kafka_destroy(handle);
#endif
}
} // cppkafka
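A hedged sketch of the new destroy-flags hook; RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE is librdkafka's flag for skipping the orderly consumer close, assuming a recent enough rdkafka:

// Sketch only: skip the final consumer close on destruction, e.g. when the
// application wants to shut down quickly without leaving the group cleanly.
#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
consumer.set_destroy_flags(RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
#endif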


@@ -63,6 +63,16 @@ Message::Message(HandlePtr handle)
payload_(handle_ ? Buffer((const Buffer::DataType*)handle_->payload, handle_->len) : Buffer()),
key_(handle_ ? Buffer((const Buffer::DataType*)handle_->key, handle_->key_len) : Buffer()),
user_data_(handle_ ? handle_->_private : nullptr) {
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
// get the header list if any
if (handle_) {
rd_kafka_headers_t* headers_handle;
Error error = rd_kafka_message_headers(handle_.get(), &headers_handle);
if (!error) {
header_list_ = HeaderListType::make_non_owning(headers_handle);
}
}
#endif
}
Message& Message::load_internal() {
@@ -74,19 +84,14 @@ Message& Message::load_internal() {
return *this;
}
// MessageTimestamp
MessageTimestamp::MessageTimestamp(milliseconds timestamp, TimestampType type)
: timestamp_(timestamp), type_(type) {
}
milliseconds MessageTimestamp::get_timestamp() const {
return timestamp_;
}
MessageTimestamp::TimestampType MessageTimestamp::get_type() const {
return type_;
}
boost::optional<MessageTimestamp> Message::get_timestamp() const {
rd_kafka_timestamp_type_t type = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
int64_t timestamp = rd_kafka_message_timestamp(handle_.get(), &type);
if (timestamp == -1 || type == RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
return {};
}
return MessageTimestamp(std::chrono::milliseconds(timestamp),
static_cast<MessageTimestamp::TimestampType>(type));
}
} // cppkafka
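Since get_timestamp() now returns boost::optional<MessageTimestamp>, call sites need an explicit presence check, e.g.:

// Sketch only: the optional is empty when the broker supplied no timestamp.
boost::optional<MessageTimestamp> ts = msg.get_timestamp();
if (ts) {
    std::chrono::milliseconds when = ts->get_timestamp();
    // ... use when.count() ...
}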

src/message_timestamp.cpp (new file)

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "message_timestamp.h"
using std::chrono::milliseconds;
namespace cppkafka {
MessageTimestamp::MessageTimestamp(milliseconds timestamp, TimestampType type)
: timestamp_(timestamp),
type_(type) {
}
milliseconds MessageTimestamp::get_timestamp() const {
return timestamp_;
}
MessageTimestamp::TimestampType MessageTimestamp::get_type() const {
return type_;
}
} // cppkafka


@@ -52,7 +52,6 @@ Producer::Producer(Configuration config)
if (!ptr) {
throw Exception("Failed to create producer handle: " + string(error_buffer));
}
rd_kafka_set_log_level(ptr, 7);
set_handle(ptr);
}
@@ -64,39 +63,44 @@ Producer::PayloadPolicy Producer::get_payload_policy() const {
return message_payload_policy_;
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
void Producer::produce(const MessageBuilder& builder) {
const Buffer& payload = builder.payload();
const Buffer& key = builder.key();
const int policy = static_cast<int>(message_payload_policy_);
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(builder.topic().data()),
RD_KAFKA_V_PARTITION(builder.partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(builder.user_data()),
RD_KAFKA_V_END);
check_error(result);
do_produce(builder, MessageBuilder::HeaderListType(builder.header_list())); //copy headers
}
void Producer::produce(MessageBuilder&& builder) {
do_produce(builder, std::move(builder.header_list())); //move headers
}
void Producer::produce(const Message& message) {
const Buffer& payload = message.get_payload();
const Buffer& key = message.get_key();
const int policy = static_cast<int>(message_payload_policy_);
int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0;
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(message.get_topic().data()),
RD_KAFKA_V_PARTITION(message.get_partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(duration),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(message.get_user_data()),
RD_KAFKA_V_END);
check_error(result);
do_produce(message, HeaderList<Message::HeaderType>(message.get_header_list())); //copy headers
}
void Producer::produce(Message&& message) {
do_produce(message, message.detach_header_list<Message::HeaderType>()); //move headers
}
#else
void Producer::produce(const MessageBuilder& builder) {
do_produce(builder);
}
void Producer::produce(MessageBuilder&& builder) {
do_produce(builder);
}
void Producer::produce(const Message& message) {
do_produce(message);
}
void Producer::produce(Message&& message) {
do_produce(message);
}
#endif
int Producer::poll() {
return poll(get_timeout());
}
@@ -114,4 +118,80 @@ void Producer::flush(milliseconds timeout) {
check_error(result);
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
void Producer::do_produce(const MessageBuilder& builder,
MessageBuilder::HeaderListType&& headers) {
const Buffer& payload = builder.payload();
const Buffer& key = builder.key();
const int policy = static_cast<int>(message_payload_policy_);
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(builder.topic().data()),
RD_KAFKA_V_PARTITION(builder.partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_HEADERS(headers.release_handle()), //pass ownership to rdkafka
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(builder.user_data()),
RD_KAFKA_V_END);
check_error(result);
}
void Producer::do_produce(const Message& message,
MessageBuilder::HeaderListType&& headers) {
const Buffer& payload = message.get_payload();
const Buffer& key = message.get_key();
const int policy = static_cast<int>(message_payload_policy_);
int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0;
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(message.get_topic().data()),
RD_KAFKA_V_PARTITION(message.get_partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(duration),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_HEADERS(headers.release_handle()), //pass ownership to rdkafka
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(message.get_user_data()),
RD_KAFKA_V_END);
check_error(result);
}
#else
void Producer::do_produce(const MessageBuilder& builder) {
const Buffer& payload = builder.payload();
const Buffer& key = builder.key();
const int policy = static_cast<int>(message_payload_policy_);
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(builder.topic().data()),
RD_KAFKA_V_PARTITION(builder.partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(builder.user_data()),
RD_KAFKA_V_END);
check_error(result);
}
void Producer::do_produce(const Message& message) {
const Buffer& payload = message.get_payload();
const Buffer& key = message.get_key();
const int policy = static_cast<int>(message_payload_policy_);
int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0;
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(message.get_topic().data()),
RD_KAFKA_V_PARTITION(message.get_partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(duration),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(message.get_user_data()),
RD_KAFKA_V_END);
check_error(result);
}
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
} // cppkafka
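A hedged sketch contrasting the copy and move overloads; topic and header names are made up:

// Sketch only: the rvalue overload hands the header list to rdkafka
// without copying it; the const-reference overload copies first.
const std::string payload = "hello";
const std::string trace = "abc123";
MessageBuilder builder("some_topic");
builder.payload(payload)
       .header(MessageBuilder::HeaderType{"trace-id", trace});
producer.produce(builder);            // headers copied, builder keeps them
producer.produce(std::move(builder)); // headers moved into rdkafka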


@@ -32,6 +32,7 @@
using std::vector;
using std::exception;
using std::chrono::milliseconds;
using std::allocator;
namespace cppkafka {
@@ -45,6 +46,15 @@ Queue Queue::make_non_owning(rd_kafka_queue_t* handle) {
return Queue(handle, NonOwningTag{});
}
Queue Queue::make_queue(rd_kafka_queue_t* handle) {
if (rd_kafka_version() <= RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION) {
return Queue::make_non_owning(handle);
}
else {
return Queue(handle);
}
}
Queue::Queue()
: handle_(nullptr, nullptr),
timeout_ms_(DEFAULT_TIMEOUT) {
@@ -94,25 +104,20 @@ Message Queue::consume(milliseconds timeout) const {
return Message(rd_kafka_consume_queue(handle_.get(), static_cast<int>(timeout.count())));
}
MessageList Queue::consume_batch(size_t max_batch_size) const {
return consume_batch(max_batch_size, timeout_ms_);
vector<Message> Queue::consume_batch(size_t max_batch_size) const {
return consume_batch(max_batch_size, timeout_ms_, allocator<Message>());
}
MessageList Queue::consume_batch(size_t max_batch_size, milliseconds timeout) const {
vector<rd_kafka_message_t*> raw_messages(max_batch_size);
ssize_t result = rd_kafka_consume_batch_queue(handle_.get(),
static_cast<int>(timeout.count()),
raw_messages.data(),
raw_messages.size());
if (result == -1) {
rd_kafka_resp_err_t error = rd_kafka_last_error();
if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
throw QueueException(error);
}
return MessageList();
}
// Build message list
return MessageList(raw_messages.begin(), raw_messages.begin() + result);
}
vector<Message> Queue::consume_batch(size_t max_batch_size, milliseconds timeout) const {
return consume_batch(max_batch_size, timeout, allocator<Message>());
}
Event Queue::next_event() const {
return next_event(timeout_ms_);
}
Event Queue::next_event(milliseconds timeout) const {
return Event(rd_kafka_queue_poll(handle_.get(), timeout.count()));
}
} //cppkafka


@@ -76,6 +76,10 @@ int64_t TopicPartition::get_offset() const {
return offset_;
}
void TopicPartition::set_partition(int partition) {
partition_ = partition;
}
void TopicPartition::set_offset(int64_t offset) {
offset_ = offset;
}


@@ -38,6 +38,7 @@ using std::vector;
using std::set;
using std::ostream;
using std::string;
using std::equal;
namespace cppkafka {
@@ -45,10 +46,11 @@ TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions) {
TopicPartitionsListPtr handle(rd_kafka_topic_partition_list_new(topic_partitions.size()),
&rd_kafka_topic_partition_list_destroy);
for (const auto& item : topic_partitions) {
rd_kafka_topic_partition_t* new_item = nullptr;
new_item = rd_kafka_topic_partition_list_add(handle.get(),
rd_kafka_topic_partition_t* new_item = rd_kafka_topic_partition_list_add(
handle.get(),
item.get_topic().data(),
item.get_partition());
item.get_partition()
);
new_item->offset = item.get_offset();
}
return handle;


@@ -43,6 +43,12 @@ void BackoffCommitter::set_error_callback(ErrorCallback callback) {
callback_ = move(callback);
}
void BackoffCommitter::commit() {
perform([&] {
return do_commit();
});
}
void BackoffCommitter::commit(const Message& msg) {
perform([&] {
return do_commit(msg);


@@ -89,13 +89,29 @@ void PollStrategyBase::reset_state() {
}
void PollStrategyBase::on_assignment(TopicPartitionList& partitions) {
void PollStrategyBase::assign(TopicPartitionList& partitions) {
// populate partition queues
for (const auto& partition : partitions) {
// get the queue associated with this partition
partition_queues_.emplace(partition, QueueData{consumer_.get_partition_queue(partition), boost::any()});
}
reset_state();
}
void PollStrategyBase::revoke(const TopicPartitionList& partitions) {
for (const auto &partition : partitions) {
partition_queues_.erase(partition);
}
reset_state();
}
void PollStrategyBase::revoke() {
partition_queues_.clear();
reset_state();
}
void PollStrategyBase::on_assignment(TopicPartitionList& partitions) {
assign(partitions);
// call original consumer callback if any
if (assignment_callback_) {
assignment_callback_(partitions);
@@ -103,15 +119,7 @@ void PollStrategyBase::on_assignment(TopicPartitionList& partitions) {
}
void PollStrategyBase::on_revocation(const TopicPartitionList& partitions) {
for (const auto& partition : partitions) {
// get the queue associated with this partition
auto toppar_it = partition_queues_.find(partition);
if (toppar_it != partition_queues_.end()) {
// remove this queue from the list
partition_queues_.erase(toppar_it);
}
}
reset_state();
revoke(partitions);
// call original consumer callback if any
if (revocation_callback_) {
revocation_callback_(partitions);


@@ -32,6 +32,7 @@
using std::string;
using std::chrono::milliseconds;
using std::make_move_iterator;
using std::allocator;
namespace cppkafka {
@@ -67,45 +68,14 @@ Message RoundRobinPollStrategy::poll(milliseconds timeout) {
return get_consumer_queue().queue.consume(timeout);
}
MessageList RoundRobinPollStrategy::poll_batch(size_t max_batch_size) {
return poll_batch(max_batch_size, get_consumer().get_timeout());
}
MessageList RoundRobinPollStrategy::poll_batch(size_t max_batch_size, milliseconds timeout) {
MessageList messages;
ssize_t count = max_batch_size;
// batch from the group event queue first (non-blocking)
consume_batch(get_consumer_queue().queue, messages, count, milliseconds(0));
size_t num_queues = get_partition_queues().size();
while ((count > 0) && (num_queues--)) {
// batch from the next partition (non-blocking)
consume_batch(get_next_queue().queue, messages, count, milliseconds(0));
}
// we still have space left in the buffer
if (count > 0) {
// wait on the event queue until timeout
consume_batch(get_consumer_queue().queue, messages, count, timeout);
}
return messages;
}
void RoundRobinPollStrategy::consume_batch(Queue& queue,
MessageList& messages,
ssize_t& count,
milliseconds timeout) {
MessageList queue_messages = queue.consume_batch(count, timeout);
if (queue_messages.empty()) {
return;
}
// concatenate both lists
messages.insert(messages.end(),
make_move_iterator(queue_messages.begin()),
make_move_iterator(queue_messages.end()));
// reduce total batch count
count -= queue_messages.size();
}
std::vector<Message> RoundRobinPollStrategy::poll_batch(size_t max_batch_size) {
return poll_batch(max_batch_size, get_consumer().get_timeout(), allocator<Message>());
}
std::vector<Message> RoundRobinPollStrategy::poll_batch(size_t max_batch_size,
milliseconds timeout) {
return poll_batch(max_batch_size, timeout, allocator<Message>());
}
void RoundRobinPollStrategy::restore_forwarding() {
// forward all partition queues


@@ -1,16 +1,36 @@
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)
include_directories(SYSTEM ${CATCH_INCLUDE})
include_directories(SYSTEM ${RDKAFKA_INCLUDE_DIR})
set(KAFKA_TEST_INSTANCE "kafka-vm:9092"
if (NOT KAFKA_TEST_INSTANCE)
set(KAFKA_TEST_INSTANCE kafka-vm:9092
CACHE STRING "The Kafka instance to connect to when running tests")
endif()
if (NOT KAFKA_NUM_PARTITIONS)
set(KAFKA_NUM_PARTITIONS 3 CACHE STRING "Kafka Number of partitions")
endif()
if (NOT KAFKA_TOPICS)
set(KAFKA_TOPICS "cppkafka_test1;cppkafka_test2" CACHE STRING "Kafka topics")
endif()
# Convert list of topics into a C++ initializer list
FOREACH(TOPIC ${KAFKA_TOPICS})
if (NOT TOPIC_LIST)
set(TOPIC_LIST "\"${TOPIC}\"")
else()
set(TOPIC_LIST "${TOPIC_LIST},\"${TOPIC}\"")
endif()
ENDFOREACH()
add_custom_target(tests)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
add_definitions("-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\"")
add_definitions(
"-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\""
-DKAFKA_NUM_PARTITIONS=${KAFKA_NUM_PARTITIONS}
-DKAFKA_TOPIC_NAMES=${TOPIC_LIST}
)
add_executable(
cppkafka_tests
add_executable(cppkafka_tests
buffer_test.cpp
compacted_topic_processor_test.cpp
configuration_test.cpp
@@ -19,10 +39,14 @@ add_executable(
producer_test.cpp
consumer_test.cpp
roundrobin_poll_test.cpp
headers_test.cpp
test_utils.cpp
# Main file
test_main.cpp
)
target_link_libraries(cppkafka_tests cppkafka ${RDKAFKA_LIBRARY} pthread rt ssl crypto dl z)
# In CMake >= 3.15 Boost::boost == Boost::headers
target_link_libraries(cppkafka_tests cppkafka RdKafka::rdkafka Boost::boost)
add_dependencies(tests cppkafka_tests)
add_test(cppkafka cppkafka_tests)


@@ -1,11 +1,13 @@
#include <string>
#include <vector>
#include <array>
#include <sstream>
#include <catch.hpp>
#include "cppkafka/buffer.h"
using std::string;
using std::vector;
using std::array;
using std::ostringstream;
using namespace cppkafka;
@@ -36,10 +38,32 @@ TEST_CASE("conversions", "[buffer]") {
}
TEST_CASE("construction", "[buffer]") {
// From string
const string str_data = "Hello world!";
const vector<uint8_t> data(str_data.begin(), str_data.end());
const Buffer buffer(data);
// From vector
const vector<uint8_t> vector_data(str_data.begin(), str_data.end());
// From array
const array<char,12> array_data{{'H','e','l','l','o',' ','w','o','r','l','d','!'}};
// From raw array
const char raw_array[12]{'H','e','l','l','o',' ','w','o','r','l','d','!'};
// Build buffers
const Buffer buffer(vector_data); //vector
const Buffer buffer2(vector_data.begin(), vector_data.end()); //iterators
const Buffer buffer3(str_data.data(), str_data.data() + str_data.size()); //char iterators
const Buffer buffer4(array_data); //arrays
const Buffer buffer5(raw_array); //raw arrays
const Buffer buffer6(str_data); //string
const Buffer buffer7(str_data.data(), str_data.size()); //type + size
// Test
CHECK(str_data == buffer);
CHECK(buffer == buffer2);
CHECK(buffer == buffer3);
CHECK(buffer == buffer4);
CHECK(buffer == buffer5);
CHECK(buffer == buffer6);
CHECK(buffer == buffer7);
}


@@ -35,7 +35,7 @@ static Configuration make_producer_config() {
return config;
}
static Configuration make_consumer_config(const string& group_id = "consumer_test") {
static Configuration make_consumer_config(const string& group_id = make_consumer_group_id()) {
Configuration config;
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
config.set("enable.auto.commit", false);
@@ -85,11 +85,12 @@ TEST_CASE("message consumption", "[consumer]") {
TEST_CASE("consumer rebalance", "[consumer]") {
TopicPartitionList assignment1;
TopicPartitionList assignment2;
const string group_id = make_consumer_group_id();
bool revocation_called = false;
int partition = 0;
// Create a consumer and subscribe to the topic
Consumer consumer1(make_consumer_config());
Consumer consumer1(make_consumer_config(group_id));
consumer1.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
assignment1 = topic_partitions;
});
@@ -100,7 +101,7 @@ TEST_CASE("consumer rebalance", "[consumer]") {
ConsumerRunner runner1(consumer1, 1, KAFKA_NUM_PARTITIONS);
// Create a second consumer and subscribe to the topic
Consumer consumer2(make_consumer_config());
Consumer consumer2(make_consumer_config(group_id));
consumer2.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
assignment2 = topic_partitions;
});
@@ -195,7 +196,7 @@ TEST_CASE("consumer throttle", "[consumer]") {
if (callback_executed_count == 3) {
return Message();
}
return move(msg);
return msg;
},
[&](ConsumerDispatcher::Timeout) {
if (callback_executed_count == 3) {
@@ -240,3 +241,20 @@ TEST_CASE("consume batch", "[consumer]") {
CHECK(all_messages[0].get_payload() == payload);
CHECK(all_messages[1].get_payload() == payload);
}
// This test may fail due to what seems to be an rdkafka bug. Skip it for now until we're
// certain of what to do
TEST_CASE("Event consumption", "[!hide][consumer]") {
// Create a consumer and subscribe to the topic
Consumer consumer(make_consumer_config());
consumer.subscribe({ KAFKA_TOPICS[0] });
vector<rd_kafka_event_type_t> types = {
RD_KAFKA_EVENT_NONE
};
Queue queue = consumer.get_main_queue();
for (const auto type : types) {
const Event event = queue.next_event();
CHECK(event.get_type() == type);
}
}

tests/headers_test.cpp (new file)

@@ -0,0 +1,226 @@
#include <vector>
#include <thread>
#include <set>
#include <mutex>
#include <chrono>
#include <iterator>
#include <condition_variable>
#include <catch.hpp>
#include "cppkafka/consumer.h"
#include "cppkafka/producer.h"
#include "cppkafka/header_list.h"
#include "test_utils.h"
using std::vector;
using std::move;
using std::string;
using std::thread;
using std::set;
using std::mutex;
using std::tie;
using std::condition_variable;
using std::lock_guard;
using std::unique_lock;
using std::make_move_iterator;
using std::chrono::seconds;
using std::chrono::milliseconds;
using std::chrono::system_clock;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using namespace cppkafka;
using StringHeader = Header<std::string>;
using BufferHeader = Header<Buffer>;
TEST_CASE("creation", "[headers]") {
SECTION("empty") {
HeaderList<StringHeader> list;
REQUIRE(!!list == false);
}
SECTION("default") {
HeaderList<StringHeader> list(2);
REQUIRE(!!list == true);
REQUIRE(list.size() == 0);
REQUIRE(list.empty() == true);
REQUIRE(list.get_handle() != nullptr);
}
SECTION("from handle") {
HeaderList<StringHeader> list(rd_kafka_headers_new(1));
REQUIRE(!!list == true);
REQUIRE(list.size() == 0);
REQUIRE(list.empty() == true);
REQUIRE(list.get_handle() != nullptr);
}
}
TEST_CASE("release", "[headers]") {
HeaderList<StringHeader> list(2);
auto handle = list.release_handle();
REQUIRE(handle != nullptr);
REQUIRE(list.release_handle() == nullptr); //release again
REQUIRE(!!list == false);
rd_kafka_headers_destroy(handle);
}
TEST_CASE("modify", "[headers]") {
SECTION("add") {
HeaderList<StringHeader> list(10);
//empty header name
list.add({{}, "payload1"});
//empty payload
list.add({"header2", {}});
list.add({"header3", "payload3"});
//both null
list.add({{}, {}});
//both empty (0-length strings)
list.add({"", ""});
//validate
REQUIRE(list.size() == 5);
REQUIRE_FALSE(list.empty());
//access a header
REQUIRE(list.at(1).get_name() == "header2");
REQUIRE(list.at(1).get_value().empty());
REQUIRE(list.at(2).get_value() == "payload3");
}
SECTION("remove") {
HeaderList<StringHeader> list(10);
//empty header name
list.add({{}, "payload1"});
//empty payload
list.add({"header2", {}});
list.add({"header3", "payload3"});
//both null
list.add({{}, {}});
//both empty (0 length strings)
list.add({"", ""});
//Remove a bogus name
Error err = list.remove("bogus");
REQUIRE(err.get_error() == RD_KAFKA_RESP_ERR__NOENT);
//Remove header with name
list.remove("header2");
REQUIRE(list.size() == 4);
list.remove("header3");
REQUIRE(list.size() == 3);
//Remove headers without name
list.remove({});
REQUIRE(list.size() == 0);
}
}
TEST_CASE("copy and move", "[headers]") {
SECTION("copy owning") {
//Create an owning header list and copy it
HeaderList<StringHeader> list(3), list2(3);
list.add({"header1", "payload1"});
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
REQUIRE(list2.size() == 0);
list2 = list;
REQUIRE(list2.size() == 3);
REQUIRE(list2.size() == list.size());
//make sure the handles are different
CHECK(list.get_handle() != list2.get_handle());
CHECK(list.at(0) == list2.at(0));
CHECK(list.at(1) == list2.at(1));
CHECK(list.at(2) == list2.at(2));
CHECK(list == list2);
}
SECTION("copy owning with buffers") {
//Create an owning header list and copy it
HeaderList<BufferHeader> list(3), list2(3);
string payload1 = "payload1", payload2 = "payload2", payload3 = "payload3";
list.add({"header1", payload1});
list.add({"header2", payload2});
list.add({"header3", payload3});
REQUIRE(list2.size() == 0);
list2 = list;
REQUIRE(list2.size() == 3);
REQUIRE(list2.size() == list.size());
//make sure the handles are different
CHECK(list.get_handle() != list2.get_handle());
CHECK(list.at(0) == list2.at(0));
CHECK(list.at(1) == list2.at(1));
CHECK(list.at(2) == list2.at(2));
CHECK(list == list2);
}
SECTION("copy non-owning") {
//Create an owning header list and copy it
HeaderList<StringHeader> list(3), list2(3), list3(HeaderList<StringHeader>::make_non_owning(list.get_handle()));
list.add({"header1", "payload1"});
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
list2 = list3; //copy non-owning list
REQUIRE(list.size() == 3);
REQUIRE(list3.size() == list.size());
REQUIRE(list2.size() == list.size());
//make sure the handles are the same
CHECK(list2.get_handle() == list3.get_handle());
CHECK(list2.at(0) == list3.at(0));
CHECK(list2.at(1) == list3.at(1));
CHECK(list2.at(2) == list3.at(2));
CHECK(list2 == list3);
}
SECTION("move") {
HeaderList<StringHeader> list(3), list2;
list.add({"header1", "payload1"});
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
auto handle = list.get_handle();
list2 = std::move(list);
CHECK_FALSE(!!list);
CHECK(!!list2);
CHECK(list2.size() == 3);
CHECK(handle == list2.get_handle());
}
}
TEST_CASE("access", "[headers]") {
HeaderList<StringHeader> list(3);
list.add({"header1", "payload1"});
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
CHECK(list.at(0).get_value() == "payload1");
CHECK(list.at(1).get_value() == "payload2");
CHECK(list.at(2).get_value() == "payload3");
CHECK_THROWS_AS(list.at(3), Exception);
CHECK(list.front() == list.at(0));
CHECK(list.back() == list.at(2));
}
TEST_CASE("iterate", "[headers]") {
HeaderList<StringHeader> list(3);
REQUIRE(list.begin() == list.end());
list.add({"header1", "payload1"});
REQUIRE(list.begin() != list.end());
CHECK(++list.begin() == list.end());
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
int i = 0;
for (auto it = list.begin(); it != list.end(); ++it, ++i) {
CHECK(it->get_name().length() == 7);
if (i == 0) {
CHECK(it->get_name() == "header1");
}
else if (i == 1) {
CHECK(it->get_name() == "header2");
}
else if (i == 2) {
CHECK(it->get_name() == "header3");
}
}
//rewind end() iterator
CHECK((--list.end())->get_name() == "header3");
}
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION


@@ -24,6 +24,8 @@ using std::condition_variable;
using std::chrono::system_clock;
using std::chrono::seconds;
using std::chrono::milliseconds;
using std::chrono::time_point;
using std::chrono::duration_cast;
using std::ref;
using namespace cppkafka;
@@ -42,7 +44,7 @@ static Configuration make_consumer_config() {
Configuration config = {
{ "metadata.broker.list", KAFKA_TEST_INSTANCE },
{ "enable.auto.commit", false },
{ "group.id", "producer_test" },
{ "group.id", make_consumer_group_id() },
{ "api.version.request", true }
};
return config;
@@ -164,7 +166,7 @@ TEST_CASE("simple production", "[producer]") {
SECTION("message with key") {
const string payload = "Hello world! 2";
const string key = "such key";
const milliseconds timestamp{15};
auto timestamp = system_clock::now();
Producer producer(config);
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
.key(key)
@@ -181,9 +183,46 @@ TEST_CASE("simple production", "[producer]") {
CHECK(message.get_partition() == partition);
CHECK(!!message.get_error() == false);
REQUIRE(!!message.get_timestamp() == true);
CHECK(message.get_timestamp()->get_timestamp() == timestamp);
CHECK(message.get_timestamp()->get_timestamp() == duration_cast<milliseconds>(timestamp.time_since_epoch()));
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
SECTION("message with key and move-able headers") {
using Hdr = MessageBuilder::HeaderType;
const string payload = "Hello world! 2";
const string key = "such key";
const string header1, header2 = "", header3 = "header3";
const milliseconds timestamp{15};
Producer producer(config);
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
.key(key)
.payload(payload)
.timestamp(timestamp)
.header(Hdr{})
.header(Hdr{"", header2})
.header(Hdr{"header3", header3}));
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == 1);
const auto& message = messages[0];
CHECK(message.get_payload() == payload);
CHECK(message.get_key() == key);
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(message.get_partition() == partition);
CHECK(!!message.get_error() == false);
REQUIRE(!!message.get_timestamp() == true);
CHECK(message.get_timestamp()->get_timestamp() == timestamp);
//validate headers
REQUIRE(!!message.get_header_list());
REQUIRE(message.get_header_list().size() == 3);
CHECK(message.get_header_list().front() == Hdr{});
CHECK(message.get_header_list().at(1) == Hdr{"", header2});
CHECK(message.get_header_list().back() == Hdr{"header3", header3});
}
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
SECTION("message without message builder") {
const string payload = "Goodbye cruel world!";
const string key = "replay key";
@@ -315,6 +354,52 @@ TEST_CASE("multiple messages", "[producer]") {
}
}
+ #if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+ TEST_CASE("multiple messages with copy-able headers", "[producer][headers]") {
+     using Hdr = MessageBuilder::HeaderType;
+     size_t message_count = 2;
+     string payload = "Hello world with headers";
+     const string header1, header2 = "", header3 = "header3";
+     // Create a consumer and subscribe to this topic
+     Consumer consumer(make_consumer_config());
+     consumer.subscribe({ KAFKA_TOPICS[0] });
+     ConsumerRunner runner(consumer, message_count, KAFKA_NUM_PARTITIONS);
+     // Now create a producer and produce a message
+     Producer producer(make_producer_config());
+     MessageBuilder builder(KAFKA_TOPICS[0]);
+     builder.payload(payload)
+            .header(Hdr{})
+            .header(Hdr{"", header2})
+            .header(Hdr{"header3", header3});
+     producer.produce(builder);
+     producer.produce(builder);
+     // Check the builder still owns its headers after both produce calls
+     CHECK(!!builder.header_list());
+     CHECK(builder.header_list().size() == 3);
+     runner.try_join();
+     const auto& messages = runner.get_messages();
+     REQUIRE(messages.size() == message_count);
+     const auto& message = messages[0];
+     CHECK(message.get_payload() == payload);
+     CHECK(!!message.get_error() == false);
+     // validate headers
+     REQUIRE(!!message.get_header_list());
+     REQUIRE(message.get_header_list().size() == 3);
+     CHECK(message.get_header_list().front() == Hdr{});
+     CHECK(message.get_header_list().at(1) == Hdr{"", header2});
+     CHECK(message.get_header_list().back() == Hdr{"header3", header3});
+     // validate second message: equal contents, distinct underlying handles
+     CHECK(messages[0].get_header_list() == messages[1].get_header_list());
+     CHECK(messages[0].get_header_list().get_handle() != messages[1].get_header_list().get_handle());
+ }
+ #endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
TEST_CASE("multiple sync messages", "[producer][buffered_producer][sync]") {
size_t message_count = 10;
set<string> payloads;

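Note: the copy-able variant matters because the headers are added from lvalues, so the builder retains its own copy and can be produced repeatedly; each message then carries an equal but independently owned header list (same contents, distinct handles, as the last two CHECKs assert). A hedged sketch of that contract, reusing the helpers from the test above:

    #include <cassert>
    using Hdr = MessageBuilder::HeaderType;

    const string payload = "payload with headers";
    const string header3 = "header3";
    Producer producer(make_producer_config());
    MessageBuilder builder(KAFKA_TOPICS[0]);
    builder.payload(payload)
           .header(Hdr{"header3", header3});   // copied in; the builder keeps ownership
    producer.produce(builder);                 // message #1 gets its own copy
    producer.produce(builder);                 // message #2 gets its own copy too
    assert(builder.header_list().size() == 1); // the builder is still intact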

@@ -7,13 +7,14 @@
#include <condition_variable>
#include <catch.hpp>
#include <memory>
#include <iostream>
#include <stdexcept>
#include "cppkafka/cppkafka.h"
#include "test_utils.h"
using std::vector;
using std::move;
using std::string;
using std::exception;
using std::thread;
using std::set;
using std::mutex;
@@ -29,25 +30,29 @@ using std::chrono::system_clock;
using namespace cppkafka;
+ #define ENABLE_STRICT_RR_ORDER 0
//==================================================================================
// Helper functions
//==================================================================================
static Configuration make_producer_config() {
- Configuration config;
- config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+ Configuration config = {
+     { "metadata.broker.list", KAFKA_TEST_INSTANCE },
+     { "max.in.flight", 1 }
+ };
return config;
}
- static Configuration make_consumer_config(const string& group_id = "rr_consumer_test") {
-     Configuration config;
-     config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
-     config.set("enable.auto.commit", true);
-     config.set("enable.auto.offset.store", true);
-     config.set("auto.commit.interval.ms", 100);
-     config.set("group.id", group_id);
+ static Configuration make_consumer_config(const string& group_id = make_consumer_group_id()) {
+     Configuration config = {
+         { "metadata.broker.list", KAFKA_TEST_INSTANCE },
+         { "enable.auto.commit", false },
+         { "group.id", group_id },
+     };
return config;
}
+ #if ENABLE_STRICT_RR_ORDER
static vector<int> make_roundrobin_partition_vector(int total_messages) {
vector<int> partition_order;
for (int i = 0, partition = 0; i < total_messages+1; ++i) {
@@ -58,49 +63,12 @@ static vector<int> make_roundrobin_partition_vector(int total_messages) {
}
return partition_order;
}
+ #endif
//========================================================================
// TESTS
//========================================================================
TEST_CASE("serial consumer test", "[roundrobin consumer]") {
int messages_per_partition = 3;
int total_messages = KAFKA_NUM_PARTITIONS * messages_per_partition;
// Create a consumer and subscribe to the topic
Consumer consumer(make_consumer_config());
TopicPartitionList partitions;
for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace_back(KAFKA_TOPICS[0], i++));
consumer.assign(partitions);
// Start the runner with the original consumer
ConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);
// Produce messages so we stop the consumer
Producer producer(make_producer_config());
string payload = "Serial";
// push 3 messages in each partition
for (int i = 0; i < total_messages; ++i) {
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
}
producer.flush();
runner.try_join();
// Check that we have all messages
REQUIRE(runner.get_messages().size() == total_messages);
// messages should have sequential identical partition ids in groups of <messages_per_partition>
int expected_partition;
for (int i = 0; i < total_messages; ++i) {
if ((i % messages_per_partition) == 0) {
expected_partition = runner.get_messages()[i].get_partition();
}
REQUIRE(runner.get_messages()[i].get_partition() == expected_partition);
REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
}
}
TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
TopicPartitionList assignment;
int messages_per_partition = 3;
@@ -114,19 +82,23 @@ TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
PollConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);
// Produce messages so the consumer runner receives them and can finish
- Producer producer(make_producer_config());
+ BufferedProducer<string> producer(make_producer_config());
string payload = "RoundRobin";
// push 3 messages in each partition
for (int i = 0; i < total_messages; ++i) {
- producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
+ producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0])
+     .partition(i % KAFKA_NUM_PARTITIONS)
+     .payload(payload));
}
producer.flush();
runner.try_join();
// Check that we have all messages
REQUIRE(runner.get_messages().size() == total_messages);
+ #if ENABLE_STRICT_RR_ORDER
// Check that we have one message from each partition in desired order
vector<int> partition_order = make_roundrobin_partition_vector(total_messages+KAFKA_NUM_PARTITIONS);
int partition_idx;
@@ -135,12 +107,11 @@ TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
// find first polled partition index
partition_idx = runner.get_messages()[i].get_partition();
}
- REQUIRE(runner.get_messages()[i].get_partition() == partition_order[i+partition_idx]);
+ CHECK(runner.get_messages()[i].get_partition() == partition_order[i+partition_idx]);
REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
}
//============ resume original poll strategy =============//
//validate that once the round robin strategy is deleted, normal poll works as before
consumer.delete_polling_strategy();
@@ -149,7 +120,7 @@ TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
payload = "SerialPolling";
// push 3 messages in each partition
for (int i = 0; i < total_messages; ++i) {
- producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
+ producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
}
producer.flush();
serial_runner.try_join();
@@ -160,5 +131,11 @@ TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
for (int i = 0; i < total_messages; ++i) {
REQUIRE((string)serial_runner.get_messages()[i].get_payload() == payload);
}
+ #else
+     // Simple payload check
+     for (int i = 0; i < total_messages; ++i) {
+         REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
+     }
+ #endif
}
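Note: two changes make the assertions here deterministic: the plain Producer becomes a BufferedProducer whose sync_produce() does not return until the delivery report for that message arrives, and make_producer_config() now pins "max.in.flight" to 1 so at most one request is outstanding. Together they keep messages in production order per partition. The pattern, sketched with the test's own helpers:

    BufferedProducer<string> producer(make_producer_config());  // max.in.flight == 1
    for (int i = 0; i < total_messages; ++i) {
        // Blocks until this message is acknowledged, preserving strict ordering
        producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0])
                                  .partition(i % KAFKA_NUM_PARTITIONS)
                                  .payload(payload));
    }
    producer.flush();  // drains anything still buffered (a no-op after sync_produce)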


@@ -15,8 +15,7 @@ using Catch::TestCaseStats;
using Catch::Totals;
using Catch::Session;
- std::vector<std::string> KAFKA_TOPICS = {"cppkafka_test1", "cppkafka_test2"};
- int KAFKA_NUM_PARTITIONS = 3;
+ std::vector<std::string> KAFKA_TOPICS = {KAFKA_TOPIC_NAMES};
namespace cppkafka {

tests/test_utils.cpp (new file, 94 lines)

@@ -0,0 +1,94 @@
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <random>
#include "test_utils.h"
using std::chrono::duration_cast;
using std::chrono::milliseconds;
using std::chrono::seconds;
using std::chrono::system_clock;
using std::hex;
using std::move;
using std::numeric_limits;
using std::ostringstream;
using std::random_device;
using std::string;
using std::uniform_int_distribution;
using std::unique_ptr;
using std::vector;
//==================================================================================
// PollStrategyAdapter
//==================================================================================
PollStrategyAdapter::PollStrategyAdapter(Configuration config)
: Consumer(config) {
}
void PollStrategyAdapter::add_polling_strategy(unique_ptr<PollInterface> poll_strategy) {
strategy_ = move(poll_strategy);
}
void PollStrategyAdapter::delete_polling_strategy() {
strategy_.reset();
}
Message PollStrategyAdapter::poll() {
if (strategy_) {
return strategy_->poll();
}
return Consumer::poll();
}
Message PollStrategyAdapter::poll(milliseconds timeout) {
if (strategy_) {
return strategy_->poll(timeout);
}
return Consumer::poll(timeout);
}
vector<Message> PollStrategyAdapter::poll_batch(size_t max_batch_size) {
if (strategy_) {
return strategy_->poll_batch(max_batch_size);
}
return Consumer::poll_batch(max_batch_size);
}
vector<Message> PollStrategyAdapter::poll_batch(size_t max_batch_size, milliseconds timeout) {
if (strategy_) {
return strategy_->poll_batch(max_batch_size, timeout);
}
return Consumer::poll_batch(max_batch_size, timeout);
}
void PollStrategyAdapter::set_timeout(milliseconds timeout) {
if (strategy_) {
strategy_->set_timeout(timeout);
}
else {
Consumer::set_timeout(timeout);
}
}
milliseconds PollStrategyAdapter::get_timeout() {
if (strategy_) {
return strategy_->get_timeout();
}
return Consumer::get_timeout();
}
// Misc
string make_consumer_group_id() {
ostringstream output;
output << hex;
random_device rd;
uniform_int_distribution<uint64_t> distribution(0, numeric_limits<uint64_t>::max());
const auto now = duration_cast<seconds>(system_clock::now().time_since_epoch());
const auto random_number = distribution(rd);
output << now.count() << random_number;
return output.str();
}
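Note: both values go through the same hex-formatted stream, so the id is simply the epoch seconds in hex followed by a random 64-bit number in hex. With made-up values for illustration:

    // Hypothetical values, not real output:
    //   now.count()   == 0x60b7e2a1          -> "60b7e2a1"
    //   random_number == 0x1f3c9a4d5e6f7081  -> "1f3c9a4d5e6f7081"
    // make_consumer_group_id() then returns "60b7e2a11f3c9a4d5e6f7081";
    // the timestamp prefix varies across runs and the random suffix makes
    // collisions within the same second vanishingly unlikely.
    const std::string group_id = make_consumer_group_id();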


@@ -1,6 +1,7 @@
#ifndef CPPKAFKA_TEST_UTILS_H
#define CPPKAFKA_TEST_UTILS_H
#include <string>
+ #include <thread>
#include <vector>
#include "cppkafka/consumer.h"
@@ -8,7 +9,6 @@
#include "cppkafka/utils/consumer_dispatcher.h"
extern const std::vector<std::string> KAFKA_TOPICS;
- extern const int KAFKA_NUM_PARTITIONS;
using namespace cppkafka;
@@ -48,8 +48,8 @@ public:
void delete_polling_strategy();
Message poll();
Message poll(std::chrono::milliseconds timeout);
- MessageList poll_batch(size_t max_batch_size);
- MessageList poll_batch(size_t max_batch_size,
+ std::vector<Message> poll_batch(size_t max_batch_size);
+ std::vector<Message> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout);
void set_timeout(std::chrono::milliseconds timeout);
std::chrono::milliseconds get_timeout();
@@ -57,9 +57,14 @@ private:
std::unique_ptr<PollInterface> strategy_;
};
+ // Misc
+ std::string make_consumer_group_id();
using PollConsumerRunner = BasicConsumerRunner<PollStrategyAdapter>;
using ConsumerRunner = BasicConsumerRunner<Consumer>;
#include "test_utils_impl.h"
#endif // CPPKAFKA_TEST_UTILS_H
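Note: a hedged usage sketch of the adapter declared above. It assumes RoundRobinPollStrategy (cppkafka's stock PollInterface implementation under utils/) is constructible from a Consumer reference; the calls otherwise mirror the declaration exactly:

    #include "cppkafka/utils/roundrobin_poll_strategy.h"

    PollStrategyAdapter consumer(make_consumer_config());
    consumer.subscribe({ KAFKA_TOPICS[0] });
    // C++11-friendly (no std::make_unique): route polls through the strategy
    consumer.add_polling_strategy(
        std::unique_ptr<PollInterface>(new RoundRobinPollStrategy(consumer)));
    Message msg = consumer.poll();       // served by the round-robin strategy
    consumer.delete_polling_strategy();  // later polls fall back to Consumer::poll()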


@@ -1,7 +1,6 @@
#include <mutex>
#include <chrono>
#include <condition_variable>
#include "test_utils.h"
#include "cppkafka/utils/consumer_dispatcher.h"
using std::vector;
@@ -19,7 +18,6 @@ using cppkafka::Consumer;
using cppkafka::BasicConsumerDispatcher;
using cppkafka::Message;
- using cppkafka::MessageList;
using cppkafka::TopicPartition;
//==================================================================================
@@ -46,7 +44,8 @@ BasicConsumerRunner<ConsumerType>::BasicConsumerRunner(ConsumerType& consumer,
}
},
// EOF callback
- [&](typename BasicConsumerDispatcher<ConsumerType>::EndOfFile, const TopicPartition& topic_partition) {
+ [&](typename BasicConsumerDispatcher<ConsumerType>::EndOfFile,
+     const TopicPartition& topic_partition) {
if (number_eofs != partitions) {
number_eofs++;
if (number_eofs == partitions) {
@@ -89,7 +88,7 @@ BasicConsumerRunner<ConsumerType>::~BasicConsumerRunner() {
}
template <typename ConsumerType>
- const MessageList& BasicConsumerRunner<ConsumerType>::get_messages() const {
+ const std::vector<Message>& BasicConsumerRunner<ConsumerType>::get_messages() const {
return messages_;
}
@@ -100,73 +99,4 @@ void BasicConsumerRunner<ConsumerType>::try_join() {
}
}
- //==================================================================================
- // PollStrategyAdapter
- //==================================================================================
- inline
- PollStrategyAdapter::PollStrategyAdapter(Configuration config)
- : Consumer(config) {
- }
- inline
- void PollStrategyAdapter::add_polling_strategy(std::unique_ptr<PollInterface> poll_strategy) {
-     strategy_ = std::move(poll_strategy);
- }
- inline
- void PollStrategyAdapter::delete_polling_strategy() {
-     strategy_.reset();
- }
- inline
- Message PollStrategyAdapter::poll() {
-     if (strategy_) {
-         return strategy_->poll();
-     }
-     return Consumer::poll();
- }
- inline
- Message PollStrategyAdapter::poll(milliseconds timeout) {
-     if (strategy_) {
-         return strategy_->poll(timeout);
-     }
-     return Consumer::poll(timeout);
- }
- inline
- MessageList PollStrategyAdapter::poll_batch(size_t max_batch_size) {
-     if (strategy_) {
-         return strategy_->poll_batch(max_batch_size);
-     }
-     return Consumer::poll_batch(max_batch_size);
- }
- inline
- MessageList PollStrategyAdapter::poll_batch(size_t max_batch_size,
-                                             milliseconds timeout) {
-     if (strategy_) {
-         return strategy_->poll_batch(max_batch_size, timeout);
-     }
-     return Consumer::poll_batch(max_batch_size, timeout);
- }
- inline
- void PollStrategyAdapter::set_timeout(milliseconds timeout) {
-     if (strategy_) {
-         strategy_->set_timeout(timeout);
-     }
-     else {
-         Consumer::set_timeout(timeout);
-     }
- }
- inline
- milliseconds PollStrategyAdapter::get_timeout() {
-     if (strategy_) {
-         return strategy_->get_timeout();
-     }
-     return Consumer::get_timeout();
- }