310 Commits
v0.1 ... master

Author SHA1 Message Date
Stephane Bourque
f128c1764b Merge branch 'mfontanini:master' into master 2022-08-01 21:58:24 -07:00
stephb9959
03b12561af Merge 2022-08-01 21:55:17 -07:00
stephb9959
14f5261806 Merge 2022-08-01 21:54:29 -07:00
stephb9959
d7e08fa69c Merge 2022-08-01 21:54:17 -07:00
Matias Fontanini
76d175e354 Merge pull request #293 from SpaceIm/fix/cppstd
CMake: set min required C++ standard to C++11
2022-04-08 09:02:52 -07:00
Matias Fontanini
93893e1386 Merge pull request #292 from SpaceIm/fix/export-template
Do not try to export template declaration in shared lib
2022-04-08 08:59:20 -07:00
SpaceIm
fc97759d93 set min C++ standard to C++11
do not hardcode -std=c++11; let CMake set the minimum required C++ standard of cppkafka if the consumer does not force CMAKE_CXX_STANDARD
2022-04-07 19:44:15 +02:00
SpaceIm
025d8ed7e1 do not try to export template declaration
it doesn't make sense to export a template declaration
2022-04-07 19:37:08 +02:00
Matias Fontanini
5a119f689f Bump version to 0.4.0 2021-06-02 16:41:09 -07:00
Matias Fontanini
dd3966fb45 Merge pull request #281 from ClickHouse-Extras/fix_failover
Fix for failover issue.
2021-06-02 16:04:02 -07:00
Mikhail Filimonov
dabb2d3aa8 Fix for failover issue.
When the consumer enters the group and gets no assignment (for example, there are not enough partitions in the topic),
librdkafka waits for the rebalancing sequence to be finished by calling assign with the empty list of partitions
(just as passed by librdkafka to the rebalance callback).

But cppkafka passed nullptr instead of the empty list (which means unassign), so the consumer got stuck forever in that
state, unable to pick up the partition during the next rebalance (failover), because the previous rebalance sequence
was never finished.

Fixes https://github.com/mfontanini/cppkafka/issues/273, https://github.com/ClickHouse/ClickHouse/issues/21118, etc.
2021-06-01 23:36:25 +02:00
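To make the failure mode above concrete, here is a minimal sketch (not the actual cppkafka patch) of the rebalance handling the commit describes, using librdkafka's C API: on an assignment, the partition list from the callback must be forwarded as-is even when it is empty, while only a revocation maps to the nullptr/unassign call.

```c++
#include <librdkafka/rdkafka.h>

// Rebalance callback sketch: finish every rebalance sequence by calling
// rd_kafka_assign() with exactly what librdkafka handed us.
static void rebalance_cb(rd_kafka_t* rk, rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t* partitions,
                         void* /*opaque*/) {
    if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
        // Forward the list even if partitions->cnt == 0; passing nullptr here
        // would mean "unassign" and leave the rebalance sequence unfinished.
        rd_kafka_assign(rk, partitions);
    }
    else if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) {
        // Only a revocation should translate into an unassign (nullptr).
        rd_kafka_assign(rk, nullptr);
    }
}
```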
Matias Fontanini
57a599d99c Merge pull request #277 from oxidecomputer/master
Adds support for building on Solaris-based systems
2021-05-02 14:30:37 -07:00
Benjamin Naecker
544972e48f Adds support for building on Solaris-based systems 2021-04-28 14:13:56 -07:00
Matias Fontanini
847f530e6e Merge pull request #227 from neatlife/fix-macos-rt
remove rt lib if mac os
2021-04-28 08:26:25 -07:00
stephb9959
2c24b6e6b9 Changing to compile under OSX and Linux 2021-04-19 20:34:32 -07:00
Matias Fontanini
01bd066d57 Merge pull request #267 from jlcordeiro/feature/boost_cleanup
remove unused boost includes
2021-04-16 09:22:36 -07:00
Matias Fontanini
8fd0ef8fc5 Merge pull request #272 from psigen/bugfix_readonly
Add fix for in-source build of cppkafka.h.
2021-04-03 09:14:34 -07:00
Pras Velagapudi
85f1c0fcb1 Add fix for in-source build of cppkafka.h.
This fixes an issue where a cppkafka.h is generated directly
within the source directory, which causes problems in sandboxed build
environments where the source directory is mounted read-only.

This PR changes the configure_file() directive to point to the
binary directory, and uses the install directives to move the
generated file into the build output.
2021-03-04 03:47:32 -05:00
Joao Cordeiro
62ec1d82c1 remove unused boost includes 2020-10-18 12:30:27 +01:00
Matias Fontanini
5e4b350806 Merge pull request #265 from accelerated/find-package
Fix CMAKE warning
2020-09-05 09:45:20 -07:00
Alexander Damian
e932d9567f Fix CMAKE warning 2020-09-04 18:20:38 -04:00
Matias Fontanini
a2056c36bf Merge pull request #263 from accelerated/sync-produce
Call flush termination callbacks from sync_produce
2020-08-31 19:59:37 -07:00
Alexander Damian
8cfd4595f6 Call flush termination callbacks from sync_produce 2020-08-31 21:56:10 -04:00
Matias Fontanini
f117720f66 Merge pull request #248 from filimonov/patch-1
Try to fix travis
2020-08-31 18:46:44 -07:00
Matias Fontanini
e5dfd5085c Merge pull request #261 from hanickadot/master
Ability to block producer in case the underlying queue is full.
2020-08-15 18:03:15 -07:00
Hana Dusíková
a032f9a1e8 Ability to block producer in case the underlying queue is full. 2020-08-14 19:29:55 +02:00
Matias Fontanini
b06e64ef5b Merge pull request #257 from accelerated/resume-scope
Bring base class resume into scope
2020-07-19 11:23:53 -07:00
Alexander Damian
a48bf89292 Bring base class into scope 2020-07-19 12:40:52 -04:00
filimonov
31a58d433e Update .travis.yml 2020-05-24 19:54:11 +02:00
filimonov
4467743340 Update .travis.yml 2020-05-24 19:43:30 +02:00
filimonov
e8b4f5a8e9 Update .travis.yml 2020-05-24 19:29:06 +02:00
filimonov
eb1105d839 Update .travis.yml 2020-05-24 17:32:35 +02:00
filimonov
098735413b Update .travis.yml 2020-05-24 15:47:53 +02:00
filimonov
91a3be0a8f Update .travis.yml 2020-05-24 10:52:15 +02:00
Matias Fontanini
e2000b0741 Merge pull request #246 from LesnyRumcajs/patch-2
fixed typo in example (log message)
2020-05-23 09:34:41 -07:00
Matias Fontanini
ca3a1321ec Merge pull request #247 from filimonov/kafka_destroy_flags3
Add support for rd_kafka_destroy_flags.
2020-05-23 09:24:38 -07:00
Mikhail Filimonov
244726c251 Style changes 2020-05-22 17:24:19 +02:00
filimonov
7aa60a1409 Add latest rdkafka version to build matrix 2020-05-22 00:16:12 +02:00
filimonov
487585fd17 Try to fix travis
Old URL returns 404; picked a new one from the official webpage: https://kafka.apache.org/downloads
2020-05-22 00:10:49 +02:00
Mikhail Filimonov
3b67ba072a Add support for rd_kafka_destroy_flags. 2020-05-21 23:38:17 +02:00
LesnyRumcajs
14423bba40 fixed typo log message 2020-05-12 10:49:08 +02:00
Matias Fontanini
006642cdb2 Merge pull request #237 from accelerated/buff_prod_comments
Added clarifications and comments to the BufferedProducer class
2020-04-18 10:56:51 -07:00
Matias Fontanini
679f58dee3 Merge pull request #241 from accelerated/timeout-overloads
Added timeout overloads for consumer and handle classes
2020-04-08 08:28:26 -07:00
Docker RHEL
b2b0d16fee Added timeout overloads for consumer and handle classes 2020-04-08 15:23:05 +00:00
Matias Fontanini
2ce0ae4a62 Merge pull request #238 from accelerated/poll_strategy
Added member functions for static consumers
2020-04-08 07:14:58 -07:00
Alexander Damian
935a34238b Added implementation for thread-aware ack monitoring 2020-03-05 14:56:36 -05:00
Alexander Damian
5a057e4c99 Wait until the ack is received without timing out.
wait_for_acks() should default to infinite timeout since the
original implementation was never timing out.
2020-02-16 21:06:14 -05:00
Alexander Damian
ffcf8956bd Allow passing in via cmake all the kafka config options for testing: broker, partitions and topics 2020-02-16 20:11:33 -05:00
Alexander Damian
2287e0994b Express async_flush in terms of flush since the logic is identical except for the timeout 2020-02-16 20:11:33 -05:00
Alexander Damian
92e46aa6cb Proper implementation of flush() with timeout 2020-02-16 20:11:20 -05:00
Alexander Damian
a4532ed336 Use erase directly
Added revoke() member function
2020-02-09 21:25:36 -05:00
Alexander Damian
68ae525eba Added member functions for static consumers 2020-02-08 22:34:45 -05:00
Alexander Damian
e401e97b40 Added clarifications and comments to the BufferedProducer class 2020-02-08 21:24:12 -05:00
Matias Fontanini
7d097df34d Merge pull request #235 from accelerated/promise_bug
Fix tracker promise from throwing when set multiple times
2020-02-04 06:23:39 -08:00
Alexander Damian
fbbd5bc5a6 Changed int to size_t 2020-02-03 22:04:50 -05:00
Alexander Damian
bda2f4156d Fix tracker promise from throwing when set multiple times 2020-02-03 16:46:28 -05:00
Matias Fontanini
f1de729d4e Merge pull request #234 from accelerated/null_topic
Fix crash when message handle is valid but topic is null
2020-01-27 09:09:44 -08:00
Alexander Damian
81ce56a1bd Fix case when message handle is valid but topic is null 2020-01-27 10:30:12 -05:00
suxiaolin
a2a46f0ec8 remove rt lib if mac os 2019-11-03 16:12:57 +08:00
Matias Fontanini
c3b4580fef Merge pull request #210 from ych/pc_config
Remove boost dependency from pkg-config template file
2019-09-05 09:39:52 -07:00
ych
de06b7ad4e Add boost include dir to pkg-config template file 2019-09-05 11:49:01 +08:00
ych
9a0f196d34 Remove boost dependency from pkg-config template file
Boost does not provide a pkg-config file, so running the
 'pkg-config --exists cppkafka' command with the boost dependency listed
 always returns non-zero, and PKG_SEARCH_MODULE in cmake
 uses that command to check the status of cppkafka.

The boost dependency should therefore be removed so that general usage works.
2019-09-05 11:49:01 +08:00
Matias Fontanini
e5aec82ddf Merge pull request #219 from accelerated/master
Fix RdKafka_LIBRARY_DIR-NOTFOUND
2019-09-04 08:44:05 -07:00
Alexander Damian
58111bdf62 Removed RdKafka_LIBRARY_DIR as per code review 2019-09-03 14:56:46 -04:00
Alex Damian
fd19648d5a Fix RdKafka_LIBRARY_DIR-NOTFOUND 2019-08-31 23:54:44 -04:00
Matias Fontanini
4a3ec91f87 Merge pull request #215 from accelerated/partition
Added method to set the partition
2019-07-29 09:11:01 -07:00
Alexander Damian
a85a87bb9b Added method to set the partition 2019-07-26 10:35:20 -04:00
Matias Fontanini
a357529cc0 Merge pull request #204 from accelerated/cmake
Add CMake configuration file and export installed targets
2019-07-17 12:55:25 -07:00
Alexander Damian
dd6ec44c27 Updated pkg_config file name 2019-07-17 10:20:43 -04:00
Alexander Damian
20b806037b Added options to conditionally disable installation of configuration files 2019-07-03 18:01:28 -04:00
Alexander Damian
ad800a5765 Added RdKafka hex version so that FindRdKafka.cmake can compile the test code. Changed find_dependency to find_package for the RdKafka config so that the script is not automatically exited on failure 2019-07-02 15:30:33 -04:00
Alexander Damian
4bddb2241c Added INSTALL_RPATH and INSTALL_RPATH_USE_LINK_PATH to CppKafka target to allow discoverability of the RdKafka.so w/o having to update LD_LIBRARY_PATH 2019-07-02 14:52:21 -04:00
Alexander Damian
097184c648 Added COMPONENT tags to the install targets. Also when installing TARGETS, the COMPONENT cannot appear after INCLUDES DESTINATION as it will be considered part of the destination. 2019-07-01 17:58:38 -04:00
Alexander Damian
bbc78f8dbb Fixed conflicts 2019-07-01 17:24:26 -04:00
Matias Fontanini
18d0b0c00b TEMP: Always put lib under "lib" when installing
This is a temporary workaround until the issue is properly fixed.
2019-07-01 13:46:57 -07:00
Alexander Damian
591e8abe4f Changed include_directories to taget_include_directories and removed Boost path since it's automatically pulled in via Boost::headers 2019-07-01 15:36:10 -04:00
Alexander Damian
ee30fabc2a Added INTERFACE_LINK_DIRECTORIES to the RdKafka::rdkafka properties 2019-07-01 14:57:19 -04:00
Alexander Damian
0d2356f7dd Changed link libraries to PUBLIC so they get exported in the CppKafkaTargets.cmake 2019-07-01 11:52:38 -04:00
Alexander Damian
11a6e4213b Remove comment from config file relating to not finding the RdKafka config file. 2019-06-30 19:39:31 -04:00
Alexander Damian
c4b6a95438 Fixes per code review 2019-06-30 19:30:13 -04:00
Alexander Damian
40e8559158 Remove warning for cmake policy CMP0074 2019-06-29 12:38:13 -04:00
Alexander Damian
d20cab69f3 Add CMake configuration file and export installed targets 2019-06-29 01:50:29 -04:00
Matias Fontanini
c733e0b8d8 Merge pull request #199 from accelerated/pc_config
Removed dependency from rdkafka since it has its own pkg_config file.…
2019-05-21 08:43:49 -07:00
Alexander Damian
07b3c4957d Changed method to determine bitness 2019-05-21 08:52:03 -04:00
Alexander Damian
107cff7ed7 Removed dependency from rdkafka since it has its own pkg_config file. Also added BITNESS detection 2019-05-20 15:27:07 -04:00
Matias Fontanini
1a981f2674 Merge pull request #196 from mfontanini/travis-fix
Fix kafka URL in travis build
2019-05-19 10:21:47 -07:00
Matias Fontanini
8eb7751ff3 Fix kafka URL in travis build 2019-05-19 09:38:25 -07:00
Matias Fontanini
4b25f928a1 Merge pull request #194 from accelerated/master
Fixed pkg_config file
2019-05-17 11:25:12 -07:00
Alexander Damian
6adf1e82c9 Fixed pkg_config file template by adding boost dependency and correcting rdkafka library name 2019-05-17 14:19:28 -04:00
Matias Fontanini
bb0beb6db6 Define constructor for new exception
Fixes #193
2019-05-17 09:14:35 -07:00
Matias Fontanini
bbc3af67d9 Merge pull request #189 from accelerated/fix_versioning
Fixed version macros
2019-05-16 09:21:23 -07:00
Matias Fontanini
a0530d79a9 Merge pull request #190 from accelerated/pc_config
Support for generating pkg-config file
2019-05-16 09:19:03 -07:00
Alexander Damian
d148fe18d5 Added config option for PKGCONFIG install location 2019-05-15 13:44:07 -04:00
Alexander Damian
6499ef9869 Merge branch 'master' of https://github.com/mfontanini/cppkafka into pc_config 2019-05-10 16:10:37 -04:00
Alexander Damian
24e94fbfbc Added boost include dirs to pkg config file 2019-05-08 10:28:49 -04:00
Matias Fontanini
b91350d6a4 Merge pull request #187 from accelerated/master
Added commit termination callback functionality in BackoffComitter
2019-05-02 08:36:17 -07:00
Alexander Damian
bd43d3c767 Support for generating pkg-config file 2019-04-26 16:19:54 -04:00
Alexander Damian
40d0221052 Fixed version macros 2019-04-26 14:36:03 -04:00
Alexander Damian
6e076810a0 Added ActionTerminatedException to BackoffPerformer 2019-04-26 11:12:52 -04:00
Alexander Damian
81a131ff16 Return true when RD_KAFKA_RESP_ERR__NO_OFFSET is received 2019-04-24 17:39:59 -04:00
Alexander Damian
effdf7fb95 Removed ReturnType. Throw on error from inside do_commit() as well as from perform() 2019-04-24 16:42:56 -04:00
Alexander Damian
d84b75ca9d Merge branch 'master' of https://github.com/mfontanini/cppkafka 2019-04-24 10:52:53 -04:00
Alexander Damian
0c1119727b Replaced termination callback with throwing exception 2019-04-24 10:40:29 -04:00
proller
e8c4397b66 Fix build on some libcxx implementations (#175) 2019-04-24 09:16:14 -04:00
accelerated
470a5b6857 Set CMAKE_CXX_FLAGS only when not set 2019-04-24 09:16:14 -04:00
proller
df4eaa0735 Fix build on some libcxx implementations (#175) 2019-03-07 10:13:15 -08:00
Matias Fontanini
de85a329cb Merge pull request #174 from accelerated/cxxflags
Set CMAKE_CXX_FLAGS only when not set
2019-03-04 20:19:57 -08:00
Matias Fontanini
a17a6f3b55 Merge pull request #172 from snar/freebsd-fix
Fix FreeBSD builds
2019-03-04 20:19:09 -08:00
accelerated
a935d1cb2e Set CMAKE_CXX_FLAGS only when not set 2019-03-02 18:51:25 -05:00
Alexandre Snarskii
ca729ef6f0 Fix FreeBSD builds 2019-03-01 13:47:40 +03:00
Matias Fontanini
c9c46d7a1f Merge pull request #171 from accelerated/async_flush
Added wait_for_acks(0) when calling async_flush
2019-02-23 11:27:13 -08:00
accelerated
ace18d5d7b Added wait_for_acks(0) when calling async_flush 2019-02-21 15:21:16 -05:00
Pavel Pimenov
5bfc047263 Fix detected by PVS-Studio (#167)
* Fix PVS-Studio
V591 	Non-void function should return a value.
V519 	The 'new_item' variable is assigned values twice successively.
2019-02-20 18:30:48 -08:00
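For readers unfamiliar with those diagnostic codes, the following hypothetical snippets (not the cppkafka code the commit fixed) illustrate what V591 and V519 flag.

```c++
// V591: a non-void function where one path falls off the end without a
// return statement; the fix is to make every path return a value.
int sign(int x) {
    if (x >= 0) {
        return 1;
    }
    return -1;  // without this line, V591 would fire
}

// V519: a variable assigned twice in succession, so the first value is
// never read; the fix is to drop the dead first assignment.
void fill_item() {
    int new_item = 0;
    new_item = 42;   // V519 points at this pair of back-to-back stores
    (void)new_item;
}
```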
Matias Fontanini
4a887607b3 Merge pull request #164 from accelerated/offset_store
Added consumer legacy offset store API
2019-02-07 09:00:17 -08:00
accelerated
9bf535ac49 Simplify round-robin test due to intermittent errors 2019-02-06 22:45:12 -05:00
accelerated
8ae5e9d573 Fixed buffer test array initialization warning for clang 2019-02-06 18:47:36 -05:00
accelerated
e19d84b839 Added compile time check for store_offsets() api 2019-02-06 17:47:02 -05:00
accelerated
4f4c9e9c91 Changes per code review 2019-02-06 13:01:57 -05:00
accelerated
284e1c57a9 Changed store_offsets() to use the actual position from the assignment 2019-02-04 14:23:59 -05:00
accelerated
7bc03185a8 Added legacy offset store API 2019-02-04 12:26:04 -05:00
Matias Fontanini
872ee0442b Merge pull request #163 from accelerated/status_version_fix
Fix message status version
2019-01-31 09:03:40 -08:00
accelerated
63327461bd Fix message status version 2019-01-31 09:29:48 -05:00
Matias Fontanini
efa4e95a18 Merge pull request #157 from demin80/buffer-size-comparison-fix2
added type conversion to avoid signed-vs-unsigned-comparison warning
2019-01-19 08:12:55 -08:00
demin80
755e9f10c2 added missing return 2019-01-18 16:58:03 -05:00
Matias Fontanini
fb4c5edc8e Merge pull request #159 from accelerated/check_error_fix
Fix error check in partition list
2019-01-16 12:27:57 -08:00
accelerated
dc732445f7 Fixes #158 2019-01-16 15:09:27 -05:00
demin80
5a34955fae added type conversion to avoid signed-vs-unsigned-comparison warning 2019-01-16 10:52:14 -05:00
Matias Fontanini
05cc8304df Merge pull request #153 from demin80/hi-priority-queue-fix
Added a high-priority queue to BufferedProducer to avoid message re-ordering
2019-01-14 20:08:47 -08:00
Matias Fontanini
2c6a47d68d Merge pull request #156 from psigen/patch-1
Use CMAKE_STATIC_LIBRARY_* macros for FindRdKafka
2019-01-14 09:18:50 -08:00
Pras Velagapudi
85b7e579e2 Use CMAKE_STATIC_LIBRARY_* macros for FindRdKafka
In the current implementation, library suffixes are hard-coded from a hand-maintained list. Instead of maintaining this list, we can use the CMake macros for platform-specific library prefixes/suffixes.

E.g. https://cmake.org/cmake/help/v3.0/variable/CMAKE_STATIC_LIBRARY_SUFFIX.html

This also fixes library resolution on Mac OSX, which does not currently work with the native `.dylib` suffix for shared libraries.
2019-01-12 15:51:11 -05:00
demin80
93c2edf6ba refactored by adding retry_mutex_ and replacing bools with enums; fixed formatting issues 2019-01-10 14:37:46 -05:00
demin80
71c4e02143 Revised the implementation based on the reviewers' response 2019-01-08 13:48:26 -05:00
demin80
00370c981d Fixed spacing issues 2019-01-07 14:42:32 -05:00
demin80
97229ebfd9 Added a high-priority queue to BufferedProducer to avoid message re-ordering 2019-01-07 14:39:09 -05:00
Matias Fontanini
4ba6b38b6e Merge pull request #149 from accelerated/queue_full
Added queue full notification
2019-01-06 16:52:12 -08:00
accelerated
4a6b6779ad Updated callback description 2019-01-06 17:40:39 -05:00
accelerated
97d1bb9434 Added queue full notify callback 2019-01-06 17:35:55 -05:00
accelerated
ed81ce446d Added queue full notification 2019-01-06 16:59:00 -05:00
Matias Fontanini
520465510e Revert "Add support for Buffer construction via raw arrays"
This reverts commit 74acf65fa6.
2018-12-17 09:18:28 -08:00
Matias Fontanini
40ee64c5c1 Merge pull request #140 from tgaldes/master
Add support for Buffer construction via raw arrays
2018-12-16 11:42:29 -08:00
Matias Fontanini
3ffb0f1fa8 Merge pull request #151 from mfontanini/travis-fix
Fix travis build
2018-12-16 10:15:33 -08:00
Matias Fontanini
7c5616da07 Use sudo: required in travis file 2018-12-16 10:02:06 -08:00
Matias Fontanini
f14a4b9e8c Merge pull request #150 from accelerated/flush_bug
Bug with message leak in BufferedProducer::flush(timeout)
2018-12-16 09:45:35 -08:00
Matias Fontanini
ccc6738265 Merge pull request #148 from accelerated/flush_failure
Added flush/produce termination callbacks
2018-12-16 09:32:36 -08:00
accelerated
8b431c5421 changed rbegin to begin 2018-12-14 16:33:20 -05:00
accelerated
4a24971d3f Fixed bug with message leak in BufferedProducer::flush(timeout) 2018-12-14 16:08:57 -05:00
accelerated
8dd5428c49 Added similar logic for ProduceTerminationCallback 2018-12-13 15:04:12 -05:00
accelerated
0b9b7bab11 Added flush termination callback 2018-12-13 10:43:29 -05:00
Matias Fontanini
ab002fe119 Merge pull request #147 from accelerated/raw_arrays
Support for raw array Buffer constructor
2018-12-10 18:15:37 -08:00
accelerated
06ddd79a29 Support for raw array Buffer constructor 2018-12-10 15:42:04 -05:00
Matias Fontanini
d89840b5f0 Merge pull request #144 from accelerated/header_fix
Header fixes and header copy considerations
2018-12-09 21:47:35 -08:00
accelerated
25c2eaa998 Changed iterator logic to capture header list by reference 2018-12-06 10:37:02 -05:00
accelerated
1c80af9b68 Added constructor from another HeaderList type 2018-12-05 20:04:10 -05:00
accelerated
fe0c7e7dd5 Fixed end() iterator and also applied default copy-constructor instead of passing null handle in BasicMessageBuilder 2018-12-05 12:15:25 -05:00
accelerated
93e066a1c1 * Added asserts when building a HeaderList and removed checks for handle
validity.
* Removed explicit move semantic when cloning a MessageBuilder.
* Renamed clone() to try_clone() in ClonablePtr class.
2018-12-04 11:12:28 -05:00
accelerated
6bbddcd5d5 Fixed Message::set_header_list as per review comments. Changed ClonablePtr to use clone() internally 2018-12-03 09:48:32 -05:00
accelerated
e96dc6d1fc Added comments 2018-12-02 15:00:07 -05:00
accelerated
0b7931bfb8 Added Buffer::Buffer(iter, iter) constructor overload 2018-12-02 14:42:02 -05:00
accelerated
57bddabfd0 Removed clone_handle method and made ClonablePtr::clone private 2018-12-02 14:15:20 -05:00
accelerated
c7ba478582 Header fixes 2018-11-30 09:55:26 -05:00
Matias Fontanini
a9a0693e2a Merge pull request #143 from mfontanini/fix-travis
Fix kafka download URL in travis file
2018-11-26 20:03:07 -08:00
Matias Fontanini
5aa4bc08a3 Fix kafka download URL in travis file 2018-11-26 19:47:58 -08:00
Matias Fontanini
5a4481dc28 Merge pull request #142 from farnazj/master
Add <array> header
2018-11-26 19:06:45 -08:00
Farnaz Jahanbakhsh
d06cd222fe include <array> 2018-11-26 00:26:04 +00:00
Tyler Galdes
74acf65fa6 Add support for Buffer construction via raw arrays 2018-11-19 19:59:08 -05:00
Matias Fontanini
4ad2685d61 Merge pull request #138 from tgaldes/master
Add support for constructing Buffer from std::array
2018-11-19 16:58:34 -08:00
Tyler Galdes
248d1b0638 Delete construction of buffer with rvalue arrays 2018-11-19 19:48:02 -05:00
Tyler Galdes
b48036fe62 use std::array functions for pointer and size of data 2018-11-19 11:49:17 -05:00
Tyler Galdes
757d2b623f Add support for constructing Buffer from std::array 2018-11-16 19:49:52 -05:00
Matias Fontanini
4b7a10ec90 Merge pull request #136 from accelerated/master
Remove setting log level in the constructor
2018-11-13 14:50:15 -08:00
accelerated
b366cf4bf6 Remove setting log level in the constructor 2018-11-13 15:28:06 -05:00
Matias Fontanini
7b4c3e163f Merge pull request #135 from accelerated/events
Added API description for Message::get_status
2018-11-12 10:26:30 -08:00
accelerated
70aef6681d Added API description for Message::get_status 2018-11-12 12:14:20 -05:00
Matias Fontanini
29cb02b756 Merge pull request #134 from accelerated/events
Added support for message status and setting the event mask
2018-11-12 09:05:27 -08:00
accelerated
9859e54522 Added support for message status and setting the event mask 2018-11-12 10:30:54 -05:00
Matias Fontanini
9f6556da0c Merge pull request #125 from mfontanini/events
Event implementation
2018-11-10 10:43:05 -08:00
Matias Fontanini
46481d879f Use BufferedProducer in round robin consumer test
Hopefully this will get rid of the sporadic failures
2018-11-10 10:26:03 -08:00
Matias Fontanini
25e3aacf4a Add compile guard for rd_kafka_event_stats 2018-11-10 10:26:03 -08:00
Matias Fontanini
1f1f1c253b Fix build issue when using rd_kafka_message_latency in old rdkafka 2018-11-10 10:26:03 -08:00
Matias Fontanini
24960c0a49 Build library on travis using rdkafka 0.9.4 as well 2018-11-10 10:26:03 -08:00
Matias Fontanini
4ac837d831 Disable event consumption test 2018-11-10 10:26:03 -08:00
Matias Fontanini
b242e2c35c Allow setting background event callback on configuration handles 2018-11-10 10:26:03 -08:00
Matias Fontanini
19baa03cea Allow getting background queue out of kafka handle base 2018-11-10 10:26:03 -08:00
Matias Fontanini
8dc94869fd Move get_queue behavior into Queue class 2018-11-10 10:25:31 -08:00
Matias Fontanini
71fb76b8e1 Add dumb test that extracts event from queue 2018-11-10 10:25:31 -08:00
Matias Fontanini
c7e1dcb60a Allow checking if an Event is valid 2018-11-10 10:25:31 -08:00
Matias Fontanini
e73c997a0c Allow getting Events out of Queues 2018-11-10 10:25:31 -08:00
Matias Fontanini
b46991db7e Add Event class 2018-11-10 10:25:31 -08:00
Alex Damian
b0ddceda1f Message timestamp refactoring and log level changes (#133)
* Message timestamp refactoring and log level changes

* Changes per code review
2018-11-07 08:36:57 -08:00
Matias Fontanini
451d60295a Remove MessageTimestamp constructor from time_point (#129) 2018-10-26 18:57:28 -07:00
Alex Damian
57268e666c Added time_point overloads for creating timestamps. (#128)
* Added time_point overloads for creating timestamps.

* aliased std::chrono types
2018-10-25 07:39:22 -07:00
Alex Damian
ad9a1e4a49 If timeout is 0, the function should at least run once (#123) 2018-10-22 07:55:29 -07:00
Matias Fontanini
416a7d43ce Minor documentation fixes 2018-10-21 10:17:10 -07:00
Matias Fontanini
a2d17a6f45 Test suite fixes (#124)
* Move polling strategy adapter definition into test_utils.cpp

* Use a random consumer group id in every test
2018-10-20 20:32:32 -07:00
Matias Fontanini
0d54acbc64 Flush producer in example 2018-10-19 08:42:10 -07:00
Matias Fontanini
b2ba4cbfa3 Add comment regarding flushing producer 2018-10-19 08:41:36 -07:00
multiprogramm
2b66fd3a22 Fix windows linker errors (#120) 2018-10-17 10:43:33 -07:00
Alex Damian
fbe3759fed Header support implementation (#115)
* header support implementation

* Fixed issue when ptr is null and doesn't have a cloner function

* Code complete with test cases

updated travis file with v0.11.5

* Added compile time check for rdkafka header support version

* Changes per last code review

* Using brace list initializers
2018-10-16 10:58:05 -07:00
Alex Damian
9af4330c6d Allocators (#118)
* Added allocator support for consumers and buffered producer

* Changed MessageList back to std::vector<Message> for consistency with the allocator API
2018-10-16 08:57:11 -07:00
Alex Damian
d77e7466b8 changed assert with if statement (#116) 2018-10-06 09:28:45 -07:00
Matias Fontanini
f458514fb2 Bump version to 0.2 2018-09-20 18:23:32 -07:00
Zenon Parker
d3ef9cad32 Fix #111: Rename LogLevel enums (#112) 2018-08-30 11:28:12 -07:00
shashank khare
df04b27e22 Fix #104: memory leak in poll_batch (#107)
poll_batch currently leaks memory while initialising the queue
returned by rd_kafka_queue_get_consumer. The fix, as suggested
by @mfontanini and done here, is to initialise the queue as a
Queue object so it's cleaned up when going out of scope.
2018-07-26 08:56:42 -07:00
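As a rough sketch of the idea (using librdkafka's C API directly rather than cppkafka's actual Queue class): the handle returned by rd_kafka_queue_get_consumer() carries a reference that must be released with rd_kafka_queue_destroy(), so wrapping it in an owning object guarantees the cleanup that the leaking code was missing.

```c++
#include <memory>
#include <librdkafka/rdkafka.h>

// Owning wrapper: the queue reference is released automatically when the
// unique_ptr goes out of scope, which is what the leak fix relies on.
using QueueHandle =
    std::unique_ptr<rd_kafka_queue_t, decltype(&rd_kafka_queue_destroy)>;

QueueHandle consumer_queue(rd_kafka_t* rk) {
    return QueueHandle(rd_kafka_queue_get_consumer(rk), &rd_kafka_queue_destroy);
}
```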
Alex Damian
d6f8129207 Fix for ref count on queue handles (#92)
* Fix for ref count on queue handles

* added check for rdkafka version

* changed to runtime version checking
2018-07-23 17:07:35 -07:00
Sergey Batanov
3238c94f43 Fix #83: define LIBRDKAFKA_STATICLIB (#99)
* Fix #83: define LIBRDKAFKA_STATICLIB

* Moved LIBRDKAFKA_STATICLIB definition
2018-07-04 08:26:25 -07:00
Sergey Batanov
081f8d80a0 Fix #97: struct switched to class. (#98)
To avoid name mangling difference in MSVC.
2018-07-02 16:57:52 -07:00
Alex Damian
577bbb0242 added error check for partition list (#90) 2018-06-26 08:57:49 -07:00
Matias Fontanini
6158d932c0 Example fixes (#96)
* Add example for kafka buffered producer

* Add notes regarding bool returned in produce failure callback

* Fix example names
2018-06-25 19:19:23 -07:00
Alex Damian
5c72f3fe28 Added pause/resume for producers (#87)
* Added pause/resume for producers

* Moved pause/resume functions to KafkaHandleBase
2018-06-25 09:16:57 -07:00
Alex Damian
069ea3df8e Specific linking option for rdkafka library (#94) 2018-06-25 07:03:11 -07:00
Alex Damian
c5aca985b8 Invoke error callback if present instead of log callback (#93) 2018-06-20 09:11:24 -07:00
Alex Damian
eb46b8808e Bug fixes for sync flush and add_tracker (#91)
* fixes for sync flush and also add_tracker

* added flag for flush
2018-06-18 14:46:31 -07:00
Alex Damian
b8f4be5e1b Increase buffer construction requirements (#88)
* Fix crash in Buffer with null pointer and non-zero length

* Throw inside Buffer constructor instead
2018-06-18 09:09:48 -07:00
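A simplified, hypothetical sketch of the invariant this commit enforces (not cppkafka's real Buffer constructor): a null data pointer combined with a non-zero length is rejected up front instead of being allowed to crash later.

```c++
#include <cstddef>
#include <stdexcept>

// Hypothetical, stripped-down buffer view illustrating the new requirement.
class BufferSketch {
public:
    BufferSketch(const void* data, std::size_t size)
    : data_(data), size_(size) {
        if (data_ == nullptr && size_ != 0) {
            throw std::invalid_argument("null data with non-zero length");
        }
    }
private:
    const void* data_;
    std::size_t size_;
};
```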
Matias Fontanini
9a20b588c5 Merge pull request #86 from accelerated/flush_with_timeout
Added timeout to flush and wait_for_acks
2018-06-14 10:14:44 -07:00
accelerated
3c72eb5752 Added timeout to flush and wait_for_acks 2018-06-14 11:43:12 -04:00
Matias Fontanini
157b7ec997 Merge pull request #79 from accelerated/purge
Added purge (aka async_flush) functionality
2018-06-12 09:30:58 -07:00
accelerated
f220062e40 Changed purge to async_flush 2018-06-12 10:23:48 -04:00
accelerated
7530b9f9e4 added method to empty the buffer when max limit is reached 2018-06-12 09:21:55 -04:00
accelerated
3cf9bb53e9 Added purge (aka async_flush) functionality 2018-06-12 09:17:41 -04:00
Matias Fontanini
0c7a3b0c25 Merge pull request #78 from accelerated/producer_retry
Producer retry
2018-06-11 20:45:37 -07:00
accelerated
972a008aa4 Changed test to produce the same MessageBuilder twice 2018-06-11 15:45:41 -04:00
accelerated
a4eefacaa1 concurrency issues in MessageBuilder internal data 2018-06-10 18:48:51 -04:00
accelerated
23810654ab Removed dependency on Producer and dr_callback_proxy 2018-06-10 13:00:56 -04:00
accelerated
f746653841 Added logic to conditionally enable internal data 2018-06-10 13:00:56 -04:00
accelerated
597c026555 Prevent MessageInternal structures if there is no delivery callback registered 2018-06-10 13:00:56 -04:00
accelerated
71e6e2e4e5 added retry logic for producers 2018-06-10 13:00:55 -04:00
Matias Fontanini
f15b59cb13 Fix compacted topic processor test
* Use buffered producer on compacted topic processor test

* Add include directives for callback invoker where missing

* Consume until EOF on compacted topic processor test
2018-06-09 14:49:50 -07:00
sachnk
5dcede6411 #81: add detail headers to installation (#82) 2018-06-09 10:32:10 -07:00
Alex Damian
5cad740aea Added access to the internal Consumer in the backoff committer (#75)
* Added access to the internal Consumer and provided non-default constructor for BackoffPerformer

* added sync_produce and deleted the value constructor

* removed sync_produce methods

* removed value constructor in backoff_performer class
2018-06-04 11:48:20 -07:00
Alex Damian
9714bec5bf Callback invoker to sink all thrown exceptions (#74) 2018-06-01 16:35:56 -07:00
Matias Fontanini
15fdab6943 Merge pull request #63 from accelerated/partition_poll
round robin polling for assigned partitions
2018-05-30 11:15:02 -07:00
accelerated
ea9601ba1b Changes per code review 2018-05-29 10:05:56 -04:00
accelerated
ffc64b9a5a Remove valgrind warning 2018-05-29 10:05:56 -04:00
accelerated
556f15a43f Allow access to the user-supplied delivery callback. 2018-05-29 09:59:19 -04:00
accelerated
6144330835 added test case for polling strategy; refactored the strategy class 2018-05-29 09:55:30 -04:00
accelerated
169ea4f8ed Fixes to queue polling and making them non-owning 2018-05-29 09:55:30 -04:00
accelerated
65f35dcd39 fix cppkafka::MessageList declaration 2018-05-29 09:55:30 -04:00
accelerated
532d83b225 Removed reserve() and synced consumer::poll_batch and queue::poll_batch functions 2018-05-29 09:55:30 -04:00
accelerated
a1ce130bfd changes as per code review 2018-05-29 09:55:29 -04:00
accelerated
71afaba3e1 added support for different topics/partitions 2018-05-29 09:55:29 -04:00
accelerated
15be627f8e initial polling version 2018-05-29 09:55:29 -04:00
Alex Damian
429ec92369 Buffered producer thread safe (#72)
* Thread safe buffered producer

* Using single mutex version

* Changed based on feedback

* Changes based on latest review

* Added flush counter
2018-05-28 18:33:36 -07:00
Alex Damian
f543810515 Allow metadata object to be non-owning (#73) 2018-05-28 11:30:01 -07:00
Alex Damian
841e632fbd Allow access to the user-supplied delivery callback. (#66)
* Allow access to the user-supplied delivery callback.

* Remove valgrind warning

* Added buffer size watermark

* added ability to produce a message directly

* Updated on_delivery_report function
2018-05-24 20:59:41 -07:00
Alex Damian
46c396f729 Pause/resume a consumer by topic (#67)
* Pause a consumer by topic

* Changes per review comments

* convert rvalue to value

* Refactored code to provide a more generic way of getting partition subsets

* Changes per code review and added test cases

* Modified loop to use binary search instead of linear

* Simplify find_matches test cases
2018-05-23 13:03:47 -07:00
amirshavit
ee71b3979a Add yield (#64)
Simply calls rd_kafka_yield; allows consumers and producers to abort the
current callback dispatcher.
2018-05-23 09:26:09 -07:00
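A minimal sketch of what the new call amounts to, assuming the cppkafka handle exposes the underlying rd_kafka_t* via a getter such as get_handle() (as KafkaHandleBase does): rd_kafka_yield() makes the currently running poll loop or callback dispatcher return early.

```c++
#include <cppkafka/consumer.h>
#include <librdkafka/rdkafka.h>

// Abort the dispatcher currently running inside consumer callbacks.
// Assumption: get_handle() returns the underlying rd_kafka_t*, as provided
// by KafkaHandleBase in cppkafka.
void abort_current_dispatch(cppkafka::Consumer& consumer) {
    rd_kafka_yield(consumer.get_handle());
}
```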
Alex Damian
d9feb5c3db Added retry limit for the backoff performer class (#70) 2018-05-23 09:16:12 -07:00
Azat Khuzhin
2451c74c4f Avoid SIGSEGV during destructing KafkaHandleBase (with set_log_callback()) (#68)
Configuration should exist for logging, since it contains the copy of
the callback.
2018-05-19 13:24:57 -07:00
Alex Damian
ae74814791 Log error in case consumer destructor throws (#61) 2018-04-27 07:03:47 -07:00
Matias Fontanini
ee0c0829a4 Remove extra line at the end of cppkafka.h 2018-04-23 19:23:09 -07:00
Matias Fontanini
59d8adc4a4 Sort header files before generating cppkafka.h 2018-04-23 19:22:41 -07:00
Matias Fontanini
cb2c8877d8 Move tests to use catch instead of googletest (#56)
* Port buffer test to use Catch2

* Move compacted topic processor test to Catch2

* Move configuration tests to Catch2

* Rename configuration test cases

* Move topic partition list test to Catch2

* Move handle base tests to Catch2

* Move producer tests to Catch2

* Move consumer tests to catch2

* Use CHECK on tests when appropriate

* Remove googletest

* Show tests' progress as they run

* Update message when Catch2 is not checked out

* Remove references to googletest

* Run cppkafka_tests manually on travis

* Print amount of time taken by each test case
2018-04-24 03:20:48 +01:00
Alex Damian
30b3652a94 auto-gen main header file (#59) 2018-04-24 02:32:43 +01:00
Alex Damian
8fc6a0f02d Print offset when dumping partition object (#55) 2018-04-24 02:32:14 +01:00
Alex Damian
83a963c1db Added commit for current assignment and message state checks (#53) 2018-04-20 23:51:44 +01:00
Andrei Ovsiankin
c95d790547 Added cmake option for disabling tests (#51)
* Added cmake option for disabling tests

* Renamed cmake option for tests

* comment fixed

* Misprint
2018-04-20 05:00:34 -07:00
Matias Fontanini
eee60407fa Use CPPKAFKA_API for friends and free functions 2018-04-11 10:47:53 -07:00
Matias Fontanini
05d5a0404b Merge pull request #47 from accelerated/master
Fixed deprecated error function call
2018-04-03 19:04:39 -07:00
accelerated
3d1402f53a Fixed font 2018-04-03 12:04:26 -04:00
accelerated
6db2cdcecf Fixed font 2018-04-03 12:02:21 -04:00
accelerated
018a1f52d9 Fixed deprecated error librdkafka function 2018-04-03 11:58:21 -04:00
Matias Fontanini
df12b5fd5c Fix linking issues on Windows
Relates to #10
2018-03-20 08:12:04 -07:00
Matias Fontanini
9513b01b8e Don't use quotes when expanding DEPENDENCIES 2018-03-19 07:04:59 -07:00
Matias Fontanini
86ed154c92 Link with ws2_32.lib on Windows 2018-03-18 18:57:09 -07:00
Matias Fontanini
69e30f9e74 Merge pull request #39 from arvidn/destruction
destruct callbacks (and their closures)
2018-03-17 15:42:21 -07:00
Matias Fontanini
675954ef75 Merge pull request #40 from arvidn/poll-batch-performance
improve performance of Consumer::poll_batch()
2018-03-17 15:41:21 -07:00
arvidn
98b9839ff9 improve performance of Consumer::poll_batch() by allocating the results array once, rather than incremental growth. Also avoid checking every single pointer in the returned array, just use the returned counter instead 2018-02-08 12:22:11 +01:00
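A rough sketch of that optimization, using librdkafka's batch-consume call directly (the real change lives in Consumer::poll_batch()): the output buffer is sized once up front and the returned count is trusted instead of probing every slot for null.

```c++
#include <cstddef>
#include <vector>
#include <librdkafka/rdkafka.h>

// Preallocate the whole result array once and shrink it to the count that
// librdkafka reports, instead of growing the vector per message.
std::vector<rd_kafka_message_t*> poll_batch_sketch(rd_kafka_queue_t* queue,
                                                   std::size_t max_messages,
                                                   int timeout_ms) {
    std::vector<rd_kafka_message_t*> messages(max_messages, nullptr);
    ssize_t count = rd_kafka_consume_batch_queue(queue, timeout_ms,
                                                 messages.data(),
                                                 messages.size());
    messages.resize(count > 0 ? static_cast<std::size_t>(count) : 0);
    return messages;  // each message must later be freed with rd_kafka_message_destroy()
}
```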
arvidn
d173526f99 destruct callbacks (and their closures) to ensure there are no reference cycles when destructing the consumer. This solves a hang in rd_kafka_destroy 2018-02-08 11:51:36 +01:00
Matias Fontanini
deff8b1ff3 Merge pull request #27 from alkenet/patch-1
Update README.md
2017-11-28 20:28:00 -08:00
atravers
cc7d183ff1 Update README.md
Should there not be a flush at the end? In my case, the application quit without the message ending up on Kafka.
2017-11-29 13:03:02 +09:00
Matias Fontanini
1817115784 Merge pull request #22 from mfontanini/mfontanini-patch-1
Use latest kafka version in travis
2017-11-21 09:19:21 -08:00
Matias Fontanini
9c09243633 Use latest kafka version in travis 2017-11-21 06:51:53 -08:00
Matias Fontanini
5b63c642f9 Merge pull request #21 from spektom/patch-1
Check for existing custom target: uninstall
2017-11-21 06:47:23 -08:00
Michael Spector
c874ccc43f Check for existing custom target: uninstall
When including `cppkafka` in a CMake project containing another project that defines the same target (`curl`, specifically, which defines the same target [here](3ea7679057/CMakeLists.txt (L1293))), I get the following error:

```
add_custom_target cannot create target "uninstall" because another target  with the same name already exists
```

This if condition fixes the issue.
2017-11-21 10:14:29 +02:00
Matias Fontanini
af368bba04 Merge pull request #16 from arvidn/include-dir
make sure dependent targets get the include directory added correctly
2017-11-11 14:02:32 -08:00
arvidn
c7715733bf make sure dependent targets get the include directory added correctly 2017-10-25 20:50:59 +02:00
Matias Fontanini
80e0ed5007 Merge pull request #18 from arvidn/consumer-typo
fix typo in consumer documentation
2017-10-25 11:28:56 -07:00
arvidn
a1dc9d115e fix typo in consumer documentation 2017-10-25 20:26:35 +02:00
Matias Fontanini
edb2737263 Merge pull request #17 from arvidn/typo
fix typo in README
2017-10-24 14:08:05 -07:00
arvidn
5bd61e8915 fix typo in README 2017-10-24 22:26:54 +02:00
Matias Fontanini
0e96f87eeb Wait for kafka to come up in travis build 2017-10-14 10:40:36 -07:00
Matias Fontanini
702279d0e9 Add travis CI badge to README.md 2017-10-14 10:28:42 -07:00
Matias Fontanini
94dac08d79 Merge pull request #14 from mfontanini/travis
Add travis CI build file
2017-10-14 10:28:25 -07:00
Matias Fontanini
29fa7bed19 Add travis CI build file 2017-10-14 08:41:52 -07:00
Matias Fontanini
179e669c06 Assume testing kafka cluster is >= 0.10 2017-10-14 08:41:33 -07:00
Matias Fontanini
853396acab Fix Consumer::poll_batch test 2017-10-14 08:41:33 -07:00
Matias Fontanini
5889c322c2 Fix link issues when building in clang 2017-10-02 10:31:02 -07:00
Matias Fontanini
17da880854 Persist message's user data when retrying on buffered producer 2017-09-17 15:46:23 -07:00
Matias Fontanini
4c9aa6fcd4 Catch exceptions on Consumer destructor
Fixes #6
2017-09-14 19:26:38 -07:00
Matias Fontanini
37cb16c3f5 Check for null pointer on Consumer::poll_batch
Fixes #8
2017-09-06 23:44:34 -07:00
Matias Fontanini
04d5b41c6b Add a project description to README.md 2017-08-28 21:29:35 -07:00
Matias Fontanini
c3011c9eed Remove redundant line in README.md 2017-08-28 21:24:20 -07:00
Matias Fontanini
d0c794b978 Increase expected acks on buffered producer after producing 2017-08-24 15:04:54 -07:00
Matias Fontanini
86d4bc8037 Make BasicConsumerDispatcher::Pauser a template type 2017-08-21 14:47:06 -07:00
Matias Fontanini
0d4b9ef2f6 Don't keep looping on NO_OFFSET error on backoff committer 2017-08-03 09:33:09 -07:00
Matias Fontanini
1582f6156d Allow consuming message batches
Fixes #3
2017-07-17 19:17:57 -07:00
Matias Fontanini
2340046544 Clear messages acked/expected acks on buffered producer 2017-07-11 10:17:58 -07:00
Matias Fontanini
b7a0dce710 Add a generic "event" event on ConsumerDispatcher 2017-07-04 18:23:42 -07:00
Matias Fontanini
08815e97c0 Add CPPKAFKA_API to utils classes 2017-07-04 18:09:06 -07:00
Matias Fontanini
9e6315fcc2 Make ConsumerDispatcher a template class 2017-07-04 18:07:22 -07:00
Matias Fontanini
191956b4ca Pause/resume consumption on throttle 2017-06-17 09:39:50 -07:00
Matias Fontanini
4af48ff0e7 Allow throttling on ConsumerDispatcher 2017-06-17 08:52:48 -07:00
Matias Fontanini
556dac7015 Add backoff performer class 2017-06-16 19:27:18 -07:00
Matias Fontanini
4cd03aea3c Cleanup ConsumerDispatcher code 2017-06-11 14:35:33 -07:00
Matias Fontanini
2e6bfd64d3 Fix comment typo 2017-06-10 19:37:48 -07:00
Matias Fontanini
ed71ab2daa Add example using ConsumerDispatcher 2017-06-10 19:26:42 -07:00
Matias Fontanini
52822fdb61 Move some small functions into header files 2017-06-10 19:15:53 -07:00
Matias Fontanini
bb5fb490ce Check if all callbacks match a signature on ConsumerDispatcher 2017-06-10 17:55:17 -07:00
Matias Fontanini
4369b75695 Use tags on EOF and timeout callbacks on ConsumerDispatcher 2017-06-10 17:39:23 -07:00
Matias Fontanini
f0ec0bfb10 Add ConsumerDispatcher class 2017-06-10 15:25:28 -07:00
90 changed files with 8416 additions and 1283 deletions

.gitmodules (6 lines changed)

@@ -1,3 +1,3 @@
[submodule "third_party/googletest"]
path = third_party/googletest
url = https://github.com/google/googletest.git
[submodule "third_party/Catch2"]
path = third_party/Catch2
url = https://github.com/catchorg/Catch2.git

.travis.yml (new file, 43 lines)

@@ -0,0 +1,43 @@
language: cpp
sudo: required
compiler:
- gcc
- clang
env:
- RDKAFKA_VERSION=v0.9.4
- RDKAFKA_VERSION=v0.11.6
os:
- linux
addons:
apt:
packages:
- libboost-dev
- libboost-program-options-dev
- zookeeper
- zookeeperd
before_script:
- KAFKA_VERSION=2.11-2.2.0
- wget https://archive.apache.org/dist/kafka/2.2.0/kafka_$KAFKA_VERSION.tgz
- tar xvzf kafka_$KAFKA_VERSION.tgz
- ./kafka_$KAFKA_VERSION/bin/kafka-server-start.sh ./kafka_$KAFKA_VERSION/config/server.properties > /dev/null 2> /dev/null &
- git clone https://github.com/edenhill/librdkafka.git
- while ! echo "asd" | nc localhost 9092; do sleep 1; done
- ./kafka_$KAFKA_VERSION/bin/kafka-topics.sh --create --zookeeper localhost:2181 --topic cppkafka_test1 --partitions 3 --replication-factor 1
- ./kafka_$KAFKA_VERSION/bin/kafka-topics.sh --create --zookeeper localhost:2181 --topic cppkafka_test2 --partitions 3 --replication-factor 1
script:
- cd librdkafka
- git checkout $RDKAFKA_VERSION
- ./configure --prefix=./install && make libs && make install
- cd ..
- mkdir build && cd build
- cmake .. -DCPPKAFKA_CMAKE_VERBOSE=ON -DRDKAFKA_ROOT=./librdkafka/install -DKAFKA_TEST_INSTANCE=localhost:9092
- make examples
- make tests
- ./tests/cppkafka_tests

CMakeLists.txt

@@ -1,30 +1,84 @@
cmake_minimum_required(VERSION 2.8.1)
project(cppkafka)
cmake_minimum_required(VERSION 3.9.2)
project(CppKafka)
if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0")
# Use <package>_ROOT variable to find configuration files
cmake_policy(SET CMP0074 NEW)
endif()
include(GNUInstallDirs)
include(CMakePackageConfigHelpers)
# Set the version number.
set(CPPKAFKA_VERSION_MAJOR 0)
set(CPPKAFKA_VERSION_MINOR 1)
set(CPPKAFKA_VERSION "${CPPKAFKA_VERSION_MAJOR}.${CPPKAFKA_VERSION_MINOR}")
set(CPPKAFKA_VERSION_MINOR 4)
set(CPPKAFKA_VERSION_REVISION 0)
set(CPPKAFKA_VERSION "${CPPKAFKA_VERSION_MAJOR}.${CPPKAFKA_VERSION_MINOR}.${CPPKAFKA_VERSION_REVISION}")
set(RDKAFKA_MIN_VERSION "0.9.4")
set(RDKAFKA_MIN_VERSION_HEX 0x00090400)
if(MSVC)
if (NOT CMAKE_CXX_FLAGS)
# Set default compile flags for the project
if(MSVC)
# Don't always use Wall, since VC's /Wall is ridiculously verbose.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W3")
set(CMAKE_CXX_FLAGS "/W3")
# Disable VC secure checks, since these are not really issues
add_definitions("-D_CRT_SECURE_NO_WARNINGS=1")
add_definitions("-D_SCL_SECURE_NO_WARNINGS=1")
add_definitions("-DNOGDI=1")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall")
add_definitions("-DNOMINMAX=1")
else()
set(CMAKE_CXX_FLAGS "-Wall")
endif()
endif()
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
# Set output directories
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
# Build output checks
option(CPPKAFKA_CMAKE_VERBOSE "Generate verbose output." OFF)
option(CPPKAFKA_BUILD_SHARED "Build cppkafka as a shared library." ON)
option(CPPKAFKA_DISABLE_TESTS "Disable build of cppkafka tests." OFF)
option(CPPKAFKA_DISABLE_EXAMPLES "Disable build of cppkafka examples." OFF)
option(CPPKAFKA_BOOST_STATIC_LIBS "Link with Boost static libraries." ON)
option(CPPKAFKA_BOOST_USE_MULTITHREADED "Use Boost multithreaded libraries." ON)
option(CPPKAFKA_RDKAFKA_STATIC_LIB "Link with Rdkafka static library." OFF)
option(CPPKAFKA_EXPORT_PKGCONFIG "Generate 'cppkafka.pc' file" ON)
option(CPPKAFKA_EXPORT_CMAKE_CONFIG "Generate CMake config, target and version files." ON)
# Add FindRdKafka.cmake
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
if (NOT CPPKAFKA_CONFIG_DIR)
set(CPPKAFKA_CONFIG_DIR lib/cmake/${PROJECT_NAME})
endif()
# Maintain previous compatibility
if (RDKAFKA_ROOT_DIR)
set(RdKafka_ROOT ${RDKAFKA_ROOT_DIR})
elseif (RDKAFKA_ROOT)
set(RdKafka_ROOT ${RDKAFKA_ROOT})
endif()
if (RdKafka_ROOT)
if (NOT IS_ABSOLUTE ${RdKafka_ROOT})
set(RdKafka_ROOT "${CMAKE_SOURCE_DIR}/${RdKafka_ROOT}")
endif()
endif()
if (RDKAFKA_DIR)
set(RdKafka_DIR ${RDKAFKA_DIR}) # For older versions of find_package
if (NOT IS_ABSOLUTE ${RdKafka_ROOT})
set(RdKafka_DIR "${CMAKE_SOURCE_DIR}/${RdKafka_DIR}")
endif()
endif()
# Disable output from find_package macro
if (NOT CPPKAFKA_CMAKE_VERBOSE)
set(FIND_PACKAGE_QUIET QUIET)
endif()
if(CPPKAFKA_BUILD_SHARED)
message(STATUS "Build will generate a shared library. "
"Use CPPKAFKA_BUILD_SHARED=0 to perform a static build")
@@ -33,19 +87,67 @@ else()
message(STATUS "Build will generate a static library.")
set(CPPKAFKA_LIBRARY_TYPE STATIC)
add_definitions("-DCPPKAFKA_STATIC=1")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
if (CPPKAFKA_RDKAFKA_STATIC_LIB)
add_definitions("-DLIBRDKAFKA_STATICLIB")
endif()
if (NOT CPPKAFKA_CONFIG_DIR)
set(CPPKAFKA_CONFIG_DIR lib/cmake/${PROJECT_NAME})
endif()
if (NOT CPPKAFKA_PKGCONFIG_DIR)
set(CPPKAFKA_PKGCONFIG_DIR share/pkgconfig)
endif()
# Look for Boost (just need boost.optional headers here)
find_package(Boost REQUIRED)
find_package(RdKafka REQUIRED)
find_package(Boost REQUIRED ${FIND_PACKAGE_QUIET})
if (Boost_FOUND)
find_package(Boost COMPONENTS program_options ${FIND_PACKAGE_QUIET})
set(Boost_USE_STATIC_LIBS ${CPPKAFKA_BOOST_STATIC_LIBS})
set(Boost_USE_MULTITHREADED ${CPPKAFKA_BOOST_USE_MULTITHREADED})
include_directories(${Boost_INCLUDE_DIRS})
link_directories(${Boost_LIBRARY_DIRS})
if (CPPKAFKA_CMAKE_VERBOSE)
message(STATUS "Boost include dir: ${Boost_INCLUDE_DIRS}")
message(STATUS "Boost library dir: ${Boost_LIBRARY_DIRS}")
message(STATUS "Boost use static libs: ${Boost_USE_STATIC_LIBS}")
message(STATUS "Boost is multi-threaded: ${CPPKAFKA_BOOST_USE_MULTITHREADED}")
message(STATUS "Boost libraries: ${Boost_LIBRARIES}")
endif()
endif()
# Try to find the RdKafka configuration file if present.
# This will search default system locations as well as RdKafka_ROOT and RdKafka_Dir paths if specified.
find_package(RdKafka ${FIND_PACKAGE_QUIET} CONFIG)
set(RDKAFKA_TARGET_IMPORTS ${RdKafka_FOUND})
if (NOT RdKafka_FOUND)
message(STATUS "RdKafkaConfig.cmake not found. Attempting to find module instead...")
find_package(RdKafka REQUIRED ${FIND_PACKAGE_QUIET} MODULE)
if (NOT RdKafka_FOUND)
message(FATAL_ERROR "RdKafka module not found. Please set RDKAFKA_ROOT to the install path or RDKAFKA_DIR pointing to the RdKafka configuration file location.")
else()
message(STATUS "RdKafka module found.")
endif()
else()
message(STATUS "RdKafka configuration file found: ${RdKafka_CONFIG}")
endif()
add_subdirectory(src)
add_subdirectory(include)
add_subdirectory(include/cppkafka)
add_subdirectory(examples)
# Examples target
if (NOT CPPKAFKA_DISABLE_EXAMPLES AND Boost_PROGRAM_OPTIONS_FOUND)
add_subdirectory(examples)
else()
message(STATUS "Disabling examples")
endif()
# Add a target to generate API documentation using Doxygen
find_package(Doxygen QUIET)
find_package(Doxygen ${FIND_PACKAGE_QUIET})
if(DOXYGEN_FOUND)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile.in
@@ -60,39 +162,28 @@ if(DOXYGEN_FOUND)
)
endif(DOXYGEN_FOUND)
set(GOOGLETEST_ROOT ${CMAKE_SOURCE_DIR}/third_party/googletest)
if(EXISTS "${GOOGLETEST_ROOT}/CMakeLists.txt")
set(GOOGLETEST_INCLUDE ${GOOGLETEST_ROOT}/googletest/include)
set(GOOGLETEST_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/googletest)
set(GOOGLETEST_LIBRARY ${GOOGLETEST_BINARY_DIR}/googletest)
include(ExternalProject)
ExternalProject_Add(
googletest
DOWNLOAD_COMMAND ""
SOURCE_DIR ${GOOGLETEST_ROOT}
BINARY_DIR ${GOOGLETEST_BINARY_DIR}
CMAKE_CACHE_ARGS "-DBUILD_GTEST:bool=ON" "-DBUILD_GMOCK:bool=OFF"
"-Dgtest_force_shared_crt:bool=ON"
INSTALL_COMMAND ""
)
if(NOT CPPKAFKA_DISABLE_TESTS)
set(CATCH_ROOT ${CMAKE_SOURCE_DIR}/third_party/Catch2)
if(EXISTS ${CATCH_ROOT}/CMakeLists.txt)
set(CATCH_INCLUDE ${CATCH_ROOT}/single_include)
enable_testing()
add_subdirectory(tests)
# Make sure we build googletest before anything else
add_dependencies(cppkafka googletest)
else()
message(STATUS "Disabling tests because submodule Catch2 isn't checked out")
endif()
else()
message(STATUS "Disabling tests")
endif()
# Confiugure the uninstall script
configure_file(
if(NOT TARGET uninstall)
# Confiugure the uninstall script
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
IMMEDIATE @ONLY
)
)
# Add uninstall target
add_custom_target(uninstall
# Add uninstall target
add_custom_target(uninstall
COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
endif()

README.md

@@ -1,7 +1,11 @@
# cppkafka
---
High level C++ wrapper for _rdkafka_
---
# cppkafka: high level C++ wrapper for _rdkafka_
[![Build status](https://travis-ci.org/mfontanini/cppkafka.svg?branch=master)](https://travis-ci.org/mfontanini/cppkafka)
_cppkafka_ allows C++ applications to consume and produce messages using the Apache Kafka
protocol. The library is built on top of [_librdkafka_](https://github.com/edenhill/librdkafka),
and provides a high level API that uses modern C++ features to make it easier to write code
while keeping the wrapper's performance overhead to a minimum.
# Features
@@ -9,10 +13,12 @@ High level C++ wrapper for _rdkafka_
simple, less error prone way.
* _cppkafka_ provides an API to produce messages as well as consuming messages, but the latter is
only supported via the high level consumer API. _cppkafka_ requires **rdkakfa >= 0.9.4** in
only supported via the high level consumer API. _cppkafka_ requires **rdkafka >= 0.9.4** in
order to use it. Other wrapped functionalities are also provided, like fetching metadata,
offsets, etc.
* _cppkafka_ provides message header support. This feature requires **rdkafka >= 0.11.4**.
* _cppkafka_ tries to add minimal overhead over _librdkafka_. A very thin wrapper for _librdkafka_
messages is used for consumption so there's virtually no overhead at all.
@@ -22,7 +28,7 @@ _cppkafka_'s API is simple to use. For example, this code creates a producer tha
into some partition:
```c++
#include <cppkafka/producer.h>
#include <cppkafka/cppkafka.h>
using namespace std;
using namespace cppkafka;
@@ -39,6 +45,7 @@ int main() {
// Produce a message!
string message = "hey there!";
producer.produce(MessageBuilder("my_topic").partition(0).payload(message));
producer.flush();
}
```
@@ -47,41 +54,41 @@ int main() {
In order to compile _cppkafka_ you need:
* _librdkafka >= 0.9.4_
* _CMake_
* A compiler with good C++11 support (e.g. gcc >= 4.8). This was tested successfully on
_g++ 4.8.3_.
* The boost library. _cppkafka_ only requires boost.optional, which is a header only library,
so this doesn't add any additional runtime dependencies.
* _CMake >= 3.9.2_
* A compiler with good C++11 support (e.g. gcc >= 4.8). This was tested successfully on _g++ 4.8.3_.
* The boost library (for boost::optional)
Now, in order to build, just run:
```Shell
mkdir build
cd build
cmake ..
cmake <OPTIONS> ..
make
make install
```
## CMake options
If you have installed _librdkafka_ on a non standard directory, you can use the
`RDKAFKA_ROOT_DIR` cmake parameter when configuring the project:
The following cmake options can be specified:
* `RDKAFKA_ROOT` : Specify a different librdkafka install directory.
* `RDKAFKA_DIR` : Specify a different directory where the RdKafkaConfig.cmake is installed.
* `BOOST_ROOT` : Specify a different Boost install directory.
* `CPPKAFKA_CMAKE_VERBOSE` : Generate verbose output. Default is `OFF`.
* `CPPKAFKA_BUILD_SHARED` : Build cppkafka as a shared library. Default is `ON`.
* `CPPKAFKA_DISABLE_TESTS` : Disable build of cppkafka tests. Default is `OFF`.
* `CPPKAFKA_DISABLE_EXAMPLES` : Disable build of cppkafka examples. Default is `OFF`.
* `CPPKAFKA_BOOST_STATIC_LIBS` : Link with Boost static libraries. Default is `ON`.
* `CPPKAFKA_BOOST_USE_MULTITHREADED` : Use Boost multi-threaded libraries. Default is `ON`.
* `CPPKAFKA_RDKAFKA_STATIC_LIB` : Link to Rdkafka static library. Default is `OFF`.
* `CPPKAFKA_CONFIG_DIR` : Install location of the cmake configuration files. Default is `lib/cmake/cppkafka`.
* `CPPKAFKA_PKGCONFIG_DIR` : Install location of the .pc file. Default is `share/pkgconfig`.
* `CPPKAFKA_EXPORT_PKGCONFIG` : Generate `cppkafka.pc` file. Default is `ON`.
* `CPPKAFKA_EXPORT_CMAKE_CONFIG` : Generate CMake config, target and version files. Default is `ON`.
Example:
```Shell
cmake .. -DRDKAFKA_ROOT_DIR=/some/other/dir
```
Note that finding _librdkafka_ will succeed iff there's an _include_ and _lib_
directories inside the specified path, including both the _rdkafka.h_ header
and the _librdkafka_ library file.
---
By default, a shared library will be built. If you want to perform a static build,
use the _CPPKAFKA_BUILD_SHARED_ parameter:
```Shell
cmake .. -DCPPKAFKA_BUILD_SHARED=0
cmake -DRDKAFKA_ROOT=/some/other/dir -DCPPKAFKA_BUILD_SHARED=OFF ...
```
# Using
@@ -91,6 +98,13 @@ If you want to use _cppkafka_, you'll need to link your application with:
* _cppkafka_
* _rdkafka_
If using CMake, this is simplified by doing:
```cmake
find_package(CppKafka REQUIRED)
target_link_libraries(<YourLibrary> CppKafka::cppkafka)
```
# Documentation
You can generate the documentation by running `make docs` inside the build directory. This requires
@@ -99,4 +113,3 @@ _Doxygen_ to be installed. The documentation will be written in html format at
Make sure to check the [wiki](https://github.com/mfontanini/cppkafka/wiki) which includes
some documentation about the project and some of its features.

cmake/FindRdKafka.cmake

@@ -1,39 +1,75 @@
find_path(RDKAFKA_ROOT_DIR
NAMES include/librdkafka/rdkafka.h
)
# This find module helps find the RdKafka module. It exports the following variables:
# - RdKafka_INCLUDE_DIR : The directory where rdkafka.h is located.
# - RdKafka_LIBNAME : The name of the library, i.e. librdkafka.a, librdkafka.so, etc.
# - RdKafka_LIBRARY_PATH : The full library path i.e. <path_to_binaries>/${RdKafka_LIBNAME}
# - RdKafka::rdkafka : Imported library containing all above properties set.
find_path(RDKAFKA_INCLUDE_DIR
if (CPPKAFKA_RDKAFKA_STATIC_LIB)
set(RDKAFKA_PREFIX ${CMAKE_STATIC_LIBRARY_PREFIX})
set(RDKAFKA_SUFFIX ${CMAKE_STATIC_LIBRARY_SUFFIX})
set(RDKAFKA_LIBRARY_TYPE STATIC)
else()
set(RDKAFKA_PREFIX ${CMAKE_SHARED_LIBRARY_PREFIX})
set(RDKAFKA_SUFFIX ${CMAKE_SHARED_LIBRARY_SUFFIX})
set(RDKAFKA_LIBRARY_TYPE SHARED)
endif()
set(RdKafka_LIBNAME ${RDKAFKA_PREFIX}rdkafka${RDKAFKA_SUFFIX})
find_path(RdKafka_INCLUDE_DIR
NAMES librdkafka/rdkafka.h
HINTS ${RDKAFKA_ROOT_DIR}/include
HINTS ${RdKafka_ROOT}/include
)
set(HINT_DIR ${RDKAFKA_ROOT_DIR}/lib)
find_library(RDKAFKA_LIBRARY
NAMES rdkafka librdkafka
HINTS ${HINT_DIR}
find_library(RdKafka_LIBRARY_PATH
NAMES ${RdKafka_LIBNAME} rdkafka
HINTS ${RdKafka_ROOT}/lib ${RdKafka_ROOT}/lib64
)
# Check lib paths
if (CPPKAFKA_CMAKE_VERBOSE)
get_property(FIND_LIBRARY_32 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB32_PATHS)
get_property(FIND_LIBRARY_64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
message(STATUS "RDKAFKA search 32-bit library paths: ${FIND_LIBRARY_32}")
message(STATUS "RDKAFKA search 64-bit library paths: ${FIND_LIBRARY_64}")
message(STATUS "RdKafka_ROOT = ${RdKafka_ROOT}")
message(STATUS "RdKafka_INCLUDE_DIR = ${RdKafka_INCLUDE_DIR}")
message(STATUS "RdKafka_LIBNAME = ${RdKafka_LIBNAME}")
message(STATUS "RdKafka_LIBRARY_PATH = ${RdKafka_LIBRARY_PATH}")
endif()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(RDKAFKA DEFAULT_MSG
RDKAFKA_LIBRARY
RDKAFKA_INCLUDE_DIR
find_package_handle_standard_args(RdKafka DEFAULT_MSG
RdKafka_LIBNAME
RdKafka_LIBRARY_PATH
RdKafka_INCLUDE_DIR
)
set(CONTENTS "#include <librdkafka/rdkafka.h>\n #if RD_KAFKA_VERSION >= 0x00090400\n int main() { }\n #endif")
set(FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/rdkafka_version_test.c)
set(CONTENTS "#include <librdkafka/rdkafka.h>\n #if RD_KAFKA_VERSION >= ${RDKAFKA_MIN_VERSION_HEX}\n int main() { }\n #endif")
set(FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/rdkafka_version_test.cpp)
file(WRITE ${FILE_NAME} ${CONTENTS})
try_compile(HAVE_VALID_KAFKA_VERSION ${CMAKE_CURRENT_BINARY_DIR}
try_compile(RdKafka_FOUND ${CMAKE_CURRENT_BINARY_DIR}
SOURCES ${FILE_NAME}
CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${RDKAFKA_INCLUDE_DIR}")
CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${RdKafka_INCLUDE_DIR}")
if (HAVE_VALID_KAFKA_VERSION)
if (RdKafka_FOUND)
add_library(RdKafka::rdkafka ${RDKAFKA_LIBRARY_TYPE} IMPORTED GLOBAL)
if (UNIX AND NOT APPLE)
set(RDKAFKA_DEPENDENCIES pthread rt ssl crypto dl z)
else()
set(RDKAFKA_DEPENDENCIES pthread ssl crypto dl z)
endif()
set_target_properties(RdKafka::rdkafka PROPERTIES
IMPORTED_NAME RdKafka
IMPORTED_LOCATION "${RdKafka_LIBRARY_PATH}"
INTERFACE_INCLUDE_DIRECTORIES "${RdKafka_INCLUDE_DIR}"
INTERFACE_LINK_LIBRARIES "${RDKAFKA_DEPENDENCIES}")
message(STATUS "Found valid rdkafka version")
mark_as_advanced(
RDKAFKA_ROOT_DIR
RDKAFKA_INCLUDE_DIR
RDKAFKA_LIBRARY
RdKafka_INCLUDE_DIR
RdKafka_LIBRARY_PATH
)
else()
message(FATAL_ERROR "Failed to find valid rdkafka version")
endif()

cmake/config.cmake.in Normal file

@@ -0,0 +1,33 @@
@PACKAGE_INIT@
include(CMakeFindDependencyMacro)
# Add FindRdKafka.cmake
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_LIST_DIR}")
set(RDKAFKA_MIN_VERSION_HEX "@RDKAFKA_MIN_VERSION_HEX@")
# Find Boost (cppkafka uses boost::optional)
find_dependency(Boost REQUIRED)
# Try to find the RdKafka configuration file if present.
# This will search default system locations as well as RdKafka_ROOT and RdKafka_DIR paths if specified.
find_package(RdKafka QUIET CONFIG)
set(RDKAFKA_TARGET_IMPORTS ${RdKafka_FOUND})
if (NOT RdKafka_FOUND)
find_dependency(RdKafka REQUIRED MODULE)
endif()
include("${CMAKE_CURRENT_LIST_DIR}/@TARGET_EXPORT_NAME@.cmake")
# Export 'CppKafka_ROOT'
set_and_check(@PROJECT_NAME@_ROOT "@PACKAGE_CMAKE_INSTALL_PREFIX@")
# Export 'CppKafka_INSTALL_INCLUDE_DIR'
set_and_check(@PROJECT_NAME@_INSTALL_INCLUDE_DIR "@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@")
# Export 'CppKafka_INSTALL_LIB_DIR'
set_and_check(@PROJECT_NAME@_INSTALL_LIB_DIR "@PACKAGE_CMAKE_INSTALL_LIBDIR@")
# Validate installed components
check_required_components("@PROJECT_NAME@")

cmake/cppkafka.h.in Normal file

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_H
#define CPPKAFKA_H
@CPPKAFKA_HEADERS@
#endif

cmake/cppkafka.pc.in Normal file

@@ -0,0 +1,14 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
sharedlibdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
includedir=${prefix}/include
Name: cppkafka
Url: https://github.com/mfontanini/cppkafka
Description: C++ wrapper library on top of RdKafka
Version: @CPPKAFKA_VERSION@
Requires:
Requires.private: rdkafka >= 0.9.4
Libs: -L${libdir} -L${sharedlibdir} -lcppkafka
Cflags: -I${includedir} -I${includedir}/cppkafka -I@Boost_INCLUDE_DIRS@


@@ -1,21 +1,16 @@
find_package(Boost COMPONENTS program_options)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
if (Boost_PROGRAM_OPTIONS_FOUND)
link_libraries(${Boost_LIBRARIES} cppkafka ${RDKAFKA_LIBRARY})
add_custom_target(examples)
macro(create_example example_name)
string(REPLACE "_" "-" sanitized_name ${example_name})
add_executable(${sanitized_name} EXCLUDE_FROM_ALL "${example_name}_example.cpp")
target_link_libraries(${sanitized_name} cppkafka RdKafka::rdkafka Boost::boost Boost::program_options)
add_dependencies(examples ${sanitized_name})
endmacro()
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${RDKAFKA_INCLUDE_DIR})
add_custom_target(examples)
macro(create_example example_name)
add_executable(${example_name} EXCLUDE_FROM_ALL "${example_name}.cpp")
add_dependencies(examples ${example_name})
endmacro()
create_example(kafka_producer)
create_example(kafka_consumer)
create_example(metadata)
create_example(consumers_information)
else()
message(STATUS "Disabling examples since boost.program_options was not found")
endif()
create_example(producer)
create_example(buffered_producer)
create_example(consumer)
create_example(consumer_dispatcher)
create_example(metadata)
create_example(consumers_information)


@@ -0,0 +1,96 @@
#include <stdexcept>
#include <iostream>
#include <boost/program_options.hpp>
#include "cppkafka/utils/buffered_producer.h"
#include "cppkafka/configuration.h"
using std::string;
using std::exception;
using std::getline;
using std::cin;
using std::cout;
using std::endl;
using cppkafka::BufferedProducer;
using cppkafka::Configuration;
using cppkafka::Topic;
using cppkafka::MessageBuilder;
using cppkafka::Message;
namespace po = boost::program_options;
int main(int argc, char* argv[]) {
string brokers;
string topic_name;
int partition_value = -1;
po::options_description options("Options");
options.add_options()
("help,h", "produce this help message")
("brokers,b", po::value<string>(&brokers)->required(),
"the kafka broker list")
("topic,t", po::value<string>(&topic_name)->required(),
"the topic in which to write to")
("partition,p", po::value<int>(&partition_value),
"the partition to write into (unassigned if not provided)")
;
po::variables_map vm;
try {
po::store(po::command_line_parser(argc, argv).options(options).run(), vm);
po::notify(vm);
}
catch (exception& ex) {
cout << "Error parsing options: " << ex.what() << endl;
cout << endl;
cout << options << endl;
return 1;
}
// Create a message builder for this topic
MessageBuilder builder(topic_name);
// Get the partition we want to write to. If no partition is provided, this will be
// an unassigned one
if (partition_value != -1) {
builder.partition(partition_value);
}
// Construct the configuration
Configuration config = {
{ "metadata.broker.list", brokers }
};
// Create the producer
BufferedProducer<string> producer(config);
// Set a produce success callback
producer.set_produce_success_callback([](const Message& msg) {
cout << "Successfully produced message with payload " << msg.get_payload() << endl;
});
// Set a produce failure callback
producer.set_produce_failure_callback([](const Message& msg) {
cout << "Failed to produce message with payload " << msg.get_payload() << endl;
// Return false so we stop trying to produce this message
return false;
});
cout << "Producing messages into topic " << topic_name << endl;
// Now read lines and write them into kafka
string line;
while (getline(cin, line)) {
// Set the payload on this builder
builder.payload(line);
// Add the message we've built to the buffered producer
producer.add_message(builder);
// Now flush so we:
// * emit the buffered message
// * poll the producer so we dispatch on delivery report callbacks and
// therefore get the produce failure/success callbacks
producer.flush();
}
}


@@ -0,0 +1,121 @@
#include <stdexcept>
#include <iostream>
#include <csignal>
#include <boost/program_options.hpp>
#include "cppkafka/consumer.h"
#include "cppkafka/configuration.h"
#include "cppkafka/utils/consumer_dispatcher.h"
using std::string;
using std::exception;
using std::cout;
using std::endl;
using std::function;
using cppkafka::Consumer;
using cppkafka::ConsumerDispatcher;
using cppkafka::Configuration;
using cppkafka::Message;
using cppkafka::TopicPartition;
using cppkafka::TopicPartitionList;
using cppkafka::Error;
namespace po = boost::program_options;
function<void()> on_signal;
void signal_handler(int) {
on_signal();
}
// This example uses ConsumerDispatcher, a simple synchronous wrapper over a Consumer
// to allow processing messages using pattern matching rather than writing a loop
// and check if there's a message, if there's an error, etc.
int main(int argc, char* argv[]) {
string brokers;
string topic_name;
string group_id;
po::options_description options("Options");
options.add_options()
("help,h", "produce this help message")
("brokers,b", po::value<string>(&brokers)->required(),
"the kafka broker list")
("topic,t", po::value<string>(&topic_name)->required(),
"the topic in which to write to")
("group-id,g", po::value<string>(&group_id)->required(),
"the consumer group id")
;
po::variables_map vm;
try {
po::store(po::command_line_parser(argc, argv).options(options).run(), vm);
po::notify(vm);
}
catch (exception& ex) {
cout << "Error parsing options: " << ex.what() << endl;
cout << endl;
cout << options << endl;
return 1;
}
// Construct the configuration
Configuration config = {
{ "metadata.broker.list", brokers },
{ "group.id", group_id },
// Disable auto commit
{ "enable.auto.commit", false }
};
// Create the consumer
Consumer consumer(config);
// Print the assigned partitions on assignment
consumer.set_assignment_callback([](const TopicPartitionList& partitions) {
cout << "Got assigned: " << partitions << endl;
});
// Print the revoked partitions on revocation
consumer.set_revocation_callback([](const TopicPartitionList& partitions) {
cout << "Got revoked: " << partitions << endl;
});
// Subscribe to the topic
consumer.subscribe({ topic_name });
cout << "Consuming messages from topic " << topic_name << endl;
// Create a consumer dispatcher
ConsumerDispatcher dispatcher(consumer);
// Stop processing on SIGINT
on_signal = [&]() {
dispatcher.stop();
};
signal(SIGINT, signal_handler);
// Now run the dispatcher, providing a callback to handle messages, one to handle
// errors and another one to handle EOF on a partition
dispatcher.run(
// Callback executed whenever a new message is consumed
[&](Message msg) {
// Print the key (if any)
if (msg.get_key()) {
cout << msg.get_key() << " -> ";
}
// Print the payload
cout << msg.get_payload() << endl;
// Now commit the message
consumer.commit(msg);
},
// Whenever there's an error (other than the EOF soft error)
[](Error error) {
cout << "[+] Received error notification: " << error << endl;
},
// Whenever EOF is reached on a partition, print this
[](ConsumerDispatcher::EndOfFile, const TopicPartition& topic_partition) {
cout << "Reached EOF on partition " << topic_partition << endl;
}
);
}


@@ -75,4 +75,7 @@ int main(int argc, char* argv[]) {
// Actually produce the message we've built
producer.produce(builder);
}
// Flush all produced messages
producer.flush();
}


@@ -1 +0,0 @@
add_subdirectory(cppkafka)


@@ -1,8 +1,29 @@
set(CPPKAFKA_HEADER "${CMAKE_CURRENT_BINARY_DIR}/cppkafka.h")
# Local function to auto-generate main cppkafka.h header file
function(make_cppkafka_header)
file(GLOB INCLUDE_HEADERS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h" "utils/*.h")
list(SORT INCLUDE_HEADERS)
foreach(header ${INCLUDE_HEADERS})
if (NOT ${header} MATCHES "cppkafka.h")
SET(CPPKAFKA_HEADERS "${CPPKAFKA_HEADERS}#include <cppkafka/${header}>\n")
endif()
endforeach()
#create file from template
configure_file("${PROJECT_SOURCE_DIR}/cmake/cppkafka.h.in" "${CPPKAFKA_HEADER}" @ONLY)
endfunction()
# Run file generation function
make_cppkafka_header()
# Install headers including the auto-generated cppkafka.h
file(GLOB INCLUDE_FILES "*.h")
file(GLOB UTILS_INCLUDE_FILES "utils/*.h")
file(GLOB DETAIL_INCLUDE_FILES "detail/*.h")
install(
FILES ${INCLUDE_FILES}
DESTINATION include/cppkafka
DESTINATION include/cppkafka/
COMPONENT Headers
)
install(
@@ -10,3 +31,13 @@ install(
DESTINATION include/cppkafka/utils/
COMPONENT Headers
)
install(
FILES ${DETAIL_INCLUDE_FILES}
DESTINATION include/cppkafka/detail/
COMPONENT Headers
)
install(
FILES "${CPPKAFKA_HEADER}"
DESTINATION include/cppkafka/
COMPONENT Headers
)


@@ -31,10 +31,12 @@
#define CPPKAFKA_BUFFER_H
#include <cstddef>
#include <array>
#include <vector>
#include <iosfwd>
#include <algorithm>
#include "macros.h"
#include "exceptions.h"
namespace cppkafka {
@@ -75,6 +77,20 @@ public:
Buffer(const T* data, size_t size)
: data_(reinterpret_cast<const DataType*>(data)), size_(size) {
static_assert(sizeof(T) == sizeof(DataType), "sizeof(T) != sizeof(DataType)");
if ((data_ == nullptr) && (size_ > 0)) {
throw Exception("Invalid buffer configuration");
}
}
/**
* Constructs a buffer from two iterators in the range [first,last)
*
* \param first An iterator to the start of data
* \param last An iterator to the end of data (not included)
*/
template <typename Iter>
Buffer(const Iter first, const Iter last)
: Buffer(&*first, std::distance(first, last)) {
}
/**
@@ -88,10 +104,43 @@ public:
static_assert(sizeof(T) == sizeof(DataType), "sizeof(T) != sizeof(DataType)");
}
// Don't allow construction from temporary vectors
/**
* Don't allow construction from temporary vectors
*/
template <typename T>
Buffer(std::vector<T>&& data) = delete;
/**
* Constructs a buffer from an array
*
* \param data The array to be used as input
*/
template <typename T, size_t N>
Buffer(const std::array<T, N>& data)
: data_(reinterpret_cast<const DataType*>(data.data())), size_(data.size()) {
static_assert(sizeof(T) == sizeof(DataType), "sizeof(T) != sizeof(DataType)");
}
/**
* Don't allow construction from temporary arrays
*/
template <typename T, size_t N>
Buffer(std::array<T, N>&& data) = delete;
/**
* Constructs a buffer from a raw array
*
* \param data The array to be used as input
*/
template <typename T, size_t N>
Buffer(const T(&data)[N])
: Buffer(data, N) {
}
// Don't allow construction from temporary raw arrays
template <typename T, size_t N>
Buffer(T(&&data)[N]) = delete;
/**
* \brief Construct a buffer from a const string ref
*
@@ -100,7 +149,9 @@ public:
*/
Buffer(const std::string& data);
// Don't allow construction from temporary strings
/**
* Don't allow construction from temporary strings
*/
Buffer(std::string&&) = delete;
Buffer(const Buffer&) = delete;
@@ -152,7 +203,7 @@ public:
/**
* Output operator
*/
friend std::ostream& operator<<(std::ostream& output, const Buffer& rhs);
CPPKAFKA_API friend std::ostream& operator<<(std::ostream& output, const Buffer& rhs);
private:
const DataType* data_;
size_t size_;
@@ -161,12 +212,20 @@ private:
/**
* Compares Buffer objects for equality
*/
bool operator==(const Buffer& lhs, const Buffer& rhs);
CPPKAFKA_API bool operator==(const Buffer& lhs, const Buffer& rhs);
/**
* Compares Buffer objects for inequality
*/
bool operator!=(const Buffer& lhs, const Buffer& rhs);
CPPKAFKA_API bool operator!=(const Buffer& lhs, const Buffer& rhs);
/**
* Compares Buffer objects lexicographically
*/
CPPKAFKA_API bool operator<(const Buffer& lhs, const Buffer& rhs);
CPPKAFKA_API bool operator<=(const Buffer& lhs, const Buffer& rhs);
CPPKAFKA_API bool operator>(const Buffer& lhs, const Buffer& rhs);
CPPKAFKA_API bool operator>=(const Buffer& lhs, const Buffer& rhs);
} // cppkafka
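To make the new constructors concrete, a small standalone sketch (values are arbitrary; Buffer never owns the data, so every source below must outlive the buffer built from it):
```c++
#include <array>
#include <iostream>
#include <string>
#include <vector>
#include <cppkafka/buffer.h>

int main() {
    const std::string text = "hello";                 // lvalue: accepted
    const std::vector<char> bytes = {'a', 'b', 'c'};
    const std::array<char, 3> arr = {{'x', 'y', 'z'}};
    const char raw[] = {'1', '2', '3'};

    cppkafka::Buffer from_string(text);
    cppkafka::Buffer from_vector(bytes);
    cppkafka::Buffer from_array(arr);
    cppkafka::Buffer from_raw(raw);

    std::cout << from_string << " (" << from_string.get_size() << " bytes)" << std::endl;
    // cppkafka::Buffer bad(std::string("temp"));     // rejected: the temporary overloads are deleted
}
```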


@@ -41,7 +41,7 @@ template <typename T, typename Deleter, typename Cloner>
class ClonablePtr {
public:
/**
* Creates an instance
* \brief Creates an instance
*
* \param ptr The pointer to be wrapped
* \param deleter The deleter functor
@@ -60,17 +60,21 @@ public:
* \param rhs The pointer to be copied
*/
ClonablePtr(const ClonablePtr& rhs)
: handle_(rhs.cloner_(rhs.handle_.get()), rhs.handle_.get_deleter()), cloner_(rhs.cloner_) {
: handle_(std::unique_ptr<T, Deleter>(rhs.try_clone(), rhs.get_deleter())),
cloner_(rhs.get_cloner()) {
}
/**
* Copies and assigns the given pointer
* \brief Copies and assigns the given pointer
*
* \param rhs The pointer to be copied
*/
ClonablePtr& operator=(const ClonablePtr& rhs) {
handle_.reset(cloner_(rhs.handle_.get()));
if (this != &rhs) {
handle_ = std::unique_ptr<T, Deleter>(rhs.try_clone(), rhs.get_deleter());
cloner_ = rhs.get_cloner();
}
return *this;
}
@@ -79,12 +83,51 @@ public:
~ClonablePtr() = default;
/**
* Getter for the internal pointer
* \brief Getter for the internal pointer
*/
T* get() const {
return handle_.get();
}
/**
* \brief Releases ownership of the internal pointer
*/
T* release() {
return handle_.release();
}
/**
* \brief Reset the internal pointer to a new one
*/
void reset(T* ptr) {
handle_.reset(ptr);
}
/**
* \brief Get the deleter
*/
const Deleter& get_deleter() const {
return handle_.get_deleter();
}
/**
* \brief Get the cloner
*/
const Cloner& get_cloner() const {
return cloner_;
}
/**
* \brief Indicates whether this ClonablePtr instance is valid (not null)
*/
explicit operator bool() const {
return static_cast<bool>(handle_);
}
private:
T* try_clone() const {
return cloner_ ? cloner_(get()) : get();
}
std::unique_ptr<T, Deleter> handle_;
Cloner cloner_;
};
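A minimal sketch of the deep-copy semantics, using hypothetical `delete_int`/`clone_int` helpers that are not part of cppkafka:
```c++
#include <iostream>
#include <cppkafka/clonable_ptr.h>

// Illustration-only helpers matching the Deleter/Cloner signatures
static void delete_int(int* ptr) { delete ptr; }
static int* clone_int(const int* ptr) { return new int(*ptr); }

int main() {
    using IntPtr = cppkafka::ClonablePtr<int, decltype(&delete_int), decltype(&clone_int)>;
    IntPtr original(new int(42), &delete_int, &clone_int);
    IntPtr copy = original;   // copy construction invokes clone_int: a deep copy
    *copy.get() = 7;          // does not affect the original
    std::cout << *original.get() << " " << *copy.get() << std::endl;  // 42 7
}
```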


@@ -42,6 +42,7 @@
#include "clonable_ptr.h"
#include "configuration_base.h"
#include "macros.h"
#include "event.h"
namespace cppkafka {
@@ -62,19 +63,23 @@ class KafkaHandleBase;
class CPPKAFKA_API Configuration : public ConfigurationBase<Configuration> {
public:
using DeliveryReportCallback = std::function<void(Producer& producer, const Message&)>;
using OffsetCommitCallback = std::function<void(Consumer& consumer, Error,
using OffsetCommitCallback = std::function<void(Consumer& consumer,
Error error,
const TopicPartitionList& topic_partitions)>;
using ErrorCallback = std::function<void(KafkaHandleBase& handle, int error,
using ErrorCallback = std::function<void(KafkaHandleBase& handle,
int error,
const std::string& reason)>;
using ThrottleCallback = std::function<void(KafkaHandleBase& handle,
const std::string& broker_name,
int32_t broker_id,
std::chrono::milliseconds throttle_time)>;
using LogCallback = std::function<void(KafkaHandleBase& handle, int level,
using LogCallback = std::function<void(KafkaHandleBase& handle,
int level,
const std::string& facility,
const std::string& message)>;
using StatsCallback = std::function<void(KafkaHandleBase& handle, const std::string& json)>;
using SocketCallback = std::function<int(int domain, int type, int protoco)>;
using SocketCallback = std::function<int(int domain, int type, int protocol)>;
using BackgroundEventCallback = std::function<void(KafkaHandleBase& handle, Event)>;
using ConfigurationBase<Configuration>::set;
using ConfigurationBase<Configuration>::get;
@@ -139,13 +144,25 @@ public:
*/
Configuration& set_socket_callback(SocketCallback callback);
#if RD_KAFKA_VERSION >= RD_KAFKA_ADMIN_API_SUPPORT_VERSION
/**
* Sets the background event callback (invokes rd_kafka_conf_set_background_event_cb)
*/
Configuration& set_background_event_callback(BackgroundEventCallback callback);
/**
* Sets the event mask (invokes rd_kafka_conf_set_events)
*/
Configuration& set_events(int events);
#endif
/**
* Sets the default topic configuration
*/
Configuration& set_default_topic_configuration(TopicConfiguration config);
/**
* Returns true iff the given property name has been set
* Returns true if the given property name has been set
*/
bool has_property(const std::string& name) const;
@@ -201,6 +218,11 @@ public:
*/
const SocketCallback& get_socket_callback() const;
/**
* Gets the background event callback
*/
const BackgroundEventCallback& get_background_event_callback() const;
/**
* Gets the default topic configuration
*/
@@ -226,6 +248,7 @@ private:
LogCallback log_callback_;
StatsCallback stats_callback_;
SocketCallback socket_callback_;
BackgroundEventCallback background_event_callback_;
};
} // cppkafka
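For illustration, a short sketch wiring up one of these callbacks (the broker value is a placeholder, and `set_log_callback` is assumed here to be the setter matching the `get_log_callback` getter used elsewhere in this change):
```c++
#include <iostream>
#include <string>
#include <cppkafka/configuration.h>

int main() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "localhost:9092" }  // placeholder broker
    };
    // Route librdkafka log lines through our own sink
    config.set_log_callback([](cppkafka::KafkaHandleBase& /*handle*/, int level,
                               const std::string& facility, const std::string& message) {
        std::cerr << "[" << level << "] " << facility << ": " << message << std::endl;
    });
    // has_property only reports keys that were explicitly set
    std::cout << std::boolalpha << config.has_property("metadata.broker.list") << std::endl;
}
```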


@@ -32,13 +32,14 @@
#include <string>
#include <type_traits>
#include "macros.h"
namespace cppkafka {
/**
* Wrapper over a configuration (key, value) pair
*/
class ConfigurationOption {
class CPPKAFKA_API ConfigurationOption {
public:
/**
* Construct using a std::string value


@@ -35,9 +35,10 @@
#include <chrono>
#include <functional>
#include "kafka_handle_base.h"
#include "message.h"
#include "queue.h"
#include "macros.h"
#include "error.h"
#include "detail/callback_invoker.h"
namespace cppkafka {
@@ -64,14 +65,14 @@ class TopicConfiguration;
* Consumer consumer(config);
*
* // Set the assignment callback
* consumer.set_assignment_callback([&](vector<TopicPartition>& topic_partitions) {
* consumer.set_assignment_callback([&](TopicPartitionList& topic_partitions) {
* // Here you could fetch offsets and do something, altering the offsets on the
* // topic_partitions vector if needed
* cout << "Got assigned " << topic_partitions.count() << " partitions!" << endl;
* cout << "Got assigned " << topic_partitions.size() << " partitions!" << endl;
* });
*
* // Set the revocation callback
* consumer.set_revocation_callback([&](const vector<TopicPartition>& topic_partitions) {
* consumer.set_revocation_callback([&](const TopicPartitionList& topic_partitions) {
* cout << topic_partitions.size() << " partitions revoked!" << endl;
* });
*
@@ -100,6 +101,8 @@ public:
using AssignmentCallback = std::function<void(TopicPartitionList&)>;
using RevocationCallback = std::function<void(const TopicPartitionList&)>;
using RebalanceErrorCallback = std::function<void(Error)>;
using KafkaHandleBase::pause;
using KafkaHandleBase::resume;
/**
* \brief Creates an instance of a consumer.
@@ -116,7 +119,7 @@ public:
Consumer& operator=(Consumer&&) = delete;
/**
* \brief Closes and estroys the rdkafka handle
* \brief Closes and destroys the rdkafka handle
*
* This will call Consumer::close before destroying the handle
*/
@@ -126,7 +129,7 @@ public:
* \brief Sets the topic/partition assignment callback
*
* The Consumer class will use rd_kafka_conf_set_rebalance_cb and will handle the
* rebalance, converting from rdkafka topic partition list handles into vector<TopicPartition>
* rebalance, converting from rdkafka topic partition list handles into TopicPartitionList
* and executing the assignment/revocation/rebalance_error callbacks.
*
* \note You *do not need* to call Consumer::assign with the provided topic partitions. This
@@ -140,7 +143,7 @@ public:
* \brief Sets the topic/partition revocation callback
*
* The Consumer class will use rd_kafka_conf_set_rebalance_cb and will handle the
* rebalance, converting from rdkafka topic partition list handles into vector<TopicPartition>
* rebalance, converting from rdkafka topic partition list handles into TopicPartitionList
* and executing the assignment/revocation/rebalance_error callbacks.
*
* \note You *do not need* to call Consumer::assign with an empty topic partition list or
@@ -155,7 +158,7 @@ public:
* \brief Sets the rebalance error callback
*
* The Consumer class will use rd_kafka_conf_set_rebalance_cb and will handle the
* rebalance, converting from rdkafka topic partition list handles into vector<TopicPartition>
* rebalance, converting from rdkafka topic partition list handles into TopicPartitionList
* and executing the assignment/revocation/rebalance_error callbacks.
*
* \param callback The rebalance error callback
@@ -193,6 +196,34 @@ public:
*/
void unassign();
/**
* \brief Pauses all consumption
*/
void pause();
/**
* \brief Resumes all consumption
*/
void resume();
/**
* \brief Commits the current partition assignment
*
* This translates into a call to rd_kafka_commit with a null partition list.
*
* \remark This function is equivalent to calling commit(get_assignment())
*/
void commit();
/**
* \brief Commits the current partition assignment asynchronously
*
* This translates into a call to rd_kafka_commit with a null partition list.
*
* \remark This function is equivalent to calling async_commit(get_assignment())
*/
void async_commit();
/**
* \brief Commits the given message synchronously
*
@@ -235,6 +266,8 @@ public:
* This translates into a call to rd_kafka_get_watermark_offsets
*
* \param topic_partition The topic/partition to get the offsets from
*
* \return A pair of offsets {low, high}
*/
OffsetTuple get_offsets(const TopicPartition& topic_partition) const;
@@ -244,17 +277,67 @@ public:
* This translates into a call to rd_kafka_committed
*
* \param topic_partitions The topic/partition list to be queried
*
* \return The topic partition list
*/
TopicPartitionList get_offsets_committed(const TopicPartitionList& topic_partitions) const;
/**
* \brief Gets the offsets committed for the given topic/partition list with a timeout
*
* This translates into a call to rd_kafka_committed
*
* \param topic_partitions The topic/partition list to be queried
*
* \param timeout The timeout for this operation. Supersedes the default consumer timeout.
*
* \return The topic partition list
*/
TopicPartitionList get_offsets_committed(const TopicPartitionList& topic_partitions,
std::chrono::milliseconds timeout) const;
/**
* \brief Gets the offset positions for the given topic/partition list
*
* This translates into a call to rd_kafka_position
*
* \param topic_partitions The topic/partition list to be queried
*
* \return The topic partition list
*/
TopicPartitionList get_offsets_position(const TopicPartitionList& topic_partitions) const;
#if (RD_KAFKA_VERSION >= RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION)
/**
* \brief Stores the offsets on the currently assigned topic/partitions (legacy).
*
* This translates into a call to rd_kafka_offsets_store with the offsets prior to the current assignment positions.
* It is equivalent to calling rd_kafka_offsets_store(get_offsets_position(get_assignment())).
*
* \note When using this API it's recommended to set enable.auto.offset.store=false and enable.auto.commit=true.
*/
void store_consumed_offsets() const;
/**
* \brief Stores the offsets on the given topic/partitions (legacy).
*
* This translates into a call to rd_kafka_offsets_store.
*
* \param topic_partitions The topic/partition list to be stored.
*
* \note When using this API it's recommended to set enable.auto.offset.store=false and enable.auto.commit=true.
*/
void store_offsets(const TopicPartitionList& topic_partitions) const;
#endif
/**
* \brief Stores the offset for this message (legacy).
*
* This translates into a call to rd_kafka_offset_store.
*
* \param msg The message whose offset will be stored.
*
* \note When using this API it's recommended to set enable.auto.offset.store=false and enable.auto.commit=true.
*/
void store_offset(const Message& msg) const;
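Putting the recommendation above into practice, a consumption loop with manual offset storage could look like this (a sketch; broker, topic and group id are placeholders):
```c++
#include <cppkafka/consumer.h>

void consume_with_manual_offset_store() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "localhost:9092" },  // placeholder
        { "group.id", "example-group" },               // placeholder
        // As recommended for the store_* APIs:
        { "enable.auto.offset.store", false },
        { "enable.auto.commit", true }
    };
    cppkafka::Consumer consumer(config);
    consumer.subscribe({ "example-topic" });           // placeholder topic
    while (true) {                                     // loop forever for brevity
        cppkafka::Message msg = consumer.poll();
        if (msg && !msg.get_error()) {
            // ... process the message ...
            consumer.store_offset(msg);  // auto-commit later commits the stored offset
        }
    }
}
```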
/**
* \brief Gets the current topic subscription
@@ -267,6 +350,8 @@ public:
* \brief Gets the current topic/partition list assignment
*
* This translates to a call to rd_kafka_assignment
*
* \return The topic partition list
*/
TopicPartitionList get_assignment() const;
@@ -274,21 +359,29 @@ public:
* \brief Gets the group member id
*
* This translates to a call to rd_kafka_memberid
*
* \return The id
*/
std::string get_member_id() const;
/**
* Gets the partition assignment callback.
* \brief Gets the partition assignment callback.
*
* \return The callback reference
*/
const AssignmentCallback& get_assignment_callback() const;
/**
* Gets the partition revocation callback.
* \brief Gets the partition revocation callback.
*
* \return The callback reference
*/
const RevocationCallback& get_revocation_callback() const;
/**
* Gets the rebalance error callback.
* \brief Gets the rebalance error callback.
*
* \return The callback reference
*/
const RebalanceErrorCallback& get_rebalance_error_callback() const;
@@ -303,8 +396,8 @@ public:
*
* The timeout used on this call will be the one configured via Consumer::set_timeout.
*
* The returned message *might* be empty. If's necessary to check that it's a valid one before
* using it:
* \return A message. The returned message *might* be empty. It's necessary to check
* that it's valid before using it:
*
* \code
* Message msg = consumer.poll();
@@ -322,15 +415,102 @@ public:
* instead of the one configured on this Consumer.
*
* \param timeout The timeout to be used on this call
*
* \return A message
*/
Message poll(std::chrono::milliseconds timeout);
/**
* \brief Polls for a batch of messages
*
* This can return zero or more messages
*
* \param max_batch_size The maximum amount of messages expected
* \param alloc The optionally supplied allocator for allocating messages
*
* \return A list of messages
*/
template <typename Allocator>
std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
const Allocator& alloc);
/**
* \brief Polls for a batch of messages
*
* This can return zero or more messages
*
* \param max_batch_size The maximum amount of messages expected
*
* \return A list of messages
*/
std::vector<Message> poll_batch(size_t max_batch_size);
/**
* \brief Polls for a batch of messages
*
* This can return zero or more messages
*
* \param max_batch_size The maximum amount of messages expected
* \param timeout The timeout for this operation
* \param alloc The optionally supplied allocator for allocating messages
*
* \return A list of messages
*/
template <typename Allocator>
std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc);
/**
* \brief Polls for a batch of messages
*
* This can return zero or more messages
*
* \param max_batch_size The maximum amount of messages expected
* \param timeout The timeout for this operation
*
* \return A list of messages
*/
std::vector<Message> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout);
/**
* \brief Get the global event queue servicing this consumer corresponding to
* rd_kafka_queue_get_main and which is polled via rd_kafka_poll
*
* \return A Queue object
*
* \remark Note that this call will disable forwarding to the consumer_queue.
* To restore forwarding if desired, call Queue::forward_to_queue(consumer_queue)
*/
Queue get_main_queue() const;
/**
* \brief Get the consumer group queue corresponding to
* rd_kafka_queue_get_consumer and which is polled via rd_kafka_consumer_poll
*
* \return A Queue object
*/
Queue get_consumer_queue() const;
/**
* \brief Get the queue belonging to this partition. If the consumer is not assigned to this
* partition, an empty queue will be returned
*
* \param partition The partition object
*
* \return A Queue object
*
* \remark Note that this call will disable forwarding to the consumer_queue.
* To restore forwarding if desired, call Queue::forward_to_queue(consumer_queue)
*/
Queue get_partition_queue(const TopicPartition& partition) const;
private:
static void rebalance_proxy(rd_kafka_t *handle, rd_kafka_resp_err_t error,
rd_kafka_topic_partition_list_t *partitions, void *opaque);
void close();
void commit(const Message& msg, bool async);
void commit(const TopicPartitionList& topic_partitions, bool async);
void commit(const TopicPartitionList* topic_partitions, bool async);
void handle_rebalance(rd_kafka_resp_err_t err, TopicPartitionList& topic_partitions);
AssignmentCallback assignment_callback_;
@@ -338,6 +518,34 @@ private:
RebalanceErrorCallback rebalance_error_callback_;
};
// Implementations
template <typename Allocator>
std::vector<Message, Allocator> Consumer::poll_batch(size_t max_batch_size,
const Allocator& alloc) {
return poll_batch(max_batch_size, get_timeout(), alloc);
}
template <typename Allocator>
std::vector<Message, Allocator> Consumer::poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc) {
std::vector<rd_kafka_message_t*> raw_messages(max_batch_size);
// Note that this will leak the queue when using rdkafka < 0.11.5 (see get_queue comment)
Queue queue = Queue::make_queue(rd_kafka_queue_get_consumer(get_handle()));
ssize_t result = rd_kafka_consume_batch_queue(queue.get_handle(),
timeout.count(),
raw_messages.data(),
raw_messages.size());
if (result == -1) {
check_error(rd_kafka_last_error());
// on the off-chance that check_error() does not throw an error
return std::vector<Message, Allocator>(alloc);
}
return std::vector<Message, Allocator>(raw_messages.begin(),
raw_messages.begin() + result,
alloc);
}
} // cppkafka
#endif // CPP_KAFKA_CONSUMER_H
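A short usage sketch of the batch polling API declared above (assumes an already-subscribed consumer; the batch size and timeout are arbitrary):
```c++
#include <chrono>
#include <vector>
#include <cppkafka/consumer.h>

void drain(cppkafka::Consumer& consumer) {
    // Fetch up to 100 messages, waiting at most one second
    std::vector<cppkafka::Message> batch = consumer.poll_batch(100, std::chrono::milliseconds(1000));
    for (const cppkafka::Message& msg : batch) {
        if (msg && !msg.get_error()) {
            // ... process msg.get_payload() ...
        }
    }
}
```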


@@ -0,0 +1,69 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_H
#define CPPKAFKA_H
#include <cppkafka/buffer.h>
#include <cppkafka/clonable_ptr.h>
#include <cppkafka/configuration.h>
#include <cppkafka/configuration_base.h>
#include <cppkafka/configuration_option.h>
#include <cppkafka/consumer.h>
#include <cppkafka/error.h>
#include <cppkafka/event.h>
#include <cppkafka/exceptions.h>
#include <cppkafka/group_information.h>
#include <cppkafka/header.h>
#include <cppkafka/header_list.h>
#include <cppkafka/header_list_iterator.h>
#include <cppkafka/kafka_handle_base.h>
#include <cppkafka/logging.h>
#include <cppkafka/macros.h>
#include <cppkafka/message.h>
#include <cppkafka/message_builder.h>
#include <cppkafka/message_internal.h>
#include <cppkafka/message_timestamp.h>
#include <cppkafka/metadata.h>
#include <cppkafka/producer.h>
#include <cppkafka/queue.h>
#include <cppkafka/topic.h>
#include <cppkafka/topic_configuration.h>
#include <cppkafka/topic_partition.h>
#include <cppkafka/topic_partition_list.h>
#include <cppkafka/utils/backoff_committer.h>
#include <cppkafka/utils/backoff_performer.h>
#include <cppkafka/utils/buffered_producer.h>
#include <cppkafka/utils/compacted_topic_processor.h>
#include <cppkafka/utils/consumer_dispatcher.h>
#include <cppkafka/utils/poll_interface.h>
#include <cppkafka/utils/poll_strategy_base.h>
#include <cppkafka/utils/roundrobin_poll_strategy.h>
#endif


@@ -0,0 +1,127 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_CALLBACK_INVOKER_H
#define CPPKAFKA_CALLBACK_INVOKER_H
#include <sstream>
#include <assert.h>
#include "../logging.h"
#include "../kafka_handle_base.h"
namespace cppkafka {
// Error values
template <typename T>
T error_value() { return T{}; }
template<> inline
void error_value<void>() {};
template<> inline
bool error_value<bool>() { return false; }
template<> inline
int error_value<int>() { return -1; }
/**
* \brief Wraps an std::function object and runs it while preventing all exceptions from escaping
* \tparam Func An std::function object
*/
template <typename Func>
class CallbackInvoker
{
public:
using RetType = typename Func::result_type;
using LogCallback = std::function<void(KafkaHandleBase& handle,
int level,
const std::string& facility,
const std::string& message)>;
CallbackInvoker(const char* callback_name,
const Func& callback,
KafkaHandleBase* handle)
: callback_name_(callback_name),
callback_(callback),
handle_(handle) {
}
explicit operator bool() const {
return (bool)callback_;
}
template <typename ...Args>
RetType operator()(Args&&... args) const {
static const char* library_name = "cppkafka";
std::ostringstream error_msg;
try {
if (callback_) {
return callback_(std::forward<Args>(args)...);
}
return error_value<RetType>();
}
catch (const std::exception& ex) {
if (handle_) {
error_msg << "Caught exception in " << callback_name_ << " callback: " << ex.what();
}
}
catch (...) {
if (handle_) {
error_msg << "Caught unknown exception in " << callback_name_ << " callback";
}
}
// Log error
if (handle_) {
if (handle_->get_configuration().get_log_callback()) {
try {
// Log it
handle_->get_configuration().get_log_callback()(*handle_,
static_cast<int>(LogLevel::LogErr),
library_name,
error_msg.str());
}
catch (...) {} // sink everything
}
else {
rd_kafka_log_print(handle_->get_handle(),
static_cast<int>(LogLevel::LogErr),
library_name,
error_msg.str().c_str());
}
}
return error_value<RetType>();
}
private:
const char* callback_name_;
const Func& callback_;
KafkaHandleBase* handle_;
};
}
#endif
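To make the behaviour concrete, a small standalone sketch (the callback and values are illustrative only):
```c++
#include <functional>
#include <iostream>
#include <stdexcept>
#include <cppkafka/detail/callback_invoker.h>

int main() {
    // A callback that may throw; CallbackInvoker swallows the exception and
    // returns error_value<int>() == -1 instead of letting it escape.
    std::function<int(int)> risky = [](int value) -> int {
        if (value < 0) {
            throw std::runtime_error("negative input");
        }
        return value * 2;
    };
    // A null handle means failures are silently dropped rather than logged
    cppkafka::CallbackInvoker<std::function<int(int)>> invoker("example", risky, nullptr);
    std::cout << invoker(21) << std::endl;   // 42
    std::cout << invoker(-1) << std::endl;   // -1 (the int error value)
}
```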


@@ -14,7 +14,7 @@
#endif
#if defined(__linux__) || defined(__CYGWIN__)
#if defined(__linux__) || defined(__CYGWIN__) || defined(__sun)
# include <endian.h>
@@ -42,11 +42,11 @@
# define __LITTLE_ENDIAN LITTLE_ENDIAN
# define __PDP_ENDIAN PDP_ENDIAN
#elif defined(__OpenBSD__)
#elif defined(__OpenBSD__) || defined(__FreeBSD__)
# include <sys/endian.h>
#elif defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
#elif defined(__NetBSD__) || defined(__DragonFly__)
# include <sys/endian.h>


@@ -42,6 +42,10 @@ namespace cppkafka {
*/
class CPPKAFKA_API Error {
public:
/**
* @brief Constructs an error object with RD_KAFKA_RESP_ERR_NO_ERROR
*/
Error() = default;
/**
* Constructs an error object
*/
@@ -75,9 +79,9 @@ public:
/**
* Writes this error's string representation into a stream
*/
friend std::ostream& operator<<(std::ostream& output, const Error& rhs);
CPPKAFKA_API friend std::ostream& operator<<(std::ostream& output, const Error& rhs);
private:
rd_kafka_resp_err_t error_;
rd_kafka_resp_err_t error_{RD_KAFKA_RESP_ERR_NO_ERROR};
};
} // cppkafka
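A brief sketch of the default-constructed state introduced above (assuming the rd_kafka_resp_err_t constructor referred to earlier in the file):
```c++
#include <iostream>
#include <cppkafka/error.h>

int main() {
    cppkafka::Error no_error;                                 // RD_KAFKA_RESP_ERR_NO_ERROR
    cppkafka::Error timed_out(RD_KAFKA_RESP_ERR__TIMED_OUT);  // wraps an rdkafka error code
    std::cout << no_error << std::endl;
    std::cout << timed_out.to_string() << std::endl;
}
```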

include/cppkafka/event.h Normal file

@@ -0,0 +1,180 @@
/*
* Copyright (c) 2018, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_EVENT_H
#define CPPKAFKA_EVENT_H
#include <memory>
#include <string>
#include <vector>
#include "error.h"
#include "message.h"
#include "topic_partition.h"
#include "topic_partition_list.h"
namespace cppkafka {
class Event {
public:
/**
* Construct an Event from a rdkafka event handle and take ownership of it
*
* \param handle The handle to construct this event from
*/
Event(rd_kafka_event_t* handle);
/**
* Returns the name of this event
*/
std::string get_name() const;
/**
* Returns the type of this event
*/
rd_kafka_event_type_t get_type() const;
/**
* \brief Gets the next message contained in this event.
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_FETCH
* * RD_KAFKA_EVENT_DR
*
* \note The returned message's lifetime *is tied to this Event*. That is, if the event
* is freed, so are the contents of the message.
*/
Message get_next_message() const;
/**
* \brief Gets all messages in this event (if any)
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_FETCH
* * RD_KAFKA_EVENT_DR
*
* \note The returned messages' lifetime *is tied to this Event*. That is, if the event
* is freed, so are the contents of the messages.
*
* \return A vector containing 0 or more messages
*/
std::vector<Message> get_messages();
/**
* \brief Gets all messages in this event (if any)
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_FETCH
* * RD_KAFKA_EVENT_DR
*
* \param allocator The allocator to use on the output vector
*
* \note The returned messages' lifetime *is tied to this Event*. That is, if the event
* is freed, so are the contents of the messages.
*
* \return A vector containing 0 or more messages
*/
template <typename Allocator>
std::vector<Message, Allocator> get_messages(const Allocator allocator);
/**
* \brief Gets the number of messages contained in this event
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_FETCH
* * RD_KAFKA_EVENT_DR
*/
size_t get_message_count() const;
/**
* \brief Returns the error in this event
*/
Error get_error() const;
/**
* Gets the opaque pointer in this event
*/
void* get_opaque() const;
#if RD_KAFKA_VERSION >= RD_KAFKA_EVENT_STATS_SUPPORT_VERSION
/**
* \brief Gets the stats in this event
*
* This call is only valid if the event type is RD_KAFKA_EVENT_STATS
*/
std::string get_stats() const {
return rd_kafka_event_stats(handle_.get());
}
#endif
/**
* \brief Gets the topic/partition for this event
*
* This call is only valid if the event type is RD_KAFKA_EVENT_ERROR
*/
TopicPartition get_topic_partition() const;
/**
* \brief Gets the list of topic/partitions in this event
*
* This call is only valid if the event type is one of:
* * RD_KAFKA_EVENT_REBALANCE
* * RD_KAFKA_EVENT_OFFSET_COMMIT
*/
TopicPartitionList get_topic_partition_list() const;
/**
* Check whether this event is valid
*
* \return True if this event has a valid (non-null) handle inside
*/
operator bool() const;
private:
using HandlePtr = std::unique_ptr<rd_kafka_event_t, decltype(&rd_kafka_event_destroy)>;
HandlePtr handle_;
};
template <typename Allocator>
std::vector<Message, Allocator> Event::get_messages(const Allocator allocator) {
const size_t total_messages = get_message_count();
std::vector<const rd_kafka_message_t*> raw_messages(total_messages);
const auto messages_read = rd_kafka_event_message_array(handle_.get(),
raw_messages.data(),
total_messages);
std::vector<Message, Allocator> output(allocator);
output.reserve(messages_read);
for (auto message : raw_messages) {
output.emplace_back(Message::make_non_owning(const_cast<rd_kafka_message_t*>(message)));
}
return output;
}
} // cppkafka
#endif // CPPKAFKA_EVENT_H
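As a sketch of how the Event class combines with the new background event callback in Configuration (requires an rdkafka build providing the background/admin event API; the delivery-report filter below is illustrative only):
```c++
#include <iostream>
#include <cppkafka/configuration.h>
#include <cppkafka/event.h>

void configure_background_delivery_reports(cppkafka::Configuration& config) {
    config.set_background_event_callback([](cppkafka::KafkaHandleBase& /*handle*/,
                                            cppkafka::Event event) {
        if (event.get_type() == RD_KAFKA_EVENT_DR) {
            // Every delivery-report message carried by this event
            for (const cppkafka::Message& msg : event.get_messages()) {
                std::cout << "delivered: " << msg.get_payload() << std::endl;
            }
        }
    });
    // Ask librdkafka to enqueue delivery report events on the background queue
    config.set_events(RD_KAFKA_EVENT_DR);
}
```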


@@ -110,6 +110,38 @@ private:
Error error_;
};
/**
* Consumer exception
*/
class CPPKAFKA_API ConsumerException : public Exception {
public:
ConsumerException(Error error);
Error get_error() const;
private:
Error error_;
};
/**
* Queue exception for rd_kafka_queue_t errors
*/
class CPPKAFKA_API QueueException : public Exception {
public:
QueueException(Error error);
Error get_error() const;
private:
Error error_;
};
/**
* Backoff performer has no more retries left for a specific action.
*/
class CPPKAFKA_API ActionTerminatedException : public Exception {
public:
ActionTerminatedException(const std::string& error);
};
} // cppkafka
#endif // CPPKAFKA_EXCEPTIONS_H
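A generic handling pattern for the new exception types (which operations throw which exception is not spelled out here; this is only a sketch):
```c++
#include <iostream>
#include <cppkafka/consumer.h>
#include <cppkafka/exceptions.h>

void poll_once(cppkafka::Consumer& consumer) {
    try {
        cppkafka::Message msg = consumer.poll();
        // ... handle msg ...
    }
    catch (const cppkafka::QueueException& ex) {
        std::cerr << "queue error: " << ex.get_error() << std::endl;
    }
    catch (const cppkafka::ConsumerException& ex) {
        std::cerr << "consumer error: " << ex.get_error() << std::endl;
    }
    catch (const cppkafka::Exception& ex) {
        std::cerr << "cppkafka error: " << ex.what() << std::endl;
    }
}
```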


@@ -136,6 +136,8 @@ private:
std::vector<GroupMemberInformation> members_;
};
using GroupInformationList = std::vector<GroupInformation>;
} // cppkafka
#endif // CPPKAFKA_GROUP_INFORMATION_H

include/cppkafka/header.h Normal file

@@ -0,0 +1,195 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_HEADER_H
#define CPPKAFKA_HEADER_H
#include "macros.h"
#include "buffer.h"
#include <string>
#include <assert.h>
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
namespace cppkafka {
/**
* \brief Class representing a rdkafka header.
*
* The template parameter 'BufferType' can represent a cppkafka::Buffer, std::string, std::vector, etc.
* A valid header may contain an empty name as well as null data.
*/
template <typename BufferType>
class Header {
public:
using ValueType = BufferType;
/**
* \brief Build an empty header with no data
*/
Header() = default;
/**
* \brief Build a header instance
* \param name The header name
* \param value The non-modifiable header data
*/
Header(std::string name,
const BufferType& value);
/**
* \brief Build a header instance
* \param name The header name
* \param value The header data to be moved
*/
Header(std::string name,
BufferType&& value);
/**
* \brief Get the header name
* \return A reference to the name
*/
const std::string& get_name() const;
/**
* \brief Get the header value
* \return A const reference to the underlying buffer
*/
const BufferType& get_value() const;
/**
* \brief Get the header value
* \return A non-const reference to the underlying buffer
*/
BufferType& get_value();
/**
* \brief Check if this header is empty
* \return True if the header contains valid data, false otherwise.
*/
operator bool() const;
private:
template <typename T>
T make_value(const T& other);
Buffer make_value(const Buffer& other);
std::string name_;
BufferType value_;
};
// Comparison operators for Header type
template <typename BufferType>
bool operator==(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return std::tie(lhs.get_name(), lhs.get_value()) == std::tie(rhs.get_name(), rhs.get_value());
}
template <typename BufferType>
bool operator!=(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return !(lhs == rhs);
}
template <typename BufferType>
bool operator<(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return std::tie(lhs.get_name(), lhs.get_value()) < std::tie(rhs.get_name(), rhs.get_value());
}
template <typename BufferType>
bool operator>(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return std::tie(lhs.get_name(), lhs.get_value()) > std::tie(rhs.get_name(), rhs.get_value());
}
template <typename BufferType>
bool operator<=(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return !(lhs > rhs);
}
template <typename BufferType>
bool operator>=(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
return !(lhs < rhs);
}
// Implementation
template <typename BufferType>
Header<BufferType>::Header(std::string name,
const BufferType& value)
: name_(std::move(name)),
value_(make_value(value)) {
}
template <typename BufferType>
Header<BufferType>::Header(std::string name,
BufferType&& value)
: name_(std::move(name)),
value_(std::move(value)) {
}
template <typename BufferType>
const std::string& Header<BufferType>::get_name() const {
return name_;
}
template <typename BufferType>
const BufferType& Header<BufferType>::get_value() const {
return value_;
}
template <typename BufferType>
BufferType& Header<BufferType>::get_value() {
return value_;
}
template <typename BufferType>
Header<BufferType>::operator bool() const {
return !value_.empty();
}
template <>
inline
Header<Buffer>::operator bool() const {
return value_.get_size() > 0;
}
template <typename BufferType>
template <typename T>
T Header<BufferType>::make_value(const T& other) {
return other;
}
template <typename BufferType>
Buffer Header<BufferType>::make_value(const Buffer& other) {
return Buffer(other.get_data(), other.get_size());
}
} //namespace cppkafka
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
#endif //CPPKAFKA_HEADER_H
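A small sketch of the Header template in action, instantiated with std::string (only available when the headers support guard above is satisfied):
```c++
#include <iostream>
#include <string>
#include <cppkafka/header.h>

int main() {
    using StringHeader = cppkafka::Header<std::string>;
    StringHeader content_type("content-type", std::string("application/json"));
    StringHeader empty;  // no data: converts to false

    std::cout << content_type.get_name() << " = " << content_type.get_value() << std::endl;
    std::cout << std::boolalpha << static_cast<bool>(empty) << std::endl;  // false
    // Headers compare by (name, value)
    std::cout << (content_type == StringHeader("content-type", std::string("application/json")))
              << std::endl;  // true
}
```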


@@ -0,0 +1,337 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_HEADER_LIST_H
#define CPPKAFKA_HEADER_LIST_H
#include <librdkafka/rdkafka.h>
#include "clonable_ptr.h"
#include "header.h"
#include "header_list_iterator.h"
#include "exceptions.h"
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
namespace cppkafka {
/**
* \brief Thin wrapper over a rd_kafka_headers_t handle which optionally controls its lifetime.
* \tparam HeaderType The header type
*
* This is a copyable and movable class that wraps a rd_kafka_header_t*. When copying this class,
* all associated headers are also copied via rd_kafka_headers_copy(). If this list owns the underlying handle,
* its destructor will call rd_kafka_headers_destroy().
*/
template <typename HeaderType>
class HeaderList {
public:
template <typename OtherHeaderType>
friend class HeaderList;
using BufferType = typename HeaderType::ValueType;
using Iterator = HeaderIterator<HeaderType>;
/**
* Constructs a header list that won't take ownership of the given pointer.
*/
static HeaderList<HeaderType> make_non_owning(rd_kafka_headers_t* handle);
/**
* \brief Create an empty header list with no handle.
*/
HeaderList();
/**
* \brief Create an empty header list. This call translates to rd_kafka_headers_new().
* \param reserve The number of headers to reserve space for.
*/
explicit HeaderList(size_t reserve);
/**
* \brief Create a header list and assume ownership of the handle.
* \param handle The header list handle.
*/
explicit HeaderList(rd_kafka_headers_t* handle);
/**
* \brief Create a header list from another header list type
* \param other The other list
*/
template <typename OtherHeaderType>
HeaderList(const HeaderList<OtherHeaderType>& other);
template <typename OtherHeaderType>
HeaderList(HeaderList<OtherHeaderType>&& other);
/**
* \brief Add a header to the list. This translates to rd_kafka_header_add().
* \param header The header.
* \return An Error indicating if the operation was successful or not.
* \warning This operation shall invalidate all iterators.
*/
Error add(const HeaderType& header);
/**
* \brief Remove all headers with 'name'. This translates to rd_kafka_header_remove().
* \param name The name of the header(s) to remove.
* \return An Error indicating if the operation was successful or not.
* \warning This operation shall invalidate all iterators.
*/
Error remove(const std::string& name);
/**
* \brief Return the header present at position 'index'. Throws on error.
* This translates to rd_kafka_header_get(index)
* \param index The header index in the list (0-based).
* \return The header at that position.
*/
HeaderType at(size_t index) const; //throws
/**
* \brief Return the first header in the list. Throws if the list is empty.
* This translates to rd_kafka_header_get(0).
* \return The first header.
*/
HeaderType front() const; //throws
/**
* \brief Return the last header in the list. Throws if the list is empty.
* This translates to rd_kafka_header_get(size-1).
* \return The last header.
*/
HeaderType back() const; //throws
/**
* \brief Returns the number of headers in the list. This translates to rd_kafka_header_cnt().
* \return The number of headers.
*/
size_t size() const;
/**
* \brief Indicates if this list is empty.
* \return True if empty, false otherwise.
*/
bool empty() const;
/**
* \brief Returns a HeaderIterator pointing to the first position if the list is not empty
* or pointing to end() otherwise.
* \return An iterator.
* \warning This iterator will be invalid if add() or remove() is called.
*/
Iterator begin() const;
/**
* \brief Returns a HeaderIterator pointing to one element past the end of the list.
* \return An iterator.
* \remark This iterator cannot be de-referenced.
*/
Iterator end() const;
/**
* \brief Get the underlying header list handle.
* \return The handle.
*/
rd_kafka_headers_t* get_handle() const;
/**
* \brief Get the underlying header list handle and release its ownership.
* \return The handle.
* \warning After this call, the HeaderList becomes invalid.
*/
rd_kafka_headers_t* release_handle();
/**
* \brief Indicates if this list is valid (contains a non-null handle) or not.
* \return True if valid, false otherwise.
*/
explicit operator bool() const;
private:
struct NonOwningTag { };
static void dummy_deleter(rd_kafka_headers_t*) {}
using HandlePtr = ClonablePtr<rd_kafka_headers_t, decltype(&rd_kafka_headers_destroy),
decltype(&rd_kafka_headers_copy)>;
HeaderList(rd_kafka_headers_t* handle, NonOwningTag);
HandlePtr handle_;
};
template <typename HeaderType>
bool operator==(const HeaderList<HeaderType>& lhs, const HeaderList<HeaderType> rhs) {
if (!lhs && !rhs) {
return true;
}
if (!lhs || !rhs) {
return false;
}
if (lhs.size() != rhs.size()) {
return false;
}
return std::equal(lhs.begin(), lhs.end(), rhs.begin());
}
template <typename HeaderType>
bool operator!=(const HeaderList<HeaderType>& lhs, const HeaderList<HeaderType>& rhs) {
return !(lhs == rhs);
}
template <typename HeaderType>
HeaderList<HeaderType> HeaderList<HeaderType>::make_non_owning(rd_kafka_headers_t* handle) {
return HeaderList(handle, NonOwningTag());
}
template <typename HeaderType>
HeaderList<HeaderType>::HeaderList()
: handle_(nullptr, nullptr, nullptr) {
}
template <typename HeaderType>
HeaderList<HeaderType>::HeaderList(size_t reserve)
: handle_(rd_kafka_headers_new(reserve), &rd_kafka_headers_destroy, &rd_kafka_headers_copy) {
assert(reserve);
}
template <typename HeaderType>
HeaderList<HeaderType>::HeaderList(rd_kafka_headers_t* handle)
: handle_(handle, &rd_kafka_headers_destroy, &rd_kafka_headers_copy) { //if we own the header list, we clone it on copy
assert(handle);
}
template <typename HeaderType>
HeaderList<HeaderType>::HeaderList(rd_kafka_headers_t* handle, NonOwningTag)
: handle_(handle, &dummy_deleter, nullptr) { //if we don't own the header list, we forward the handle on copy.
assert(handle);
}
template <typename HeaderType>
template <typename OtherHeaderType>
HeaderList<HeaderType>::HeaderList(const HeaderList<OtherHeaderType>& other)
: handle_(other.handle_) {
}
template <typename HeaderType>
template <typename OtherHeaderType>
HeaderList<HeaderType>::HeaderList(HeaderList<OtherHeaderType>&& other)
: handle_(std::move(other.handle_)) {
}
// Methods
template <typename HeaderType>
Error HeaderList<HeaderType>::add(const HeaderType& header) {
assert(handle_);
return rd_kafka_header_add(handle_.get(),
header.get_name().data(), header.get_name().size(),
header.get_value().data(), header.get_value().size());
}
template <>
inline
Error HeaderList<Header<Buffer>>::add(const Header<Buffer>& header) {
assert(handle_);
return rd_kafka_header_add(handle_.get(),
header.get_name().data(), header.get_name().size(),
header.get_value().get_data(), header.get_value().get_size());
}
template <typename HeaderType>
Error HeaderList<HeaderType>::remove(const std::string& name) {
assert(handle_);
return rd_kafka_header_remove(handle_.get(), name.data());
}
template <typename HeaderType>
HeaderType HeaderList<HeaderType>::at(size_t index) const {
assert(handle_);
const char *name, *value;
size_t size;
Error error = rd_kafka_header_get_all(handle_.get(), index, &name, reinterpret_cast<const void**>(&value), &size);
if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
throw Exception(error.to_string());
}
return HeaderType(name, BufferType(value, value + size));
}
template <typename HeaderType>
HeaderType HeaderList<HeaderType>::front() const {
return at(0);
}
template <typename HeaderType>
HeaderType HeaderList<HeaderType>::back() const {
return at(size()-1);
}
template <typename HeaderType>
size_t HeaderList<HeaderType>::size() const {
return handle_ ? rd_kafka_header_cnt(handle_.get()) : 0;
}
template <typename HeaderType>
bool HeaderList<HeaderType>::empty() const {
return size() == 0;
}
template <typename HeaderType>
typename HeaderList<HeaderType>::Iterator
HeaderList<HeaderType>::begin() const {
return Iterator(*this, 0);
}
template <typename HeaderType>
typename HeaderList<HeaderType>::Iterator
HeaderList<HeaderType>::end() const {
return Iterator(*this, size());
}
template <typename HeaderType>
rd_kafka_headers_t* HeaderList<HeaderType>::get_handle() const {
return handle_.get();
}
template <typename HeaderType>
rd_kafka_headers_t* HeaderList<HeaderType>::release_handle() {
return handle_.release();
}
template <typename HeaderType>
HeaderList<HeaderType>::operator bool() const {
return static_cast<bool>(handle_);
}
} //namespace cppkafka
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
#endif //CPPKAFKA_HEADER_LIST_H
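A minimal usage sketch of the list API above (the Header<std::string> instantiation, header names and values are purely illustrative, and the include path is assumed to match the installed layout):

#include <cppkafka/header_list.h>
#include <string>

void build_headers() {
    using StringHeader = cppkafka::Header<std::string>;
    // Owning list; rd_kafka_headers_new() is called with the reserved size.
    cppkafka::HeaderList<StringHeader> headers(2);
    headers.add(StringHeader("trace-id", "abc123"));
    headers.add(StringHeader("attempt", "1"));
    if (!headers.empty()) {
        StringHeader first = headers.front();                 // rd_kafka_header_get(0)
        StringHeader last = headers.at(headers.size() - 1);   // rd_kafka_header_get(size-1)
    }
    headers.remove("attempt");   // note: invalidates any outstanding iterators
}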

View File

@@ -0,0 +1,193 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_HEADER_LIST_ITERATOR_H
#define CPPKAFKA_HEADER_LIST_ITERATOR_H
#include <cstddef>
#include <utility>
#include <iterator>
#include "header.h"
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
namespace cppkafka {
template <typename HeaderType>
class HeaderList;
template <typename HeaderType>
class HeaderIterator;
template <typename HeaderType>
bool operator==(const HeaderIterator<HeaderType>& lhs, const HeaderIterator<HeaderType>& rhs);
/**
* \brief Iterator over a HeaderList object.
* \tparam HeaderType The type of header this iterator points to.
*/
template <typename HeaderType>
class HeaderIterator {
public:
friend HeaderList<HeaderType>;
using HeaderListType = HeaderList<HeaderType>;
using BufferType = typename HeaderType::ValueType;
//std::iterator_traits
using difference_type = std::ptrdiff_t;
using value_type = HeaderType;
using pointer = value_type*;
using reference = value_type&;
using iterator_category = std::bidirectional_iterator_tag;
friend bool operator==<HeaderType>(const HeaderIterator<HeaderType>& lhs,
const HeaderIterator<HeaderType>& rhs);
HeaderIterator(const HeaderIterator& other)
: header_list_(other.header_list_),
header_(make_header(other.header_)),
index_(other.index_) {
}
HeaderIterator& operator=(const HeaderIterator& other) {
if (this == &other) return *this;
header_list_ = other.header_list_;
header_ = make_header(other.header_);
index_ = other.index_;
return *this;
}
HeaderIterator(HeaderIterator&&) = default;
HeaderIterator& operator=(HeaderIterator&&) = default;
/**
* \brief Prefix increment of the iterator.
* \return Itself after being incremented.
*/
HeaderIterator& operator++() {
assert(index_ < header_list_.size());
++index_;
return *this;
}
/**
* \brief Postfix increment of the iterator.
* \return A copy of the iterator before being incremented.
*/
HeaderIterator operator++(int) {
HeaderIterator tmp(*this);
operator++();
return tmp;
}
/**
* \brief Prefix decrement of the iterator.
* \return Itself after being decremented.
*/
HeaderIterator& operator--() {
assert(index_ > 0);
--index_;
return *this;
}
/**
* \brief Postfix decrement of the iterator.
* \return A copy of the iterator before being decremented.
*/
HeaderIterator operator--(int) {
HeaderIterator tmp(*this);
operator--();
return tmp;
}
/**
* \brief Dereferences this iterator.
* \return A reference to the header the iterator points to.
* \warning Throws if invalid or if *this == end().
*/
const HeaderType& operator*() const {
header_ = header_list_.at(index_);
return header_;
}
HeaderType& operator*() {
header_ = header_list_.at(index_);
return header_;
}
/**
* \brief Dereferences this iterator.
* \return The address of the header the iterator points to.
* \warning Throws if invalid or if *this == end().
*/
const HeaderType* operator->() const {
header_ = header_list_.at(index_);
return &header_;
}
HeaderType* operator->() {
header_ = header_list_.at(index_);
return &header_;
}
private:
HeaderIterator(const HeaderListType& headers,
size_t index)
: header_list_(headers),
index_(index) {
}
template <typename T>
T make_header(const T& other) {
return other;
}
Header<Buffer> make_header(const Header<Buffer>& other) {
return Header<Buffer>(other.get_name(),
Buffer(other.get_value().get_data(),
other.get_value().get_size()));
}
const HeaderListType& header_list_;
mutable HeaderType header_; //mutable: the const dereference operators cache the current header here
size_t index_;
};
// Equality comparison operators
template <typename HeaderType>
bool operator==(const HeaderIterator<HeaderType>& lhs, const HeaderIterator<HeaderType>& rhs) {
return (lhs.header_list_.get_handle() == rhs.header_list_.get_handle()) && (lhs.index_ == rhs.index_);
}
template <typename HeaderType>
bool operator!=(const HeaderIterator<HeaderType>& lhs, const HeaderIterator<HeaderType>& rhs) {
return !(lhs == rhs);
}
} //namespace cppkafka
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
#endif //CPPKAFKA_HEADER_LIST_ITERATOR_H
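Because HeaderList::begin()/end() return this iterator, a range-for works directly; a brief sketch (assuming a list populated as in the previous example):

#include <iostream>

void print_headers(const cppkafka::HeaderList<cppkafka::Header<std::string>>& headers) {
    // Dereferencing fetches the header at the current index via HeaderList::at().
    for (const auto& header : headers) {
        std::cout << header.get_name() << " = " << header.get_value() << "\n";
    }
}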

View File

@@ -39,11 +39,14 @@
#include <tuple>
#include <chrono>
#include <librdkafka/rdkafka.h>
#include "group_information.h"
#include "topic_partition.h"
#include "topic_partition_list.h"
#include "topic_configuration.h"
#include "configuration.h"
#include "macros.h"
#include "logging.h"
#include "queue.h"
namespace cppkafka {
@@ -75,6 +78,13 @@ public:
*/
void pause_partitions(const TopicPartitionList& topic_partitions);
/**
* \brief Pauses consumption/production for this topic
*
* \param topic The topic name
*/
void pause(const std::string& topic);
/**
* \brief Resumes consumption/production from the given topic/partition list
*
@@ -84,6 +94,13 @@ public:
*/
void resume_partitions(const TopicPartitionList& topic_partitions);
/**
* \brief Resumes consumption/production for this topic
*
* \param topic The topic name
*/
void resume(const std::string& topic);
/**
* \brief Sets the timeout for operations that require a timeout
*
@@ -93,6 +110,11 @@ public:
*/
void set_timeout(std::chrono::milliseconds timeout);
/**
* \brief Sets the log level
*/
void set_log_level(LogLevel level);
/**
* \brief Adds one or more brokers to this handle's broker list
*
@@ -108,11 +130,29 @@ public:
* This translates into a call to rd_kafka_query_watermark_offsets
*
* \param topic_partition The topic/partition to be queried
*
* \return A pair of watermark offsets {low, high}
*/
OffsetTuple query_offsets(const TopicPartition& topic_partition) const;
/**
* Gets the rdkafka handle
* \brief Queries the offset for the given topic/partition with a given timeout
*
* This translates into a call to rd_kafka_query_watermark_offsets
*
* \param topic_partition The topic/partition to be queried
*
* \param timeout The timeout for this operation. This supersedes the default handle timeout.
*
* \return A pair of watermark offsets {low, high}
*/
OffsetTuple query_offsets(const TopicPartition& topic_partition,
std::chrono::milliseconds timeout) const;
/**
* \brief Gets the rdkafka handle
*
* \return The rdkafka handle
*/
rd_kafka_t* get_handle() const;
@@ -124,6 +164,8 @@ public:
* if any.
*
* \param name The name of the topic to be created
*
* \return A topic
*/
Topic get_topic(const std::string& name);
@@ -134,18 +176,36 @@ public:
*
* \param name The name of the topic to be created
* \param config The configuration to be used for the new topic
*
* \return A topic
*/
Topic get_topic(const std::string& name, TopicConfiguration config);
/**
* \brief Gets metadata for brokers, topics, partitions, etc
*
* This translates into a call to rd_kafka_metadata
*
* \param all_topics Whether to fetch metadata about all topics or only locally known ones
*
* This translates into a call to rd_kafka_metadata
* \return The metadata
*/
Metadata get_metadata(bool all_topics = true) const;
/**
* \brief Gets metadata for brokers, topics, partitions, etc with a timeout
*
* This translates into a call to rd_kafka_metadata
*
* \param all_topics Whether to fetch metadata about all topics or only locally known ones
*
* \param timeout The timeout for this operation. Supersedes the default handle timeout.
*
* \return The metadata
*/
Metadata get_metadata(bool all_topics,
std::chrono::milliseconds timeout) const;
/**
* \brief Gets general metadata but only fetches metadata for the given topic rather than
* all of them
@@ -153,20 +213,62 @@ public:
* This translates into a call to rd_kafka_metadata
*
* \param topic The topic to fetch information for
*
* \return The topic metadata
*/
TopicMetadata get_metadata(const Topic& topic) const;
/**
* Gets the consumer group information
* \brief Gets general metadata but only fetches metadata for the given topic rather than
* all of them. Uses a timeout to limit the operation execution time.
*
* This translates into a call to rd_kafka_metadata
*
* \param topic The topic to fetch information for
*
* \param timeout The timeout for this operation. Supersedes the default handle timeout.
*
* \return The topic metadata
*/
TopicMetadata get_metadata(const Topic& topic,
std::chrono::milliseconds timeout) const;
/**
* \brief Gets the consumer group information
*
* \param name The name of the consumer group to look up
*
* \return The group information
*/
GroupInformation get_consumer_group(const std::string& name);
/**
* Gets all consumer groups
* \brief Gets the consumer group information with a timeout
*
* \param name The name of the consumer group to look up
*
* \param timeout The timeout for this operation. Supersedes the default handle timeout.
*
* \return The group information
*/
std::vector<GroupInformation> get_consumer_groups();
GroupInformation get_consumer_group(const std::string& name,
std::chrono::milliseconds timeout);
/**
* \brief Gets all consumer groups
*
* \return A list of consumer groups
*/
GroupInformationList get_consumer_groups();
/**
* \brief Gets all consumer groups with a timeout
*
* \param timeout The timeout for this operation. Supersedes the default handle timeout.
*
* \return A list of consumer groups
*/
GroupInformationList get_consumer_groups(std::chrono::milliseconds timeout);
/**
* \brief Gets topic/partition offsets based on timestamps
@@ -174,54 +276,123 @@ public:
* This translates into a call to rd_kafka_offsets_for_times
*
* \param queries A map from topic/partition to the timestamp to be used
*
* \return A topic partition list
*/
TopicPartitionList get_offsets_for_times(const TopicPartitionsTimestampsMap& queries) const;
/**
* Returns the kafka handle name
* \brief Gets topic/partition offsets based on timestamps with a timeout
*
* This translates into a call to rd_kafka_offsets_for_times
*
* \param queries A map from topic/partition to the timestamp to be used
*
* \param timeout The timeout for this operation. This supersedes the default handle timeout.
*
* \return A topic partition list
*/
TopicPartitionList get_offsets_for_times(const TopicPartitionsTimestampsMap& queries,
std::chrono::milliseconds timeout) const;
/**
* \brief Get the kafka handle name
*
* \return The handle name
*/
std::string get_name() const;
/**
* Gets the configured timeout.
* \brief Gets the configured timeout.
*
* \return The configured timeout
*
* \sa KafkaHandleBase::set_timeout
*/
std::chrono::milliseconds get_timeout() const;
/**
* Gets the handle's configuration
* \brief Gets the handle's configuration
*
* \return A reference to the configuration object
*/
const Configuration& get_configuration() const;
#if RD_KAFKA_VERSION >= RD_KAFKA_ADMIN_API_SUPPORT_VERSION
/**
* \brief Gets the background queue
*
* This translates into a call to rd_kafka_queue_get_background
*
* \return The background queue
*/
Queue get_background_queue() const {
return Queue::make_queue(rd_kafka_queue_get_background(handle_.get()));
}
#endif
/**
* \brief Gets the length of the out queue
*
* This calls rd_kafka_outq_len
*
* \return The length of the queue
*/
int get_out_queue_length() const;
#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
/**
* \brief Sets flags for rd_kafka_destroy_flags()
*/
void set_destroy_flags(int destroy_flags);
/**
* \brief Returns flags for rd_kafka_destroy_flags()
*/
int get_destroy_flags() const;
#endif
/**
* \brief Cancels the current callback dispatcher
*
* This calls rd_kafka_yield
*/
void yield() const;
protected:
KafkaHandleBase(Configuration config);
void set_handle(rd_kafka_t* handle);
void check_error(rd_kafka_resp_err_t error) const;
void check_error(rd_kafka_resp_err_t error,
const rd_kafka_topic_partition_list_t* list_ptr) const;
rd_kafka_conf_t* get_configuration_handle();
private:
static const std::chrono::milliseconds DEFAULT_TIMEOUT;
using HandlePtr = std::unique_ptr<rd_kafka_t, decltype(&rd_kafka_destroy)>;
struct HandleDeleter {
explicit HandleDeleter(const KafkaHandleBase* handle_base_ptr) : handle_base_ptr_{handle_base_ptr} {}
void operator()(rd_kafka_t* handle);
private:
const KafkaHandleBase * handle_base_ptr_;
};
using HandlePtr = std::unique_ptr<rd_kafka_t, HandleDeleter>;
using TopicConfigurationMap = std::unordered_map<std::string, TopicConfiguration>;
Topic get_topic(const std::string& name, rd_kafka_topic_conf_t* conf);
Metadata get_metadata(bool all_topics, rd_kafka_topic_t* topic_ptr) const;
std::vector<GroupInformation> fetch_consumer_groups(const char* name);
Metadata get_metadata(bool all_topics,
rd_kafka_topic_t* topic_ptr,
std::chrono::milliseconds timeout) const;
GroupInformationList fetch_consumer_groups(const char* name,
std::chrono::milliseconds timeout);
void save_topic_config(const std::string& topic_name, TopicConfiguration config);
HandlePtr handle_;
std::chrono::milliseconds timeout_ms_;
Configuration config_;
TopicConfigurationMap topic_configurations_;
std::mutex topic_configurations_mutex_;
HandlePtr handle_;
int destroy_flags_;
};
} // cppkafka
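The timeout overloads added above can be exercised on any concrete handle such as a Consumer; a short sketch (the consumer object, topic name and timeouts are placeholders, and OffsetTuple is assumed to be a std::tuple<int64_t, int64_t>):

#include <chrono>
#include <tuple>

void query_with_timeouts(cppkafka::Consumer& consumer) {
    // The per-call timeout supersedes the handle-wide timeout set via set_timeout().
    cppkafka::Metadata metadata = consumer.get_metadata(true, std::chrono::seconds(5));

    auto offsets = consumer.query_offsets(cppkafka::TopicPartition("some_topic", 0),
                                          std::chrono::seconds(2));
    int64_t low = std::get<0>(offsets);    // low watermark
    int64_t high = std::get<1>(offsets);   // high watermark
}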

View File

@@ -0,0 +1,49 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_LOGGING_H
#define CPPKAFKA_LOGGING_H
namespace cppkafka {
// Based on syslog.h levels
enum class LogLevel : int {
LogEmerg = 0, /* system is unusable */
LogAlert = 1, /* action must be taken immediately */
LogCrit = 2, /* critical conditions */
LogErr = 3, /* error conditions */
LogWarning = 4, /* warning conditions */
LogNotice = 5, /* normal but significant condition */
LogInfo = 6, /* informational */
LogDebug = 7 /* debug-level messages */
};
} //cppkafka
#endif //CPPKAFKA_LOGGING_H
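These values map one-to-one onto syslog levels, so KafkaHandleBase::set_log_level() can be used to quiet a handle; a one-line sketch (the producer object is hypothetical):

// Only emit warnings and above from this handle's internal logging.
producer.set_log_level(cppkafka::LogLevel::LogWarning);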

View File

@@ -43,4 +43,14 @@
#define CPPKAFKA_API
#endif // _WIN32 && !CPPKAFKA_STATIC
// See: https://github.com/edenhill/librdkafka/issues/1792
#define RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION 0x000b0500 //v0.11.5.00
#define RD_KAFKA_HEADERS_SUPPORT_VERSION 0x000b0402 //v0.11.4.02
#define RD_KAFKA_ADMIN_API_SUPPORT_VERSION 0x000b0500 //v0.11.5.00
#define RD_KAFKA_MESSAGE_LATENCY_SUPPORT_VERSION 0x000b0000 //v0.11.0.00
#define RD_KAFKA_EVENT_STATS_SUPPORT_VERSION 0x000b0000 //v0.11.0.00
#define RD_KAFKA_MESSAGE_STATUS_SUPPORT_VERSION 0x01000002 //v1.0.0.02
#define RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION 0x00090501 //v0.9.5.01
#define RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION 0x000b0600 //v0.11.6
#endif // CPPKAFKA_MACROS_H

View File

@@ -33,15 +33,18 @@
#include <memory>
#include <cstdint>
#include <chrono>
#include <cassert>
#include <boost/optional.hpp>
#include <librdkafka/rdkafka.h>
#include "buffer.h"
#include "macros.h"
#include "error.h"
#include "header_list.h"
#include "message_timestamp.h"
namespace cppkafka {
class MessageTimestamp;
class Internal;
/**
* \brief Thin wrapper over a rdkafka message handle
@@ -55,6 +58,12 @@ class MessageTimestamp;
*/
class CPPKAFKA_API Message {
public:
friend class MessageInternal;
using InternalPtr = std::shared_ptr<Internal>;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using HeaderType = Header<Buffer>;
using HeaderListType = HeaderList<HeaderType>;
#endif
/**
* Constructs a message that won't take ownership of the given pointer
*/
@@ -80,47 +89,100 @@ public:
Message& operator=(Message&& rhs) = default;
/**
* Gets the error attribute
* \brief Gets the error attribute
*/
Error get_error() const;
Error get_error() const {
assert(handle_);
return handle_->err;
}
/**
* Utility function to check for get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF
* \brief Utility function to check for get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF
*/
bool is_eof() const;
bool is_eof() const {
return get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF;
}
/**
* Gets the topic that this message belongs to
* \brief Gets the topic that this message belongs to
*/
std::string get_topic() const;
std::string get_topic() const {
assert(handle_);
return handle_->rkt ? rd_kafka_topic_name(handle_->rkt) : std::string{};
}
/**
* Gets the partition that this message belongs to
* \brief Gets the partition that this message belongs to
*/
int get_partition() const;
int get_partition() const {
assert(handle_);
return handle_->partition;
}
/**
* Gets the message's payload
* \brief Gets the message's payload
*/
const Buffer& get_payload() const;
const Buffer& get_payload() const {
return payload_;
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
/**
* \brief Sets the message's header list.
* \note This calls rd_kafka_message_set_headers.
*/
void set_header_list(const HeaderListType& headers) {
assert(handle_);
if (!headers) {
return; //nothing to set
}
rd_kafka_headers_t* handle_copy = rd_kafka_headers_copy(headers.get_handle());
rd_kafka_message_set_headers(handle_.get(), handle_copy);
header_list_ = HeaderListType::make_non_owning(handle_copy);
}
/**
* Gets the message's key
* \brief Gets the message's header list
*/
const Buffer& get_key() const;
const HeaderListType& get_header_list() const {
return header_list_;
}
/**
* Gets the message offset
* \brief Detaches the message's header list
*/
int64_t get_offset() const;
template <typename HeaderType>
HeaderList<HeaderType> detach_header_list() {
rd_kafka_headers_t* headers_handle;
Error error = rd_kafka_message_detach_headers(handle_.get(), &headers_handle);
return error ? HeaderList<HeaderType>() : HeaderList<HeaderType>(headers_handle);
}
#endif
/**
* \brief Gets the private data.
* \brief Gets the message's key
*/
const Buffer& get_key() const {
return key_;
}
/**
* \brief Gets the message offset
*/
int64_t get_offset() const {
assert(handle_);
return handle_->offset;
}
/**
* \brief Gets the private user data.
*
* This should only be used on messages produced by a Producer that had a private data
* attribute set
*/
void* get_private_data() const;
void* get_user_data() const {
return user_data_;
}
/**
* \brief Gets this Message's timestamp
@@ -129,15 +191,46 @@ public:
*/
boost::optional<MessageTimestamp> get_timestamp() const;
#if RD_KAFKA_VERSION >= RD_KAFKA_MESSAGE_LATENCY_SUPPORT_VERSION
/**
* Indicates whether this message is valid (not null)
* \brief Gets the message latency in microseconds as measured from the produce() call.
*/
explicit operator bool() const;
std::chrono::microseconds get_latency() const {
assert(handle_);
return std::chrono::microseconds(rd_kafka_message_latency(handle_.get()));
}
#endif
#if (RD_KAFKA_VERSION >= RD_KAFKA_MESSAGE_STATUS_SUPPORT_VERSION)
/**
* \brief Gets the message persistence status
*/
rd_kafka_msg_status_t get_status() const {
assert(handle_);
return rd_kafka_message_status(handle_.get());
}
#endif
/**
* Gets the rdkafka message handle
* \brief Indicates whether this message is valid (not null)
*/
rd_kafka_message_t* get_handle() const;
explicit operator bool() const {
return handle_ != nullptr;
}
/**
* \brief Gets the rdkafka message handle
*/
rd_kafka_message_t* get_handle() const {
return handle_.get();
}
/**
* \brief Internal private const data accessor (internal use only)
*/
InternalPtr internal() const {
return internal_;
}
private:
using HandlePtr = std::unique_ptr<rd_kafka_message_t, decltype(&rd_kafka_message_destroy)>;
@@ -145,43 +238,19 @@ private:
Message(rd_kafka_message_t* handle, NonOwningTag);
Message(HandlePtr handle);
Message& load_internal();
HandlePtr handle_;
Buffer payload_;
Buffer key_;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
HeaderListType header_list_;
#endif
void* user_data_;
InternalPtr internal_;
};
/**
* Represents a message's timestamp
*/
class CPPKAFKA_API MessageTimestamp {
public:
/**
* The timestamp type
*/
enum TimestampType {
CREATE_TIME = RD_KAFKA_TIMESTAMP_CREATE_TIME,
LOG_APPEND_TIME = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME
};
/**
* Constructs a timestamp object
*/
MessageTimestamp(std::chrono::milliseconds timestamp, TimestampType type);
/**
* Gets the timestamp value
*/
std::chrono::milliseconds get_timestamp() const;
/**
* Gets the timestamp type
*/
TimestampType get_type() const;
private:
std::chrono::milliseconds timestamp_;
TimestampType type_;
};
using MessageList = std::vector<Message>;
} // cppkafka
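With the inlined accessors and header support above, reading a consumed message's headers looks roughly like this (the consumer and the handle_header() call are placeholders):

cppkafka::Message msg = consumer.poll();
if (msg && !msg.get_error()) {
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
    // Non-owning view into the headers owned by the message handle.
    const cppkafka::Message::HeaderListType& headers = msg.get_header_list();
    if (headers) {
        for (const auto& header : headers) {
            handle_header(header.get_name(), header.get_value());
        }
    }
#endif
}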

View File

@@ -34,6 +34,8 @@
#include "buffer.h"
#include "topic.h"
#include "macros.h"
#include "message.h"
#include "header_list.h"
namespace cppkafka {
@@ -41,8 +43,12 @@ namespace cppkafka {
* \brief Base template class for message construction
*/
template <typename BufferType, typename Concrete>
class CPPKAFKA_API BasicMessageBuilder {
class BasicMessageBuilder {
public:
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using HeaderType = Header<BufferType>;
using HeaderListType = HeaderList<HeaderType>;
#endif
/**
* Construct a BasicMessageBuilder
*
@@ -50,6 +56,11 @@ public:
*/
BasicMessageBuilder(std::string topic);
/**
* Construct a BasicMessageBuilder from a Message object
*/
BasicMessageBuilder(const Message& message);
/**
* \brief Construct a message builder from another one that uses a different buffer type
*
@@ -59,7 +70,12 @@ public:
*/
template <typename OtherBufferType, typename OtherConcrete>
BasicMessageBuilder(const BasicMessageBuilder<OtherBufferType, OtherConcrete>& rhs);
template <typename OtherBufferType, typename OtherConcrete>
BasicMessageBuilder(BasicMessageBuilder<OtherBufferType, OtherConcrete>&& rhs);
/**
* Default copy and move constructors and assignment operators
*/
BasicMessageBuilder(BasicMessageBuilder&&) = default;
BasicMessageBuilder(const BasicMessageBuilder&) = default;
BasicMessageBuilder& operator=(BasicMessageBuilder&&) = default;
@@ -93,6 +109,17 @@ public:
*/
Concrete& key(BufferType&& value);
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
/**
* Adds one or more headers to the message
*
* \param header The header to be used
*/
Concrete& header(const HeaderType& header);
Concrete& headers(const HeaderListType& headers);
Concrete& headers(HeaderListType&& headers);
#endif
/**
* Sets the message's payload
*
@@ -108,12 +135,20 @@ public:
Concrete& payload(BufferType&& value);
/**
* Sets the message's timestamp
* Sets the message's timestamp with a 'duration'
*
* \param value The timestamp to be used
*/
Concrete& timestamp(std::chrono::milliseconds value);
/**
* Sets the message's timestamp with a 'time_point'.
*
* \param value The timestamp to be used
*/
template <typename Clock, typename Duration = typename Clock::duration>
Concrete& timestamp(std::chrono::time_point<Clock, Duration> value);
/**
* Sets the message's user data pointer
*
@@ -141,6 +176,18 @@ public:
*/
BufferType& key();
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
/**
* Gets the list of headers
*/
const HeaderListType& header_list() const;
/**
* Gets the list of headers
*/
HeaderListType& header_list();
#endif
/**
* Gets the message's payload
*/
@@ -152,7 +199,8 @@ public:
BufferType& payload();
/**
* Gets the message's timestamp
* Gets the message's timestamp as a duration. If the timestamp was created with a 'time_point',
* the duration represents the number of milliseconds since epoch.
*/
std::chrono::milliseconds timestamp() const;
@@ -160,32 +208,86 @@ public:
* Gets the message's user data pointer
*/
void* user_data() const;
private:
/**
* Private data accessor (internal use only)
*/
Message::InternalPtr internal() const;
Concrete& internal(Message::InternalPtr internal);
protected:
void construct_buffer(BufferType& lhs, const BufferType& rhs);
private:
Concrete& get_concrete();
std::string topic_;
int partition_{-1};
BufferType key_;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
HeaderListType header_list_;
#endif
BufferType payload_;
std::chrono::milliseconds timestamp_{0};
void* user_data_;
Message::InternalPtr internal_;
};
template <typename T, typename C>
BasicMessageBuilder<T, C>::BasicMessageBuilder(std::string topic)
: topic_(std::move(topic)) {
: topic_(std::move(topic)),
user_data_(nullptr) {
}
template <typename T, typename C>
BasicMessageBuilder<T, C>::BasicMessageBuilder(const Message& message)
: topic_(message.get_topic()),
key_(Buffer(message.get_key().get_data(), message.get_key().get_size())),
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
//Here we must explicitly copy the Message headers since they are non-owning and this class
//assumes full ownership. Otherwise we would be holding an invalid handle once the Message goes
//out of scope and rdkafka frees its resources.
header_list_(message.get_header_list() ?
HeaderListType(rd_kafka_headers_copy(message.get_header_list().get_handle())) : HeaderListType()), //copy headers
#endif
payload_(Buffer(message.get_payload().get_data(), message.get_payload().get_size())),
timestamp_(message.get_timestamp() ? message.get_timestamp().get().get_timestamp() :
std::chrono::milliseconds(0)),
user_data_(message.get_user_data()),
internal_(message.internal()) {
}
template <typename T, typename C>
template <typename U, typename V>
BasicMessageBuilder<T, C>::BasicMessageBuilder(const BasicMessageBuilder<U, V>& rhs)
: topic_(rhs.topic()), partition_(rhs.partition()), timestamp_(rhs.timestamp()),
user_data_(rhs.user_data()) {
: topic_(rhs.topic()),
partition_(rhs.partition()),
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
header_list_(rhs.header_list()), //copy headers
#endif
timestamp_(rhs.timestamp()),
user_data_(rhs.user_data()),
internal_(rhs.internal()) {
get_concrete().construct_buffer(key_, rhs.key());
get_concrete().construct_buffer(payload_, rhs.payload());
}
template <typename T, typename C>
template <typename U, typename V>
BasicMessageBuilder<T, C>::BasicMessageBuilder(BasicMessageBuilder<U, V>&& rhs)
: topic_(rhs.topic()),
partition_(rhs.partition()),
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
header_list_(std::move(rhs.header_list())), //assume header ownership
#endif
timestamp_(rhs.timestamp()),
user_data_(rhs.user_data()),
internal_(rhs.internal()) {
get_concrete().construct_buffer(key_, std::move(rhs.key()));
get_concrete().construct_buffer(payload_, std::move(rhs.payload()));
}
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::topic(std::string value) {
topic_ = std::move(value);
@@ -210,6 +312,29 @@ C& BasicMessageBuilder<T, C>::key(T&& value) {
return get_concrete();
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::header(const HeaderType& header) {
if (!header_list_) {
header_list_ = HeaderListType(5);
}
header_list_.add(header);
return get_concrete();
}
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::headers(const HeaderListType& headers) {
header_list_ = headers;
return get_concrete();
}
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::headers(HeaderListType&& headers) {
header_list_ = std::move(headers);
return get_concrete();
}
#endif
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::payload(const T& value) {
get_concrete().construct_buffer(payload_, value);
@@ -228,6 +353,14 @@ C& BasicMessageBuilder<T, C>::timestamp(std::chrono::milliseconds value) {
return get_concrete();
}
template <typename T, typename C>
template <typename Clock, typename Duration>
C& BasicMessageBuilder<T, C>::timestamp(std::chrono::time_point<Clock, Duration> value)
{
timestamp_ = std::chrono::duration_cast<std::chrono::milliseconds>(value.time_since_epoch());
return get_concrete();
}
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::user_data(void* value) {
user_data_ = value;
@@ -254,6 +387,20 @@ T& BasicMessageBuilder<T, C>::key() {
return key_;
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
template <typename T, typename C>
const typename BasicMessageBuilder<T, C>::HeaderListType&
BasicMessageBuilder<T, C>::header_list() const {
return header_list_;
}
template <typename T, typename C>
typename BasicMessageBuilder<T, C>::HeaderListType&
BasicMessageBuilder<T, C>::header_list() {
return header_list_;
}
#endif
template <typename T, typename C>
const T& BasicMessageBuilder<T, C>::payload() const {
return payload_;
@@ -274,6 +421,17 @@ void* BasicMessageBuilder<T, C>::user_data() const {
return user_data_;
}
template <typename T, typename C>
Message::InternalPtr BasicMessageBuilder<T, C>::internal() const {
return internal_;
}
template <typename T, typename C>
C& BasicMessageBuilder<T, C>::internal(Message::InternalPtr internal) {
internal_ = internal;
return get_concrete();
}
template <typename T, typename C>
void BasicMessageBuilder<T, C>::construct_buffer(T& lhs, const T& rhs) {
lhs = rhs;
@@ -300,15 +458,34 @@ C& BasicMessageBuilder<T, C>::get_concrete() {
*/
class MessageBuilder : public BasicMessageBuilder<Buffer, MessageBuilder> {
public:
using BasicMessageBuilder::BasicMessageBuilder;
using Base = BasicMessageBuilder<Buffer, MessageBuilder>;
using BasicMessageBuilder<Buffer, MessageBuilder>::BasicMessageBuilder;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using HeaderType = Base::HeaderType;
using HeaderListType = Base::HeaderListType;
#endif
void construct_buffer(Buffer& lhs, const Buffer& rhs) {
lhs = Buffer(rhs.get_data(), rhs.get_size());
}
template <typename T>
void construct_buffer(Buffer& lhs, const T& rhs) {
lhs = Buffer(rhs);
void construct_buffer(Buffer& lhs, T&& rhs) {
lhs = Buffer(std::forward<T>(rhs));
}
MessageBuilder clone() const {
MessageBuilder builder(topic());
builder.key(Buffer(key().get_data(), key().get_size())).
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
headers(header_list()).
#endif
payload(Buffer(payload().get_data(), payload().get_size())).
timestamp(timestamp()).
user_data(user_data()).
internal(internal());
return builder;
}
};
@@ -318,7 +495,12 @@ public:
template <typename T>
class ConcreteMessageBuilder : public BasicMessageBuilder<T, ConcreteMessageBuilder<T>> {
public:
using Base = BasicMessageBuilder<T, ConcreteMessageBuilder<T>>;
using BasicMessageBuilder<T, ConcreteMessageBuilder<T>>::BasicMessageBuilder;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using HeaderType = typename Base::HeaderType;
using HeaderListType = typename Base::HeaderListType;
#endif
};
} // cppkafka
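Putting the builder additions together: header() lazily allocates the underlying header list, and timestamp() now accepts a time_point as well as a duration. A sketch (topic, key, payload and header values are placeholders, and the producer is assumed to exist):

#include <chrono>
#include <string>

std::string key = "user-42";
std::string payload = "{\"event\":\"login\"}";
std::string trace_id = "abc123";

cppkafka::MessageBuilder builder("some_topic");
builder.partition(0)
       .key(cppkafka::Buffer(key.data(), key.size()))
       .payload(cppkafka::Buffer(payload.data(), payload.size()))
       .timestamp(std::chrono::system_clock::now());   // stored as milliseconds since epoch
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
// The first header() call allocates the header list.
builder.header(cppkafka::MessageBuilder::HeaderType(
    "trace-id", cppkafka::Buffer(trace_id.data(), trace_id.size())));
#endif
producer.produce(builder);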

View File

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_MESSAGE_INTERNAL_H
#define CPPKAFKA_MESSAGE_INTERNAL_H
#include <memory>
#include "macros.h"
namespace cppkafka {
class Message;
class Internal {
public:
virtual ~Internal() = default;
};
using InternalPtr = std::shared_ptr<Internal>;
/**
* \brief Private message data structure
*/
class CPPKAFKA_API MessageInternal {
public:
MessageInternal(void* user_data, std::shared_ptr<Internal> internal);
static std::unique_ptr<MessageInternal> load(Message& message);
void* get_user_data() const;
InternalPtr get_internal() const;
private:
void* user_data_;
InternalPtr internal_;
};
template <typename BuilderType>
class MessageInternalGuard {
public:
MessageInternalGuard(BuilderType& builder)
: builder_(builder),
user_data_(builder.user_data()) {
if (builder_.internal()) {
// Swap contents with user_data
ptr_.reset(new MessageInternal(user_data_, builder_.internal()));
builder_.user_data(ptr_.get()); //overwrite user data
}
}
~MessageInternalGuard() {
//Restore user data
builder_.user_data(user_data_);
}
void release() {
ptr_.release();
}
private:
BuilderType& builder_;
std::unique_ptr<MessageInternal> ptr_;
void* user_data_;
};
}
#endif //CPPKAFKA_MESSAGE_INTERNAL_H
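MessageInternalGuard is an internal helper, but its intended lifecycle is easier to see in a sketch: it temporarily swaps the builder's user data for a MessageInternal wrapper while the message is handed to rdkafka (the builder and the try_produce() step are placeholders):

void produce_with_internal_data(cppkafka::MessageBuilder& builder) {
    cppkafka::MessageInternalGuard<cppkafka::MessageBuilder> guard(builder);
    // If the builder carries internal data, builder.user_data() now points to a
    // heap-allocated MessageInternal wrapping the original pointer.
    bool produced = try_produce(builder);   // placeholder for the actual produce call
    if (produced) {
        // Ownership of the wrapper passes to the delivery report path,
        // so stop the guard from deleting it.
        guard.release();
    }
    // The guard's destructor always restores the builder's original user data pointer.
}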

View File

@@ -0,0 +1,72 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_MESSAGE_TIMESTAMP_H
#define CPPKAFKA_MESSAGE_TIMESTAMP_H
#include <chrono>
#include <librdkafka/rdkafka.h>
#include "macros.h"
namespace cppkafka {
/**
* Represents a message's timestamp
*/
class CPPKAFKA_API MessageTimestamp {
friend class Message;
public:
/**
* The timestamp type
*/
enum TimestampType {
CREATE_TIME = RD_KAFKA_TIMESTAMP_CREATE_TIME,
LOG_APPEND_TIME = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME
};
/**
* Gets the timestamp value. If the timestamp was created with a 'time_point',
* the duration represents the number of milliseconds since epoch.
*/
std::chrono::milliseconds get_timestamp() const;
/**
* Gets the timestamp type
*/
TimestampType get_type() const;
private:
MessageTimestamp(std::chrono::milliseconds timestamp, TimestampType type);
std::chrono::milliseconds timestamp_;
TimestampType type_;
};
} // cppkafka
#endif //CPPKAFKA_MESSAGE_TIMESTAMP_H

View File

@@ -140,7 +140,24 @@ private:
*/
class CPPKAFKA_API Metadata {
public:
Metadata(const rd_kafka_metadata_t* ptr);
/**
* \brief Creates a Metadata object that doesn't take ownership of the handle
*
* \param handle The handle to be used
*/
static Metadata make_non_owning(const rd_kafka_metadata_t* handle);
/**
* \brief Constructs an empty metadata object
*
* \remark Using any methods except Metadata::get_handle on an empty metadata is undefined behavior
*/
Metadata();
/**
* Constructor
*/
Metadata(const rd_kafka_metadata_t* handle);
/**
* Gets the brokers' metadata
@@ -165,9 +182,23 @@ public:
* \param prefix The prefix to be looked up
*/
std::vector<TopicMetadata> get_topics_prefixed(const std::string& prefix) const;
/**
* Indicates whether this metadata is valid (not null)
*/
explicit operator bool() const;
/**
* Returns the rdkafka handle
*/
const rd_kafka_metadata_t* get_handle() const;
private:
using HandlePtr = std::unique_ptr<const rd_kafka_metadata_t, decltype(&rd_kafka_metadata_destroy)>;
struct NonOwningTag { };
Metadata(const rd_kafka_metadata_t* handle, NonOwningTag);
HandlePtr handle_;
};

View File

@@ -43,6 +43,7 @@ namespace cppkafka {
class Topic;
class Buffer;
class TopicConfiguration;
class Message;
/**
* \brief Producer class
@@ -73,52 +74,65 @@ class TopicConfiguration;
* // Write using a key on a fixed partition (42)
* producer.produce(MessageBuilder("some_topic").partition(42).key(key).payload(payload));
*
* // Flush the produced messages
* producer.flush();
*
* \endcode
*/
class CPPKAFKA_API Producer : public KafkaHandleBase {
public:
using KafkaHandleBase::pause;
/**
* The policy to use for the payload. The default policy is COPY_PAYLOAD
*/
enum class PayloadPolicy {
PASSTHROUGH_PAYLOAD = 0, ///< Rdkafka will not copy nor free the payload.
COPY_PAYLOAD = RD_KAFKA_MSG_F_COPY, ///< Means RD_KAFKA_MSG_F_COPY
FREE_PAYLOAD = RD_KAFKA_MSG_F_FREE ///< Means RD_KAFKA_MSG_F_FREE
FREE_PAYLOAD = RD_KAFKA_MSG_F_FREE, ///< Means RD_KAFKA_MSG_F_FREE
BLOCK_ON_FULL_QUEUE = RD_KAFKA_MSG_F_BLOCK ///< Producer will block if the underlying queue is full
};
/**
* Constructs a producer using the given configuration
* \brief Constructs a producer using the given configuration
*
* \param config The configuration to use
*/
Producer(Configuration config);
/**
* Sets the payload policy
* \brief Sets the payload policy
*
* \param policy The payload policy to be used
*/
void set_payload_policy(PayloadPolicy policy);
/**
* Returns the current payload policy
* \brief Returns the current payload policy
*/
PayloadPolicy get_payload_policy() const;
/**
* Produces a message
* \brief Produces a message
*
* \param topic The topic to write the message to
* \param partition The partition to write the message to
* \param payload The message payload
* \param builder The builder class used to compose a message
*/
void produce(const MessageBuilder& builder);
void produce(MessageBuilder&& builder);
/**
* \brief Produces a message
*
* \param message The message to be produced
*/
void produce(const Message& message);
void produce(Message&& message);
/**
* \brief Polls on this handle
*
* This translates into a call to rd_kafka_poll.
*
* The timeout used on this call is the one configured via Producer::set_timeout.
* \remark The timeout used on this call is the one configured via Producer::set_timeout.
*/
int poll();
@@ -136,7 +150,7 @@ public:
*
* This translates into a call to rd_kafka_flush.
*
* The timeout used on this call is the one configured via Producer::set_timeout.
* \remark The timeout used on this call is the one configured via Producer::set_timeout.
*/
void flush();
@@ -149,6 +163,15 @@ public:
*/
void flush(std::chrono::milliseconds timeout);
private:
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
void do_produce(const MessageBuilder& builder, MessageBuilder::HeaderListType&& headers);
void do_produce(const Message& message, MessageBuilder::HeaderListType&& headers);
#else
void do_produce(const MessageBuilder& builder);
void do_produce(const Message& message);
#endif
// Members
PayloadPolicy message_payload_policy_;
};
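The new produce(const Message&) overload makes it straightforward to re-publish a consumed message on its original topic; a sketch (the broker address and the consumed message are placeholders):

void publish_and_forward(const cppkafka::Message& consumed_msg) {
    // Broker address is a placeholder.
    cppkafka::Configuration config = {
        { "metadata.broker.list", "localhost:9092" }
    };
    cppkafka::Producer producer(config);

    // Key, payload and headers travel with the Message object.
    producer.produce(consumed_msg);
    producer.flush();   // uses the timeout configured via set_timeout()
}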

270
include/cppkafka/queue.h Normal file
View File

@@ -0,0 +1,270 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <vector>
#include <memory>
#include <librdkafka/rdkafka.h>
#include "event.h"
#include "macros.h"
#include "message.h"
#ifndef CPPKAFKA_QUEUE_H
#define CPPKAFKA_QUEUE_H
namespace cppkafka {
/**
* \brief Represents a rdkafka queue
*
* This is a simple wrapper over a rd_kafka_queue_t*
*/
class CPPKAFKA_API Queue {
public:
/**
* \brief Creates a Queue object that doesn't take ownership of the handle
*
* \param handle The handle to be used
*/
static Queue make_non_owning(rd_kafka_queue_t* handle);
/**
* \brief Creates a Queue object out of a handle.
*
* This will check what the rdkafka version is and will return either an owned
* queue handle or a non owned one, depending on whether the current version
* is >= RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION (see macros.h)
*
* \param handle The handle to be used
*/
static Queue make_queue(rd_kafka_queue_t* handle);
/**
* \brief Constructs an empty queue
*
* Note that using any methods except Queue::get_handle on an empty queue is undefined
* behavior
*/
Queue();
/**
* \brief Constructs a queue using a handle
*
* This will take ownership of the handle
*
* \param handle The handle to be used
*/
Queue(rd_kafka_queue_t* handle);
/**
* Returns the rdkafka handle
*/
rd_kafka_queue_t* get_handle() const;
/**
* \brief Returns the length of the queue
*
* This translates to a call to rd_kafka_queue_length
*/
size_t get_length() const;
/**
* \brief Forward to another queue
*
* This translates to a call to rd_kafka_queue_forward
*/
void forward_to_queue(const Queue& forward_queue) const;
/**
* \brief Disable forwarding to another queue
*
* This translates to a call to rd_kafka_queue_forward(NULL)
*/
void disable_queue_forwarding() const;
/**
* \brief Sets the timeout for consume operations
*
* This timeout is applied when calling consume()
*
* \param timeout The timeout to be set
*/
void set_timeout(std::chrono::milliseconds timeout);
/**
* Gets the configured timeout.
*
* \sa Queue::set_timeout
*/
std::chrono::milliseconds get_timeout() const;
/**
* \brief Consume a message from this queue
*
* This translates to a call to rd_kafka_consume_queue using the configured timeout for this object
*
* \return A message
*/
Message consume() const;
/**
* \brief Consume a message from this queue
*
* Same as consume() but the specified timeout will be used instead of the configured one
*
* \param timeout The timeout to be used on this call
*
* \return A message
*/
Message consume(std::chrono::milliseconds timeout) const;
/**
* \brief Consumes a batch of messages from this queue
*
* This translates to a call to rd_kafka_consume_batch_queue using the configured timeout
* for this object
*
* \param max_batch_size The max number of messages to consume if available
* \param alloc The optionally supplied allocator for the message list
*
* \return A list of messages. Could be empty if there's nothing to consume
*/
template <typename Allocator>
std::vector<Message, Allocator> consume_batch(size_t max_batch_size,
const Allocator& alloc) const;
/**
* \brief Consumes a batch of messages from this queue
*
* This translates to a call to rd_kafka_consume_batch_queue using the configured timeout
* for this object
*
* \param max_batch_size The max number of messages to consume if available
*
* \return A list of messages. Could be empty if there's nothing to consume
*/
std::vector<Message> consume_batch(size_t max_batch_size) const;
/**
* \brief Consumes a batch of messages from this queue
*
* Same as Queue::consume_batch(size_t) but the specified timeout will be used instead of the
* configured one
*
* \param max_batch_size The max number of messages to consume if available
* \param timeout The timeout to be used on this call
* \param alloc The optionally supplied allocator for the message list
*
* \return A list of messages. Could be empty if there's nothing to consume
*/
template <typename Allocator>
std::vector<Message, Allocator> consume_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc) const;
/**
* \brief Consumes a batch of messages from this queue
*
* Same as Queue::consume_batch(size_t) but the specified timeout will be used instead of the
* configured one
*
* \param max_batch_size The max number of messages to consume if available
* \param timeout The timeout to be used on this call
*
* \return A list of messages. Could be empty if there's nothing to consume
*/
std::vector<Message> consume_batch(size_t max_batch_size,
std::chrono::milliseconds timeout) const;
/**
* \brief Extracts the next event in this Queue
*
* \return The next event, if any
*/
Event next_event() const;
/**
* \brief Extracts the next event in this Queue
*
* \param timeout The amount of time to wait for this operation to complete
*
* \return The next event, if any
*/
Event next_event(std::chrono::milliseconds timeout) const;
/**
* Indicates whether this queue is valid (not null)
*/
explicit operator bool() const {
return handle_ != nullptr;
}
private:
static const std::chrono::milliseconds DEFAULT_TIMEOUT;
using HandlePtr = std::unique_ptr<rd_kafka_queue_t, decltype(&rd_kafka_queue_destroy)>;
struct NonOwningTag { };
Queue(rd_kafka_queue_t* handle, NonOwningTag);
// Members
HandlePtr handle_;
std::chrono::milliseconds timeout_ms_;
};
using QueueList = std::vector<Queue>;
template <typename Allocator>
std::vector<Message, Allocator> Queue::consume_batch(size_t max_batch_size,
const Allocator& alloc) const {
return consume_batch(max_batch_size, timeout_ms_, alloc);
}
template <typename Allocator>
std::vector<Message, Allocator> Queue::consume_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc) const {
std::vector<rd_kafka_message_t*> raw_messages(max_batch_size);
ssize_t result = rd_kafka_consume_batch_queue(handle_.get(),
static_cast<int>(timeout.count()),
raw_messages.data(),
raw_messages.size());
if (result == -1) {
rd_kafka_resp_err_t error = rd_kafka_last_error();
if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
throw QueueException(error);
}
return std::vector<Message, Allocator>(alloc);
}
// Build message list
return std::vector<Message, Allocator>(raw_messages.begin(), raw_messages.begin() + result, alloc);
}
} // cppkafka
#endif //CPPKAFKA_QUEUE_H
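A short consumption sketch using the batch API above (the queue is assumed to have been obtained from a consumer, e.g. via a partition queue accessor, and the batch size and timeout are arbitrary):

void drain(const cppkafka::Queue& queue) {
    // Uses the explicit-timeout overload; the queue's configured timeout is bypassed.
    std::vector<cppkafka::Message> messages =
        queue.consume_batch(32, std::chrono::milliseconds(500));
    for (const auto& msg : messages) {
        if (msg && !msg.get_error()) {
            // process msg.get_payload() ...
        }
    }
}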

View File

@@ -32,7 +32,6 @@
#include <string>
#include <memory>
#include <boost/optional.hpp>
#include <librdkafka/rdkafka.h>
#include "macros.h"
@@ -83,6 +82,13 @@ public:
*/
bool is_partition_available(int partition) const;
/**
* Indicates whether this topic is valid (not null)
*/
explicit operator bool() const {
return handle_ != nullptr;
}
/**
* Returns the rdkafka handle
*/

View File

@@ -107,6 +107,11 @@ public:
*/
int64_t get_offset() const;
/**
* @brief Sets the partition
*/
void set_partition(int partition);
/**
* Sets the offset
*/
@@ -130,7 +135,7 @@ public:
/**
* Print to a stream
*/
friend std::ostream& operator<<(std::ostream& output, const TopicPartition& rhs);
CPPKAFKA_API friend std::ostream& operator<<(std::ostream& output, const TopicPartition& rhs);
private:
std::string topic_;
int partition_;

View File

@@ -34,12 +34,14 @@
#include <iosfwd>
#include <algorithm>
#include <vector>
#include <set>
#include <librdkafka/rdkafka.h>
#include "macros.h"
namespace cppkafka {
class TopicPartition;
class PartitionMetadata;
using TopicPartitionsListPtr = std::unique_ptr<rd_kafka_topic_partition_list_t,
decltype(&rd_kafka_topic_partition_list_destroy)>;
@@ -49,11 +51,23 @@ using TopicPartitionsListPtr = std::unique_ptr<rd_kafka_topic_partition_list_t,
using TopicPartitionList = std::vector<TopicPartition>;
// Conversions between rdkafka handles and TopicPartitionList
CPPKAFKA_API TopicPartitionsListPtr convert(const std::vector<TopicPartition>& topic_partitions);
CPPKAFKA_API std::vector<TopicPartition> convert(const TopicPartitionsListPtr& topic_partitions);
CPPKAFKA_API std::vector<TopicPartition> convert(rd_kafka_topic_partition_list_t* topic_partitions);
CPPKAFKA_API TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions);
CPPKAFKA_API TopicPartitionList convert(const TopicPartitionsListPtr& topic_partitions);
CPPKAFKA_API TopicPartitionList convert(rd_kafka_topic_partition_list_t* topic_partitions);
CPPKAFKA_API TopicPartitionList convert(const std::string& topic,
const std::vector<PartitionMetadata>& partition_metadata);
CPPKAFKA_API TopicPartitionsListPtr make_handle(rd_kafka_topic_partition_list_t* handle);
// Extracts a partition list subset belonging to the provided topics (case-insensitive)
CPPKAFKA_API TopicPartitionList find_matches(const TopicPartitionList& partitions,
const std::set<std::string>& topics);
// Extracts a partition list subset belonging to the provided partition ids
// Note: this assumes that all topic partitions in the original list belong to the same topic;
// otherwise the partition ids may not be unique
CPPKAFKA_API TopicPartitionList find_matches(const TopicPartitionList& partitions,
const std::set<int>& ids);
CPPKAFKA_API std::ostream& operator<<(std::ostream& output, const TopicPartitionList& rhs);
} // cppkafka
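The two find_matches() helpers compose naturally when narrowing an assignment; a sketch (the consumer accessor and the topic/partition values are illustrative):

#include <set>

void narrow_assignment(cppkafka::Consumer& consumer) {
    // get_assignment() is assumed to return the consumer's current TopicPartitionList.
    cppkafka::TopicPartitionList assignment = consumer.get_assignment();
    // Keep only the "orders" partitions, then narrow to specific partition ids
    // (valid because all remaining entries share the same topic).
    cppkafka::TopicPartitionList orders =
        cppkafka::find_matches(assignment, std::set<std::string>{"orders"});
    cppkafka::TopicPartitionList subset =
        cppkafka::find_matches(orders, std::set<int>{0, 2});
}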

View File

@@ -33,7 +33,11 @@
#include <chrono>
#include <functional>
#include <thread>
#include <string>
#include "../consumer.h"
#include "backoff_performer.h"
#include "../detail/callback_invoker.h"
#include "../macros.h"
namespace cppkafka {
@@ -68,30 +72,17 @@ namespace cppkafka {
* committer.commit(some_message);
* \endcode
*/
class BackoffCommitter {
class CPPKAFKA_API BackoffCommitter : public BackoffPerformer {
public:
using TimeUnit = std::chrono::milliseconds;
static constexpr TimeUnit DEFAULT_INITIAL_BACKOFF{100};
static constexpr TimeUnit DEFAULT_BACKOFF_STEP{50};
static constexpr TimeUnit DEFAULT_MAXIMUM_BACKOFF{1000};
/**
* \brief The error callback.
*
* Whenever an error occurs comitting an offset, this callback will be executed using
* Whenever an error occurs committing an offset, this callback will be executed using
* the generated error. While the callback returns true, the offset will be
* committed again until it either succeeds or the callback returns false.
*/
using ErrorCallback = std::function<bool(Error)>;
/**
* The backoff policy to use
*/
enum class BackoffPolicy {
LINEAR,
EXPONENTIAL
};
/**
* \brief Constructs an instance using default values
*
@@ -101,42 +92,6 @@ public:
*/
BackoffCommitter(Consumer& consumer);
/**
* \brief Sets the backoff policy
*
* \param policy The backoff policy to be used
*/
void set_backoff_policy(BackoffPolicy policy);
/**
* \brief Sets the initial backoff
*
* The first time a commit fails, this will be the delay between that failed request
* and the retry
*
* \param value The value to be used
*/
void set_initial_backoff(TimeUnit value);
/**
* \brief Sets the backoff step
*
* When using the linear backoff policy, this will be the delay between sending a request
* that fails and re-trying it
*
* \param value The value to be used
*/
void set_backoff_step(TimeUnit value);
/**
* \brief Sets the maximum backoff
*
* The backoff used will never be larger than this number
*
* \param value The value to be used
*/
void set_maximum_backoff(TimeUnit value);
/**
* \brief Sets the error callback
*
@@ -145,10 +100,18 @@ public:
*/
void set_error_callback(ErrorCallback callback);
/**
* \brief Commits the current partition assignment synchronously
*
* This will call Consumer::commit() until either the message is successfully
* committed or the error callback returns false (if any is set).
*/
void commit();
/**
* \brief Commits the given message synchronously
*
* This will call Consumer::commit until either the message is successfully
* This will call Consumer::commit(msg) until either the message is successfully
* committed or the error callback returns false (if any is set).
*
* \param msg The message to be committed
@@ -158,49 +121,48 @@ public:
/**
* \brief Commits the offsets on the given topic/partitions synchronously
*
* This will call Consumer::commit until either the offsets are successfully
* This will call Consumer::commit(topic_partitions) until either the offsets are successfully
* committed or the error callback returns false (if any is set).
*
* \param topic_partitions The topic/partition list to be committed
*/
void commit(const TopicPartitionList& topic_partitions);
private:
TimeUnit increase_backoff(TimeUnit backoff);
template <typename T>
void do_commit(const T& object) {
TimeUnit backoff = initial_backoff_;
while (true) {
auto start = std::chrono::steady_clock::now();
/**
* \brief Get the internal Consumer object
*
* \return A reference to the Consumer
*/
Consumer& get_consumer();
private:
// Returns true when committing is finished (the commit succeeded or there was nothing
// to commit) and false so the backoff performer retries. If the error callback returns
// false, the exception is re-thrown to abort the whole operation.
template <typename...Args>
bool do_commit(Args&&...args) {
try {
consumer_.commit(object);
// If the commit succeeds, we're done
return;
consumer_.commit(std::forward<Args>(args)...);
// If the commit succeeds, we're done.
return true;
}
catch (const HandleException& ex) {
// If there's a callback and it returns false for this message, abort
if (callback_ && !callback_(ex.get_error())) {
return;
Error error = ex.get_error();
// If there were actually no offsets to commit, return. Retrying won't solve
// anything here.
if (error == RD_KAFKA_RESP_ERR__NO_OFFSET) {
return true; //not considered an error.
}
// If there's a callback and it returns false for this message, abort.
// Otherwise keep committing.
CallbackInvoker<ErrorCallback> callback("backoff committer", callback_, &consumer_);
if (callback && !callback(error)) {
throw ex; //abort
}
}
auto end = std::chrono::steady_clock::now();
auto time_elapsed = end - start;
// If we still have time left, then sleep
if (time_elapsed < backoff) {
std::this_thread::sleep_for(backoff - time_elapsed);
}
// Increase our backoff depending on the policy being used
backoff = increase_backoff(backoff);
}
return false; //continue
}
Consumer& consumer_;
TimeUnit initial_backoff_;
TimeUnit backoff_step_;
TimeUnit maximum_backoff_;
ErrorCallback callback_;
BackoffPolicy policy_;
};
} // cppkafka
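A brief sketch of how the refactored committer might be used, assuming "consumer" is a connected cppkafka::Consumer; the retry and backoff knobs now come from the BackoffPerformer base shown in the following file:

cppkafka::BackoffCommitter committer(consumer);
committer.set_maximum_retries(5);
committer.set_error_callback([](cppkafka::Error error) {
    // Returning true keeps retrying this commit; returning false gives up on it
    return true;
});
cppkafka::Message msg = consumer.poll();
if (msg && !msg.get_error()) {
    // Retries with backoff until it succeeds, the callback returns false,
    // or the retry limit is reached
    committer.commit(msg);
}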

View File

@@ -0,0 +1,153 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_BACKOFF_PERFORMER_H
#define CPPKAFKA_BACKOFF_PERFORMER_H
#include <chrono>
#include <functional>
#include <thread>
#include "../consumer.h"
#include "../exceptions.h"
namespace cppkafka {
/**
* \brief Helper that retries an arbitrary action, backing off between attempts
* according to a configurable policy (linear or exponential) and giving up after
* a maximum number of retries.
*/
class CPPKAFKA_API BackoffPerformer {
public:
using TimeUnit = std::chrono::milliseconds;
static const TimeUnit DEFAULT_INITIAL_BACKOFF;
static const TimeUnit DEFAULT_BACKOFF_STEP;
static const TimeUnit DEFAULT_MAXIMUM_BACKOFF;
static const size_t DEFAULT_MAXIMUM_RETRIES;
/**
* The backoff policy to use
*/
enum class BackoffPolicy {
LINEAR,
EXPONENTIAL
};
/**
* Constructs an instance of backoff performer
*
* By default, the linear backoff policy is used
*/
BackoffPerformer();
/**
* \brief Sets the backoff policy
*
* \param policy The backoff policy to be used
*/
void set_backoff_policy(BackoffPolicy policy);
/**
* \brief Sets the initial backoff
*
* The first time a commit fails, this will be the delay between that failed request
* and the retry
*
* \param value The value to be used
*/
void set_initial_backoff(TimeUnit value);
/**
* \brief Sets the backoff step
*
* When using the linear backoff policy, this will be the delay between sending a request
* that fails and re-trying it
*
* \param value The value to be used
*/
void set_backoff_step(TimeUnit value);
/**
* \brief Sets the maximum backoff
*
* The backoff used will never be larger than this number
*
* \param value The value to be used
*/
void set_maximum_backoff(TimeUnit value);
/**
* \brief Sets the maximum number of retries for the commit operation
*
* \param value The number of retries before giving up
*
* \remark Setting value to 0 is equivalent to 1, i.e. it will try at least once
*/
void set_maximum_retries(size_t value);
/**
* \brief Executes an action and backs off if it fails
*
* This will call the functor and will retry in case it returns false
*
* \param callback The action to be executed
*/
template <typename Functor>
void perform(const Functor& callback) {
TimeUnit backoff = initial_backoff_;
size_t retries = maximum_retries_;
while (retries--) {
auto start = std::chrono::steady_clock::now();
// If the callback returns true, we're done
if (callback()) {
return; //success
}
auto end = std::chrono::steady_clock::now();
auto time_elapsed = end - start;
// If we still have time left, then sleep
if (time_elapsed < backoff) {
std::this_thread::sleep_for(backoff - time_elapsed);
}
// Increase our backoff depending on the policy being used
backoff = increase_backoff(backoff);
}
// No more retries left or we have a terminal error.
throw ActionTerminatedException("Commit failed: no more retries.");
}
private:
TimeUnit increase_backoff(TimeUnit backoff);
TimeUnit initial_backoff_;
TimeUnit backoff_step_;
TimeUnit maximum_backoff_;
BackoffPolicy policy_;
size_t maximum_retries_;
};
} // cppkafka
#endif // CPPKAFKA_BACKOFF_PERFORMER_H
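An illustrative sketch of the generic retry helper on its own; attempt_operation() is a hypothetical function returning bool, and perform() throws ActionTerminatedException once the retry budget is exhausted:

cppkafka::BackoffPerformer performer;
performer.set_backoff_policy(cppkafka::BackoffPerformer::BackoffPolicy::EXPONENTIAL);
performer.set_initial_backoff(std::chrono::milliseconds(100));
performer.set_maximum_backoff(std::chrono::milliseconds(2000));
performer.set_maximum_retries(10);
performer.perform([&]() {
    // Return true once the operation succeeds; false triggers a backoff and another try
    return attempt_operation();
});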

File diff suppressed because it is too large

View File

@@ -37,6 +37,7 @@
#include <boost/optional.hpp>
#include "../buffer.h"
#include "../consumer.h"
#include "../macros.h"
namespace cppkafka {
/**

View File

@@ -0,0 +1,373 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_CONSUMER_DISPATCHER_H
#define CPPKAFKA_CONSUMER_DISPATCHER_H
#include <tuple>
#include "../consumer.h"
#include "backoff_performer.h"
namespace cppkafka {
/**
* \brief Helper to perform pattern matching when consuming messages
*
* As the way to consume messages requires you to:
*
* * Poll for a message
* * Check if it's not null
* * Check if it's an error (optionally handling EOF as a non error)
* * Process the message
*
* This class introduces a pattern matching based approach to consuming messages
* so the usual loop is simplified away and you can process messages without
* having to check for all those cases.
*
* When calling BasicConsumerDispatcher::run, a list of callbacks has to be provided.
* These will handle each case (message, timeout, error, eof), allowing you to
* only provide what you need. The only callback that is required is the message one.
* For the rest, the following actions will be performed as defaults:
*
* * Timeout: ignore
* * EOF: ignore
* * Error (not an EOF error): throw a ConsumerException exception
*
* The signature for each callback should be as follows (or compatible):
*
* * Message callback, either:
* - void(Message)
* - Message(Message). In this case if the message is returned, it will be buffered
* while calling the throttle callback until the message is actually processed.
* * Timeout: void(BasicConsumerDispatcher::Timeout)
* * Error: void(Error)
* * EOF: void(BasicConsumerDispatcher::EndOfFile, TopicPartition)
*/
template <typename ConsumerType>
class BasicConsumerDispatcher {
public:
/**
* Tag to indicate a timeout occurred
*/
struct Timeout {};
/**
* Tag to indicate end of file was reached on a partition being consumed
*/
struct EndOfFile {};
/**
* Tag to indicate a message was returned by the message callback and is being
* throttled until it is actually processed
*/
struct Throttle {};
/**
* Tag to indicate there was some event processed (message, timeout, error, etc)
*/
struct Event {};
/**
* Constructs a consumer dispatcher over the given consumer
*
* \param consumer The consumer to be used
*/
BasicConsumerDispatcher(ConsumerType& consumer);
/**
* \brief Consumes messages, dispatching events to the appropriate callback
*
* This will loop until BasicConsumerDispatcher::stop is called
*
* \param args The list of callbacks to be executed
*/
template <typename... Args>
void run(const Args&... args);
/**
* \brief Stops consumption
*
* Note that as this is synchronous, if there are any poll operations currently in
* progress, consumption will only stop after the current call returns
*/
void stop();
private:
// Define the types we need for each type of callback
using OnMessageArgs = std::tuple<Message>;
using OnErrorArgs = std::tuple<Error>;
using OnEofArgs = std::tuple<EndOfFile, TopicPartition>;
using OnTimeoutArgs = std::tuple<Timeout>;
using OnEventArgs = std::tuple<Event>;
static void handle_error(Error error);
static void handle_eof(EndOfFile, const TopicPartition& /*topic_partition*/) { }
static void handle_timeout(Timeout) { }
static void handle_event(Event) { }
template <typename Functor>
void handle_throttle(Throttle, const Functor& callback, Message msg) {
BackoffPerformer{}.perform([&]() {
if (!running_) {
return true;
}
msg = callback(std::move(msg));
if (msg) {
// Poll so we send heartbeats to the brokers
consumer_.poll();
}
return !msg;
});
}
// Simple RAII wrapper for pausing/resuming
template <typename C>
class Pauser {
public:
Pauser(C& consumer, const TopicPartitionList& topic_partitions)
: consumer_(consumer), topic_partitions_(topic_partitions) {
consumer_.pause_partitions(topic_partitions_);
}
~Pauser() {
consumer_.resume_partitions(topic_partitions_);
}
Pauser(const Pauser&) = delete;
Pauser& operator=(const Pauser&) = delete;
private:
C& consumer_;
TopicPartitionList topic_partitions_;
};
// Traits and template helpers
// Finds whether type T accepts arguments of types Args...
template <typename T, typename... Args>
struct takes_arguments {
using yes = double;
using no = bool;
template <typename Functor>
static yes test(decltype(std::declval<Functor&>()(std::declval<Args>()...))*);
template <typename Functor>
static no test(...);
static constexpr bool value = sizeof(test<T>(nullptr)) == sizeof(yes);
};
// Specialization for tuple
template <typename T, typename... Args>
struct takes_arguments<T, std::tuple<Args...>> : takes_arguments<T, Args...> {
};
template <typename T>
struct identity {
using type = T;
};
// Placeholder to indicate a type wasn't found
struct type_not_found {
};
// find_type: given a tuple of types and a list of functors, finds the functor
// type that accepts the given tuple types as parameters
template <typename Tuple, typename Functor, typename... Functors>
struct find_type_helper {
using type = typename std::conditional<takes_arguments<Functor, Tuple>::value,
identity<Functor>,
find_type_helper<Tuple, Functors...>
>::type::type;
};
template <typename Tuple>
struct find_type_helper<Tuple, type_not_found> {
using type = type_not_found;
};
template <typename Tuple, typename... Functors>
struct find_type {
using type = typename find_type_helper<Tuple, Functors..., type_not_found>::type;
};
// find_functor: given a Functor and a template parameter pack of functors, finds
// the one that matches the given type
template <typename Functor>
struct find_functor_helper {
template <typename... Functors>
static const Functor& find(const Functor& arg, Functors&&...) {
return arg;
}
template <typename Head, typename... Functors>
static typename std::enable_if<!std::is_same<Head, Functor>::value, const Functor&>::type
find(const Head&, const Functors&... functors) {
return find(functors...);
}
};
template <typename Functor, typename... Args>
const Functor& find_functor(const Args&... args) {
return find_functor_helper<Functor>::find(args...);
}
// Finds the first functor that accepts the parameters in a tuple and returns it. If no
// such functor is found, a static assertion will occur
template <typename Tuple, typename... Functors>
const typename find_type<Tuple, Functors...>::type&
find_matching_functor(const Functors&... functors) {
using type = typename find_type<Tuple, Functors...>::type;
static_assert(!std::is_same<type_not_found, type>::value, "Valid functor not found");
return find_functor<type>(functors...);
}
// Check that a given functor matches at least one of the expected signatures
template <typename Functor>
void check_callback_matches(const Functor& functor) {
static_assert(
!std::is_same<type_not_found,
typename find_type<OnMessageArgs, Functor>::type>::value ||
!std::is_same<type_not_found,
typename find_type<OnEofArgs, Functor>::type>::value ||
!std::is_same<type_not_found,
typename find_type<OnTimeoutArgs, Functor>::type>::value ||
!std::is_same<type_not_found,
typename find_type<OnErrorArgs, Functor>::type>::value ||
!std::is_same<type_not_found,
typename find_type<OnEventArgs, Functor>::type>::value,
"Callback doesn't match any of the expected signatures"
);
}
// Base case for recursion
void check_callbacks_match() {
}
// Check that all given functors match at least one of the expected signatures
template <typename Functor, typename... Functors>
void check_callbacks_match(const Functor& functor, const Functors&... functors) {
check_callback_matches(functor);
check_callbacks_match(functors...);
}
template <typename Functor, typename... Functors>
auto process_message(const Functor& callback, Message msg, const Functors&...)
-> typename std::enable_if<std::is_same<void, decltype(callback(std::move(msg)))>::value,
void>::type {
callback(std::move(msg));
}
template <typename Functor, typename... Functors>
auto process_message(const Functor& callback, Message msg, const Functors&... functors)
-> typename std::enable_if<std::is_same<Message, decltype(callback(std::move(msg)))>::value,
void>::type {
const auto throttle_ptr = &BasicConsumerDispatcher::handle_throttle<Functor>;
const auto default_throttler = std::bind(throttle_ptr, this, std::placeholders::_1,
std::placeholders::_2, std::placeholders::_3);
using OnThrottleArgs = std::tuple<Throttle, const Functor&, Message>;
const auto on_throttle = find_matching_functor<OnThrottleArgs>(functors...,
default_throttler);
msg = callback(std::move(msg));
// The callback rejected the message, start throttling
if (msg) {
// Pause consumption. When the pauser goes off scope, it will resume it
Pauser<ConsumerType> pauser(consumer_, consumer_.get_assignment());
// Handle throttling on this message
on_throttle(Throttle{}, callback, std::move(msg));
}
}
ConsumerType& consumer_;
bool running_;
};
using ConsumerDispatcher = BasicConsumerDispatcher<Consumer>;
template <typename ConsumerType>
BasicConsumerDispatcher<ConsumerType>::BasicConsumerDispatcher(ConsumerType& consumer)
: consumer_(consumer) {
}
template <typename ConsumerType>
void BasicConsumerDispatcher<ConsumerType>::stop() {
running_ = false;
}
template <typename ConsumerType>
void BasicConsumerDispatcher<ConsumerType>::handle_error(Error error) {
throw ConsumerException(error);
}
template <typename ConsumerType>
template <typename... Args>
void BasicConsumerDispatcher<ConsumerType>::run(const Args&... args) {
using self = BasicConsumerDispatcher<ConsumerType>;
// Make sure all callbacks match one of the signatures. Otherwise users could provide
// bogus callbacks that would never be executed
check_callbacks_match(args...);
// This one is required
const auto on_message = find_matching_functor<OnMessageArgs>(args...);
// For the rest, append our own implementation at the end as a fallback
const auto on_error = find_matching_functor<OnErrorArgs>(args..., &self::handle_error);
const auto on_eof = find_matching_functor<OnEofArgs>(args..., &self::handle_eof);
const auto on_timeout = find_matching_functor<OnTimeoutArgs>(args..., &self::handle_timeout);
const auto on_event = find_matching_functor<OnEventArgs>(args..., &self::handle_event);
running_ = true;
while (running_) {
Message msg = consumer_.poll();
if (!msg) {
on_timeout(Timeout{});
}
else if (msg.get_error()) {
if (msg.is_eof()) {
on_eof(EndOfFile{}, { msg.get_topic(), msg.get_partition(), msg.get_offset() });
}
else {
on_error(msg.get_error());
}
}
else {
process_message(on_message, std::move(msg), args...);
}
on_event(Event{});
}
}
} // cppkafka
#endif // CPPKAFKA_CONSUMER_DISPATCHER_H
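A hedged usage sketch for the dispatcher above, assuming "consumer" is already subscribed and <iostream> is included; only the message callback is mandatory, the others fall back to the defaults described in the class comment:

cppkafka::ConsumerDispatcher dispatcher(consumer);
dispatcher.run(
    // Required: invoked for every valid message
    [&](cppkafka::Message msg) {
        std::cout << msg.get_payload() << std::endl;
    },
    // Optional: invoked for errors other than partition EOF
    [](cppkafka::Error error) {
        std::cerr << "consumer error: " << error.to_string() << std::endl;
    },
    // Optional: invoked when a partition reaches end of file
    [](cppkafka::ConsumerDispatcher::EndOfFile, const cppkafka::TopicPartition& tp) {
        std::cout << "reached EOF on " << tp << std::endl;
    }
);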

View File

@@ -0,0 +1,130 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_POLL_INTERFACE_H
#define CPPKAFKA_POLL_INTERFACE_H
#include "../consumer.h"
namespace cppkafka {
/**
* \interface PollInterface
*
* \brief Interface defining polling methods for the Consumer class
*/
struct PollInterface {
virtual ~PollInterface() = default;
/**
* \brief Get the underlying consumer controlled by this strategy
*
* \return A reference to the consumer instance
*/
virtual Consumer& get_consumer() = 0;
/**
* \brief Sets the timeout for polling functions
*
* This calls Consumer::set_timeout
*
* \param timeout The timeout to be set
*/
virtual void set_timeout(std::chrono::milliseconds timeout) = 0;
/**
* \brief Gets the timeout for polling functions
*
* This calls Consumer::get_timeout
*
* \return The timeout
*/
virtual std::chrono::milliseconds get_timeout() = 0;
/**
* \brief Polls all assigned partitions for new messages in round-robin fashion
*
* Each call to poll() will first consume from the global event queue and if there are
* no pending events, will attempt to consume from all partitions until a valid message is found.
* The timeout used on this call will be the one configured via PollInterface::set_timeout.
*
* \return A message. The returned message *might* be empty. It's necessary to check
* that it's a valid one before using it (see example above).
*
* \remark You need to call poll() or poll_batch() periodically as a keep alive mechanism,
* otherwise the broker will think this consumer is down and will trigger a rebalance
* (if using dynamic subscription)
*/
virtual Message poll() = 0;
/**
* \brief Polls for new messages
*
* Same as the other overload of PollInterface::poll but the provided
* timeout will be used instead of the one configured on this Consumer.
*
* \param timeout The timeout to be used on this call
*/
virtual Message poll(std::chrono::milliseconds timeout) = 0;
/**
* \brief Polls all assigned partitions for a batch of new messages in round-robin fashion
*
* Each call to poll_batch() will first attempt to consume from the global event queue
* and if the maximum batch number has not yet been filled, will attempt to fill it by
* reading the remaining messages from each partition.
*
* \param max_batch_size The maximum amount of messages expected
*
* \return A list of messages
*
* \remark You need to call poll() or poll_batch() periodically as a keep alive mechanism,
* otherwise the broker will think this consumer is down and will trigger a rebalance
* (if using dynamic subscription)
*/
virtual std::vector<Message> poll_batch(size_t max_batch_size) = 0;
/**
* \brief Polls all assigned partitions for a batch of new messages in round-robin fashion
*
* Same as the other overload of PollInterface::poll_batch but the provided
* timeout will be used instead of the one configured on this Consumer.
*
* \param max_batch_size The maximum amount of messages expected
*
* \param timeout The timeout for this operation
*
* \return A list of messages
*/
virtual std::vector<Message> poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout) = 0;
};
} //cppkafka
#endif //CPPKAFKA_POLL_INTERFACE_H
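A small sketch of a polling loop written against the interface rather than a concrete strategy; it assumes the caller breaks out of the loop by other means (for example a signal handler) and treats empty messages as timeouts:

void drain(cppkafka::PollInterface& poller) {
    poller.set_timeout(std::chrono::milliseconds(100));
    while (true) {
        cppkafka::Message msg = poller.poll();
        if (!msg) {
            continue; // timeout; keep polling so the group still sees this consumer as alive
        }
        if (msg.get_error()) {
            if (!msg.is_eof()) {
                throw cppkafka::ConsumerException(msg.get_error());
            }
            continue; // end of partition reached; nothing to process
        }
        // process msg.get_payload() here
    }
}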

View File

@@ -0,0 +1,181 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_POLL_STRATEGY_BASE_H
#define CPPKAFKA_POLL_STRATEGY_BASE_H
#include <map>
#include <boost/any.hpp>
#include "../queue.h"
#include "../topic_partition_list.h"
#include "poll_interface.h"
#include "../macros.h"
namespace cppkafka {
/**
* \brief Contains a partition queue and generic metadata which can be used to store
* related (user-specific) information.
*/
struct QueueData {
Queue queue;
boost::any metadata;
};
/**
* \class PollStrategyBase
*
* \brief Base implementation of the PollInterface
*/
class CPPKAFKA_API PollStrategyBase : public PollInterface {
public:
using QueueMap = std::map<TopicPartition, QueueData>;
/**
* \brief Constructor
*
* \param consumer A reference to the polled consumer instance
*/
explicit PollStrategyBase(Consumer& consumer);
/**
* \brief Destructor
*/
~PollStrategyBase();
/**
* \sa PollInterface::set_timeout
*/
void set_timeout(std::chrono::milliseconds timeout) override;
/**
* \sa PollInterface::get_timeout
*/
std::chrono::milliseconds get_timeout() override;
/**
* \sa PollInterface::get_consumer
*/
Consumer& get_consumer() final;
/**
* \brief Creates partition queues associated with the supplied partitions.
*
* This method contains a default implementation. It adds all the new queues belonging
* to the provided partition list and calls reset_state().
* To be used with static consumers.
*
* \param partitions Assigned topic partitions.
*/
virtual void assign(TopicPartitionList& partitions);
/**
* \brief Removes partition queues associated with the supplied partitions.
*
* This method contains a default implementation. It removes all the queues
* belonging to the provided partition list and calls reset_state().
* To be used with static consumers.
*
* \param partitions Revoked topic partitions.
*/
virtual void revoke(const TopicPartitionList& partitions);
/**
* \brief Removes all currently assigned partition queues.
*
* This method contains a default implementation. It removes all the queues
* currently assigned and calls reset_state(). To be used with static consumers.
*/
virtual void revoke();
protected:
/**
* \brief Get the queues from all assigned partitions
*
* \return A map of queues indexed by partition
*/
QueueMap& get_partition_queues();
/**
* \brief Get the main consumer queue which services the underlying Consumer object
*
* \return The consumer queue
*/
QueueData& get_consumer_queue();
/**
* \brief Reset the internal state of the queues.
*
* Use this function to reset the state of any polling strategy or algorithm.
*
* \remark This function gets called by on_assignment(), on_revocation() and on_rebalance_error()
*/
virtual void reset_state();
/**
* \brief Function to be called when a new partition assignment takes place
*
* This method contains a default implementation. It calls assign()
* and invokes the user assignment callback.
*
* \param partitions Assigned topic partitions
*/
virtual void on_assignment(TopicPartitionList& partitions);
/**
* \brief Function to be called when an old partition assignment gets revoked
*
* This method contains a default implementation. It calls revoke()
* and invokes the user revocation callback.
*
* \param partitions Revoked topic partitions
*/
virtual void on_revocation(const TopicPartitionList& partitions);
/**
* \brief Function to be called when a topic rebalance error happens
*
* This method contains a default implementation. Calls reset_state().
*
* \param error The rebalance error
*/
virtual void on_rebalance_error(Error error);
private:
Consumer& consumer_;
QueueData consumer_queue_;
QueueMap partition_queues_;
Consumer::AssignmentCallback assignment_callback_;
Consumer::RevocationCallback revocation_callback_;
Consumer::RebalanceErrorCallback rebalance_error_callback_;
};
} //cppkafka
#endif //CPPKAFKA_POLL_STRATEGY_BASE_H

View File

@@ -0,0 +1,191 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_ROUNDROBIN_POLL_STRATEGY_H
#define CPPKAFKA_ROUNDROBIN_POLL_STRATEGY_H
#include <map>
#include <string>
#include "../exceptions.h"
#include "../consumer.h"
#include "../queue.h"
#include "poll_strategy_base.h"
namespace cppkafka {
/**
* \brief This adapter changes the default polling strategy of the Consumer into a fair round-robin
* polling mechanism.
*
* The default librdkafka (and cppkafka) poll() and poll_batch() behavior is to consume batches of
* messages from each partition in turn. For performance reasons, librdkafka pre-fetches batches
* of messages from the kafka broker (one batch from each partition), and stores them locally in
* partition queues. Since all the internal partition queues are forwarded by default onto the
* group consumer queue (one per consumer), these batches end up being polled and consumed in the
* same sequence order.
* This adapter allows fair round-robin polling of all assigned partitions, one message at a time
* (or one batch at a time if poll_batch() is used). Note that poll_batch() has nothing to do with
* the internal batching mechanism of librdkafka.
*
* Example code on how to use this:
*
* \code
* // Create a consumer
* Consumer consumer(...);
* consumer.subscribe({ "my_topic" });
*
* // Optionally set the callbacks. This must be done *BEFORE* creating the strategy adapter
* consumer.set_assignment_callback(...);
* consumer.set_revocation_callback(...);
* consumer.set_rebalance_error_callback(...);
*
* // Create the adapter and use it for polling
* RoundRobinPollStrategy poll_strategy(consumer);
*
* while (true) {
* // Poll each partition in turn
* Message msg = poll_strategy.poll();
* if (msg) {
* // process valid message
* }
* }
* \endcode
*
* \warning Calling poll() or poll_batch() directly on the Consumer object while using this adapter will
* lead to undesired results since the RoundRobinPollStrategy modifies the internal queuing mechanism of
* the Consumer instance it owns.
*/
class RoundRobinPollStrategy : public PollStrategyBase {
public:
RoundRobinPollStrategy(Consumer& consumer);
~RoundRobinPollStrategy();
/**
* \sa PollInterface::poll
*/
Message poll() override;
/**
* \sa PollInterface::poll
*/
Message poll(std::chrono::milliseconds timeout) override;
/**
* \sa PollInterface::poll_batch
*/
template <typename Allocator>
std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
const Allocator& alloc);
std::vector<Message> poll_batch(size_t max_batch_size) override;
/**
* \sa PollInterface::poll_batch
*/
template <typename Allocator>
std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc);
std::vector<Message> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout) override;
protected:
/**
* \sa PollStrategyBase::reset_state
*/
void reset_state() final;
QueueData& get_next_queue();
private:
template <typename Allocator>
void consume_batch(Queue& queue,
std::vector<Message, Allocator>& messages,
ssize_t& count,
std::chrono::milliseconds timeout,
const Allocator& alloc);
void restore_forwarding();
// Members
QueueMap::iterator queue_iter_;
};
// Implementations
template <typename Allocator>
std::vector<Message, Allocator> RoundRobinPollStrategy::poll_batch(size_t max_batch_size,
const Allocator& alloc) {
return poll_batch(max_batch_size, get_consumer().get_timeout(), alloc);
}
template <typename Allocator>
std::vector<Message, Allocator> RoundRobinPollStrategy::poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout,
const Allocator& alloc) {
std::vector<Message, Allocator> messages(alloc);
ssize_t count = max_batch_size;
// batch from the group event queue first (non-blocking)
consume_batch(get_consumer_queue().queue, messages, count, std::chrono::milliseconds(0), alloc);
size_t num_queues = get_partition_queues().size();
while ((count > 0) && (num_queues--)) {
// batch from the next partition (non-blocking)
consume_batch(get_next_queue().queue, messages, count, std::chrono::milliseconds(0), alloc);
}
// we still have space left in the buffer
if (count > 0) {
// wait on the event queue until timeout
consume_batch(get_consumer_queue().queue, messages, count, timeout, alloc);
}
return messages;
}
template <typename Allocator>
void RoundRobinPollStrategy::consume_batch(Queue& queue,
std::vector<Message, Allocator>& messages,
ssize_t& count,
std::chrono::milliseconds timeout,
const Allocator& alloc) {
std::vector<Message, Allocator> queue_messages = queue.consume_batch(count, timeout, alloc);
if (queue_messages.empty()) {
return;
}
// concatenate both lists
messages.insert(messages.end(),
make_move_iterator(queue_messages.begin()),
make_move_iterator(queue_messages.end()));
// reduce total batch count
count -= queue_messages.size();
}
} //cppkafka
#endif //CPPKAFKA_ROUNDROBIN_POLL_STRATEGY_H
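A short sketch of batch polling through the adapter, assuming "consumer" is the subscribed Consumer from the class comment above:

cppkafka::RoundRobinPollStrategy poll_strategy(consumer);
// Fetch up to 100 messages, waiting at most 500ms once the local queues are drained
std::vector<cppkafka::Message> batch = poll_strategy.poll_batch(100, std::chrono::milliseconds(500));
for (const cppkafka::Message& msg : batch) {
    if (msg && !msg.get_error()) {
        // handle msg.get_payload() ...
    }
}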

View File

@@ -5,31 +5,104 @@ set(SOURCES
exceptions.cpp
topic.cpp
buffer.cpp
queue.cpp
message.cpp
message_timestamp.cpp
message_internal.cpp
topic_partition.cpp
topic_partition_list.cpp
metadata.cpp
group_information.cpp
error.cpp
event.cpp
kafka_handle_base.cpp
producer.cpp
consumer.cpp
utils/backoff_performer.cpp
utils/backoff_committer.cpp
utils/poll_strategy_base.cpp
utils/roundrobin_poll_strategy.cpp
)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/cppkafka)
include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${RDKAFKA_INCLUDE_DIR})
set(TARGET_NAME cppkafka)
set(PKG_DIR "${CMAKE_BINARY_DIR}/package")
set(PKG_CONFIG_FILE "${PKG_DIR}/${TARGET_NAME}.pc")
set(CONFIG_FILE "${PKG_DIR}/${PROJECT_NAME}Config.cmake")
set(VERSION_FILE "${PKG_DIR}/${PROJECT_NAME}ConfigVersion.cmake")
set(FIND_RDKAFKA_FILE "${PROJECT_SOURCE_DIR}/cmake/FindRdKafka.cmake")
set(NAMESPACE "${PROJECT_NAME}::")
set(TARGET_EXPORT_NAME ${PROJECT_NAME}Targets)
add_library(cppkafka ${CPPKAFKA_LIBRARY_TYPE} ${SOURCES})
set_target_properties(cppkafka PROPERTIES VERSION ${CPPKAFKA_VERSION}
add_library(${TARGET_NAME} ${CPPKAFKA_LIBRARY_TYPE} ${SOURCES})
target_compile_features(${TARGET_NAME} PUBLIC cxx_std_11)
target_include_directories(${TARGET_NAME} PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include/cppkafka>)
set_target_properties(${TARGET_NAME} PROPERTIES
ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_INSTALL_LIBDIR}"
ARCHIVE_OUTPUT_NAME "${TARGET_NAME}"
LIBRARY_OUTPUT_DIRECTORY "${CMAKE_INSTALL_LIBDIR}"
LIBRARY_OUTPUT_NAME "${TARGET_NAME}"
INSTALL_RPATH "${CMAKE_INSTALL_LIBDIR}"
INSTALL_RPATH_USE_LINK_PATH TRUE
VERSION ${CPPKAFKA_VERSION}
SOVERSION ${CPPKAFKA_VERSION})
target_link_libraries(cppkafka ${RDKAFKA_LIBRARY})
# In CMake >= 3.15 Boost::boost == Boost::headers
target_link_libraries(${TARGET_NAME} PUBLIC RdKafka::rdkafka Boost::boost)
if (WIN32)
# On windows ntohs and related are in ws2_32
target_link_libraries(${TARGET_NAME} PUBLIC ws2_32.lib)
endif()
# Install cppkafka target and specify all properties needed for the exported file
install(
TARGETS cppkafka
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
COMPONENT dev
TARGETS ${TARGET_NAME}
EXPORT ${TARGET_EXPORT_NAME}
COMPONENT binaries
LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
)
if (CPPKAFKA_EXPORT_PKGCONFIG)
# Generate and install pkgconfig file
configure_file(${PROJECT_SOURCE_DIR}/cmake/cppkafka.pc.in ${PKG_CONFIG_FILE} @ONLY)
install(
FILES ${PKG_CONFIG_FILE}
DESTINATION "${CPPKAFKA_PKGCONFIG_DIR}"
COMPONENT pkgconfig
)
endif()
if (CPPKAFKA_EXPORT_CMAKE_CONFIG)
# Install the exported file
install(
EXPORT "${TARGET_EXPORT_NAME}"
NAMESPACE "${NAMESPACE}"
COMPONENT config
DESTINATION "${CPPKAFKA_CONFIG_DIR}"
)
# Generate CMAKE configuration file and exported targets
configure_package_config_file(
"${PROJECT_SOURCE_DIR}/cmake/config.cmake.in"
"${CONFIG_FILE}"
INSTALL_DESTINATION "${CPPKAFKA_CONFIG_DIR}"
PATH_VARS RDKAFKA_MIN_VERSION_HEX CMAKE_INSTALL_PREFIX CMAKE_INSTALL_INCLUDEDIR CMAKE_INSTALL_LIBDIR
)
# Generate version file
write_basic_package_version_file(
"${VERSION_FILE}"
VERSION ${CPPKAFKA_VERSION}
COMPATIBILITY AnyNewerVersion
)
install(
FILES "${CONFIG_FILE}" "${VERSION_FILE}" "${FIND_RDKAFKA_FILE}"
DESTINATION "${CPPKAFKA_CONFIG_DIR}"
COMPONENT config
)
endif()

View File

@@ -34,6 +34,7 @@
using std::string;
using std::equal;
using std::lexicographical_compare;
using std::ostream;
using std::hex;
using std::dec;
@@ -101,4 +102,22 @@ bool operator!=(const Buffer& lhs, const Buffer& rhs) {
return !(lhs == rhs);
}
bool operator<(const Buffer& lhs, const Buffer& rhs) {
return lexicographical_compare(lhs.get_data(), lhs.get_data() + lhs.get_size(),
rhs.get_data(), rhs.get_data() + rhs.get_size());
}
bool operator>(const Buffer& lhs, const Buffer& rhs) {
return lexicographical_compare(rhs.get_data(), rhs.get_data() + rhs.get_size(),
lhs.get_data(), lhs.get_data() + lhs.get_size());
}
bool operator<=(const Buffer& lhs, const Buffer& rhs) {
return !(lhs > rhs);
}
bool operator>=(const Buffer& lhs, const Buffer& rhs) {
return !(lhs < rhs);
}
} // cppkafka
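A tiny sketch of the new comparison operators, which order buffers lexicographically over their raw bytes; it assumes <cassert> and <string> are included:

std::string first = "apple";
std::string second = "banana";
cppkafka::Buffer lhs(first.data(), first.size());
cppkafka::Buffer rhs(second.data(), second.size());
assert(lhs < rhs);  // byte-wise lexicographical comparison
assert(rhs >= lhs);
assert(lhs != rhs);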

View File

@@ -40,10 +40,8 @@ using std::map;
using std::move;
using std::vector;
using std::initializer_list;
using boost::optional;
using std::chrono::milliseconds;
using boost::optional;
namespace cppkafka {
@@ -52,66 +50,63 @@ namespace cppkafka {
void delivery_report_callback_proxy(rd_kafka_t*, const rd_kafka_message_t* msg, void *opaque) {
Producer* handle = static_cast<Producer*>(opaque);
Message message = Message::make_non_owning((rd_kafka_message_t*)msg);
const auto& callback = handle->get_configuration().get_delivery_report_callback();
if (callback) {
callback(*handle, message);
}
CallbackInvoker<Configuration::DeliveryReportCallback>
("delivery report", handle->get_configuration().get_delivery_report_callback(), handle)
(*handle, message);
}
void offset_commit_callback_proxy(rd_kafka_t*, rd_kafka_resp_err_t err,
rd_kafka_topic_partition_list_t *offsets, void *opaque) {
Consumer* handle = static_cast<Consumer*>(opaque);
TopicPartitionList list = offsets ? convert(offsets) : TopicPartitionList{};
const auto& callback = handle->get_configuration().get_offset_commit_callback();
if (callback) {
callback(*handle, err, list);
}
CallbackInvoker<Configuration::OffsetCommitCallback>
("offset commit", handle->get_configuration().get_offset_commit_callback(), handle)
(*handle, err, list);
}
void error_callback_proxy(rd_kafka_t*, int err, const char *reason, void *opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
const auto& callback = handle->get_configuration().get_error_callback();
if (callback) {
callback(*handle, err, reason);
}
CallbackInvoker<Configuration::ErrorCallback>
("error", handle->get_configuration().get_error_callback(), handle)
(*handle, err, reason);
}
void throttle_callback_proxy(rd_kafka_t*, const char* broker_name,
int32_t broker_id, int throttle_time_ms, void *opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
const auto& callback = handle->get_configuration().get_throttle_callback();
if (callback) {
callback(*handle, broker_name, broker_id, milliseconds(throttle_time_ms));
}
CallbackInvoker<Configuration::ThrottleCallback>
("throttle", handle->get_configuration().get_throttle_callback(), handle)
(*handle, broker_name, broker_id, milliseconds(throttle_time_ms));
}
void log_callback_proxy(const rd_kafka_t* h, int level,
const char* facility, const char* message) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(rd_kafka_opaque(h));
const auto& callback = handle->get_configuration().get_log_callback();
if (callback) {
callback(*handle, level, facility, message);
}
CallbackInvoker<Configuration::LogCallback>
("log", handle->get_configuration().get_log_callback(), nullptr)
(*handle, level, facility, message);
}
int stats_callback_proxy(rd_kafka_t*, char *json, size_t json_len, void *opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
const auto& callback = handle->get_configuration().get_stats_callback();
if (callback) {
callback(*handle, string(json, json + json_len));
}
CallbackInvoker<Configuration::StatsCallback>
("statistics", handle->get_configuration().get_stats_callback(), handle)
(*handle, string(json, json + json_len));
return 0;
}
int socket_callback_proxy(int domain, int type, int protocol, void* opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
const auto& callback = handle->get_configuration().get_socket_callback();
if (callback) {
return callback(domain, type, protocol);
}
else {
return -1;
}
return CallbackInvoker<Configuration::SocketCallback>
("socket", handle->get_configuration().get_socket_callback(), handle)
(domain, type, protocol);
}
void background_event_callback_proxy(rd_kafka_t*, rd_kafka_event_t* event_ptr, void *opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
CallbackInvoker<Configuration::BackgroundEventCallback>
("background_event", handle->get_configuration().get_background_event_callback(), handle)
(*handle, Event{event_ptr});
}
// Configuration
@@ -189,6 +184,19 @@ Configuration& Configuration::set_socket_callback(SocketCallback callback) {
return *this;
}
#if RD_KAFKA_VERSION >= RD_KAFKA_ADMIN_API_SUPPORT_VERSION
Configuration& Configuration::set_background_event_callback(BackgroundEventCallback callback) {
background_event_callback_ = move(callback);
rd_kafka_conf_set_background_event_cb(handle_.get(), &background_event_callback_proxy);
return *this;
}
Configuration& Configuration::set_events(int events) {
rd_kafka_conf_set_events(handle_.get(), events);
return *this;
}
#endif
Configuration&
Configuration::set_default_topic_configuration(TopicConfiguration config) {
default_topic_config_ = std::move(config);
@@ -251,6 +259,11 @@ const Configuration::SocketCallback& Configuration::get_socket_callback() const
return socket_callback_;
}
const Configuration::BackgroundEventCallback&
Configuration::get_background_event_callback() const {
return background_event_callback_;
}
const optional<TopicConfiguration>& Configuration::get_default_topic_configuration() const {
return default_topic_config_;
}
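A minimal configuration sketch showing the callback-proxy path above in use; the broker address and group id are placeholders:

cppkafka::Configuration config = {
    { "metadata.broker.list", "localhost:9092" },
    { "group.id",             "example-group" }
};
config.set_error_callback([](cppkafka::KafkaHandleBase& handle, int error, const std::string& reason) {
    // Forwarded through error_callback_proxy whenever librdkafka reports an error
    std::cerr << "librdkafka error " << error << " on " << handle.get_name() << ": " << reason << std::endl;
});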

View File

@@ -26,18 +26,26 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sstream>
#include <algorithm>
#include <cctype>
#include "macros.h"
#include "consumer.h"
#include "exceptions.h"
#include "logging.h"
#include "configuration.h"
#include "topic_partition_list.h"
#include "detail/callback_invoker.h"
using std::vector;
using std::string;
using std::move;
using std::make_tuple;
using std::ostringstream;
using std::chrono::milliseconds;
using std::toupper;
using std::equal;
using std::allocator;
namespace cppkafka {
@@ -65,7 +73,29 @@ Consumer::Consumer(Configuration config)
}
Consumer::~Consumer() {
try {
// make sure to destroy the function closures. in case they hold kafka
// objects, they will need to be destroyed before we destroy the handle
assignment_callback_ = nullptr;
revocation_callback_ = nullptr;
rebalance_error_callback_ = nullptr;
close();
}
catch (const HandleException& ex) {
ostringstream error_msg;
error_msg << "Failed to close consumer [" << get_name() << "]: " << ex.what();
CallbackInvoker<Configuration::ErrorCallback> error_cb("error", get_configuration().get_error_callback(), this);
CallbackInvoker<Configuration::LogCallback> logger_cb("log", get_configuration().get_log_callback(), nullptr);
if (error_cb) {
error_cb(*this, static_cast<int>(ex.get_error().get_error()), error_msg.str());
}
else if (logger_cb) {
logger_cb(*this, static_cast<int>(LogLevel::LogErr), "cppkafka", error_msg.str());
}
else {
rd_kafka_log_print(get_handle(), static_cast<int>(LogLevel::LogErr), "cppkafka", error_msg.str().c_str());
}
}
}
void Consumer::set_assignment_callback(AssignmentCallback callback) {
@@ -93,11 +123,10 @@ void Consumer::unsubscribe() {
}
void Consumer::assign(const TopicPartitionList& topic_partitions) {
rd_kafka_resp_err_t error;
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
// If the list is empty, then we need to use a null pointer
auto handle = topic_partitions.empty() ? nullptr : topic_list_handle.get();
rd_kafka_resp_err_t error = rd_kafka_assign(get_handle(), handle);
check_error(error);
error = rd_kafka_assign(get_handle(), topic_list_handle.get());
check_error(error, topic_list_handle.get());
}
void Consumer::unassign() {
@@ -105,6 +134,22 @@ void Consumer::unassign() {
check_error(error);
}
void Consumer::pause() {
pause_partitions(get_assignment());
}
void Consumer::resume() {
resume_partitions(get_assignment());
}
void Consumer::commit() {
commit(nullptr, false);
}
void Consumer::async_commit() {
commit(nullptr, true);
}
void Consumer::commit(const Message& msg) {
commit(msg, false);
}
@@ -114,11 +159,11 @@ void Consumer::async_commit(const Message& msg) {
}
void Consumer::commit(const TopicPartitionList& topic_partitions) {
commit(topic_partitions, false);
commit(&topic_partitions, false);
}
void Consumer::async_commit(const TopicPartitionList& topic_partitions) {
commit(topic_partitions, true);
commit(&topic_partitions, true);
}
KafkaHandleBase::OffsetTuple Consumer::get_offsets(const TopicPartition& topic_partition) const {
@@ -134,10 +179,16 @@ KafkaHandleBase::OffsetTuple Consumer::get_offsets(const TopicPartition& topic_p
TopicPartitionList
Consumer::get_offsets_committed(const TopicPartitionList& topic_partitions) const {
return get_offsets_committed(topic_partitions, get_timeout());
}
TopicPartitionList
Consumer::get_offsets_committed(const TopicPartitionList& topic_partitions,
milliseconds timeout) const {
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
rd_kafka_resp_err_t error = rd_kafka_committed(get_handle(), topic_list_handle.get(),
static_cast<int>(get_timeout().count()));
check_error(error);
static_cast<int>(timeout.count()));
check_error(error, topic_list_handle.get());
return convert(topic_list_handle);
}
@@ -145,10 +196,27 @@ TopicPartitionList
Consumer::get_offsets_position(const TopicPartitionList& topic_partitions) const {
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
rd_kafka_resp_err_t error = rd_kafka_position(get_handle(), topic_list_handle.get());
check_error(error);
check_error(error, topic_list_handle.get());
return convert(topic_list_handle);
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION)
void Consumer::store_consumed_offsets() const {
store_offsets(get_offsets_position(get_assignment()));
}
void Consumer::store_offsets(const TopicPartitionList& topic_partitions) const {
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
rd_kafka_resp_err_t error = rd_kafka_offsets_store(get_handle(), topic_list_handle.get());
check_error(error, topic_list_handle.get());
}
#endif
void Consumer::store_offset(const Message& msg) const {
rd_kafka_resp_err_t error = rd_kafka_offset_store(msg.get_handle()->rkt, msg.get_partition(), msg.get_offset());
check_error(error);
}
vector<string> Consumer::get_subscription() const {
rd_kafka_resp_err_t error;
rd_kafka_topic_partition_list_t* list = nullptr;
@@ -192,9 +260,33 @@ Message Consumer::poll() {
}
Message Consumer::poll(milliseconds timeout) {
rd_kafka_message_t* message = rd_kafka_consumer_poll(get_handle(),
static_cast<int>(timeout.count()));
return message ? Message(message) : Message();
return rd_kafka_consumer_poll(get_handle(), static_cast<int>(timeout.count()));
}
std::vector<Message> Consumer::poll_batch(size_t max_batch_size) {
return poll_batch(max_batch_size, get_timeout(), allocator<Message>());
}
std::vector<Message> Consumer::poll_batch(size_t max_batch_size, milliseconds timeout) {
return poll_batch(max_batch_size, timeout, allocator<Message>());
}
Queue Consumer::get_main_queue() const {
Queue queue = Queue::make_queue(rd_kafka_queue_get_main(get_handle()));
queue.disable_queue_forwarding();
return queue;
}
Queue Consumer::get_consumer_queue() const {
return Queue::make_queue(rd_kafka_queue_get_consumer(get_handle()));
}
Queue Consumer::get_partition_queue(const TopicPartition& partition) const {
Queue queue = Queue::make_queue(rd_kafka_queue_get_partition(get_handle(),
partition.get_topic().c_str(),
partition.get_partition()));
queue.disable_queue_forwarding();
return queue;
}
void Consumer::close() {
@@ -204,36 +296,35 @@ void Consumer::close() {
void Consumer::commit(const Message& msg, bool async) {
rd_kafka_resp_err_t error;
error = rd_kafka_commit_message(get_handle(), msg.get_handle(),
async ? 1 : 0);
error = rd_kafka_commit_message(get_handle(), msg.get_handle(), async ? 1 : 0);
check_error(error);
}
void Consumer::commit(const TopicPartitionList& topic_partitions, bool async) {
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
void Consumer::commit(const TopicPartitionList* topic_partitions, bool async) {
rd_kafka_resp_err_t error;
error = rd_kafka_commit(get_handle(), topic_list_handle.get(), async ? 1 : 0);
if (topic_partitions == nullptr) {
error = rd_kafka_commit(get_handle(), nullptr, async ? 1 : 0);
check_error(error);
}
else {
TopicPartitionsListPtr topic_list_handle = convert(*topic_partitions);
error = rd_kafka_commit(get_handle(), topic_list_handle.get(), async ? 1 : 0);
check_error(error, topic_list_handle.get());
}
}
void Consumer::handle_rebalance(rd_kafka_resp_err_t error,
TopicPartitionList& topic_partitions) {
if (error == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
if (assignment_callback_) {
assignment_callback_(topic_partitions);
}
CallbackInvoker<AssignmentCallback>("assignment", assignment_callback_, this)(topic_partitions);
assign(topic_partitions);
}
else if (error == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) {
if (revocation_callback_) {
revocation_callback_(topic_partitions);
}
CallbackInvoker<RevocationCallback>("revocation", revocation_callback_, this)(topic_partitions);
unassign();
}
else {
if (rebalance_error_callback_) {
rebalance_error_callback_(error);
}
CallbackInvoker<RebalanceErrorCallback>("rebalance error", rebalance_error_callback_, this)(error);
unassign();
}
}
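A brief sketch of the manual offset workflow enabled by the new methods, typically used with enable.auto.offset.store=false and assuming "consumer" already has an assignment:

cppkafka::Message msg = consumer.poll();
if (msg && !msg.get_error()) {
    // ... process the message ...
    consumer.store_offset(msg); // stage the offset for this message
}
consumer.commit();              // synchronously commit the current assignment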

src/event.cpp (new file, 93 additions)
View File

@@ -0,0 +1,93 @@
/*
* Copyright (c) 2018, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "event.h"
using std::allocator;
using std::string;
using std::unique_ptr;
using std::vector;
namespace cppkafka {
Event::Event(rd_kafka_event_t* handle)
: handle_(handle, &rd_kafka_event_destroy) {
}
string Event::get_name() const {
return rd_kafka_event_name(handle_.get());
}
rd_kafka_event_type_t Event::get_type() const {
return rd_kafka_event_type(handle_.get());
}
Message Event::get_next_message() const {
// Note: the constness in rd_kafka_event_message_next's return value is not needed and it
// breaks Message's interface. This is dirty but it looks like it should have no side effects.
const auto message =
const_cast<rd_kafka_message_t*>(rd_kafka_event_message_next(handle_.get()));
return Message::make_non_owning(message);
}
vector<Message> Event::get_messages() {
return get_messages(allocator<Message>());
}
size_t Event::get_message_count() const {
return rd_kafka_event_message_count(handle_.get());
}
Error Event::get_error() const {
return rd_kafka_event_error(handle_.get());
}
void* Event::get_opaque() const {
return rd_kafka_event_opaque(handle_.get());
}
TopicPartition Event::get_topic_partition() const {
using TopparHandle = unique_ptr<rd_kafka_topic_partition_t,
decltype(&rd_kafka_topic_partition_destroy)>;
TopparHandle toppar_handle{rd_kafka_event_topic_partition(handle_.get()),
&rd_kafka_topic_partition_destroy};
return TopicPartition(toppar_handle->topic, toppar_handle->partition, toppar_handle->offset);
}
TopicPartitionList Event::get_topic_partition_list() const {
auto toppars_handle = rd_kafka_event_topic_partition_list(handle_.get());
return convert(toppars_handle);
}
Event::operator bool() const {
return !!handle_;
}
} // cppkafka
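An illustrative inspection helper for the new Event wrapper; in practice the Event would arrive through the background event callback added in the configuration diff above, and <iostream> is assumed to be included:

void inspect(const cppkafka::Event& event) {
    if (!event) {
        return; // empty handle
    }
    std::cout << "event " << event.get_name() << " (type " << event.get_type() << ")" << std::endl;
    if (event.get_error()) {
        std::cerr << "event error: " << event.get_error().to_string() << std::endl;
    }
}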

View File

@@ -97,4 +97,33 @@ Error HandleException::get_error() const {
return error_;
}
// ConsumerException
ConsumerException::ConsumerException(Error error)
: Exception(error.to_string()), error_(error) {
}
Error ConsumerException::get_error() const {
return error_;
}
// QueueException
QueueException::QueueException(Error error)
: Exception(error.to_string()), error_(error) {
}
Error QueueException::get_error() const {
return error_;
}
// ActionTerminatedException
ActionTerminatedException::ActionTerminatedException(const string& error)
: Exception(error) {
}
} // cppkafka
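A short sketch of catching the exception types defined above; "poll_strategy" stands in for any poller, for example the RoundRobinPollStrategy shown earlier:

try {
    cppkafka::Message msg = poll_strategy.poll();
    // ...
}
catch (const cppkafka::QueueException& ex) {
    std::cerr << "queue error: " << ex.get_error().to_string() << std::endl;
}
catch (const cppkafka::ConsumerException& ex) {
    std::cerr << "consumer error: " << ex.what() << std::endl;
}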

View File

@@ -48,7 +48,7 @@ namespace cppkafka {
const milliseconds KafkaHandleBase::DEFAULT_TIMEOUT{1000};
KafkaHandleBase::KafkaHandleBase(Configuration config)
: handle_(nullptr, nullptr), timeout_ms_(DEFAULT_TIMEOUT), config_(move(config)) {
: timeout_ms_(DEFAULT_TIMEOUT), config_(move(config)), handle_(nullptr, HandleDeleter(this)), destroy_flags_(0) {
auto& maybe_config = config_.get_default_topic_configuration();
if (maybe_config) {
maybe_config->set_as_opaque();
@@ -61,20 +61,32 @@ void KafkaHandleBase::pause_partitions(const TopicPartitionList& topic_partition
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
rd_kafka_resp_err_t error = rd_kafka_pause_partitions(get_handle(),
topic_list_handle.get());
check_error(error);
check_error(error, topic_list_handle.get());
}
void KafkaHandleBase::pause(const std::string& topic) {
pause_partitions(convert(topic, get_metadata(get_topic(topic)).get_partitions()));
}
void KafkaHandleBase::resume_partitions(const TopicPartitionList& topic_partitions) {
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
rd_kafka_resp_err_t error = rd_kafka_resume_partitions(get_handle(),
topic_list_handle.get());
check_error(error);
check_error(error, topic_list_handle.get());
}
void KafkaHandleBase::resume(const std::string& topic) {
resume_partitions(convert(topic, get_metadata(get_topic(topic)).get_partitions()));
}
void KafkaHandleBase::set_timeout(milliseconds timeout) {
timeout_ms_ = timeout;
}
void KafkaHandleBase::set_log_level(LogLevel level) {
rd_kafka_set_log_level(handle_.get(), static_cast<int>(level));
}
void KafkaHandleBase::add_brokers(const string& brokers) {
rd_kafka_brokers_add(handle_.get(), brokers.data());
}
@@ -96,24 +108,40 @@ Topic KafkaHandleBase::get_topic(const string& name, TopicConfiguration config)
KafkaHandleBase::OffsetTuple
KafkaHandleBase::query_offsets(const TopicPartition& topic_partition) const {
return query_offsets(topic_partition, timeout_ms_);
}
KafkaHandleBase::OffsetTuple
KafkaHandleBase::query_offsets(const TopicPartition& topic_partition,
milliseconds timeout) const {
int64_t low;
int64_t high;
const string& topic = topic_partition.get_topic();
const int partition = topic_partition.get_partition();
const int timeout = static_cast<int>(timeout_ms_.count());
const int timeout_ms = static_cast<int>(timeout.count());
rd_kafka_resp_err_t result = rd_kafka_query_watermark_offsets(handle_.get(), topic.data(),
partition, &low, &high,
timeout);
timeout_ms);
check_error(result);
return make_tuple(low, high);
}
Metadata KafkaHandleBase::get_metadata(bool all_topics) const {
return get_metadata(all_topics, nullptr);
return get_metadata(all_topics, nullptr, timeout_ms_);
}
Metadata KafkaHandleBase::get_metadata(bool all_topics,
milliseconds timeout) const {
return get_metadata(all_topics, nullptr, timeout);
}
TopicMetadata KafkaHandleBase::get_metadata(const Topic& topic) const {
Metadata md = get_metadata(false, topic.get_handle());
return get_metadata(topic, timeout_ms_);
}
TopicMetadata KafkaHandleBase::get_metadata(const Topic& topic,
milliseconds timeout) const {
Metadata md = get_metadata(false, topic.get_handle(), timeout);
auto topics = md.get_topics();
if (topics.empty()) {
throw ElementNotFound("topic metadata", topic.get_name());
@@ -122,7 +150,12 @@ TopicMetadata KafkaHandleBase::get_metadata(const Topic& topic) const {
}
GroupInformation KafkaHandleBase::get_consumer_group(const string& name) {
auto result = fetch_consumer_groups(name.c_str());
return get_consumer_group(name, timeout_ms_);
}
GroupInformation KafkaHandleBase::get_consumer_group(const string& name,
milliseconds timeout) {
auto result = fetch_consumer_groups(name.c_str(), timeout);
if (result.empty()) {
throw ElementNotFound("consumer group information", name);
}
@@ -130,11 +163,21 @@ GroupInformation KafkaHandleBase::get_consumer_group(const string& name) {
}
vector<GroupInformation> KafkaHandleBase::get_consumer_groups() {
return fetch_consumer_groups(nullptr);
return get_consumer_groups(timeout_ms_);
}
vector<GroupInformation> KafkaHandleBase::get_consumer_groups(milliseconds timeout) {
return fetch_consumer_groups(nullptr, timeout);
}
TopicPartitionList
KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queries) const {
return get_offsets_for_times(queries, timeout_ms_);
}
TopicPartitionList
KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queries,
milliseconds timeout) const {
TopicPartitionList topic_partitions;
for (const auto& query : queries) {
const TopicPartition& topic_partition = query.first;
@@ -142,10 +185,10 @@ KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queri
query.second.count());
}
TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
const int timeout = static_cast<int>(timeout_ms_.count());
const int timeout_ms = static_cast<int>(timeout.count());
rd_kafka_resp_err_t result = rd_kafka_offsets_for_times(handle_.get(), topic_list_handle.get(),
timeout);
check_error(result);
timeout_ms);
check_error(result, topic_list_handle.get());
return convert(topic_list_handle);
}
@@ -165,31 +208,38 @@ int KafkaHandleBase::get_out_queue_length() const {
return rd_kafka_outq_len(handle_.get());
}
void KafkaHandleBase::yield() const {
rd_kafka_yield(handle_.get());
}
void KafkaHandleBase::set_handle(rd_kafka_t* handle) {
handle_ = HandlePtr(handle, &rd_kafka_destroy);
handle_ = HandlePtr(handle, HandleDeleter(this));
}
Topic KafkaHandleBase::get_topic(const string& name, rd_kafka_topic_conf_t* conf) {
rd_kafka_topic_t* topic = rd_kafka_topic_new(get_handle(), name.data(), conf);
if (!topic) {
throw HandleException(rd_kafka_errno2err(errno));
throw HandleException(rd_kafka_last_error());
}
return Topic(topic);
}
Metadata KafkaHandleBase::get_metadata(bool all_topics, rd_kafka_topic_t* topic_ptr) const {
Metadata KafkaHandleBase::get_metadata(bool all_topics,
rd_kafka_topic_t* topic_ptr,
milliseconds timeout) const {
const rd_kafka_metadata_t* metadata;
const int timeout = static_cast<int>(timeout_ms_.count());
const int timeout_ms = static_cast<int>(timeout.count());
rd_kafka_resp_err_t error = rd_kafka_metadata(get_handle(), !!all_topics,
topic_ptr, &metadata, timeout);
topic_ptr, &metadata, timeout_ms);
check_error(error);
return Metadata(metadata);
}
vector<GroupInformation> KafkaHandleBase::fetch_consumer_groups(const char* name) {
vector<GroupInformation> KafkaHandleBase::fetch_consumer_groups(const char* name,
milliseconds timeout) {
const rd_kafka_group_list* list = nullptr;
const int timeout = static_cast<int>(timeout_ms_.count());
auto result = rd_kafka_list_groups(get_handle(), name, &list, timeout);
const int timeout_ms = static_cast<int>(timeout.count());
auto result = rd_kafka_list_groups(get_handle(), name, &list, timeout_ms);
check_error(result);
// Wrap this in a unique_ptr so it gets auto deleted
@@ -216,8 +266,44 @@ void KafkaHandleBase::check_error(rd_kafka_resp_err_t error) const {
}
}
void KafkaHandleBase::check_error(rd_kafka_resp_err_t error,
const rd_kafka_topic_partition_list_t* list_ptr) const {
if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
throw HandleException(error);
}
if (list_ptr) {
// Check if any partition has errors
for (int i = 0; i < list_ptr->cnt; ++i) {
if (list_ptr->elems[i].err != RD_KAFKA_RESP_ERR_NO_ERROR) {
throw HandleException(list_ptr->elems[i].err);
}
}
}
}
rd_kafka_conf_t* KafkaHandleBase::get_configuration_handle() {
return config_.get_handle();
}
#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
void KafkaHandleBase::set_destroy_flags(int destroy_flags) {
destroy_flags_ = destroy_flags;
}
int KafkaHandleBase::get_destroy_flags() const {
return destroy_flags_;
}
#endif
void KafkaHandleBase::HandleDeleter::operator()(rd_kafka_t* handle) {
#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
rd_kafka_destroy_flags(handle, handle_base_ptr_->get_destroy_flags());
#else
rd_kafka_destroy(handle);
#endif
}
} // cppkafka
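
A short sketch of the per-call timeout overloads and destroy flags added above; the configuration behind the consumer and the particular destroy flag are assumptions for illustration:

#include <chrono>
#include <cppkafka/consumer.h>
#include <cppkafka/metadata.h>

void inspect_cluster(cppkafka::Consumer& consumer) {
    using std::chrono::milliseconds;
    // Override the handle-wide default timeout for a single metadata request
    cppkafka::Metadata metadata = consumer.get_metadata(true, milliseconds(500));
    // List consumer groups with a longer, call-specific timeout
    auto groups = consumer.get_consumer_groups(milliseconds(2000));
    (void)metadata;
    (void)groups;
#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
    // Example only: skip the consumer close sequence when the handle is destroyed
    consumer.set_destroy_flags(RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
#endif
}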

View File

@@ -28,14 +28,10 @@
*/
#include "message.h"
using std::string;
#include "message_internal.h"
using std::chrono::milliseconds;
using boost::optional;
using boost::none_t;
namespace cppkafka {
void dummy_deleter(rd_kafka_message_t*) {
@@ -47,7 +43,8 @@ Message Message::make_non_owning(rd_kafka_message_t* handle) {
}
Message::Message()
: handle_(nullptr, nullptr) {
: handle_(nullptr, nullptr),
user_data_(nullptr) {
}
@@ -63,74 +60,38 @@ Message::Message(rd_kafka_message_t* handle, NonOwningTag)
Message::Message(HandlePtr handle)
: handle_(move(handle)),
payload_((const Buffer::DataType*)handle_->payload, handle_->len),
key_((const Buffer::DataType*)handle_->key, handle_->key_len) {
payload_(handle_ ? Buffer((const Buffer::DataType*)handle_->payload, handle_->len) : Buffer()),
key_(handle_ ? Buffer((const Buffer::DataType*)handle_->key, handle_->key_len) : Buffer()),
user_data_(handle_ ? handle_->_private : nullptr) {
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
// get the header list if any
if (handle_) {
rd_kafka_headers_t* headers_handle;
Error error = rd_kafka_message_headers(handle_.get(), &headers_handle);
if (!error) {
header_list_ = HeaderListType::make_non_owning(headers_handle);
}
}
#endif
}
Error Message::get_error() const {
return handle_->err;
Message& Message::load_internal() {
if (user_data_) {
MessageInternal* mi = static_cast<MessageInternal*>(user_data_);
user_data_ = mi->get_user_data();
internal_ = mi->get_internal();
}
return *this;
}
bool Message::is_eof() const {
return get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF;
}
int Message::get_partition() const {
return handle_->partition;
}
string Message::get_topic() const {
return rd_kafka_topic_name(handle_->rkt);
}
const Buffer& Message::get_payload() const {
return payload_;
}
const Buffer& Message::get_key() const {
return key_;
}
int64_t Message::get_offset() const {
return handle_->offset;
}
void* Message::get_private_data() const {
return handle_->_private;
}
optional<MessageTimestamp> Message::get_timestamp() const {
boost::optional<MessageTimestamp> Message::get_timestamp() const {
rd_kafka_timestamp_type_t type = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
int64_t timestamp = rd_kafka_message_timestamp(handle_.get(), &type);
if (timestamp == -1 || type == RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
return {};
}
return MessageTimestamp(milliseconds(timestamp),
return MessageTimestamp(std::chrono::milliseconds(timestamp),
static_cast<MessageTimestamp::TimestampType>(type));
}
Message::operator bool() const {
return handle_ != nullptr;
}
rd_kafka_message_t* Message::get_handle() const {
return handle_.get();
}
// MessageTimestamp
MessageTimestamp::MessageTimestamp(milliseconds timestamp, TimestampType type)
: timestamp_(timestamp), type_(type) {
}
milliseconds MessageTimestamp::get_timestamp() const {
return timestamp_;
}
MessageTimestamp::TimestampType MessageTimestamp::get_type() const {
return type_;
}
} // cppkafka
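
Since get_timestamp() now returns a boost::optional, a minimal sketch of checking it before use:

#include <iostream>
#include <cppkafka/message.h>

void print_message(const cppkafka::Message& msg) {
    if (!msg) {
        return; // empty handle
    }
    std::cout << msg.get_topic() << "[" << msg.get_partition() << "] @ "
              << msg.get_offset() << "\n";
    // get_timestamp() is empty when librdkafka reports no timestamp for the message
    if (const auto timestamp = msg.get_timestamp()) {
        std::cout << "timestamp (ms): " << timestamp->get_timestamp().count() << "\n";
    }
}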

src/message_internal.cpp (new file)

@@ -0,0 +1,56 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "message_internal.h"
#include "message.h"
#include "message_builder.h"
namespace cppkafka {
// MessageInternal
MessageInternal::MessageInternal(void* user_data,
std::shared_ptr<Internal> internal)
: user_data_(user_data),
internal_(internal) {
}
std::unique_ptr<MessageInternal> MessageInternal::load(Message& message) {
return std::unique_ptr<MessageInternal>(message.load_internal().get_handle() ?
static_cast<MessageInternal*>(message.get_handle()->_private) : nullptr);
}
void* MessageInternal::get_user_data() const {
return user_data_;
}
InternalPtr MessageInternal::get_internal() const {
return internal_;
}
}

src/message_timestamp.cpp (new file)

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "message_timestamp.h"
using std::chrono::milliseconds;
namespace cppkafka {
MessageTimestamp::MessageTimestamp(milliseconds timestamp, TimestampType type)
: timestamp_(timestamp),
type_(type) {
}
milliseconds MessageTimestamp::get_timestamp() const {
return timestamp_;
}
MessageTimestamp::TimestampType MessageTimestamp::get_type() const {
return type_;
}
} // cppkafka


@@ -27,6 +27,7 @@
*
*/
#include <assert.h>
#include "metadata.h"
#include "error.h"
@@ -110,12 +111,31 @@ uint16_t BrokerMetadata::get_port() const {
// Metadata
Metadata::Metadata(const rd_kafka_metadata_t* ptr)
: handle_(ptr, &rd_kafka_metadata_destroy) {
void dummy_metadata_destroyer(const rd_kafka_metadata_t*) {
}
Metadata Metadata::make_non_owning(const rd_kafka_metadata_t* handle) {
return Metadata(handle, NonOwningTag{});
}
Metadata::Metadata()
: handle_(nullptr, nullptr) {
}
Metadata::Metadata(const rd_kafka_metadata_t* handle)
: handle_(handle, &rd_kafka_metadata_destroy) {
}
Metadata::Metadata(const rd_kafka_metadata_t* handle, NonOwningTag)
: handle_(handle, &dummy_metadata_destroyer) {
}
vector<BrokerMetadata> Metadata::get_brokers() const {
assert(handle_);
vector<BrokerMetadata> output;
for (int i = 0; i < handle_->broker_cnt; ++i) {
const rd_kafka_metadata_broker_t& broker = handle_->brokers[i];
@@ -125,6 +145,7 @@ vector<BrokerMetadata> Metadata::get_brokers() const {
}
vector<TopicMetadata> Metadata::get_topics() const {
assert(handle_);
vector<TopicMetadata> output;
for (int i = 0; i < handle_->topic_cnt; ++i) {
const rd_kafka_metadata_topic_t& topic = handle_->topics[i];
@@ -134,6 +155,7 @@ vector<TopicMetadata> Metadata::get_topics() const {
}
vector<TopicMetadata> Metadata::get_topics(const unordered_set<string>& topics) const {
assert(handle_);
vector<TopicMetadata> output;
for (int i = 0; i < handle_->topic_cnt; ++i) {
const rd_kafka_metadata_topic_t& topic = handle_->topics[i];
@@ -145,6 +167,7 @@ vector<TopicMetadata> Metadata::get_topics(const unordered_set<string>& topics)
}
vector<TopicMetadata> Metadata::get_topics_prefixed(const string& prefix) const {
assert(handle_);
vector<TopicMetadata> output;
for (int i = 0; i < handle_->topic_cnt; ++i) {
const rd_kafka_metadata_topic_t& topic = handle_->topics[i];
@@ -156,4 +179,13 @@ vector<TopicMetadata> Metadata::get_topics_prefixed(const string& prefix) const
return output;
}
Metadata::operator bool() const {
return handle_ != nullptr;
}
const rd_kafka_metadata_t* Metadata::get_handle() const {
return handle_.get();
}
} // cppkafka


@@ -28,13 +28,16 @@
*/
#include <errno.h>
#include <memory>
#include "producer.h"
#include "exceptions.h"
#include "message_internal.h"
using std::move;
using std::string;
using std::chrono::milliseconds;
using std::unique_ptr;
using std::get;
namespace cppkafka {
@@ -49,7 +52,6 @@ Producer::Producer(Configuration config)
if (!ptr) {
throw Exception("Failed to create producer handle: " + string(error_buffer));
}
rd_kafka_set_log_level(ptr, 7);
set_handle(ptr);
}
@@ -61,22 +63,44 @@ Producer::PayloadPolicy Producer::get_payload_policy() const {
return message_payload_policy_;
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
void Producer::produce(const MessageBuilder& builder) {
const Buffer& payload = builder.payload();
const Buffer& key = builder.key();
const int policy = static_cast<int>(message_payload_policy_);
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(builder.topic().data()),
RD_KAFKA_V_PARTITION(builder.partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(builder.user_data()),
RD_KAFKA_V_END);
check_error(result);
do_produce(builder, MessageBuilder::HeaderListType(builder.header_list())); //copy headers
}
void Producer::produce(MessageBuilder&& builder) {
do_produce(builder, std::move(builder.header_list())); //move headers
}
void Producer::produce(const Message& message) {
do_produce(message, HeaderList<Message::HeaderType>(message.get_header_list())); //copy headers
}
void Producer::produce(Message&& message) {
do_produce(message, message.detach_header_list<Message::HeaderType>()); //move headers
}
#else
void Producer::produce(const MessageBuilder& builder) {
do_produce(builder);
}
void Producer::produce(MessageBuilder&& builder) {
do_produce(builder);
}
void Producer::produce(const Message& message) {
do_produce(message);
}
void Producer::produce(Message&& message) {
do_produce(message);
}
#endif
int Producer::poll() {
return poll(get_timeout());
}
@@ -94,4 +118,80 @@ void Producer::flush(milliseconds timeout) {
check_error(result);
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
void Producer::do_produce(const MessageBuilder& builder,
MessageBuilder::HeaderListType&& headers) {
const Buffer& payload = builder.payload();
const Buffer& key = builder.key();
const int policy = static_cast<int>(message_payload_policy_);
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(builder.topic().data()),
RD_KAFKA_V_PARTITION(builder.partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_HEADERS(headers.release_handle()), //pass ownership to rdkafka
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(builder.user_data()),
RD_KAFKA_V_END);
check_error(result);
}
void Producer::do_produce(const Message& message,
MessageBuilder::HeaderListType&& headers) {
const Buffer& payload = message.get_payload();
const Buffer& key = message.get_key();
const int policy = static_cast<int>(message_payload_policy_);
int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0;
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(message.get_topic().data()),
RD_KAFKA_V_PARTITION(message.get_partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(duration),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_HEADERS(headers.release_handle()), //pass ownership to rdkafka
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(message.get_user_data()),
RD_KAFKA_V_END);
check_error(result);
}
#else
void Producer::do_produce(const MessageBuilder& builder) {
const Buffer& payload = builder.payload();
const Buffer& key = builder.key();
const int policy = static_cast<int>(message_payload_policy_);
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(builder.topic().data()),
RD_KAFKA_V_PARTITION(builder.partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(builder.user_data()),
RD_KAFKA_V_END);
check_error(result);
}
void Producer::do_produce(const Message& message) {
const Buffer& payload = message.get_payload();
const Buffer& key = message.get_key();
const int policy = static_cast<int>(message_payload_policy_);
int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0;
auto result = rd_kafka_producev(get_handle(),
RD_KAFKA_V_TOPIC(message.get_topic().data()),
RD_KAFKA_V_PARTITION(message.get_partition()),
RD_KAFKA_V_MSGFLAGS(policy),
RD_KAFKA_V_TIMESTAMP(duration),
RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
RD_KAFKA_V_OPAQUE(message.get_user_data()),
RD_KAFKA_V_END);
check_error(result);
}
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
} // cppkafka
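
A brief sketch of the produce() overloads above from caller code; the topic, key, and payload are placeholders:

#include <string>
#include <cppkafka/producer.h>
#include <cppkafka/message_builder.h>

void produce_example(cppkafka::Producer& producer) {
    const std::string key = "some_key";
    const std::string payload = "some_payload";
    cppkafka::MessageBuilder builder("example_topic");
    builder.partition(0).key(key).payload(payload);
    // Copies the builder's header list (when headers support is compiled in)
    producer.produce(builder);
    // The rvalue overload moves the header list instead, producing a second copy of the message
    producer.produce(std::move(builder));
    producer.flush();
}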

src/queue.cpp (new file)

@@ -0,0 +1,123 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "queue.h"
#include "exceptions.h"
using std::vector;
using std::exception;
using std::chrono::milliseconds;
using std::allocator;
namespace cppkafka {
void dummy_deleter(rd_kafka_queue_t*) {
}
const milliseconds Queue::DEFAULT_TIMEOUT{1000};
Queue Queue::make_non_owning(rd_kafka_queue_t* handle) {
return Queue(handle, NonOwningTag{});
}
Queue Queue::make_queue(rd_kafka_queue_t* handle) {
if (rd_kafka_version() <= RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION) {
return Queue::make_non_owning(handle);
}
else {
return Queue(handle);
}
}
Queue::Queue()
: handle_(nullptr, nullptr),
timeout_ms_(DEFAULT_TIMEOUT) {
}
Queue::Queue(rd_kafka_queue_t* handle)
: handle_(handle, &rd_kafka_queue_destroy),
timeout_ms_(DEFAULT_TIMEOUT) {
}
Queue::Queue(rd_kafka_queue_t* handle, NonOwningTag)
: handle_(handle, &dummy_deleter) {
}
rd_kafka_queue_t* Queue::get_handle() const {
return handle_.get();
}
size_t Queue::get_length() const {
return rd_kafka_queue_length(handle_.get());
}
void Queue::forward_to_queue(const Queue& forward_queue) const {
return rd_kafka_queue_forward(handle_.get(), forward_queue.handle_.get());
}
void Queue::disable_queue_forwarding() const {
return rd_kafka_queue_forward(handle_.get(), nullptr);
}
void Queue::set_timeout(milliseconds timeout) {
timeout_ms_ = timeout;
}
milliseconds Queue::get_timeout() const {
return timeout_ms_;
}
Message Queue::consume() const {
return consume(timeout_ms_);
}
Message Queue::consume(milliseconds timeout) const {
return Message(rd_kafka_consume_queue(handle_.get(), static_cast<int>(timeout.count())));
}
vector<Message> Queue::consume_batch(size_t max_batch_size) const {
return consume_batch(max_batch_size, timeout_ms_, allocator<Message>());
}
vector<Message> Queue::consume_batch(size_t max_batch_size, milliseconds timeout) const {
return consume_batch(max_batch_size, timeout, allocator<Message>());
}
Event Queue::next_event() const {
return next_event(timeout_ms_);
}
Event Queue::next_event(milliseconds timeout) const {
return Event(rd_kafka_queue_poll(handle_.get(), static_cast<int>(timeout.count())));
}
} //cppkafka
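
A small sketch of consuming from a per-partition queue with this API; it assumes the partition is currently assigned to the consumer:

#include <chrono>
#include <vector>
#include <cppkafka/consumer.h>
#include <cppkafka/queue.h>

void drain_partition(cppkafka::Consumer& consumer, const cppkafka::TopicPartition& partition) {
    cppkafka::Queue queue = consumer.get_partition_queue(partition);
    // Non-blocking attempt at a single message
    cppkafka::Message message = queue.consume(std::chrono::milliseconds(0));
    // Up to 10 messages using the queue's default timeout
    std::vector<cppkafka::Message> batch = queue.consume_batch(10);
    (void)message;
    (void)batch;
}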


@@ -34,7 +34,7 @@ using std::string;
namespace cppkafka {
void dummy_topic_destroyer(rd_kafka_topic_t*) {
void dummy_deleter(rd_kafka_topic_t*) {
}
@@ -53,7 +53,7 @@ Topic::Topic(rd_kafka_topic_t* handle)
}
Topic::Topic(rd_kafka_topic_t* handle, NonOwningTag)
: handle_(handle, &dummy_topic_destroyer) {
: handle_(handle, &dummy_deleter) {
}


@@ -33,6 +33,7 @@
#include "exceptions.h"
#include "topic.h"
#include "buffer.h"
#include "detail/callback_invoker.h"
using std::string;
using std::map;
@@ -49,7 +50,8 @@ int32_t partitioner_callback_proxy(const rd_kafka_topic_t* handle, const void *k
if (callback) {
Topic topic = Topic::make_non_owning(const_cast<rd_kafka_topic_t*>(handle));
Buffer key(static_cast<const char*>(key_ptr), key_size);
return callback(topic, key, partition_count);
return CallbackInvoker<TopicConfiguration::PartitionerCallback>("topic partitioner", callback, nullptr)
(topic, key, partition_count);
}
else {
return rd_kafka_msg_partitioner_consistent_random(handle, key_ptr, key_size,


@@ -33,6 +33,7 @@
#include "topic_partition.h"
using std::string;
using std::to_string;
using std::ostream;
using std::tie;
@@ -75,6 +76,10 @@ int64_t TopicPartition::get_offset() const {
return offset_;
}
void TopicPartition::set_partition(int partition) {
partition_ = partition;
}
void TopicPartition::set_offset(int64_t offset) {
offset_ = offset;
}
@@ -92,7 +97,10 @@ bool TopicPartition::operator!=(const TopicPartition& rhs) const {
}
ostream& operator<<(ostream& output, const TopicPartition& rhs) {
return output << rhs.get_topic() << "[" << rhs.get_partition() << "]";
return output << rhs.get_topic() << "["
<< rhs.get_partition() << ":"
<< (rhs.get_offset() == RD_KAFKA_OFFSET_INVALID ? "#" : to_string(rhs.get_offset()))
<< "]";
}
} // cppkafka
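
To make the new stream format concrete, a tiny sketch of what it is expected to print (values are made up):

#include <iostream>
#include <cppkafka/topic_partition.h>

int main() {
    cppkafka::TopicPartition unset_offset("example_topic", 0);
    cppkafka::TopicPartition with_offset("example_topic", 1, 42);
    std::cout << unset_offset << "\n"; // expected: example_topic[0:#]  (offset not set)
    std::cout << with_offset << "\n";  // expected: example_topic[1:42]
}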


@@ -28,34 +28,40 @@
*/
#include <iostream>
#include <string>
#include "topic_partition_list.h"
#include "topic_partition.h"
#include "exceptions.h"
#include "metadata.h"
using std::vector;
using std::set;
using std::ostream;
using std::string;
using std::equal;
namespace cppkafka {
TopicPartitionsListPtr convert(const vector<TopicPartition>& topic_partitions) {
TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions) {
TopicPartitionsListPtr handle(rd_kafka_topic_partition_list_new(topic_partitions.size()),
&rd_kafka_topic_partition_list_destroy);
for (const auto& item : topic_partitions) {
rd_kafka_topic_partition_t* new_item = nullptr;
new_item = rd_kafka_topic_partition_list_add(handle.get(),
rd_kafka_topic_partition_t* new_item = rd_kafka_topic_partition_list_add(
handle.get(),
item.get_topic().data(),
item.get_partition());
item.get_partition()
);
new_item->offset = item.get_offset();
}
return handle;
}
vector<TopicPartition> convert(const TopicPartitionsListPtr& topic_partitions) {
TopicPartitionList convert(const TopicPartitionsListPtr& topic_partitions) {
return convert(topic_partitions.get());
}
vector<TopicPartition> convert(rd_kafka_topic_partition_list_t* topic_partitions) {
vector<TopicPartition> output;
TopicPartitionList convert(rd_kafka_topic_partition_list_t* topic_partitions) {
TopicPartitionList output;
for (int i = 0; i < topic_partitions->cnt; ++i) {
const auto& elem = topic_partitions->elems[i];
output.emplace_back(elem.topic, elem.partition, elem.offset);
@@ -63,10 +69,51 @@ vector<TopicPartition> convert(rd_kafka_topic_partition_list_t* topic_partitions
return output;
}
TopicPartitionList convert(const std::string& topic,
const std::vector<PartitionMetadata>& partition_metadata)
{
TopicPartitionList output;
for (const auto& meta : partition_metadata) {
output.emplace_back(topic, meta.get_id());
}
return output;
}
TopicPartitionsListPtr make_handle(rd_kafka_topic_partition_list_t* handle) {
return TopicPartitionsListPtr(handle, &rd_kafka_topic_partition_list_destroy);
}
TopicPartitionList find_matches(const TopicPartitionList& partitions,
const set<string>& topics) {
TopicPartitionList subset;
for (const auto& partition : partitions) {
for (const auto& topic : topics) {
if (topic.size() == partition.get_topic().size()) {
// compare both strings
bool match = equal(topic.begin(), topic.end(), partition.get_topic().begin(),
[](char c1, char c2)->bool {
return toupper(c1) == toupper(c2);
});
if (match) {
subset.emplace_back(partition);
}
}
}
}
return subset;
}
TopicPartitionList find_matches(const TopicPartitionList& partitions,
const set<int>& ids) {
TopicPartitionList subset;
for (const auto& partition : partitions) {
if (ids.count(partition.get_partition()) > 0) {
subset.emplace_back(partition);
}
}
return subset;
}
ostream& operator<<(ostream& output, const TopicPartitionList& rhs) {
output << "[ ";
for (auto iter = rhs.begin(); iter != rhs.end(); ++iter) {

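An illustrative use of the new find_matches helpers; the topic name and partition ids are placeholders:

#include <set>
#include <string>
#include <cppkafka/topic_partition_list.h>

void filter_assignment(const cppkafka::TopicPartitionList& assignment) {
    // Keep only partitions that belong to the given topics (case-insensitive comparison)
    cppkafka::TopicPartitionList by_topic =
        cppkafka::find_matches(assignment, std::set<std::string>{"example_topic"});
    // Keep only the given partition ids, regardless of topic
    cppkafka::TopicPartitionList by_id =
        cppkafka::find_matches(assignment, std::set<int>{0, 1});
    (void)by_topic;
    (void)by_id;
}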

@@ -35,48 +35,34 @@ using std::min;
namespace cppkafka {
BackoffCommitter::BackoffCommitter(Consumer& consumer)
: consumer_(consumer), initial_backoff_(DEFAULT_INITIAL_BACKOFF),
backoff_step_(DEFAULT_BACKOFF_STEP), maximum_backoff_(DEFAULT_MAXIMUM_BACKOFF),
policy_(BackoffPolicy::LINEAR) {
: consumer_(consumer) {
}
void BackoffCommitter::set_backoff_policy(BackoffPolicy policy) {
policy_ = policy;
}
void BackoffCommitter::set_initial_backoff(TimeUnit value) {
initial_backoff_ = value;
}
void BackoffCommitter::set_backoff_step(TimeUnit value) {
backoff_step_ = value;
}
void BackoffCommitter::set_maximum_backoff(TimeUnit value) {
maximum_backoff_ = value;
}
void BackoffCommitter::set_error_callback(ErrorCallback callback) {
callback_ = move(callback);
}
void BackoffCommitter::commit() {
perform([&] {
return do_commit();
});
}
void BackoffCommitter::commit(const Message& msg) {
do_commit(msg);
perform([&] {
return do_commit(msg);
});
}
void BackoffCommitter::commit(const TopicPartitionList& topic_partitions) {
do_commit(topic_partitions);
perform([&] {
return do_commit(topic_partitions);
});
}
BackoffCommitter::TimeUnit BackoffCommitter::increase_backoff(TimeUnit backoff) {
if (policy_ == BackoffPolicy::LINEAR) {
backoff = backoff + backoff_step_;
}
else {
backoff = backoff * 2;
}
return min(backoff, maximum_backoff_);
Consumer& BackoffCommitter::get_consumer() {
return consumer_;
}
} // cppkafka
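
A hedged sketch of the refactored committer in use; the error-callback contract (return true to keep retrying) is an assumption about how perform() drives it:

#include <cppkafka/consumer.h>
#include <cppkafka/utils/backoff_committer.h>

void commit_with_backoff(cppkafka::Consumer& consumer, const cppkafka::Message& message) {
    cppkafka::BackoffCommitter committer(consumer);
    // The backoff knobs now live in the BackoffPerformer base shown next
    committer.set_backoff_policy(cppkafka::BackoffCommitter::BackoffPolicy::EXPONENTIAL);
    committer.set_maximum_retries(5);
    committer.set_error_callback([](cppkafka::Error /*error*/) {
        return true; // assumption: keep retrying until the retry limit is hit
    });
    committer.commit(message);
}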


@@ -0,0 +1,81 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <algorithm>
#include <limits>
#include "utils/backoff_performer.h"
using std::min;
using std::numeric_limits;
namespace cppkafka {
const BackoffPerformer::TimeUnit BackoffPerformer::DEFAULT_INITIAL_BACKOFF{100};
const BackoffPerformer::TimeUnit BackoffPerformer::DEFAULT_BACKOFF_STEP{50};
const BackoffPerformer::TimeUnit BackoffPerformer::DEFAULT_MAXIMUM_BACKOFF{1000};
const size_t BackoffPerformer::DEFAULT_MAXIMUM_RETRIES{numeric_limits<size_t>::max()};
BackoffPerformer::BackoffPerformer()
: initial_backoff_(DEFAULT_INITIAL_BACKOFF),
backoff_step_(DEFAULT_BACKOFF_STEP), maximum_backoff_(DEFAULT_MAXIMUM_BACKOFF),
policy_(BackoffPolicy::LINEAR), maximum_retries_(DEFAULT_MAXIMUM_RETRIES) {
}
void BackoffPerformer::set_backoff_policy(BackoffPolicy policy) {
policy_ = policy;
}
void BackoffPerformer::set_initial_backoff(TimeUnit value) {
initial_backoff_ = value;
}
void BackoffPerformer::set_backoff_step(TimeUnit value) {
backoff_step_ = value;
}
void BackoffPerformer::set_maximum_backoff(TimeUnit value) {
maximum_backoff_ = value;
}
void BackoffPerformer::set_maximum_retries(size_t value) {
maximum_retries_ = value == 0 ? 1 : value;
}
BackoffPerformer::TimeUnit BackoffPerformer::increase_backoff(TimeUnit backoff) {
if (policy_ == BackoffPolicy::LINEAR) {
backoff = backoff + backoff_step_;
}
else {
backoff = backoff * 2;
}
return min(backoff, maximum_backoff_);
}
} // cppkafka
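
To make the defaults above concrete, a standalone sketch that mirrors increase_backoff and prints how the delay grows under each policy (100 ms initial, 50 ms step, 1000 ms cap):

#include <algorithm>
#include <chrono>
#include <iostream>

int main() {
    using std::chrono::milliseconds;
    milliseconds linear(100);
    milliseconds exponential(100);
    const milliseconds step(50);
    const milliseconds maximum(1000);
    for (int attempt = 0; attempt < 6; ++attempt) {
        // LINEAR: 100, 150, 200, 250, ...  EXPONENTIAL: 100, 200, 400, 800, 1000, 1000
        std::cout << linear.count() << " / " << exponential.count() << "\n";
        linear = std::min(linear + step, maximum);
        exponential = std::min(exponential * 2, maximum);
    }
}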


@@ -0,0 +1,137 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "utils/poll_strategy_base.h"
#include "consumer.h"
using std::chrono::milliseconds;
namespace cppkafka {
PollStrategyBase::PollStrategyBase(Consumer& consumer)
: consumer_(consumer),
consumer_queue_(QueueData{consumer.get_consumer_queue(), boost::any()}) {
// get all currently active partition assignments
TopicPartitionList assignment = consumer_.get_assignment();
on_assignment(assignment);
// take over the assignment callback
assignment_callback_ = consumer.get_assignment_callback();
consumer_.set_assignment_callback([this](TopicPartitionList& partitions) {
on_assignment(partitions);
});
// take over the revocation callback
revocation_callback_ = consumer.get_revocation_callback();
consumer_.set_revocation_callback([this](const TopicPartitionList& partitions) {
on_revocation(partitions);
});
// take over the rebalance error callback
rebalance_error_callback_ = consumer.get_rebalance_error_callback();
consumer_.set_rebalance_error_callback([this](Error error) {
on_rebalance_error(error);
});
}
PollStrategyBase::~PollStrategyBase() {
//reset the original callbacks
consumer_.set_assignment_callback(assignment_callback_);
consumer_.set_revocation_callback(revocation_callback_);
consumer_.set_rebalance_error_callback(rebalance_error_callback_);
}
void PollStrategyBase::set_timeout(milliseconds timeout) {
consumer_.set_timeout(timeout);
}
milliseconds PollStrategyBase::get_timeout() {
return consumer_.get_timeout();
}
Consumer& PollStrategyBase::get_consumer() {
return consumer_;
}
QueueData& PollStrategyBase::get_consumer_queue() {
return consumer_queue_;
}
PollStrategyBase::QueueMap& PollStrategyBase::get_partition_queues() {
return partition_queues_;
}
void PollStrategyBase::reset_state() {
}
void PollStrategyBase::assign(TopicPartitionList& partitions) {
// populate partition queues
for (const auto& partition : partitions) {
// get the queue associated with this partition
partition_queues_.emplace(partition, QueueData{consumer_.get_partition_queue(partition), boost::any()});
}
reset_state();
}
void PollStrategyBase::revoke(const TopicPartitionList& partitions) {
for (const auto &partition : partitions) {
partition_queues_.erase(partition);
}
reset_state();
}
void PollStrategyBase::revoke() {
partition_queues_.clear();
reset_state();
}
void PollStrategyBase::on_assignment(TopicPartitionList& partitions) {
assign(partitions);
// call original consumer callback if any
if (assignment_callback_) {
assignment_callback_(partitions);
}
}
void PollStrategyBase::on_revocation(const TopicPartitionList& partitions) {
revoke(partitions);
// call original consumer callback if any
if (revocation_callback_) {
revocation_callback_(partitions);
}
}
void PollStrategyBase::on_rebalance_error(Error error) {
reset_state();
// call original consumer callback if any
if (rebalance_error_callback_) {
rebalance_error_callback_(error);
}
}
} //cppkafka


@@ -0,0 +1,101 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "utils/roundrobin_poll_strategy.h"
using std::string;
using std::chrono::milliseconds;
using std::make_move_iterator;
using std::allocator;
namespace cppkafka {
RoundRobinPollStrategy::RoundRobinPollStrategy(Consumer& consumer)
: PollStrategyBase(consumer) {
reset_state();
}
RoundRobinPollStrategy::~RoundRobinPollStrategy() {
restore_forwarding();
}
Message RoundRobinPollStrategy::poll() {
return poll(get_consumer().get_timeout());
}
Message RoundRobinPollStrategy::poll(milliseconds timeout) {
// Always give priority to group and global events
Message message = get_consumer_queue().queue.consume(milliseconds(0));
if (message) {
return message;
}
size_t num_queues = get_partition_queues().size();
while (num_queues--) {
//consume the next partition (non-blocking)
message = get_next_queue().queue.consume(milliseconds(0));
if (message) {
return message;
}
}
// We still don't have a valid message so we block on the event queue
return get_consumer_queue().queue.consume(timeout);
}
std::vector<Message> RoundRobinPollStrategy::poll_batch(size_t max_batch_size) {
return poll_batch(max_batch_size, get_consumer().get_timeout(), allocator<Message>());
}
std::vector<Message> RoundRobinPollStrategy::poll_batch(size_t max_batch_size,
milliseconds timeout) {
return poll_batch(max_batch_size, timeout, allocator<Message>());
}
void RoundRobinPollStrategy::restore_forwarding() {
// forward all partition queues
for (const auto& toppar : get_partition_queues()) {
toppar.second.queue.forward_to_queue(get_consumer_queue().queue);
}
}
QueueData& RoundRobinPollStrategy::get_next_queue() {
if (get_partition_queues().empty()) {
throw QueueException(RD_KAFKA_RESP_ERR__STATE);
}
if (++queue_iter_ == get_partition_queues().end()) {
queue_iter_ = get_partition_queues().begin();
}
return queue_iter_->second;
}
void RoundRobinPollStrategy::reset_state() {
queue_iter_ = get_partition_queues().begin();
}
} //cppkafka
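
A sketch of wiring the round-robin strategy to an existing consumer; the stop flag and message handling are placeholders:

#include <cppkafka/consumer.h>
#include <cppkafka/utils/roundrobin_poll_strategy.h>

void consume_round_robin(cppkafka::Consumer& consumer, const bool& keep_running) {
    // Takes over the consumer's rebalance callbacks and builds one queue per assigned partition
    cppkafka::RoundRobinPollStrategy strategy(consumer);
    while (keep_running) {
        // Group/global events are served first, then the partition queues in round-robin order
        cppkafka::Message message = strategy.poll();
        if (message && !message.get_error()) {
            // ... process message ...
        }
    }
}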


@@ -1,31 +1,52 @@
include_directories(${GOOGLETEST_INCLUDE})
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)
include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${RDKAFKA_INCLUDE_DIR})
include_directories(SYSTEM ${CATCH_INCLUDE})
link_directories(${GOOGLETEST_LIBRARY})
link_libraries(cppkafka ${RDKAFKA_LIBRARY} gtest gtest_main pthread)
set(KAFKA_TEST_INSTANCE "kafka-vm:9092"
if (NOT KAFKA_TEST_INSTANCE)
set(KAFKA_TEST_INSTANCE kafka-vm:9092
CACHE STRING "The Kafka instance to connect to when running tests")
endif()
if (NOT KAFKA_NUM_PARTITIONS)
set(KAFKA_NUM_PARTITIONS 3 CACHE STRING "Number of Kafka partitions")
endif()
if (NOT KAFKA_TOPICS)
set(KAFKA_TOPICS "cppkafka_test1;cppkafka_test2" CACHE STRING "Kafka topics")
endif()
# Convert list of topics into a C++ initializer list
FOREACH(TOPIC ${KAFKA_TOPICS})
if (NOT TOPIC_LIST)
set(TOPIC_LIST "\"${TOPIC}\"")
else()
set(TOPIC_LIST "${TOPIC_LIST},\"${TOPIC}\"")
endif()
ENDFOREACH()
add_custom_target(tests)
macro(create_test test_name)
add_executable(${test_name}_test EXCLUDE_FROM_ALL "${test_name}_test.cpp")
add_test(${test_name} ${test_name}_test)
add_dependencies(tests ${test_name}_test)
add_dependencies(${test_name}_test cppkafka)
target_link_libraries(${test_name}_test cppkafka-test)
endmacro()
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
add_library(cppkafka-test EXCLUDE_FROM_ALL test_utils.cpp)
add_dependencies(cppkafka-test cppkafka)
add_definitions(
"-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\""
-DKAFKA_NUM_PARTITIONS=${KAFKA_NUM_PARTITIONS}
-DKAFKA_TOPIC_NAMES=${TOPIC_LIST}
)
add_definitions("-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\"")
create_test(consumer)
create_test(producer)
create_test(kafka_handle_base)
create_test(topic_partition_list)
create_test(configuration)
create_test(buffer)
create_test(compacted_topic_processor)
add_executable(cppkafka_tests
buffer_test.cpp
compacted_topic_processor_test.cpp
configuration_test.cpp
topic_partition_list_test.cpp
kafka_handle_base_test.cpp
producer_test.cpp
consumer_test.cpp
roundrobin_poll_test.cpp
headers_test.cpp
test_utils.cpp
# Main file
test_main.cpp
)
# In CMake >= 3.15 Boost::boost == Boost::headers
target_link_libraries(cppkafka_tests cppkafka RdKafka::rdkafka Boost::boost)
add_dependencies(tests cppkafka_tests)
add_test(cppkafka cppkafka_tests)


@@ -1,78 +1,95 @@
#include <string>
#include <vector>
#include <array>
#include <sstream>
#include <gtest/gtest.h>
#include <catch.hpp>
#include "cppkafka/buffer.h"
using std::string;
using std::vector;
using std::array;
using std::ostringstream;
using namespace cppkafka;
class BufferTest : public testing::Test {
public:
TEST_CASE("conversions", "[buffer]") {
const string data = "Hello world!";
const Buffer buffer(data);
const Buffer empty_buffer;
};
SECTION("construction") {
CHECK_THROWS_AS(Buffer((const char*)nullptr, 5), Exception);
}
TEST_F(BufferTest, OperatorBool) {
string data = "Hello world!";
Buffer buffer1(data);
Buffer buffer2;
SECTION("bool conversion") {
CHECK(!!buffer == true);
CHECK(!!empty_buffer == false);
}
EXPECT_TRUE(buffer1);
EXPECT_FALSE(buffer2);
SECTION("string conversion") {
CHECK(static_cast<string>(buffer) == data);
CHECK(static_cast<string>(empty_buffer).empty());
}
SECTION("vector conversion") {
const vector<char> buffer_as_vector = buffer;
CHECK(string(buffer_as_vector.begin(), buffer_as_vector.end()) == data);
}
}
TEST_F(BufferTest, StringConversion) {
string data = "Hello world!";
Buffer buffer(data);
string buffer_as_string = buffer;
EXPECT_EQ(data, buffer_as_string);
}
TEST_F(BufferTest, StringConversionOnEmptyBuffer) {
Buffer buffer;
EXPECT_EQ("", static_cast<string>(buffer));
}
TEST_F(BufferTest, VectorConversion) {
string data = "Hello world!";
Buffer buffer(data);
vector<char> buffer_as_vector = buffer;
EXPECT_EQ(data, string(buffer_as_vector.begin(), buffer_as_vector.end()));
}
TEST_F(BufferTest, VectorConstruction) {
TEST_CASE("construction", "[buffer]") {
// From string
const string str_data = "Hello world!";
const vector<uint8_t> data(str_data.begin(), str_data.end());
Buffer buffer(data);
EXPECT_EQ(str_data, buffer);
// From vector
const vector<uint8_t> vector_data(str_data.begin(), str_data.end());
// From array
const array<char,12> array_data{{'H','e','l','l','o',' ','w','o','r','l','d','!'}};
// From raw array
const char raw_array[12]{'H','e','l','l','o',' ','w','o','r','l','d','!'};
// Build buffers
const Buffer buffer(vector_data); //vector
const Buffer buffer2(vector_data.begin(), vector_data.end()); //iterators
const Buffer buffer3(str_data.data(), str_data.data() + str_data.size()); //char iterators
const Buffer buffer4(array_data); //arrays
const Buffer buffer5(raw_array); //raw arrays
const Buffer buffer6(str_data); //string
const Buffer buffer7(str_data.data(), str_data.size()); //type + size
// Test
CHECK(str_data == buffer);
CHECK(buffer == buffer2);
CHECK(buffer == buffer3);
CHECK(buffer == buffer4);
CHECK(buffer == buffer5);
CHECK(buffer == buffer6);
CHECK(buffer == buffer7);
}
TEST_F(BufferTest, Equality) {
string data = "Hello world!";
Buffer buffer1(data);
Buffer buffer2(data);
EXPECT_EQ(buffer1, buffer2);
TEST_CASE("comparison", "[buffer]") {
const string data = "Hello world!";
const Buffer buffer1(data);
const Buffer buffer2(data);
const Buffer empty_buffer;
SECTION("equality") {
CHECK(buffer1 == buffer2);
CHECK(buffer2 == buffer1);
}
SECTION("inequality") {
CHECK(buffer1 != empty_buffer);
CHECK(empty_buffer != buffer1);
}
}
TEST_F(BufferTest, InEquality) {
string data1 = "Hello world!";
string data2 = "Hello worldz";
Buffer buffer1(data1);
Buffer buffer2(data2);
EXPECT_NE(buffer1, buffer2);
}
TEST_F(BufferTest, OutputOperator) {
string data = "Hello \x7fwor\x03ld!";
string pretty_string = "Hello \\x7fwor\\x03ld!";
Buffer buffer(data);
TEST_CASE("stream extraction", "[buffer]") {
const string data = "Hello \x7fwor\x03ld!";
const string pretty_string = "Hello \\x7fwor\\x03ld!";
const Buffer buffer(data);
ostringstream output;
output << buffer;
EXPECT_EQ(pretty_string, output.str());
CHECK(output.str() == pretty_string);
}


@@ -4,10 +4,11 @@
#include <set>
#include <map>
#include <condition_variable>
#include <gtest/gtest.h>
#include "cppkafka/producer.h"
#include <catch.hpp>
#include "cppkafka/utils/buffered_producer.h"
#include "cppkafka/consumer.h"
#include "cppkafka/utils/compacted_topic_processor.h"
#include "test_utils.h"
using std::string;
using std::to_string;
@@ -29,28 +30,21 @@ using std::chrono::milliseconds;
using namespace cppkafka;
class CompactedTopicProcessorTest : public testing::Test {
public:
static const string KAFKA_TOPIC;
Configuration make_producer_config() {
static Configuration make_producer_config() {
Configuration config;
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
return config;
}
}
Configuration make_consumer_config() {
static Configuration make_consumer_config() {
Configuration config;
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
config.set("enable.auto.commit", false);
config.set("group.id", "compacted_topic_test");
return config;
}
};
}
const string CompactedTopicProcessorTest::KAFKA_TOPIC = "cppkafka_test1";
TEST_F(CompactedTopicProcessorTest, Consume) {
TEST_CASE("consumption", "[consumer][compacted]") {
Consumer consumer(make_consumer_config());
// We'll use ints as the key, strings as the value
using CompactedConsumer = CompactedTopicProcessor<int, string>;
@@ -70,12 +64,16 @@ TEST_F(CompactedTopicProcessorTest, Consume) {
compacted_consumer.set_event_handler([&](const Event& event) {
events.push_back(event);
});
consumer.subscribe({ KAFKA_TOPIC });
consumer.poll();
consumer.poll();
consumer.poll();
consumer.subscribe({ KAFKA_TOPICS[0] });
set<int> eof_partitions;
while (eof_partitions.size() != static_cast<size_t>(KAFKA_NUM_PARTITIONS)) {
Message msg = consumer.poll();
if (msg && msg.is_eof()) {
eof_partitions.insert(msg.get_partition());
}
}
Producer producer(make_producer_config());
BufferedProducer<string> producer(make_producer_config());
struct ElementType {
string value;
@@ -87,13 +85,14 @@ TEST_F(CompactedTopicProcessorTest, Consume) {
};
for (const auto& element_pair : elements) {
const ElementType& element = element_pair.second;
MessageBuilder builder(KAFKA_TOPIC);
MessageBuilder builder(KAFKA_TOPICS[0]);
builder.partition(element.partition).key(element_pair.first).payload(element.value);
producer.produce(builder);
}
// Now erase the first element
string deleted_key = "42";
producer.produce(MessageBuilder(KAFKA_TOPIC).partition(0).key(deleted_key));
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(0).key(deleted_key));
producer.flush();
for (size_t i = 0; i < 10; ++i) {
compacted_consumer.process_event();
@@ -101,27 +100,27 @@ TEST_F(CompactedTopicProcessorTest, Consume) {
size_t set_count = 0;
size_t delete_count = 0;
ASSERT_FALSE(events.empty());
CHECK(events.empty() == false);
for (const Event& event : events) {
switch (event.get_type()) {
case Event::SET_ELEMENT:
{
auto iter = elements.find(to_string(event.get_key()));
ASSERT_NE(iter, elements.end());
EXPECT_EQ(iter->second.value, event.get_value());
EXPECT_EQ(iter->second.partition, event.get_partition());
REQUIRE(iter != elements.end());
CHECK(iter->second.value == event.get_value());
CHECK(iter->second.partition == event.get_partition());
set_count++;
}
break;
case Event::DELETE_ELEMENT:
EXPECT_EQ(0, event.get_partition());
EXPECT_EQ(42, event.get_key());
CHECK(event.get_partition() == 0);
CHECK(event.get_key() == 42);
delete_count++;
break;
default:
break;
}
}
EXPECT_EQ(2, set_count);
EXPECT_EQ(1, delete_count);
CHECK(set_count == 2);
CHECK(delete_count == 1);
}


@@ -1,4 +1,4 @@
#include <gtest/gtest.h>
#include <catch.hpp>
#include "cppkafka/configuration.h"
#include "cppkafka/exceptions.h"
@@ -6,86 +6,88 @@ using namespace cppkafka;
using std::string;
class ConfigurationTest : public testing::Test {
public:
};
TEST_F(ConfigurationTest, GetSetConfig) {
TEST_CASE("normal config", "[config]") {
Configuration config;
SECTION("get existing") {
config.set("group.id", "foo").set("metadata.broker.list", "asd:9092");
EXPECT_EQ("foo", config.get("group.id"));
EXPECT_EQ("asd:9092", config.get("metadata.broker.list"));
EXPECT_EQ("foo", config.get<string>("group.id"));
CHECK(config.get("group.id") == "foo");
CHECK(config.get("metadata.broker.list") == "asd:9092");
CHECK(config.get<string>("group.id") == "foo");
}
EXPECT_THROW(config.get("asd"), ConfigOptionNotFound);
}
SECTION("get non existent") {
REQUIRE_THROWS_AS(config.get("asd"), ConfigOptionNotFound);
}
TEST_F(ConfigurationTest, GetSetTopicConfig) {
TopicConfiguration config;
config.set("auto.commit.enable", true).set("offset.store.method", "broker");
EXPECT_EQ("true", config.get("auto.commit.enable"));
EXPECT_EQ("broker", config.get("offset.store.method"));
EXPECT_EQ(true, config.get<bool>("auto.commit.enable"));
SECTION("set overloads") {
config.set("enable.auto.commit", true);
config.set("auto.commit.interval.ms", 100);
EXPECT_THROW(config.get("asd"), ConfigOptionNotFound);
}
CHECK(config.get("enable.auto.commit") == "true");
CHECK(config.get("auto.commit.interval.ms") == "100");
CHECK(config.get<int>("auto.commit.interval.ms") == 100);
}
TEST_F(ConfigurationTest, ConfigSetMultiple) {
Configuration config = {
SECTION("set multiple") {
config = {
{ "group.id", "foo" },
{ "metadata.broker.list", string("asd:9092") },
{ "message.max.bytes", 2000 },
{ "topic.metadata.refresh.sparse", true }
};
EXPECT_EQ("foo", config.get("group.id"));
EXPECT_EQ("asd:9092", config.get("metadata.broker.list"));
EXPECT_EQ(2000, config.get<int>("message.max.bytes"));
EXPECT_EQ(true, config.get<bool>("topic.metadata.refresh.sparse"));
CHECK(config.get("group.id") == "foo");
CHECK(config.get("metadata.broker.list") == "asd:9092");
CHECK(config.get<int>("message.max.bytes") == 2000);
CHECK(config.get<bool>("topic.metadata.refresh.sparse") == true);
}
SECTION("default topic config") {
config.set_default_topic_configuration({{ "request.required.acks", 2 }});
const auto& topic_config = config.get_default_topic_configuration();
CHECK(!!topic_config == true);
CHECK(topic_config->get<int>("request.required.acks") == 2);
}
SECTION("get all") {
config.set("enable.auto.commit", false);
auto option_map = config.get_all();
CHECK(option_map.at("enable.auto.commit") == "false");
}
}
TEST_F(ConfigurationTest, TopicConfigSetMultiple) {
TopicConfiguration config = {
TEST_CASE("topic config", "[config]") {
TopicConfiguration config;
SECTION("get existing") {
config.set("auto.commit.enable", true).set("offset.store.method", "broker");
CHECK(config.get("auto.commit.enable") == "true");
CHECK(config.get("offset.store.method") == "broker");
CHECK(config.get<bool>("auto.commit.enable") == true);
}
SECTION("get non existent") {
REQUIRE_THROWS_AS(config.get("asd"), ConfigOptionNotFound);
}
SECTION("set multiple") {
config = {
{ "compression.codec", "none" },
{ "offset.store.method", string("file") },
{ "request.required.acks", 2 },
{ "produce.offset.report", true }
};
EXPECT_EQ("none", config.get("compression.codec"));
EXPECT_EQ("file", config.get("offset.store.method"));
EXPECT_EQ(2, config.get<int>("request.required.acks"));
EXPECT_EQ(true, config.get<bool>("produce.offset.report"));
}
CHECK(config.get("compression.codec") == "none");
CHECK(config.get("offset.store.method") == "file");
CHECK(config.get<int>("request.required.acks") == 2);
CHECK(config.get<bool>("produce.offset.report") == true);
}
TEST_F(ConfigurationTest, SetDefaultTopicConfiguration) {
Configuration config;
config.set_default_topic_configuration({{ "request.required.acks", 2 }});
const auto& topic_config = config.get_default_topic_configuration();
EXPECT_TRUE(topic_config);
EXPECT_EQ(2, topic_config->get<int>("request.required.acks"));
}
TEST_F(ConfigurationTest, SetOverloads) {
Configuration config;
config.set("enable.auto.commit", true);
config.set("auto.commit.interval.ms", 100);
EXPECT_EQ("true", config.get("enable.auto.commit"));
EXPECT_EQ("100", config.get("auto.commit.interval.ms"));
EXPECT_EQ(100, config.get<int>("auto.commit.interval.ms"));
}
TEST_F(ConfigurationTest, GetAll) {
Configuration config;
config.set("enable.auto.commit", false);
auto option_map = config.get_all();
EXPECT_EQ("false", option_map.at("enable.auto.commit"));
}
TEST_F(ConfigurationTest, TopicGetAll) {
TopicConfiguration config;
SECTION("get all") {
config.set("auto.commit.enable", false);
auto option_map = config.get_all();
EXPECT_EQ("false", option_map.at("auto.commit.enable"));
CHECK(option_map.at("auto.commit.enable") == "false");
}
}


@@ -3,10 +3,13 @@
#include <set>
#include <mutex>
#include <chrono>
#include <iterator>
#include <condition_variable>
#include <gtest/gtest.h>
#include <catch.hpp>
#include "cppkafka/consumer.h"
#include "cppkafka/producer.h"
#include "cppkafka/utils/consumer_dispatcher.h"
#include "cppkafka/utils/buffered_producer.h"
#include "test_utils.h"
using std::vector;
@@ -19,121 +22,117 @@ using std::tie;
using std::condition_variable;
using std::lock_guard;
using std::unique_lock;
using std::make_move_iterator;
using std::chrono::seconds;
using std::chrono::milliseconds;
using std::chrono::system_clock;
using namespace cppkafka;
class ConsumerTest : public testing::Test {
public:
static const string KAFKA_TOPIC;
static Configuration make_producer_config() {
Configuration config;
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
return config;
}
}
Configuration make_consumer_config(const string& group_id = "consumer_test") {
static Configuration make_consumer_config(const string& group_id = make_consumer_group_id()) {
Configuration config;
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
config.set("enable.auto.commit", false);
config.set("group.id", group_id);
return config;
}
};
}
const string ConsumerTest::KAFKA_TOPIC = "cppkafka_test1";
TEST_CASE("message consumption", "[consumer]") {
TopicPartitionList assignment;
int partition = 0;
// Create a consumer and subscribe to the topic
Consumer consumer(make_consumer_config());
consumer.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
assignment = topic_partitions;
});
consumer.subscribe({ KAFKA_TOPICS[0] });
ConsumerRunner runner(consumer, 1, KAFKA_NUM_PARTITIONS);
// Produce a message just so we stop the consumer
Producer producer(make_producer_config());
string payload = "Hello world!";
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
runner.try_join();
// All partitions should be ours
REQUIRE(assignment.size() == KAFKA_NUM_PARTITIONS);
set<int> partitions;
for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace(i++));
for (const auto& topic_partition : assignment) {
CHECK(topic_partition.get_topic() == KAFKA_TOPICS[0]);
CHECK(partitions.erase(topic_partition.get_partition()) == true);
}
REQUIRE(runner.get_messages().size() == 1);
CHECK(consumer.get_subscription() == vector<string>{ KAFKA_TOPICS[0] });
assignment = consumer.get_assignment();
CHECK(assignment.size() == KAFKA_NUM_PARTITIONS);
int64_t low;
int64_t high;
tie(low, high) = consumer.get_offsets({ KAFKA_TOPICS[0], partition });
CHECK(high > low);
CHECK(runner.get_messages().back().get_offset() + 1 == high);
}
TEST_CASE("consumer rebalance", "[consumer]") {
TopicPartitionList assignment1;
TopicPartitionList assignment2;
const string group_id = make_consumer_group_id();
bool revocation_called = false;
int partition = 0;
// Create a consumer and subscribe to the topic
Consumer consumer1(make_consumer_config(group_id));
consumer1.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
assignment1 = topic_partitions;
});
consumer1.set_revocation_callback([&](const TopicPartitionList&) {
revocation_called = true;
});
consumer1.subscribe({ KAFKA_TOPICS[0] });
ConsumerRunner runner1(consumer1, 1, KAFKA_NUM_PARTITIONS);
// Create a second consumer and subscribe to the topic
Consumer consumer2(make_consumer_config(group_id));
consumer2.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
assignment2 = topic_partitions;
});
consumer2.subscribe({ KAFKA_TOPICS[0] });
ConsumerRunner runner2(consumer2, 1, 1);
CHECK(revocation_called == true);
// Produce a message just so we stop the consumer
Producer producer(make_producer_config());
string payload = "Hello world!";
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
runner1.try_join();
runner2.try_join();
// All partitions should be assigned
CHECK(assignment1.size() + assignment2.size() == KAFKA_NUM_PARTITIONS);
set<int> partitions;
for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace(i++));
for (const auto& topic_partition : assignment1) {
CHECK(topic_partition.get_topic() == KAFKA_TOPICS[0]);
CHECK(partitions.erase(topic_partition.get_partition()) == true);
}
for (const auto& topic_partition : assignment2) {
CHECK(topic_partition.get_topic() == KAFKA_TOPICS[0]);
CHECK(partitions.erase(topic_partition.get_partition()) == true);
}
CHECK(runner1.get_messages().size() + runner2.get_messages().size() == 1);
}
TEST_CASE("consumer offset commit", "[consumer]") {
int partition = 0;
int64_t message_offset = 0;
bool offset_commit_called = false;
@@ -143,28 +142,119 @@ TEST_F(ConsumerTest, OffsetCommit) {
config.set_offset_commit_callback([&](Consumer&, Error error,
const TopicPartitionList& topic_partitions) {
offset_commit_called = true;
CHECK(!!error == false);
REQUIRE(topic_partitions.size() == 1);
CHECK(topic_partitions[0].get_topic() == KAFKA_TOPICS[0]);
CHECK(topic_partitions[0].get_partition() == 0);
CHECK(topic_partitions[0].get_offset() == message_offset + 1);
});
Consumer consumer(config);
consumer.assign({ { KAFKA_TOPICS[0], 0 } });
ConsumerRunner runner(consumer, 1, 1);
// Produce a message just so we stop the consumer
Producer producer(make_producer_config());
string payload = "Hello world!";
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
runner.try_join();
REQUIRE(runner.get_messages().size() == 1);
const Message& msg = runner.get_messages()[0];
message_offset = msg.get_offset();
consumer.commit(msg);
for (size_t i = 0; i < 3 && !offset_commit_called; ++i) {
consumer.poll();
}
CHECK(offset_commit_called == true);
}
TEST_CASE("consumer throttle", "[consumer]") {
int partition = 0;
// Create a consumer and subscribe to the topic
Configuration config = make_consumer_config("offset_commit");
Consumer consumer(config);
consumer.assign({ { KAFKA_TOPICS[0], 0 } });
{
ConsumerRunner runner(consumer, 0, 1);
runner.try_join();
}
// Produce a message just so we stop the consumer
BufferedProducer<string> producer(make_producer_config());
string payload = "Hello world!";
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
producer.flush();
size_t callback_executed_count = 0;
ConsumerDispatcher dispatcher(consumer);
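// The message callback below hands each message back to the dispatcher on the first two
// invocations and returns an empty Message on the third; the timeout callback then stops
// the dispatcher once three callbacks have run.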
dispatcher.run(
[&](Message msg) {
callback_executed_count++;
if (callback_executed_count == 3) {
return Message();
}
return msg;
},
[&](ConsumerDispatcher::Timeout) {
if (callback_executed_count == 3) {
dispatcher.stop();
}
}
);
CHECK(callback_executed_count == 3);
}
TEST_CASE("consume batch", "[consumer]") {
int partition = 0;
// Create a consumer and subscribe to the topic
Configuration config = make_consumer_config("test");
Consumer consumer(config);
consumer.assign({ { KAFKA_TOPICS[0], 0 } });
{
ConsumerRunner runner(consumer, 0, 1);
runner.try_join();
}
// Produce a message just so we stop the consumer
BufferedProducer<string> producer(make_producer_config());
string payload = "Hello world!";
// Produce it twice
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
producer.flush();
MessageList all_messages;
int i = 0;
while (i < 5 && all_messages.size() != 2) {
MessageList messages = consumer.poll_batch(2);
all_messages.insert(all_messages.end(), make_move_iterator(messages.begin()),
make_move_iterator(messages.end()));
++i;
}
REQUIRE(all_messages.size() == 2);
CHECK(all_messages[0].get_payload() == payload);
CHECK(all_messages[1].get_payload() == payload);
}
// This test may fail due to what seems to be an rdkafka bug. Skip it for now until we're
// certain of what to do
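// The "[!hide]" tag keeps this test out of the default run; it only executes when it is
// selected explicitly on the command line.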
TEST_CASE("Event consumption", "[!hide][consumer]") {
// Create a consumer and subscribe to the topic
Consumer consumer(make_consumer_config());
consumer.subscribe({ KAFKA_TOPICS[0] });
vector<rd_kafka_event_type_t> types = {
RD_KAFKA_EVENT_NONE
};
Queue queue = consumer.get_main_queue();
for (const auto type : types) {
const Event event = queue.next_event();
CHECK(event.get_type() == type);
}
}

tests/headers_test.cpp Normal file

@@ -0,0 +1,226 @@
#include <vector>
#include <thread>
#include <set>
#include <mutex>
#include <chrono>
#include <iterator>
#include <condition_variable>
#include <catch.hpp>
#include "cppkafka/consumer.h"
#include "cppkafka/producer.h"
#include "cppkafka/header_list.h"
#include "test_utils.h"
using std::vector;
using std::move;
using std::string;
using std::thread;
using std::set;
using std::mutex;
using std::tie;
using std::condition_variable;
using std::lock_guard;
using std::unique_lock;
using std::make_move_iterator;
using std::chrono::seconds;
using std::chrono::milliseconds;
using std::chrono::system_clock;
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
using namespace cppkafka;
using StringHeader = Header<std::string>;
using BufferHeader = Header<Buffer>;
TEST_CASE("creation", "[headers]") {
SECTION("empty") {
HeaderList<StringHeader> list;
REQUIRE(!!list == false);
}
SECTION("default") {
HeaderList<StringHeader> list(2);
REQUIRE(!!list == true);
REQUIRE(list.size() == 0);
REQUIRE(list.empty() == true);
REQUIRE(list.get_handle() != nullptr);
}
SECTION("from handle") {
HeaderList<StringHeader> list(rd_kafka_headers_new(1));
REQUIRE(!!list == true);
REQUIRE(list.size() == 0);
REQUIRE(list.empty() == true);
REQUIRE(list.get_handle() != nullptr);
}
}
TEST_CASE("release", "[headers]") {
HeaderList<StringHeader> list(2);
auto handle = list.release_handle();
REQUIRE(handle != nullptr);
REQUIRE(list.release_handle() == nullptr); //release again
REQUIRE(!!list == false);
rd_kafka_headers_destroy(handle);
}
TEST_CASE("modify", "[headers]") {
SECTION("add") {
HeaderList<StringHeader> list(10);
//empty header name
list.add({{}, "payload1"});
//empty payload
list.add({"header2", {}});
list.add({"header3", "payload3"});
//both null
list.add({{}, {}});
//both empty (0-length strings)
list.add({"", ""});
//validate
REQUIRE(list.size() == 5);
REQUIRE_FALSE(list.empty());
//access a header
REQUIRE(list.at(1).get_name() == "header2");
REQUIRE(list.at(1).get_value().empty());
REQUIRE(list.at(2).get_value() == "payload3");
}
SECTION("remove") {
HeaderList<StringHeader> list(10);
//empty header name
list.add({{}, "payload1"});
//empty payload
list.add({"header2", {}});
list.add({"header3", "payload3"});
//both null
list.add({{}, {}});
//both empty (0 length strings)
list.add({"", ""});
//Remove a bogus name
Error err = list.remove("bogus");
REQUIRE(err.get_error() == RD_KAFKA_RESP_ERR__NOENT);
//Remove header with name
list.remove("header2");
REQUIRE(list.size() == 4);
list.remove("header3");
REQUIRE(list.size() == 3);
//Remove headers without name
list.remove({});
REQUIRE(list.size() == 0);
}
}
TEST_CASE("copy and move", "[headers]") {
SECTION("copy owning") {
//Create an owning header list and copy it
HeaderList<StringHeader> list(3), list2(3);
list.add({"header1", "payload1"});
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
REQUIRE(list2.size() == 0);
list2 = list;
REQUIRE(list2.size() == 3);
REQUIRE(list2.size() == list.size());
//make sure the handles are different
CHECK(list.get_handle() != list2.get_handle());
CHECK(list.at(0) == list2.at(0));
CHECK(list.at(1) == list2.at(1));
CHECK(list.at(2) == list2.at(2));
CHECK(list == list2);
}
SECTION("copy owning with buffers") {
//Create an owning header list and copy it
HeaderList<BufferHeader> list(3), list2(3);
string payload1 = "payload1", payload2 = "payload2", payload3 = "payload3";
list.add({"header1", payload1});
list.add({"header2", payload2});
list.add({"header3", payload3});
REQUIRE(list2.size() == 0);
list2 = list;
REQUIRE(list2.size() == 3);
REQUIRE(list2.size() == list.size());
//make sure the handles are different
CHECK(list.get_handle() != list2.get_handle());
CHECK(list.at(0) == list2.at(0));
CHECK(list.at(1) == list2.at(1));
CHECK(list.at(2) == list2.at(2));
CHECK(list == list2);
}
SECTION("copy non-owning") {
//Create an owning header list and copy it
HeaderList<StringHeader> list(3), list2(3), list3(HeaderList<StringHeader>::make_non_owning(list.get_handle()));
list.add({"header1", "payload1"});
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
list2 = list3; //copy non-owning list
REQUIRE(list.size() == 3);
REQUIRE(list3.size() == list.size());
REQUIRE(list2.size() == list.size());
//make sure the handles are the same
CHECK(list2.get_handle() == list3.get_handle());
CHECK(list2.at(0) == list3.at(0));
CHECK(list2.at(1) == list3.at(1));
CHECK(list2.at(2) == list3.at(2));
CHECK(list2 == list3);
}
SECTION("move") {
HeaderList<StringHeader> list(3), list2;
list.add({"header1", "payload1"});
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
auto handle = list.get_handle();
list2 = std::move(list);
CHECK_FALSE(!!list);
CHECK(!!list2);
CHECK(list2.size() == 3);
CHECK(handle == list2.get_handle());
}
}
TEST_CASE("access", "[headers]") {
HeaderList<StringHeader> list(3);
list.add({"header1", "payload1"});
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
CHECK(list.at(0).get_value() == "payload1");
CHECK(list.at(1).get_value() == "payload2");
CHECK(list.at(2).get_value() == "payload3");
CHECK_THROWS_AS(list.at(3), Exception);
CHECK(list.front() == list.at(0));
CHECK(list.back() == list.at(2));
}
TEST_CASE("iterate", "[headers]") {
HeaderList<StringHeader> list(3);
REQUIRE(list.begin() == list.end());
list.add({"header1", "payload1"});
REQUIRE(list.begin() != list.end());
CHECK(++list.begin() == list.end());
list.add({"header2", "payload2"});
list.add({"header3", "payload3"});
int i = 0;
for (auto it = list.begin(); it != list.end(); ++it, ++i) {
CHECK(it->get_name().length() == 7);
if (i == 0) {
CHECK(it->get_name() == "header1");
}
else if (i == 1) {
CHECK(it->get_name() == "header2");
}
else if (i == 2) {
CHECK(it->get_name() == "header3");
}
}
//rewind end() iterator
CHECK((--list.end())->get_name() == "header3");
}
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION


@@ -1,6 +1,6 @@
#include <set>
#include <unordered_set>
#include <gtest/gtest.h>
#include <catch.hpp>
#include "cppkafka/consumer.h"
#include "cppkafka/producer.h"
#include "cppkafka/metadata.h"
@@ -14,17 +14,13 @@ using std::string;
using namespace cppkafka;
class KafkaHandleBaseTest : public testing::Test {
public:
static const string KAFKA_TOPIC;
Configuration make_config() {
Configuration make_config() {
Configuration config;
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
return config;
}
}
string get_kafka_host() {
string get_kafka_host() {
string uri = KAFKA_TEST_INSTANCE;
size_t index = uri.find(':');
if (index == string::npos) {
@@ -33,9 +29,9 @@ public:
else {
return uri.substr(0, index);
}
}
}
uint16_t get_kafka_port() {
uint16_t get_kafka_port() {
string uri = KAFKA_TEST_INSTANCE;
size_t index = uri.find(':');
if (index == string::npos) {
@@ -44,64 +40,64 @@ public:
else {
return stoul(uri.substr(index + 1));
}
}
TEST_CASE("metadata", "[handle_base]") {
if (KAFKA_TOPICS.size() < 2) {
return; //skip test
}
Producer producer({});
producer.add_brokers(KAFKA_TEST_INSTANCE);
Metadata metadata = producer.get_metadata();
SECTION("brokers") {
vector<BrokerMetadata> brokers = metadata.get_brokers();
REQUIRE(brokers.size() == 1);
const auto& broker = brokers[0];
//REQUIRE(broker.get_host() == get_kafka_host());
CHECK(broker.get_port() == get_kafka_port());
}
SECTION("topics") {
unordered_set<string> topic_names = { KAFKA_TOPICS[0], KAFKA_TOPICS[1] };
size_t found_topics = 0;
Producer producer(make_config());
Metadata metadata = producer.get_metadata();
const vector<TopicMetadata>& topics = metadata.get_topics();
CHECK(topics.size() >= 2);
for (const auto& topic : topics) {
if (topic_names.count(topic.get_name()) == 1) {
const vector<PartitionMetadata>& partitions = topic.get_partitions();
REQUIRE(partitions.size() == KAFKA_NUM_PARTITIONS);
set<int32_t> expected_ids;
for (int i = 0; i < KAFKA_NUM_PARTITIONS; expected_ids.emplace(i++));
for (const PartitionMetadata& partition : partitions) {
REQUIRE(expected_ids.erase(partition.get_id()) == 1);
for (int32_t replica : partition.get_replicas()) {
REQUIRE(replica == 0);
}
for (int32_t isr : partition.get_in_sync_replica_brokers()) {
REQUIRE(isr == 0);
}
}
found_topics++;
}
}
CHECK(found_topics == topic_names.size());
// Find by names
CHECK(metadata.get_topics(topic_names).size() == topic_names.size());
// Find by prefix
CHECK(metadata.get_topics_prefixed("cppkafka_").size() == topic_names.size());
// Now get the whole metadata only for this topic
Topic topic = producer.get_topic(KAFKA_TOPICS[0]);
CHECK(producer.get_metadata(topic).get_name() == KAFKA_TOPICS[0]);
}
}
TEST_CASE("consumer groups", "[handle_base]") {
string consumer_group = "kafka_handle_test";
string client_id = "my_client_id";
@@ -112,30 +108,23 @@ TEST_F(KafkaHandleBaseTest, ConsumerGroups) {
// Build consumer
Consumer consumer(config);
consumer.subscribe({ KAFKA_TOPICS[0] });
ConsumerRunner runner(consumer, 0, 3);
runner.try_join();
GroupInformation information = consumer.get_consumer_group(consumer_group);
CHECK(information.get_name() == consumer_group);
CHECK(information.get_protocol_type() == "consumer");
CHECK(information.get_members().size() == 1);
auto member = information.get_members()[0];
CHECK(member.get_client_id() == client_id);
MemberAssignmentInformation assignment = member.get_member_assignment();
CHECK(assignment.get_version() == 0);
TopicPartitionList expected_topic_partitions;
for (int i = 0; i < KAFKA_NUM_PARTITIONS; expected_topic_partitions.emplace_back(KAFKA_TOPICS[0], i++));
TopicPartitionList topic_partitions = assignment.get_topic_partitions();
sort(topic_partitions.begin(), topic_partitions.end());
CHECK(topic_partitions == expected_topic_partitions);
}


@@ -3,7 +3,7 @@
#include <chrono>
#include <set>
#include <condition_variable>
#include <gtest/gtest.h>
#include <catch.hpp>
#include "cppkafka/producer.h"
#include "cppkafka/consumer.h"
#include "cppkafka/utils/buffered_producer.h"
@@ -12,248 +12,599 @@
using std::string;
using std::to_string;
using std::set;
using std::vector;
using std::tie;
using std::move;
using std::thread;
namespace this_thread = std::this_thread;
using std::mutex;
using std::unique_lock;
using std::lock_guard;
using std::condition_variable;
using std::chrono::system_clock;
using std::chrono::seconds;
using std::chrono::milliseconds;
using std::chrono::time_point;
using std::chrono::duration_cast;
using std::ref;
using namespace cppkafka;
class ProducerTest : public testing::Test {
public:
static const string KAFKA_TOPIC;
static Configuration make_producer_config() {
Configuration config = {
{ "metadata.broker.list", KAFKA_TEST_INSTANCE },
{ "queue.buffering.max.ms", 0 }
{ "queue.buffering.max.ms", 0 },
{ "api.version.request", true },
{ "queue.buffering.max.ms", 50 }
};
return config;
}
}
static Configuration make_consumer_config() {
Configuration config = {
{ "metadata.broker.list", KAFKA_TEST_INSTANCE },
{ "enable.auto.commit", false },
{ "group.id", "producer_test" }
{ "group.id", make_consumer_group_id() },
{ "api.version.request", true }
};
return config;
}
void producer_run(BufferedProducer<string>& producer,
int& exit_flag, condition_variable& clear,
int num_messages,
int partition) {
MessageBuilder builder(KAFKA_TOPICS[0]);
string key("wassup?");
string payload("nothing much!");
builder.partition(partition).key(key).payload(payload);
for (int i = 0; i < num_messages; ++i) {
if (i == num_messages/2) {
clear.notify_one();
}
producer.add_message(builder);
this_thread::sleep_for(milliseconds(10));
}
exit_flag = 1;
}
void flusher_run(BufferedProducer<string>& producer,
int& exit_flag,
int num_flush) {
while (!exit_flag) {
if (producer.get_buffer_size() >= (size_t)num_flush) {
producer.flush();
}
this_thread::sleep_for(milliseconds(10));
}
producer.flush();
}
void async_flusher_run(BufferedProducer<string>& producer,
int& exit_flag,
int num_flush) {
while (!exit_flag) {
if (producer.get_buffer_size() >= (size_t)num_flush) {
producer.async_flush();
}
this_thread::sleep_for(milliseconds(10));
}
producer.async_flush();
producer.wait_for_acks();
}
void clear_run(BufferedProducer<string>& producer,
condition_variable& clear) {
mutex m;
unique_lock<mutex> lock(m);
clear.wait(lock);
producer.clear();
}
vector<int> dr_data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
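// Shared per-message user data; the delivery-report callbacks below check that librdkafka
// hands the same pointers back, in production order.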
void dr_callback(const Message& message) {
static int i = 0;
if (!message || message.is_eof()) return;
CHECK(message.get_user_data() == &dr_data[i]);
CHECK(*static_cast<int*>(message.get_user_data()) == dr_data[i]);
++i;
}
bool dr_failure_callback(const Message& message) {
if (!message || message.is_eof()) return true;
CHECK(message.get_user_data() == &dr_data[0]);
CHECK(*static_cast<int*>(message.get_user_data()) == dr_data[0]);
return true; //always retry
}
template <typename B>
class ErrorProducer : public BufferedProducer<B>
{
public:
ErrorProducer(Configuration config,
typename BufferedProducer<B>::TestParameters params) :
BufferedProducer<B>(config),
params_(params) {
this->set_test_parameters(&params_);
}
private:
typename BufferedProducer<B>::TestParameters params_;
};
const string ProducerTest::KAFKA_TOPIC = "cppkafka_test1";
TEST_CASE("simple production", "[producer]") {
int partition = 0;
// Create a consumer and assign this topic/partition
Consumer consumer(make_consumer_config());
consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
ConsumerRunner runner(consumer, 1, 1);
Configuration config = make_producer_config();
SECTION("message with no key") {
// Now create a producer and produce a message
const string payload = "Hello world! 1";
Producer producer(config);
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == 1);
const auto& message = messages[0];
CHECK(message.get_payload() == payload);
CHECK(!!message.get_key() == false);
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(message.get_partition() == partition);
CHECK(!!message.get_error() == false);
int64_t low;
int64_t high;
tie(low, high) = producer.query_offsets({ KAFKA_TOPICS[0], partition });
CHECK(high > low);
}
TEST_F(ProducerTest, OneMessageUsingKey) {
int partition = 0;
// Create a consumer and assign this topic/partition
Consumer consumer(make_consumer_config());
consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) });
ConsumerRunner runner(consumer, 1, 1);
// Now create a producer and produce a message
Producer producer(make_producer_config());
string payload = "Hello world! 2";
string key = "such key";
producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).key(key).payload(payload));
SECTION("message with key") {
const string payload = "Hello world! 2";
const string key = "such key";
auto timestamp = system_clock::now();
Producer producer(config);
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
.key(key)
.payload(payload)
.timestamp(timestamp));
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == 1);
const auto& message = messages[0];
EXPECT_EQ(Buffer(payload), message.get_payload());
EXPECT_EQ(Buffer(key), message.get_key());
EXPECT_EQ(KAFKA_TOPIC, message.get_topic());
EXPECT_EQ(partition, message.get_partition());
EXPECT_FALSE(message.get_error());
// NOTE: if this line fails, then you're using kafka 0.10+ and that's okay
EXPECT_FALSE(message.get_timestamp());
}
TEST_F(ProducerTest, MultipleMessagesUnassignedPartitions) {
size_t message_count = 10;
int partitions = 3;
set<string> payloads;
// Create a consumer and subscribe to this topic
Consumer consumer(make_consumer_config());
consumer.subscribe({ KAFKA_TOPIC });
ConsumerRunner runner(consumer, message_count, partitions);
// Now create a producer and produce a message
Producer producer(make_producer_config());
string payload_base = "Hello world ";
for (size_t i = 0; i < message_count; ++i) {
string payload = payload_base + to_string(i);
payloads.insert(payload);
producer.produce(MessageBuilder(KAFKA_TOPIC).payload(payload));
CHECK(message.get_payload() == payload);
CHECK(message.get_key() == key);
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(message.get_partition() == partition);
CHECK(!!message.get_error() == false);
REQUIRE(!!message.get_timestamp() == true);
CHECK(message.get_timestamp()->get_timestamp() == duration_cast<milliseconds>(timestamp.time_since_epoch()));
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
SECTION("message with key and move-able headers") {
using Hdr = MessageBuilder::HeaderType;
const string payload = "Hello world! 2";
const string key = "such key";
const string header1, header2 = "", header3 = "header3";
const milliseconds timestamp{15};
Producer producer(config);
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
.key(key)
.payload(payload)
.timestamp(timestamp)
.header(Hdr{})
.header(Hdr{"", header2})
.header(Hdr{"header3", header3}));
runner.try_join();
const auto& messages = runner.get_messages();
ASSERT_EQ(message_count, messages.size());
for (const auto& message : messages) {
EXPECT_EQ(KAFKA_TOPIC, message.get_topic());
EXPECT_EQ(1, payloads.erase(message.get_payload()));
EXPECT_FALSE(message.get_error());
EXPECT_FALSE(message.get_key());
EXPECT_GE(message.get_partition(), 0);
EXPECT_LT(message.get_partition(), 3);
REQUIRE(messages.size() == 1);
const auto& message = messages[0];
CHECK(message.get_payload() == payload);
CHECK(message.get_key() == key);
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(message.get_partition() == partition);
CHECK(!!message.get_error() == false);
REQUIRE(!!message.get_timestamp() == true);
CHECK(message.get_timestamp()->get_timestamp() == timestamp);
//validate headers
REQUIRE(!!message.get_header_list());
REQUIRE(message.get_header_list().size() == 3);
CHECK(message.get_header_list().front() == Hdr{});
CHECK(message.get_header_list().at(1) == Hdr{"", header2});
CHECK(message.get_header_list().back() == Hdr{"header3", header3});
}
}
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
TEST_F(ProducerTest, Callbacks) {
int partition = 0;
SECTION("message without message builder") {
const string payload = "Goodbye cruel world!";
const string key = "replay key";
const milliseconds timestamp{15};
Producer producer(config);
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
.key(key)
.payload(payload)
.timestamp(timestamp));
runner.try_join();
ConsumerRunner runner2(consumer, 1, 1);
// Create a consumer and assign this topic/partition
Consumer consumer(make_consumer_config());
consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) });
ConsumerRunner runner(consumer, 1, 1);
const auto& replay_messages = runner.get_messages();
REQUIRE(replay_messages.size() == 1);
const auto& replay_message = replay_messages[0];
//produce the same message again
producer.produce(replay_message);
runner2.try_join();
const auto& messages = runner2.get_messages();
REQUIRE(messages.size() == 1);
const auto& message = messages[0];
CHECK(message.get_payload() == payload);
CHECK(message.get_key() == key);
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(message.get_partition() == partition);
CHECK(!!message.get_error() == false);
REQUIRE(!!message.get_timestamp() == true);
CHECK(message.get_timestamp()->get_timestamp() == timestamp);
}
SECTION("callbacks") {
// Now create a producer and produce a message
string payload = "Hello world! 3";
string key = "hehe";
const string payload = "Hello world! 3";
const string key = "hehe";
bool delivery_report_called = false;
Configuration config = make_producer_config();
config.set_delivery_report_callback([&](Producer&, const Message& msg) {
CHECK(msg.get_payload() == payload);
delivery_report_called = true;
});
TopicConfiguration topic_config;
topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key,
int32_t partition_count) {
CHECK(msg_key == key);
CHECK(partition_count == KAFKA_NUM_PARTITIONS);
CHECK(topic.get_name() == KAFKA_TOPICS[0]);
return 0;
});
config.set_default_topic_configuration(topic_config);
Producer producer(move(config));
producer.produce(MessageBuilder(KAFKA_TOPIC).key(key).payload(payload));
Producer producer(config);
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).key(key).payload(payload));
while (producer.get_out_queue_length() > 0) {
producer.poll();
}
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == 1);
const auto& message = messages[0];
CHECK(message.get_payload() == payload);
CHECK(message.get_key() == key);
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(message.get_partition() == partition);
CHECK(!!message.get_error() == false);
CHECK(delivery_report_called == true);
}
SECTION("partitioner callback") {
// Now create a producer and produce a message
string payload = "Hello world! 4";
string key = "hehe";
const string payload = "Hello world! 4";
const string key = "hehe";
bool callback_called = false;
Configuration config = make_producer_config();
TopicConfiguration topic_config;
topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key,
int32_t partition_count) {
CHECK(msg_key == key);
CHECK(partition_count == KAFKA_NUM_PARTITIONS);
CHECK(topic.get_name() == KAFKA_TOPICS[0]);
callback_called = true;
return 0;
});
config.set_default_topic_configuration(topic_config);
Producer producer(config);
Producer producer(move(config));
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).key(key).payload(payload));
producer.poll();
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == 1);
const auto& message = messages[0];
CHECK(message.get_partition() == partition);
CHECK(callback_called == true);
}
}
TEST_CASE("multiple messages", "[producer]") {
size_t message_count = 10;
set<string> payloads;
// Create a consumer and subscribe to this topic
Consumer consumer(make_consumer_config());
consumer.subscribe({ KAFKA_TOPICS[0] });
ConsumerRunner runner(consumer, message_count, KAFKA_NUM_PARTITIONS);
// Now create a producer and produce a message
Producer producer(make_producer_config());
const string payload_base = "Hello world ";
for (size_t i = 0; i < message_count; ++i) {
const string payload = payload_base + to_string(i);
payloads.insert(payload);
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).payload(payload));
}
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == message_count);
for (const auto& message : messages) {
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(payloads.erase(message.get_payload()) == 1);
CHECK(!!message.get_error() == false);
CHECK(!!message.get_key() == false);
CHECK(message.get_partition() >= 0);
CHECK(message.get_partition() < KAFKA_NUM_PARTITIONS);
}
}
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
TEST_CASE("multiple messages with copy-able headers", "[producer][headers]") {
using Hdr = MessageBuilder::HeaderType;
size_t message_count = 2;
string payload = "Hello world with headers";
const string header1, header2 = "", header3 = "header3";
// Create a consumer and subscribe to this topic
Consumer consumer(make_consumer_config());
consumer.subscribe({ KAFKA_TOPICS[0] });
ConsumerRunner runner(consumer, message_count, KAFKA_NUM_PARTITIONS);
// Now create a producer and produce a message
Producer producer(make_producer_config());
MessageBuilder builder(KAFKA_TOPICS[0]);
builder.payload(payload)
.header(Hdr{})
.header(Hdr{"", header2})
.header(Hdr{"header3", header3});
producer.produce(builder);
producer.produce(builder);
//Check we still have the messages after production
CHECK(!!builder.header_list());
CHECK(builder.header_list().size() == 3);
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == message_count);
const auto& message = messages[0];
CHECK(message.get_payload() == payload);
CHECK(!!message.get_error() == false);
//validate headers
REQUIRE(!!message.get_header_list());
REQUIRE(message.get_header_list().size() == 3);
CHECK(message.get_header_list().front() == Hdr{});
CHECK(message.get_header_list().at(1) == Hdr{"", header2});
CHECK(message.get_header_list().back() == Hdr{"header3", header3});
//validate second message
CHECK(messages[0].get_header_list() == messages[1].get_header_list());
CHECK(messages[0].get_header_list().get_handle() != messages[1].get_header_list().get_handle());
}
#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
TEST_CASE("multiple sync messages", "[producer][buffered_producer][sync]") {
size_t message_count = 10;
set<string> payloads;
// Create a consumer and subscribe to this topic
Consumer consumer(make_consumer_config());
consumer.subscribe({ KAFKA_TOPICS[0] });
ConsumerRunner runner(consumer, message_count, KAFKA_NUM_PARTITIONS);
// Now create a producer and produce a message
BufferedProducer<string> producer(make_producer_config());
producer.set_produce_success_callback(dr_callback);
const string payload_base = "Hello world ";
for (size_t i = 0; i < message_count; ++i) {
const string payload = payload_base + to_string(i);
payloads.insert(payload);
producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0]).payload(payload).user_data(&dr_data[i]));
}
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == message_count);
for (size_t i = 0; i < messages.size(); ++i) {
const auto& message = messages[i];
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(payloads.erase(message.get_payload()) == 1);
CHECK(!!message.get_error() == false);
CHECK(!!message.get_key() == false);
CHECK(message.get_partition() >= 0);
CHECK(message.get_partition() < KAFKA_NUM_PARTITIONS);
}
}
TEST_CASE("replay sync messages with errors", "[producer][buffered_producer][sync]") {
size_t num_retries = 4;
// Create a consumer and subscribe to this topic
Consumer consumer(make_consumer_config());
consumer.subscribe({ KAFKA_TOPICS[0] });
ConsumerRunner runner(consumer, 2*(num_retries+1), KAFKA_NUM_PARTITIONS);
// Now create a producer and produce a message
ErrorProducer<string> producer(make_producer_config(), BufferedProducer<string>::TestParameters{true, false});
producer.set_produce_failure_callback(dr_failure_callback);
producer.set_max_number_retries(num_retries);
string payload = "Hello world";
MessageBuilder builder(KAFKA_TOPICS[0]);
builder.payload(payload).user_data(&dr_data[0]);
//Produce the same message twice
producer.sync_produce(builder);
producer.sync_produce(builder);
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == 2*(num_retries+1));
for (size_t i = 0; i < messages.size(); ++i) {
const auto& message = messages[i];
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(message.get_payload() == payload);
CHECK(!!message.get_error() == false);
CHECK(!!message.get_key() == false);
CHECK(message.get_partition() >= 0);
CHECK(message.get_partition() < KAFKA_NUM_PARTITIONS);
}
}
TEST_CASE("replay async messages with errors", "[producer][buffered_producer][async]") {
size_t num_retries = 4;
int exit_flag = 0;
// Now create a producer and produce a message
ErrorProducer<string> producer(make_producer_config(),
BufferedProducer<string>::TestParameters{false, true});
producer.set_max_number_retries(num_retries);
thread flusher_thread(async_flusher_run, ref(producer), ref(exit_flag), 0);
string payload = "Hello world";
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).payload(payload));
this_thread::sleep_for(milliseconds(2000));
exit_flag = 1;
flusher_thread.join();
REQUIRE(producer.get_total_messages_produced() == 0);
CHECK(producer.get_total_messages_dropped() == 1);
}
TEST_CASE("buffered producer", "[producer][buffered_producer]") {
int partition = 0;
// Create a consumer and assign this topic/partition
Consumer consumer(make_consumer_config());
consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
ConsumerRunner runner(consumer, 3, 1);
// Now create a buffered producer and produce two messages
BufferedProducer<string> producer(make_producer_config());
string payload = "Hello world! 2";
string key = "such key";
producer.add_message(MessageBuilder(KAFKA_TOPIC).partition(partition)
const string payload = "Hello world! 2";
const string key = "such key";
producer.add_message(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
.key(key)
.payload(payload));
producer.add_message(producer.make_builder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
producer.flush();
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
producer.wait_for_acks();
// Add another one but then clear it
producer.add_message(producer.make_builder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
producer.clear();
runner.try_join();
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == 3);
const auto& message = messages[0];
CHECK(message.get_key() == key);
CHECK(message.get_topic() == KAFKA_TOPICS[0]);
CHECK(message.get_partition() == partition);
CHECK(!!message.get_error() == false);
CHECK(!!messages[1].get_key() == false);
CHECK(!!messages[2].get_key() == false);
for (const auto& message : messages) {
CHECK(message.get_payload() == payload);
}
}
TEST_CASE("buffered producer with limited buffer", "[producer]") {
int partition = 0;
int num_messages = 4;
// Create a consumer and assign this topic/partition
Consumer consumer(make_consumer_config());
consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
ConsumerRunner runner(consumer, 3, 1);
// Now create a buffered producer and produce two messages
BufferedProducer<string> producer(make_producer_config());
const string payload = "Hello world! 2";
const string key = "such key";
REQUIRE(producer.get_buffer_size() == 0);
REQUIRE(producer.get_max_buffer_size() == -1);
// Limit the size of the internal buffer
producer.set_max_buffer_size(num_messages-1);
while (num_messages--) {
producer.add_message(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).key(key).payload(payload));
}
REQUIRE(producer.get_buffer_size() == 1);
// Finish the runner
runner.try_join();
// Validate messages received
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == producer.get_max_buffer_size());
}
TEST_CASE("multi-threaded buffered producer", "[producer][buffered_producer]") {
int partition = 0;
vector<thread> threads;
int num_messages = 50;
int num_flush = 10;
int exit_flag = 0;
condition_variable clear;
// Create a consumer and assign this topic/partition
Consumer consumer(make_consumer_config());
consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
ConsumerRunner runner(consumer, num_messages, 1);
BufferedProducer<string> producer(make_producer_config());
threads.push_back(thread(producer_run, ref(producer), ref(exit_flag), ref(clear), num_messages, partition));
threads.push_back(thread(flusher_run, ref(producer), ref(exit_flag), num_flush));
// Wait for completion
runner.try_join();
for (auto&& thread : threads) {
thread.join();
}
const auto& messages = runner.get_messages();
REQUIRE(messages.size() == num_messages);
REQUIRE(producer.get_flushes_in_progress() == 0);
REQUIRE(producer.get_pending_acks() == 0);
REQUIRE(producer.get_total_messages_produced() == num_messages);
REQUIRE(producer.get_buffer_size() == 0);
}
TEST_CASE("clear multi-threaded buffered producer", "[producer][buffered_producer]") {
int partition = 0;
vector<thread> threads;
int num_messages = 50;
int exit_flag = 0;
condition_variable clear;
BufferedProducer<string> producer(make_producer_config());
threads.push_back(thread(producer_run, ref(producer), ref(exit_flag), ref(clear), num_messages, partition));
threads.push_back(thread(clear_run, ref(producer), ref(clear)));
// Wait for completion
for (auto&& thread : threads) {
thread.join();
}
REQUIRE(producer.get_total_messages_produced() == 0);
REQUIRE(producer.get_flushes_in_progress() == 0);
REQUIRE(producer.get_pending_acks() == 0);
REQUIRE(producer.get_buffer_size() < num_messages);
}


@@ -0,0 +1,141 @@
#include <vector>
#include <thread>
#include <set>
#include <mutex>
#include <chrono>
#include <iterator>
#include <condition_variable>
#include <catch.hpp>
#include <memory>
#include <stdexcept>
#include "cppkafka/cppkafka.h"
#include "test_utils.h"
using std::vector;
using std::move;
using std::string;
using std::exception;
using std::thread;
using std::set;
using std::mutex;
using std::tie;
using std::condition_variable;
using std::lock_guard;
using std::unique_lock;
using std::unique_ptr;
using std::make_move_iterator;
using std::chrono::seconds;
using std::chrono::milliseconds;
using std::chrono::system_clock;
using namespace cppkafka;
#define ENABLE_STRICT_RR_ORDER 0
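// When flipped to 1, the test below additionally asserts strict round-robin partition
// ordering and re-validates serial polling after the strategy is removed; by default
// only the payloads are checked.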
//==================================================================================
// Helper functions
//==================================================================================
static Configuration make_producer_config() {
Configuration config = {
{ "metadata.broker.list", KAFKA_TEST_INSTANCE },
{ "max.in.flight", 1 }
};
return config;
}
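// max.in.flight is capped at 1 above so retries cannot reorder produced messages, which
// the per-partition ordering checks further down rely on.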
static Configuration make_consumer_config(const string& group_id = make_consumer_group_id()) {
Configuration config = {
{ "metadata.broker.list", KAFKA_TEST_INSTANCE },
{ "enable.auto.commit", false },
{ "group.id", group_id },
};
return config;
}
#if ENABLE_STRICT_RR_ORDER
static vector<int> make_roundrobin_partition_vector(int total_messages) {
vector<int> partition_order;
for (int i = 0, partition = 0; i < total_messages+1; ++i) {
if ((i % KAFKA_NUM_PARTITIONS) == 0) {
partition = 0;
}
partition_order.push_back(partition++);
}
return partition_order;
}
#endif
//========================================================================
// TESTS
//========================================================================
TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
TopicPartitionList assignment;
int messages_per_partition = 3;
int total_messages = KAFKA_NUM_PARTITIONS * messages_per_partition;
// Create a consumer and subscribe to the topic
PollStrategyAdapter consumer(make_consumer_config());
consumer.subscribe({ KAFKA_TOPICS[0] });
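// Install a RoundRobinPollStrategy so the consumer polls each assigned partition in turn
// rather than draining them in whatever order librdkafka serves messages.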
consumer.add_polling_strategy(unique_ptr<PollInterface>(new RoundRobinPollStrategy(consumer)));
PollConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);
// Produce messages so we stop the consumer
BufferedProducer<string> producer(make_producer_config());
string payload = "RoundRobin";
// push 3 messages in each partition
for (int i = 0; i < total_messages; ++i) {
producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0])
.partition(i % KAFKA_NUM_PARTITIONS)
.payload(payload));
}
producer.flush();
runner.try_join();
// Check that we have all messages
REQUIRE(runner.get_messages().size() == total_messages);
#if ENABLE_STRICT_RR_ORDER
// Check that we have one message from each partition in desired order
vector<int> partition_order = make_roundrobin_partition_vector(total_messages+KAFKA_NUM_PARTITIONS);
int partition_idx;
for (int i = 0; i < total_messages; ++i) {
if (i == 0) {
// find first polled partition index
partition_idx = runner.get_messages()[i].get_partition();
}
CHECK(runner.get_messages()[i].get_partition() == partition_order[i+partition_idx]);
REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
}
//============ resume original poll strategy =============//
//validate that once the round robin strategy is deleted, normal poll works as before
consumer.delete_polling_strategy();
ConsumerRunner serial_runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);
payload = "SerialPolling";
// push 3 messages in each partition
for (int i = 0; i < total_messages; ++i) {
producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
}
producer.flush();
serial_runner.try_join();
// Check that we have all messages
REQUIRE(serial_runner.get_messages().size() == total_messages);
for (int i = 0; i < total_messages; ++i) {
REQUIRE((string)serial_runner.get_messages()[i].get_payload() == payload);
}
#else
// Simple payload check
for (int i = 0; i < total_messages; ++i) {
REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
}
#endif
}

tests/test_main.cpp Normal file

@@ -0,0 +1,76 @@
#include <chrono>
#define CATCH_CONFIG_RUNNER
#include <catch.hpp>
using std::string;
using std::chrono::steady_clock;
using std::chrono::milliseconds;
using std::chrono::duration_cast;
using Catch::ConsoleReporter;
using Catch::ReporterConfig;
using Catch::ReporterPreferences;
using Catch::TestCaseInfo;
using Catch::TestCaseStats;
using Catch::Totals;
using Catch::Session;
std::vector<std::string> KAFKA_TOPICS = {KAFKA_TOPIC_NAMES};
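// KAFKA_TOPIC_NAMES is expected to be injected as a preprocessor definition (typically by
// the build system) listing the topics the tests run against.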
namespace cppkafka {
class InstantTestReporter : public ConsoleReporter {
public:
using ClockType = steady_clock;
InstantTestReporter(const ReporterConfig& config)
: ConsoleReporter(config) {
}
static string getDescription() {
return "Reports the tests' progress as they run";
}
ReporterPreferences getPreferences() const override {
ReporterPreferences output;
output.shouldRedirectStdOut = false;
return output;
}
void testCaseStarting(const TestCaseInfo& info) override {
ConsoleReporter::testCaseStarting(info);
stream << "Running test \"" << info.name << "\" @ " << info.lineInfo << "\n";
test_start_ts_ = ClockType::now();
}
void testCaseEnded(const TestCaseStats& stats) override {
const Totals& totals = stats.totals;
const size_t totalTestCases = totals.assertions.passed + totals.assertions.failed;
const auto elapsed = ClockType::now() - test_start_ts_;
stream << "Done. " << totals.assertions.passed << "/" << totalTestCases
<< " assertions succeeded in " << duration_cast<milliseconds>(elapsed).count()
<< "ms\n";
}
private:
ClockType::time_point test_start_ts_;
};
CATCH_REGISTER_REPORTER("instant", InstantTestReporter)
} // cppkafka
int main(int argc, char* argv[]) {
Session session;
int returnCode = session.applyCommandLine( argc, argv );
if (returnCode != 0) {
return returnCode;
}
if (session.configData().reporterNames.empty()) {
// Set our reporter as the default one
session.configData().reporterNames.emplace_back("instant");
}
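// A reporter passed on the command line (e.g. "--reporter console") still takes precedence,
// since the "instant" default is only set when none was specified.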
int numFailed = session.run();
return numFailed;
}


@@ -1,77 +1,94 @@
#include <mutex>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <random>
#include "test_utils.h"
using std::vector;
using std::move;
using std::thread;
using std::mutex;
using std::lock_guard;
using std::unique_lock;
using std::condition_variable;
using std::chrono::system_clock;
using std::chrono::duration_cast;
using std::chrono::milliseconds;
using std::chrono::seconds;
using std::chrono::system_clock;
using std::hex;
using std::move;
using std::numeric_limits;
using std::ostringstream;
using std::random_device;
using std::string;
using std::uniform_int_distribution;
using std::unique_ptr;
using std::vector;
using cppkafka::Consumer;
using cppkafka::Message;
//==================================================================================
// PollStrategyAdapter
//==================================================================================
PollStrategyAdapter::PollStrategyAdapter(Configuration config)
: Consumer(config) {
}
void PollStrategyAdapter::add_polling_strategy(unique_ptr<PollInterface> poll_strategy) {
strategy_ = move(poll_strategy);
}
void PollStrategyAdapter::delete_polling_strategy() {
strategy_.reset();
}
Message PollStrategyAdapter::poll() {
if (strategy_) {
return strategy_->poll();
}
return Consumer::poll();
}
Message PollStrategyAdapter::poll(milliseconds timeout) {
if (strategy_) {
return strategy_->poll(timeout);
}
return Consumer::poll(timeout);
}
vector<Message> PollStrategyAdapter::poll_batch(size_t max_batch_size) {
if (strategy_) {
return strategy_->poll_batch(max_batch_size);
}
return Consumer::poll_batch(max_batch_size);
}
vector<Message> PollStrategyAdapter::poll_batch(size_t max_batch_size, milliseconds timeout) {
if (strategy_) {
return strategy_->poll_batch(max_batch_size, timeout);
}
return Consumer::poll_batch(max_batch_size, timeout);
}
void PollStrategyAdapter::set_timeout(milliseconds timeout) {
if (strategy_) {
strategy_->set_timeout(timeout);
}
else {
Consumer::set_timeout(timeout);
}
}
ConsumerRunner::~ConsumerRunner() {
try_join();
}
const vector<Message>& ConsumerRunner::get_messages() const {
return messages_;
}
void ConsumerRunner::try_join() {
if (thread_.joinable()) {
thread_.join();
milliseconds PollStrategyAdapter::get_timeout() {
if (strategy_) {
return strategy_->get_timeout();
}
return Consumer::get_timeout();
}
// Misc
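// Builds a unique group id from the current time and a random 64-bit value (both written
// in hex) so that concurrent test runs never join each other's consumer groups.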
string make_consumer_group_id() {
ostringstream output;
output << hex;
random_device rd;
uniform_int_distribution<uint64_t> distribution(0, numeric_limits<uint64_t>::max());
const auto now = duration_cast<seconds>(system_clock::now().time_since_epoch());
const auto random_number = distribution(rd);
output << now.count() << random_number;
return output.str();
}


@@ -1,24 +1,70 @@
#ifndef CPPKAFKA_TEST_UTILS_H
#define CPPKAFKA_TEST_UTILS_H
#include <string>
#include <thread>
#include <vector>
#include "cppkafka/consumer.h"
#include "cppkafka/utils/roundrobin_poll_strategy.h"
#include "cppkafka/utils/consumer_dispatcher.h"
extern const std::vector<std::string> KAFKA_TOPICS;
using namespace cppkafka;
//==================================================================================
// BasicConsumerRunner
//==================================================================================
template <typename ConsumerType>
class BasicConsumerRunner {
public:
BasicConsumerRunner(ConsumerType& consumer,
size_t expected,
size_t partitions);
BasicConsumerRunner(const BasicConsumerRunner&) = delete;
BasicConsumerRunner& operator=(const BasicConsumerRunner&) = delete;
~BasicConsumerRunner();
const std::vector<cppkafka::Message>& get_messages() const;
void try_join();
private:
cppkafka::Consumer& consumer_;
ConsumerType& consumer_;
std::thread thread_;
std::vector<cppkafka::Message> messages_;
};
//==================================================================================
// PollStrategyAdapter
//==================================================================================
/**
* \brief Consumer adapter that delegates polling to a pluggable PollInterface
* strategy, so it can be combined with other util classes such as
* BasicConsumerDispatcher.
*/
class PollStrategyAdapter : public Consumer {
public:
PollStrategyAdapter(Configuration config);
void add_polling_strategy(std::unique_ptr<PollInterface> poll_strategy);
void delete_polling_strategy();
Message poll();
Message poll(std::chrono::milliseconds timeout);
std::vector<Message> poll_batch(size_t max_batch_size);
std::vector<Message> poll_batch(size_t max_batch_size,
std::chrono::milliseconds timeout);
void set_timeout(std::chrono::milliseconds timeout);
std::chrono::milliseconds get_timeout();
private:
std::unique_ptr<PollInterface> strategy_;
};
// Misc
std::string make_consumer_group_id();
using PollConsumerRunner = BasicConsumerRunner<PollStrategyAdapter>;
using ConsumerRunner = BasicConsumerRunner<Consumer>;
#include "test_utils_impl.h"
#endif // CPPKAFKA_TEST_UTILS_H
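Not part of the diff: a minimal sketch of how the PollStrategyAdapter and the PollConsumerRunner alias declared above might be combined in a test. The topic name and the expected/partition counts are placeholders; RoundRobinPollStrategy comes from the roundrobin_poll_strategy.h header included at the top of this file.

#include <memory>
#include "test_utils.h"

// Sketch only: wire a round-robin strategy into the adapter and run a consumer runner.
void run_round_robin_example(const Configuration& config) {
    PollStrategyAdapter consumer(config);
    consumer.subscribe({ "test-topic" });  // placeholder topic name
    // Delegate polling to a round-robin strategy instead of the default Consumer::poll.
    consumer.add_polling_strategy(
        std::unique_ptr<PollInterface>(new RoundRobinPollStrategy(consumer)));
    // Consume until 10 messages arrive across 3 partitions, or the runner's internal timeout hits.
    PollConsumerRunner runner(consumer, /*expected=*/10, /*partitions=*/3);
    runner.try_join();  // waits for the runner's background thread to finish
    consumer.delete_polling_strategy();
}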

tests/test_utils_impl.h Normal file

@@ -0,0 +1,102 @@
#include <mutex>
#include <chrono>
#include <condition_variable>
#include "cppkafka/utils/consumer_dispatcher.h"
using std::vector;
using std::move;
using std::thread;
using std::mutex;
using std::lock_guard;
using std::unique_lock;
using std::condition_variable;
using std::chrono::system_clock;
using std::chrono::milliseconds;
using std::chrono::seconds;
using cppkafka::Consumer;
using cppkafka::BasicConsumerDispatcher;
using cppkafka::Message;
using cppkafka::TopicPartition;
//==================================================================================
// BasicConsumerRunner
//==================================================================================
template <typename ConsumerType>
BasicConsumerRunner<ConsumerType>::BasicConsumerRunner(ConsumerType& consumer,
size_t expected,
size_t partitions)
: consumer_(consumer) {
bool booted = false;
mutex mtx;
condition_variable cond;
thread_ = thread([&, expected, partitions]() {
consumer_.set_timeout(milliseconds(500));
size_t number_eofs = 0;
auto start = system_clock::now();
BasicConsumerDispatcher<ConsumerType> dispatcher(consumer_);
dispatcher.run(
// Message callback
[&](Message msg) {
if (number_eofs == partitions) {
messages_.push_back(move(msg));
}
},
// EOF callback
[&](typename BasicConsumerDispatcher<ConsumerType>::EndOfFile,
const TopicPartition& topic_partition) {
if (number_eofs != partitions) {
number_eofs++;
if (number_eofs == partitions) {
lock_guard<mutex> _(mtx);
booted = true;
cond.notify_one();
}
}
},
// Event callback: invoked every time any event is processed
[&](typename BasicConsumerDispatcher<ConsumerType>::Event) {
if (expected > 0 && messages_.size() == expected) {
dispatcher.stop();
}
if (expected == 0 && number_eofs >= partitions) {
dispatcher.stop();
}
if (system_clock::now() - start >= seconds(20)) {
dispatcher.stop();
}
}
);
// dispatcher has stopped
if (number_eofs < partitions) {
lock_guard<mutex> _(mtx);
booted = true;
cond.notify_one();
}
});
unique_lock<mutex> lock(mtx);
while (!booted) {
cond.wait(lock);
}
}
template <typename ConsumerType>
BasicConsumerRunner<ConsumerType>::~BasicConsumerRunner() {
try_join();
}
template <typename ConsumerType>
const std::vector<Message>& BasicConsumerRunner<ConsumerType>::get_messages() const {
return messages_;
}
template <typename ConsumerType>
void BasicConsumerRunner<ConsumerType>::try_join() {
if (thread_.joinable()) {
thread_.join();
}
}
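Not part of the diff: the BasicConsumerDispatcher used above matches callbacks by their argument types, so it can also be driven directly. A minimal sketch with placeholder handler bodies, assuming the ConsumerDispatcher alias for BasicConsumerDispatcher<Consumer>:

#include "cppkafka/consumer.h"
#include "cppkafka/utils/consumer_dispatcher.h"

// Sketch only: drive the dispatcher directly instead of through BasicConsumerRunner.
void dispatch_example(cppkafka::Consumer& consumer) {
    cppkafka::ConsumerDispatcher dispatcher(consumer);
    dispatcher.run(
        // Invoked for every successfully consumed message
        [&](cppkafka::Message msg) {
            // placeholder: handle the message
        },
        // Invoked when a partition reaches end of file; stop here for illustration
        [&](cppkafka::ConsumerDispatcher::EndOfFile, const cppkafka::TopicPartition& /*tp*/) {
            dispatcher.stop();
        }
    );
}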


@@ -1,18 +1,15 @@
#include <sstream>
#include <gtest/gtest.h>
#include <catch.hpp>
#include "cppkafka/topic_partition_list.h"
#include "cppkafka/topic_partition.h"
using std::ostringstream;
using std::set;
using std::string;
using namespace cppkafka;
class TopicPartitionListTest : public testing::Test {
public:
};
TEST_F(TopicPartitionListTest, Conversion) {
TEST_CASE("rdkafka conversion", "[topic_partition]") {
TopicPartitionList list1;
list1.push_back("foo");
list1.push_back({ "bar", 2 });
@@ -20,29 +17,71 @@ TEST_F(TopicPartitionListTest, Conversion) {
TopicPartitionList list2 = convert(convert(list1));
EXPECT_EQ(list1.size(), list2.size());
CHECK(list1.size() == list2.size());
for (size_t i = 0; i < list1.size(); ++i) {
const auto& item1 = list1[i];
const auto& item2 = list2[i];
EXPECT_EQ(item1.get_topic(), item2.get_topic());
EXPECT_EQ(item1.get_partition(), item2.get_partition());
EXPECT_EQ(item1.get_offset(), item2.get_offset());
CHECK(item1.get_topic() == item2.get_topic());
CHECK(item1.get_partition() == item2.get_partition());
CHECK(item1.get_offset() == item2.get_offset());
}
}
TEST_F(TopicPartitionListTest, AsString) {
TEST_CASE("topic partition to string", "[topic_partition]") {
ostringstream output;
TopicPartition topic_partition("foo", 5);
output << topic_partition;
EXPECT_EQ("foo[5]", output.str());
CHECK(output.str() == "foo[5:#]");
}
TEST_F(TopicPartitionListTest, ListAsString) {
TEST_CASE("topic partition list to string", "[topic_partition]") {
ostringstream output;
TopicPartitionList list;
list.push_back("foo");
list.push_back({ "bar", 2 });
list.push_back({ "foobar", 3, 4 });
output << list;
EXPECT_EQ("[ foo[-1], bar[2] ]", output.str());
CHECK(output.str() == "[ foo[-1:#], bar[2:#], foobar[3:4] ]");
}
TEST_CASE("find matches by topic", "[topic_partition]") {
const TopicPartitionList list = {
{ "foo", 0 },
{ "bar", 3 },
{ "fb", 1 },
{ "foo", 1 },
{ "fb", 2 },
{ "other", 1 },
{ "a", 1 }
};
const TopicPartitionList expected = {
{ "foo", 0 },
{ "fb", 1 },
{ "foo", 1 },
{ "fb", 2 },
};
const TopicPartitionList subset = find_matches(list, set<string>{"foo", "fb"});
CHECK(subset == expected);
}
TEST_CASE("find matches by id", "[topic_partition]") {
const TopicPartitionList list = {
{ "foo", 2 },
{ "foo", 3 },
{ "foo", 4 },
{ "foo", 5 },
{ "foo", 6 },
{ "foo", 7 },
{ "foo", 8 }
};
const TopicPartitionList expected = {
{ "foo", 2 },
{ "foo", 5 },
{ "foo", 8 },
};
const TopicPartitionList subset = find_matches(list, set<int>{2,5,8});
CHECK(subset == expected);
}
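A reading aid for the expected strings above (not part of the diff): the stream operator appears to print topic[partition:offset], with '#' standing in for an offset that was never set. A minimal sketch:

#include <sstream>
#include "cppkafka/topic_partition.h"

// Sketch only: per the test above, streaming this partition yields "foobar[3:4]".
std::string partition_to_string_example() {
    std::ostringstream output;
    output << cppkafka::TopicPartition("foobar", 3, 4);
    return output.str();
}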

third_party/Catch2 vendored Submodule

Submodule third_party/Catch2 added at d2d8455b57

Submodule third_party/googletest deleted from 0a439623f7
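Not part of the diff: the submodule swap above replaces the vendored googletest with Catch2. With the single-header catch.hpp seen in the test includes, a test binary typically obtains its main() from one translation unit along these lines; the actual wiring in this repository is not shown here, so treat this as an assumption.

// Sketch only: exactly one test source defines CATCH_CONFIG_MAIN so Catch2 generates main().
#define CATCH_CONFIG_MAIN
#include <catch.hpp>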