Mirror of https://github.com/Telecominfraproject/wlan-cloud-lib-cppkafka.git, synced 2025-11-05 13:07:56 +00:00.

Compare commits (110 commits)
The author, date, and message columns were not captured by the mirror; only the commit SHAs survive:

| SHA1 |
|---|
| f458514fb2 |
| d3ef9cad32 |
| df04b27e22 |
| d6f8129207 |
| 3238c94f43 |
| 081f8d80a0 |
| 577bbb0242 |
| 6158d932c0 |
| 5c72f3fe28 |
| 069ea3df8e |
| c5aca985b8 |
| eb46b8808e |
| b8f4be5e1b |
| 9a20b588c5 |
| 3c72eb5752 |
| 157b7ec997 |
| f220062e40 |
| 7530b9f9e4 |
| 3cf9bb53e9 |
| 0c7a3b0c25 |
| 972a008aa4 |
| a4eefacaa1 |
| 23810654ab |
| f746653841 |
| 597c026555 |
| 71e6e2e4e5 |
| f15b59cb13 |
| 5dcede6411 |
| 5cad740aea |
| 9714bec5bf |
| 15fdab6943 |
| ea9601ba1b |
| ffc64b9a5a |
| 556f15a43f |
| 6144330835 |
| 169ea4f8ed |
| 65f35dcd39 |
| 532d83b225 |
| a1ce130bfd |
| 71afaba3e1 |
| 15be627f8e |
| 429ec92369 |
| f543810515 |
| 841e632fbd |
| 46c396f729 |
| ee71b3979a |
| d9feb5c3db |
| 2451c74c4f |
| ae74814791 |
| ee0c0829a4 |
| 59d8adc4a4 |
| cb2c8877d8 |
| 30b3652a94 |
| 8fc6a0f02d |
| 83a963c1db |
| c95d790547 |
| eee60407fa |
| 05d5a0404b |
| 3d1402f53a |
| 6db2cdcecf |
| 018a1f52d9 |
| df12b5fd5c |
| 9513b01b8e |
| 86ed154c92 |
| 69e30f9e74 |
| 675954ef75 |
| 98b9839ff9 |
| d173526f99 |
| deff8b1ff3 |
| cc7d183ff1 |
| 1817115784 |
| 9c09243633 |
| 5b63c642f9 |
| c874ccc43f |
| af368bba04 |
| c7715733bf |
| 80e0ed5007 |
| a1dc9d115e |
| edb2737263 |
| 5bd61e8915 |
| 0e96f87eeb |
| 702279d0e9 |
| 94dac08d79 |
| 29fa7bed19 |
| 179e669c06 |
| 853396acab |
| 5889c322c2 |
| 17da880854 |
| 4c9aa6fcd4 |
| 37cb16c3f5 |
| 04d5b41c6b |
| c3011c9eed |
| d0c794b978 |
| 86d4bc8037 |
| 0d4b9ef2f6 |
| 1582f6156d |
| 2340046544 |
| b7a0dce710 |
| 08815e97c0 |
| 9e6315fcc2 |
| 191956b4ca |
| 4af48ff0e7 |
| 556dac7015 |
| 4cd03aea3c |
| 2e6bfd64d3 |
| ed71ab2daa |
| 52822fdb61 |
| bb5fb490ce |
| 4369b75695 |
| f0ec0bfb10 |
.gitmodules (vendored, 6 lines changed)

```diff
@@ -1,3 +1,3 @@
-[submodule "third_party/googletest"]
-	path = third_party/googletest
-	url = https://github.com/google/googletest.git
+[submodule "third_party/Catch2"]
+	path = third_party/Catch2
+	url = https://github.com/catchorg/Catch2.git
```
.travis.yml (new file, 43 lines)

```yaml
language: cpp

sudo: false

compiler:
  - gcc
  - clang

env:
  - RDKAFKA_VERSION=v0.11.0

os:
  - linux

addons:
  apt:
    packages:
      - libboost-dev
      - libboost-program-options-dev
      - zookeeper
      - zookeeperd

before_script:
  - service zookeeper start
  - KAFKA_VERSION=2.11-1.0.0
  - wget http://apache.cs.utah.edu/kafka/1.0.0/kafka_$KAFKA_VERSION.tgz
  - tar xvzf kafka_$KAFKA_VERSION.tgz
  - ./kafka_$KAFKA_VERSION/bin/kafka-server-start.sh ./kafka_$KAFKA_VERSION/config/server.properties > /dev/null 2> /dev/null &
  - git clone https://github.com/edenhill/librdkafka.git
  - while ! echo "asd" | nc localhost 9092; do sleep 1; done
  - ./kafka_$KAFKA_VERSION/bin/kafka-topics.sh --create --zookeeper localhost:2181 --topic cppkafka_test1 --partitions 3 --replication-factor 1
  - ./kafka_$KAFKA_VERSION/bin/kafka-topics.sh --create --zookeeper localhost:2181 --topic cppkafka_test2 --partitions 3 --replication-factor 1

script:
  - cd librdkafka
  - git checkout $RDKAFKA_VERSION
  - ./configure --prefix=./install && make libs && make install
  - cd ..
  - mkdir build && cd build
  - cmake .. -DRDKAFKA_ROOT_DIR=../librdkafka/install/ -DKAFKA_TEST_INSTANCE=localhost:9092
  - make examples
  - make tests
  - ./tests/cppkafka_tests
```
CMakeLists.txt

```diff
@@ -3,8 +3,9 @@ project(cppkafka)
 
 # Set the version number.
 set(CPPKAFKA_VERSION_MAJOR 0)
-set(CPPKAFKA_VERSION_MINOR 1)
+set(CPPKAFKA_VERSION_MINOR 2)
 set(CPPKAFKA_VERSION "${CPPKAFKA_VERSION_MAJOR}.${CPPKAFKA_VERSION_MINOR}")
+set(RDKAFKA_MIN_VERSION 0x00090400)
 
 if(MSVC)
     # Don't always use Wall, since VC's /Wall is ridiculously verbose.
@@ -14,6 +15,7 @@ if(MSVC)
     add_definitions("-D_CRT_SECURE_NO_WARNINGS=1")
     add_definitions("-D_SCL_SECURE_NO_WARNINGS=1")
     add_definitions("-DNOGDI=1")
+    add_definitions("-DNOMINMAX=1")
 else()
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall")
 endif()
@@ -24,7 +26,19 @@ set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
 
 # Build output checks
+option(CPPKAFKA_CMAKE_VERBOSE "Generate verbose output." OFF)
 option(CPPKAFKA_BUILD_SHARED "Build cppkafka as a shared library." ON)
+option(CPPKAFKA_DISABLE_TESTS "Disable build of cppkafka tests." OFF)
+option(CPPKAFKA_DISABLE_EXAMPLES "Disable build of cppkafka examples." OFF)
+option(CPPKAFKA_BOOST_STATIC_LIBS "Link with Boost static libraries." ON)
+option(CPPKAFKA_BOOST_USE_MULTITHREADED "Use Boost multithreaded libraries." ON)
+option(CPPKAFKA_RDKAFKA_STATIC_LIB "Link with Rdkafka static library." OFF)
+
+# Disable output from find_package macro
+if (NOT CPPKAFKA_CMAKE_VERBOSE)
+    set(FIND_PACKAGE_QUIET QUIET)
+endif()
 
 if(CPPKAFKA_BUILD_SHARED)
     message(STATUS "Build will generate a shared library. "
             "Use CPPKAFKA_BUILD_SHARED=0 to perform a static build")
@@ -35,17 +49,41 @@ else()
     add_definitions("-DCPPKAFKA_STATIC=1")
 endif()
 
+if (CPPKAFKA_RDKAFKA_STATIC_LIB)
+    add_definitions("-DLIBRDKAFKA_STATICLIB")
+endif()
+
 # Look for Boost (just need boost.optional headers here)
-find_package(Boost REQUIRED)
-find_package(RdKafka REQUIRED)
+find_package(Boost REQUIRED ${FIND_PACKAGE_QUIET})
+find_package(RdKafka REQUIRED ${FIND_PACKAGE_QUIET})
+
+if (Boost_FOUND)
+    find_package(Boost COMPONENTS program_options ${FIND_PACKAGE_QUIET})
+    set(Boost_USE_STATIC_LIBS ${CPPKAFKA_BOOST_STATIC_LIBS})
+    set(Boost_USE_MULTITHREADED ${CPPKAFKA_BOOST_USE_MULTITHREADED})
+    include_directories(${Boost_INCLUDE_DIRS})
+    link_directories(${Boost_LIBRARY_DIRS})
+    if (CPPKAFKA_CMAKE_VERBOSE)
+        message(STATUS "Boost include dir: ${Boost_INCLUDE_DIRS}")
+        message(STATUS "Boost library dir: ${Boost_LIBRARY_DIRS}")
+        message(STATUS "Boost use static libs: ${Boost_USE_STATIC_LIBS}")
+        message(STATUS "Boost is multi-threaded: ${CPPKAFKA_BOOST_USE_MULTITHREADED}")
+        message(STATUS "Boost libraries: ${Boost_LIBRARIES}")
+    endif()
+endif()
 
 add_subdirectory(src)
 add_subdirectory(include)
 
-add_subdirectory(examples)
+# Examples target
+if (NOT CPPKAFKA_DISABLE_EXAMPLES AND Boost_PROGRAM_OPTIONS_FOUND)
+    add_subdirectory(examples)
+else()
+    message(STATUS "Disabling examples")
+endif()
 
 # Add a target to generate API documentation using Doxygen
-find_package(Doxygen QUIET)
+find_package(Doxygen ${FIND_PACKAGE_QUIET})
 if(DOXYGEN_FOUND)
     configure_file(
         ${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile.in
@@ -60,39 +98,28 @@ if(DOXYGEN_FOUND)
     )
 endif(DOXYGEN_FOUND)
 
-set(GOOGLETEST_ROOT ${CMAKE_SOURCE_DIR}/third_party/googletest)
-if(EXISTS "${GOOGLETEST_ROOT}/CMakeLists.txt")
-    set(GOOGLETEST_INCLUDE ${GOOGLETEST_ROOT}/googletest/include)
-    set(GOOGLETEST_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/googletest)
-    set(GOOGLETEST_LIBRARY ${GOOGLETEST_BINARY_DIR}/googletest)
-
-    include(ExternalProject)
-
-    ExternalProject_Add(
-        googletest
-        DOWNLOAD_COMMAND ""
-        SOURCE_DIR ${GOOGLETEST_ROOT}
-        BINARY_DIR ${GOOGLETEST_BINARY_DIR}
-        CMAKE_CACHE_ARGS "-DBUILD_GTEST:bool=ON" "-DBUILD_GMOCK:bool=OFF"
-                         "-Dgtest_force_shared_crt:bool=ON"
-        INSTALL_COMMAND ""
-    )
-
-    enable_testing()
-    add_subdirectory(tests)
-    # Make sure we build googletest before anything else
-    add_dependencies(cppkafka googletest)
+if(NOT CPPKAFKA_DISABLE_TESTS)
+    set(CATCH_ROOT ${CMAKE_SOURCE_DIR}/third_party/Catch2)
+    if(EXISTS ${CATCH_ROOT}/CMakeLists.txt)
+        set(CATCH_INCLUDE ${CATCH_ROOT}/single_include)
+        enable_testing()
+        add_subdirectory(tests)
+    else()
+        message(STATUS "Disabling tests because submodule Catch2 isn't checked out")
+    endif()
 else()
     message(STATUS "Disabling tests")
 endif()
 
-# Configure the uninstall script
-configure_file(
-    "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
-    "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
-    IMMEDIATE @ONLY
-)
+if(NOT TARGET uninstall)
+    # Configure the uninstall script
+    configure_file(
+        "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
+        "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
+        IMMEDIATE @ONLY
+    )
 
-# Add uninstall target
-add_custom_target(uninstall
-    COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
+    # Add uninstall target
+    add_custom_target(uninstall
+        COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
+endif()
```
README.md (57 lines changed)

````diff
@@ -1,7 +1,11 @@
-# cppkafka
----
-High level C++ wrapper for _rdkafka_
----
+# cppkafka: high level C++ wrapper for _rdkafka_
+
+[![Build status](https://travis-ci.org/mfontanini/cppkafka.svg?branch=master)](https://travis-ci.org/mfontanini/cppkafka)
+
+_cppkafka_ allows C++ applications to consume and produce messages using the Apache Kafka
+protocol. The library is built on top of [_librdkafka_](https://github.com/edenhill/librdkafka),
+and provides a high level API that uses modern C++ features to make it easier to write code
+while keeping the wrapper's performance overhead to a minimum.
 
 # Features
 
@@ -9,7 +13,7 @@ High level C++ wrapper for _rdkafka_
 simple, less error prone way.
 
 * _cppkafka_ provides an API to produce messages as well as consuming messages, but the latter is
-only supported via the high level consumer API. _cppkafka_ requires **rdkakfa >= 0.9.4** in
+only supported via the high level consumer API. _cppkafka_ requires **rdkafka >= 0.9.4** in
 order to use it. Other wrapped functionalities are also provided, like fetching metadata,
 offsets, etc.
 
@@ -22,7 +26,7 @@ _cppkafka_'s API is simple to use. For example, this code creates a producer tha
 into some partition:
 
 ```c++
-#include <cppkafka/producer.h>
+#include <cppkafka/cppkafka.h>
 
 using namespace std;
 using namespace cppkafka;
@@ -39,6 +43,7 @@ int main() {
     // Produce a message!
     string message = "hey there!";
     producer.produce(MessageBuilder("my_topic").partition(0).payload(message));
+    producer.flush();
 }
 ```
 
@@ -50,38 +55,47 @@ In order to compile _cppkafka_ you need:
 * _CMake_
 * A compiler with good C++11 support (e.g. gcc >= 4.8). This was tested successfully on
 _g++ 4.8.3_.
-* The boost library. _cppkafka_ only requires boost.optional, which is a header only library,
-so this doesn't add any additional runtime dependencies.
+* The boost library.
 
 Now, in order to build, just run:
 
 ```Shell
 mkdir build
 cd build
-cmake ..
+cmake <OPTIONS> ..
 make
 ```
 
 ## CMake options
 
-If you have installed _librdkafka_ on a non standard directory, you can use the
-`RDKAFKA_ROOT_DIR` cmake parameter when configuring the project:
+The following cmake options can be specified:
+* `RDKAFKA_ROOT_DIR` : Specify a different librdkafka install directory.
+* `BOOST_ROOT` : Specify a different Boost install directory.
+* `CPPKAFKA_CMAKE_VERBOSE` : Generate verbose output. Default is `OFF`.
+* `CPPKAFKA_BUILD_SHARED` : Build cppkafka as a shared library. Default is `ON`.
+* `CPPKAFKA_DISABLE_TESTS` : Disable build of cppkafka tests. Default is `OFF`.
+* `CPPKAFKA_DISABLE_EXAMPLES` : Disable build of cppkafka examples. Default is `OFF`.
+* `CPPKAFKA_BOOST_STATIC_LIBS` : Link with Boost static libraries. Default is `ON`.
+* `CPPKAFKA_BOOST_USE_MULTITHREADED` : Use Boost multi-threaded libraries. Default is `ON`.
+* `CPPKAFKA_RDKAFKA_STATIC_LIB` : Link to Rdkafka static library. Default is `OFF`.
+
+Example:
 ```Shell
-cmake .. -DRDKAFKA_ROOT_DIR=/some/other/dir
+cmake -DRDKAFKA_ROOT_DIR=/some/other/dir -DCPPKAFKA_BUILD_SHARED=OFF ...
 ```
 
-Note that finding _librdkafka_ will succeed iff there's an _include_ and _lib_
-directories inside the specified path, including both the _rdkafka.h_ header
-and the _librdkafka_ library file.
-
----
-
-By default, a shared library will be built. If you want to perform a static build,
-use the _CPPKAFKA_BUILD_SHARED_ parameter:
-
-```Shell
-cmake .. -DCPPKAFKA_BUILD_SHARED=0
-```
+The `RDKAFKA_ROOT_DIR` must contain the following structure. If the system
+architecture is 64-bit and both `lib` and `lib64` folders are available, the `lib64`
+folder location will be selected by cmake.
+
+```Shell
+${RDKAFKA_ROOT_DIR}/
+ |
+ + include/librdkafka/rdkafka.h
+ |
+ + lib/librdkafka.a
+ |
+ + lib64/librdkafka.a (optional)
+```
 
 # Using
@@ -99,4 +113,3 @@ _Doxygen_ to be installed. The documentation will be written in html format at
 
 Make sure to check the [wiki](https://github.com/mfontanini/cppkafka/wiki) which includes
 some documentation about the project and some of its features.
-
````
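The README's snippet covers only the producer side. For the high level consumer API it mentions, a minimal counterpart sketch along the same lines (not part of this changeset; the broker address, topic, and group id are placeholder values):

```c++
#include <cppkafka/cppkafka.h>
#include <iostream>

using namespace std;
using namespace cppkafka;

int main() {
    // Placeholder broker and group values
    Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" },
        { "group.id", "example_group" }
    };

    // Create the consumer and subscribe to a topic
    Consumer consumer(config);
    consumer.subscribe({ "my_topic" });

    // Poll once; the returned message might be empty, so check it before use
    Message msg = consumer.poll();
    if (msg && !msg.get_error()) {
        cout << msg.get_payload() << endl;
    }
}
```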
cmake/FindRdKafka.cmake

```diff
@@ -1,3 +1,18 @@
+# Override default CMAKE_FIND_LIBRARY_SUFFIXES
+if (CPPKAFKA_RDKAFKA_STATIC_LIB)
+    if (MSVC)
+        set(RDKAFKA_SUFFIX lib)
+    else()
+        set(RDKAFKA_SUFFIX a)
+    endif()
+else()
+    if (MSVC)
+        set(RDKAFKA_SUFFIX dll)
+    else()
+        set(RDKAFKA_SUFFIX so)
+    endif()
+endif()
+
 find_path(RDKAFKA_ROOT_DIR
     NAMES include/librdkafka/rdkafka.h
 )
@@ -7,11 +22,17 @@ find_path(RDKAFKA_INCLUDE_DIR
     HINTS ${RDKAFKA_ROOT_DIR}/include
 )
 
-set(HINT_DIR ${RDKAFKA_ROOT_DIR}/lib)
+# Check lib paths
+if (CPPKAFKA_CMAKE_VERBOSE)
+    get_property(FIND_LIBRARY_32 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB32_PATHS)
+    get_property(FIND_LIBRARY_64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
+    MESSAGE(STATUS "RDKAFKA search 32-bit library paths: ${FIND_LIBRARY_32}")
+    MESSAGE(STATUS "RDKAFKA search 64-bit library paths: ${FIND_LIBRARY_64}")
+endif()
 
 find_library(RDKAFKA_LIBRARY
-    NAMES rdkafka librdkafka
-    HINTS ${HINT_DIR}
+    NAMES rdkafka.${RDKAFKA_SUFFIX} librdkafka.${RDKAFKA_SUFFIX} rdkafka
+    HINTS ${RDKAFKA_ROOT_DIR}/lib
 )
 
 include(FindPackageHandleStandardArgs)
@@ -20,7 +41,7 @@ find_package_handle_standard_args(RDKAFKA DEFAULT_MSG
     RDKAFKA_INCLUDE_DIR
 )
 
-set(CONTENTS "#include <librdkafka/rdkafka.h>\n #if RD_KAFKA_VERSION >= 0x00090400\n int main() { }\n #endif")
+set(CONTENTS "#include <librdkafka/rdkafka.h>\n #if RD_KAFKA_VERSION >= ${RDKAFKA_MIN_VERSION}\n int main() { }\n #endif")
set(FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/rdkafka_version_test.c)
 file(WRITE ${FILE_NAME} ${CONTENTS})
```
cppkafka.h.in (new file, 34 lines)

```c++
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_H
#define CPPKAFKA_H

@CPPKAFKA_HEADERS@
#endif
```
examples/CMakeLists.txt

```diff
@@ -1,21 +1,17 @@
-find_package(Boost COMPONENTS program_options)
+link_libraries(cppkafka ${RDKAFKA_LIBRARY} ${Boost_LIBRARIES} pthread rt ssl crypto dl z)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
+include_directories(SYSTEM ${RDKAFKA_INCLUDE_DIR})
 
-if (Boost_PROGRAM_OPTIONS_FOUND)
-    link_libraries(${Boost_LIBRARIES} cppkafka ${RDKAFKA_LIBRARY})
+add_custom_target(examples)
+macro(create_example example_name)
+    string(REPLACE "_" "-" sanitized_name ${example_name})
+    add_executable(${sanitized_name} EXCLUDE_FROM_ALL "${example_name}_example.cpp")
+    add_dependencies(examples ${sanitized_name})
+endmacro()
 
-    include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
-    include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${RDKAFKA_INCLUDE_DIR})
-
-    add_custom_target(examples)
-    macro(create_example example_name)
-        add_executable(${example_name} EXCLUDE_FROM_ALL "${example_name}.cpp")
-        add_dependencies(examples ${example_name})
-    endmacro()
-
-    create_example(kafka_producer)
-    create_example(kafka_consumer)
-    create_example(metadata)
-    create_example(consumers_information)
-else()
-    message(STATUS "Disabling examples since boost.program_options was not found")
-endif()
+create_example(producer)
+create_example(buffered_producer)
+create_example(consumer)
+create_example(consumer_dispatcher)
+create_example(metadata)
+create_example(consumers_information)
```
examples/buffered_producer_example.cpp (new file, 96 lines)

```c++
#include <stdexcept>
#include <iostream>
#include <boost/program_options.hpp>
#include "cppkafka/utils/buffered_producer.h"
#include "cppkafka/configuration.h"

using std::string;
using std::exception;
using std::getline;
using std::cin;
using std::cout;
using std::endl;

using cppkafka::BufferedProducer;
using cppkafka::Configuration;
using cppkafka::Topic;
using cppkafka::MessageBuilder;
using cppkafka::Message;

namespace po = boost::program_options;

int main(int argc, char* argv[]) {
    string brokers;
    string topic_name;
    int partition_value = -1;

    po::options_description options("Options");
    options.add_options()
        ("help,h",      "produce this help message")
        ("brokers,b",   po::value<string>(&brokers)->required(),
                        "the kafka broker list")
        ("topic,t",     po::value<string>(&topic_name)->required(),
                        "the topic in which to write to")
        ("partition,p", po::value<int>(&partition_value),
                        "the partition to write into (unassigned if not provided)")
        ;

    po::variables_map vm;

    try {
        po::store(po::command_line_parser(argc, argv).options(options).run(), vm);
        po::notify(vm);
    }
    catch (exception& ex) {
        cout << "Error parsing options: " << ex.what() << endl;
        cout << endl;
        cout << options << endl;
        return 1;
    }

    // Create a message builder for this topic
    MessageBuilder builder(topic_name);

    // Get the partition we want to write to. If no partition is provided, this will be
    // an unassigned one
    if (partition_value != -1) {
        builder.partition(partition_value);
    }

    // Construct the configuration
    Configuration config = {
        { "metadata.broker.list", brokers }
    };

    // Create the producer
    BufferedProducer<string> producer(config);

    // Set a produce success callback
    producer.set_produce_success_callback([](const Message& msg) {
        cout << "Successfully produced message with payload " << msg.get_payload() << endl;
    });
    // Set a produce failure callback
    producer.set_produce_failure_callback([](const Message& msg) {
        cout << "Failed to produce message with payload " << msg.get_payload() << endl;
        // Return false so we stop trying to produce this message
        return false;
    });

    cout << "Producing messages into topic " << topic_name << endl;

    // Now read lines and write them into kafka
    string line;
    while (getline(cin, line)) {
        // Set the payload on this builder
        builder.payload(line);

        // Add the message we've built to the buffered producer
        producer.add_message(builder);

        // Now flush so we:
        // * emit the buffered message
        // * poll the producer so we dispatch on delivery report callbacks and
        //   therefore get the produce failure/success callbacks
        producer.flush();
    }
}
```
examples/consumer_dispatcher_example.cpp (new file, 121 lines)

```c++
#include <stdexcept>
#include <iostream>
#include <csignal>
#include <boost/program_options.hpp>
#include "cppkafka/consumer.h"
#include "cppkafka/configuration.h"
#include "cppkafka/utils/consumer_dispatcher.h"

using std::string;
using std::exception;
using std::cout;
using std::endl;
using std::function;

using cppkafka::Consumer;
using cppkafka::ConsumerDispatcher;
using cppkafka::Configuration;
using cppkafka::Message;
using cppkafka::TopicPartition;
using cppkafka::TopicPartitionList;
using cppkafka::Error;

namespace po = boost::program_options;

function<void()> on_signal;

void signal_handler(int) {
    on_signal();
}

// This example uses ConsumerDispatcher, a simple synchronous wrapper over a Consumer
// to allow processing messages using pattern matching rather than writing a loop
// and check if there's a message, if there's an error, etc.
int main(int argc, char* argv[]) {
    string brokers;
    string topic_name;
    string group_id;

    po::options_description options("Options");
    options.add_options()
        ("help,h",     "produce this help message")
        ("brokers,b",  po::value<string>(&brokers)->required(),
                       "the kafka broker list")
        ("topic,t",    po::value<string>(&topic_name)->required(),
                       "the topic in which to write to")
        ("group-id,g", po::value<string>(&group_id)->required(),
                       "the consumer group id")
        ;

    po::variables_map vm;

    try {
        po::store(po::command_line_parser(argc, argv).options(options).run(), vm);
        po::notify(vm);
    }
    catch (exception& ex) {
        cout << "Error parsing options: " << ex.what() << endl;
        cout << endl;
        cout << options << endl;
        return 1;
    }

    // Construct the configuration
    Configuration config = {
        { "metadata.broker.list", brokers },
        { "group.id", group_id },
        // Disable auto commit
        { "enable.auto.commit", false }
    };

    // Create the consumer
    Consumer consumer(config);

    // Print the assigned partitions on assignment
    consumer.set_assignment_callback([](const TopicPartitionList& partitions) {
        cout << "Got assigned: " << partitions << endl;
    });

    // Print the revoked partitions on revocation
    consumer.set_revocation_callback([](const TopicPartitionList& partitions) {
        cout << "Got revoked: " << partitions << endl;
    });

    // Subscribe to the topic
    consumer.subscribe({ topic_name });

    cout << "Consuming messages from topic " << topic_name << endl;

    // Create a consumer dispatcher
    ConsumerDispatcher dispatcher(consumer);

    // Stop processing on SIGINT
    on_signal = [&]() {
        dispatcher.stop();
    };
    signal(SIGINT, signal_handler);

    // Now run the dispatcher, providing a callback to handle messages, one to handle
    // errors and another one to handle EOF on a partition
    dispatcher.run(
        // Callback executed whenever a new message is consumed
        [&](Message msg) {
            // Print the key (if any)
            if (msg.get_key()) {
                cout << msg.get_key() << " -> ";
            }
            // Print the payload
            cout << msg.get_payload() << endl;
            // Now commit the message
            consumer.commit(msg);
        },
        // Whenever there's an error (other than the EOF soft error)
        [](Error error) {
            cout << "[+] Received error notification: " << error << endl;
        },
        // Whenever EOF is reached on a partition, print this
        [](ConsumerDispatcher::EndOfFile, const TopicPartition& topic_partition) {
            cout << "Reached EOF on partition " << topic_partition << endl;
        }
    );
}
```
include/cppkafka/CMakeLists.txt

```diff
@@ -1,5 +1,25 @@
+# Local function to auto-generate main cppkafka.h header file
+function(make_cppkafka_header)
+    set(CPPKAFKA_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/cppkafka.h)
+    file(GLOB INCLUDE_HEADERS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.h" "utils/*.h")
+    list(SORT INCLUDE_HEADERS)
+    foreach(header ${INCLUDE_HEADERS})
+        if (NOT ${header} MATCHES "cppkafka.h")
+            SET(CPPKAFKA_HEADERS "${CPPKAFKA_HEADERS}#include <cppkafka/${header}>\n")
+        endif()
+    endforeach()
+
+    #create file from template
+    configure_file(${PROJECT_SOURCE_DIR}/cppkafka.h.in ${CPPKAFKA_HEADER})
+endfunction()
+
+# Run file generation function
+make_cppkafka_header()
+
+# Install headers including the auto-generated cppkafka.h
 file(GLOB INCLUDE_FILES "*.h")
 file(GLOB UTILS_INCLUDE_FILES "utils/*.h")
+file(GLOB DETAIL_INCLUDE_FILES "detail/*.h")
 install(
     FILES ${INCLUDE_FILES}
     DESTINATION include/cppkafka
@@ -10,3 +30,8 @@ install(
     DESTINATION include/cppkafka/utils/
     COMPONENT Headers
 )
+install(
+    FILES ${DETAIL_INCLUDE_FILES}
+    DESTINATION include/cppkafka/detail/
+    COMPONENT Headers
+)
```
include/cppkafka/buffer.h

```diff
@@ -35,6 +35,7 @@
 #include <iosfwd>
 #include <algorithm>
 #include "macros.h"
+#include "exceptions.h"
 
 namespace cppkafka {
 
@@ -75,6 +76,9 @@ public:
     Buffer(const T* data, size_t size)
     : data_(reinterpret_cast<const DataType*>(data)), size_(size) {
         static_assert(sizeof(T) == sizeof(DataType), "sizeof(T) != sizeof(DataType)");
+        if ((data_ == nullptr) && (size_ > 0)) {
+            throw Exception("Invalid buffer configuration");
+        }
     }
 
     /**
@@ -152,7 +156,7 @@ public:
     /**
     * Output operator
     */
-    friend std::ostream& operator<<(std::ostream& output, const Buffer& rhs);
+    CPPKAFKA_API friend std::ostream& operator<<(std::ostream& output, const Buffer& rhs);
 private:
     const DataType* data_;
     size_t size_;
@@ -161,12 +165,12 @@ private:
 /**
 * Compares Buffer objects for equality
 */
-bool operator==(const Buffer& lhs, const Buffer& rhs);
+CPPKAFKA_API bool operator==(const Buffer& lhs, const Buffer& rhs);
 
 /**
 * Compares Buffer objects for inequality
 */
-bool operator!=(const Buffer& lhs, const Buffer& rhs);
+CPPKAFKA_API bool operator!=(const Buffer& lhs, const Buffer& rhs);
 
 } // cppkafka
```
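The constructor check added above turns a malformed buffer (a null pointer paired with a non-zero size) into an immediate error instead of a latent invalid read. A minimal sketch of the new behavior (not part of the diff):

```c++
#include <cppkafka/buffer.h>
#include <cppkafka/exceptions.h>
#include <iostream>

using namespace cppkafka;

int main() {
    try {
        // A null data pointer with a non-zero size now throws on construction
        Buffer bad(static_cast<const char*>(nullptr), 10);
    }
    catch (const Exception& ex) {
        // Prints "Invalid buffer configuration"
        std::cout << ex.what() << std::endl;
    }
}
```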
include/cppkafka/configuration.h

```diff
@@ -62,19 +62,22 @@ class KafkaHandleBase;
 class CPPKAFKA_API Configuration : public ConfigurationBase<Configuration> {
 public:
     using DeliveryReportCallback = std::function<void(Producer& producer, const Message&)>;
-    using OffsetCommitCallback = std::function<void(Consumer& consumer, Error,
+    using OffsetCommitCallback = std::function<void(Consumer& consumer,
+                                                    Error error,
                                                     const TopicPartitionList& topic_partitions)>;
-    using ErrorCallback = std::function<void(KafkaHandleBase& handle, int error,
+    using ErrorCallback = std::function<void(KafkaHandleBase& handle,
+                                             int error,
                                              const std::string& reason)>;
     using ThrottleCallback = std::function<void(KafkaHandleBase& handle,
                                                 const std::string& broker_name,
                                                 int32_t broker_id,
                                                 std::chrono::milliseconds throttle_time)>;
-    using LogCallback = std::function<void(KafkaHandleBase& handle, int level,
+    using LogCallback = std::function<void(KafkaHandleBase& handle,
+                                           int level,
                                            const std::string& facility,
                                            const std::string& message)>;
     using StatsCallback = std::function<void(KafkaHandleBase& handle, const std::string& json)>;
-    using SocketCallback = std::function<int(int domain, int type, int protoco)>;
+    using SocketCallback = std::function<int(int domain, int type, int protocol)>;
 
     using ConfigurationBase<Configuration>::set;
     using ConfigurationBase<Configuration>::get;
@@ -145,7 +148,7 @@ public:
     Configuration& set_default_topic_configuration(TopicConfiguration config);
 
     /**
-     * Returns true iff the given property name has been set
+     * Returns true if the given property name has been set
     */
     bool has_property(const std::string& name) const;
```
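For reference, a short sketch (not from the diff; the broker value is a placeholder) wiring up callbacks whose signatures are reformatted above; the lambda parameters mirror the `using` declarations:

```c++
#include <cppkafka/cppkafka.h>
#include <iostream>

using namespace cppkafka;

int main() {
    Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" }
    };

    // Matches ErrorCallback: (KafkaHandleBase&, int, const std::string&)
    config.set_error_callback([](KafkaHandleBase&, int error, const std::string& reason) {
        std::cout << "error " << error << ": " << reason << std::endl;
    });

    // Matches LogCallback: (KafkaHandleBase&, int, const std::string&, const std::string&)
    config.set_log_callback([](KafkaHandleBase&, int level, const std::string& facility,
                               const std::string& message) {
        std::cout << "[" << level << "][" << facility << "] " << message << std::endl;
    });
}
```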
include/cppkafka/configuration_option.h

```diff
@@ -32,13 +32,14 @@
 
 #include <string>
 #include <type_traits>
+#include "macros.h"
 
 namespace cppkafka {
 
 /**
 * Wrapper over a configuration (key, value) pair
 */
-class ConfigurationOption {
+class CPPKAFKA_API ConfigurationOption {
 public:
     /**
     * Construct using a std::string value
```
include/cppkafka/consumer.h

```diff
@@ -35,9 +35,10 @@
 #include <chrono>
 #include <functional>
 #include "kafka_handle_base.h"
-#include "message.h"
+#include "queue.h"
 #include "macros.h"
 #include "error.h"
+#include "detail/callback_invoker.h"
 
 namespace cppkafka {
 
@@ -64,14 +65,14 @@ class TopicConfiguration;
 * Consumer consumer(config);
 *
 * // Set the assignment callback
-* consumer.set_assignment_callback([&](vector<TopicPartition>& topic_partitions) {
+* consumer.set_assignment_callback([&](TopicPartitionList& topic_partitions) {
 *     // Here you could fetch offsets and do something, altering the offsets on the
 *     // topic_partitions vector if needed
-*     cout << "Got assigned " << topic_partitions.count() << " partitions!" << endl;
+*     cout << "Got assigned " << topic_partitions.size() << " partitions!" << endl;
 * });
 *
 * // Set the revocation callback
-* consumer.set_revocation_callback([&](const vector<TopicPartition>& topic_partitions) {
+* consumer.set_revocation_callback([&](const TopicPartitionList& topic_partitions) {
 *     cout << topic_partitions.size() << " partitions revoked!" << endl;
 * });
 *
@@ -100,6 +101,7 @@ public:
     using AssignmentCallback = std::function<void(TopicPartitionList&)>;
     using RevocationCallback = std::function<void(const TopicPartitionList&)>;
     using RebalanceErrorCallback = std::function<void(Error)>;
+    using KafkaHandleBase::pause;
 
     /**
     * \brief Creates an instance of a consumer.
@@ -116,7 +118,7 @@ public:
     Consumer& operator=(Consumer&&) = delete;
 
     /**
-     * \brief Closes and estroys the rdkafka handle
+     * \brief Closes and destroys the rdkafka handle
     *
     * This will call Consumer::close before destroying the handle
     */
@@ -126,7 +128,7 @@ public:
     * \brief Sets the topic/partition assignment callback
     *
     * The Consumer class will use rd_kafka_conf_set_rebalance_cb and will handle the
-    * rebalance, converting from rdkafka topic partition list handles into vector<TopicPartition>
+    * rebalance, converting from rdkafka topic partition list handles into TopicPartitionList
     * and executing the assignment/revocation/rebalance_error callbacks.
     *
     * \note You *do not need* to call Consumer::assign with the provided topic partitions. This
@@ -140,7 +142,7 @@ public:
     * \brief Sets the topic/partition revocation callback
     *
     * The Consumer class will use rd_kafka_conf_set_rebalance_cb and will handle the
-    * rebalance, converting from rdkafka topic partition list handles into vector<TopicPartition>
+    * rebalance, converting from rdkafka topic partition list handles into TopicPartitionList
     * and executing the assignment/revocation/rebalance_error callbacks.
     *
     * \note You *do not need* to call Consumer::assign with an empty topic partition list or
@@ -155,7 +157,7 @@ public:
     * \brief Sets the rebalance error callback
     *
     * The Consumer class will use rd_kafka_conf_set_rebalance_cb and will handle the
-    * rebalance, converting from rdkafka topic partition list handles into vector<TopicPartition>
+    * rebalance, converting from rdkafka topic partition list handles into TopicPartitionList
     * and executing the assignment/revocation/rebalance_error callbacks.
     *
     * \param callback The rebalance error callback
@@ -193,6 +195,34 @@ public:
     */
     void unassign();
 
+    /**
+    * \brief Pauses all consumption
+    */
+    void pause();
+
+    /**
+    * \brief Resumes all consumption
+    */
+    void resume();
+
+    /**
+    * \brief Commits the current partition assignment
+    *
+    * This translates into a call to rd_kafka_commit with a null partition list.
+    *
+    * \remark This function is equivalent to calling commit(get_assignment())
+    */
+    void commit();
+
+    /**
+    * \brief Commits the current partition assignment asynchronously
+    *
+    * This translates into a call to rd_kafka_commit with a null partition list.
+    *
+    * \remark This function is equivalent to calling async_commit(get_assignment())
+    */
+    void async_commit();
+
     /**
     * \brief Commits the given message synchronously
     *
@@ -235,6 +265,8 @@ public:
     * This translates into a call to rd_kafka_get_watermark_offsets
     *
     * \param topic_partition The topic/partition to get the offsets from
+    *
+    * \return A pair of offsets {low, high}
     */
     OffsetTuple get_offsets(const TopicPartition& topic_partition) const;
 
@@ -244,6 +276,8 @@ public:
     * This translates into a call to rd_kafka_committed
     *
     * \param topic_partitions The topic/partition list to be queried
+    *
+    * \return The topic partition list
     */
     TopicPartitionList get_offsets_committed(const TopicPartitionList& topic_partitions) const;
 
@@ -253,6 +287,8 @@ public:
     * This translates into a call to rd_kafka_position
     *
     * \param topic_partitions The topic/partition list to be queried
+    *
+    * \return The topic partition list
     */
     TopicPartitionList get_offsets_position(const TopicPartitionList& topic_partitions) const;
 
@@ -267,6 +303,8 @@ public:
     * \brief Gets the current topic/partition list assignment
     *
     * This translates to a call to rd_kafka_assignment
+    *
+    * \return The topic partition list
     */
     TopicPartitionList get_assignment() const;
 
@@ -274,21 +312,29 @@ public:
     * \brief Gets the group member id
     *
     * This translates to a call to rd_kafka_memberid
+    *
+    * \return The id
     */
     std::string get_member_id() const;
 
     /**
-     * Gets the partition assignment callback.
+     * \brief Gets the partition assignment callback.
+     *
+     * \return The callback reference
     */
     const AssignmentCallback& get_assignment_callback() const;
 
     /**
-     * Gets the partition revocation callback.
+     * \brief Gets the partition revocation callback.
+     *
+     * \return The callback reference
     */
     const RevocationCallback& get_revocation_callback() const;
 
     /**
-     * Gets the rebalance error callback.
+     * \brief Gets the rebalance error callback.
+     *
+     * \return The callback reference
     */
     const RebalanceErrorCallback& get_rebalance_error_callback() const;
 
@@ -303,8 +349,8 @@ public:
     *
     * The timeout used on this call will be the one configured via Consumer::set_timeout.
     *
-    * The returned message *might* be empty. If's necessary to check that it's a valid one before
-    * using it:
+    * \return A message. The returned message *might* be empty. It's necessary to check
+    * that it's valid before using it:
     *
     * \code
     * Message msg = consumer.poll();
@@ -322,15 +368,71 @@ public:
     * instead of the one configured on this Consumer.
     *
     * \param timeout The timeout to be used on this call
+    *
+    * \return A message
     */
     Message poll(std::chrono::milliseconds timeout);
 
+    /**
+    * \brief Polls for a batch of messages
+    *
+    * This can return one or more messages
+    *
+    * \param max_batch_size The maximum amount of messages expected
+    *
+    * \return A list of messages
+    */
+    MessageList poll_batch(size_t max_batch_size);
+
+    /**
+    * \brief Polls for a batch of messages
+    *
+    * This can return one or more messages
+    *
+    * \param max_batch_size The maximum amount of messages expected
+    * \param timeout The timeout for this operation
+    *
+    * \return A list of messages
+    */
+    MessageList poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout);
+
+    /**
+    * \brief Get the global event queue servicing this consumer corresponding to
+    * rd_kafka_queue_get_main and which is polled via rd_kafka_poll
+    *
+    * \return A Queue object
+    *
+    * \remark Note that this call will disable forwarding to the consumer_queue.
+    * To restore forwarding if desired, call Queue::forward_to_queue(consumer_queue)
+    */
+    Queue get_main_queue() const;
+
+    /**
+    * \brief Get the consumer group queue servicing corresponding to
+    * rd_kafka_queue_get_consumer and which is polled via rd_kafka_consumer_poll
+    *
+    * \return A Queue object
+    */
+    Queue get_consumer_queue() const;
+
+    /**
+    * \brief Get the queue belonging to this partition. If the consumer is not assigned to this
+    * partition, an empty queue will be returned
+    *
+    * \param partition The partition object
+    *
+    * \return A Queue object
+    *
+    * \remark Note that this call will disable forwarding to the consumer_queue.
+    * To restore forwarding if desired, call Queue::forward_to_queue(consumer_queue)
+    */
+    Queue get_partition_queue(const TopicPartition& partition) const;
 private:
     static void rebalance_proxy(rd_kafka_t *handle, rd_kafka_resp_err_t error,
                                 rd_kafka_topic_partition_list_t *partitions, void *opaque);
 
     void close();
     void commit(const Message& msg, bool async);
-    void commit(const TopicPartitionList& topic_partitions, bool async);
+    void commit(const TopicPartitionList* topic_partitions, bool async);
     void handle_rebalance(rd_kafka_resp_err_t err, TopicPartitionList& topic_partitions);
 
     AssignmentCallback assignment_callback_;
```
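A short sketch (not part of the diff; broker, topic, and group values are placeholders) exercising the batch polling and pause/resume/commit additions declared above:

```c++
#include <cppkafka/cppkafka.h>
#include <iostream>

using namespace cppkafka;

int main() {
    Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" },
        { "group.id", "example_group" }
    };
    Consumer consumer(config);
    consumer.subscribe({ "my_topic" });

    // Fetch up to 10 messages in one call instead of polling one at a time
    MessageList messages = consumer.poll_batch(10);
    for (const Message& msg : messages) {
        if (msg && !msg.get_error()) {
            std::cout << msg.get_payload() << std::endl;
        }
    }

    // Pause all consumption, commit the current assignment, then resume
    consumer.pause();
    consumer.commit();
    consumer.resume();
}
```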
include/cppkafka/cppkafka.h (new file, 64 lines)

```c++
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * (BSD license text identical to the header shown in cppkafka.h.in above.)
 */

#ifndef CPPKAFKA_H
#define CPPKAFKA_H

#include <cppkafka/buffer.h>
#include <cppkafka/clonable_ptr.h>
#include <cppkafka/configuration.h>
#include <cppkafka/configuration_base.h>
#include <cppkafka/configuration_option.h>
#include <cppkafka/consumer.h>
#include <cppkafka/error.h>
#include <cppkafka/exceptions.h>
#include <cppkafka/group_information.h>
#include <cppkafka/kafka_handle_base.h>
#include <cppkafka/logging.h>
#include <cppkafka/macros.h>
#include <cppkafka/message.h>
#include <cppkafka/message_builder.h>
#include <cppkafka/message_internal.h>
#include <cppkafka/metadata.h>
#include <cppkafka/producer.h>
#include <cppkafka/queue.h>
#include <cppkafka/topic.h>
#include <cppkafka/topic_configuration.h>
#include <cppkafka/topic_partition.h>
#include <cppkafka/topic_partition_list.h>
#include <cppkafka/utils/backoff_committer.h>
#include <cppkafka/utils/backoff_performer.h>
#include <cppkafka/utils/buffered_producer.h>
#include <cppkafka/utils/compacted_topic_processor.h>
#include <cppkafka/utils/consumer_dispatcher.h>
#include <cppkafka/utils/poll_interface.h>
#include <cppkafka/utils/poll_strategy_base.h>
#include <cppkafka/utils/roundrobin_poll_strategy.h>

#endif
```
127 include/cppkafka/detail/callback_invoker.h Normal file
@@ -0,0 +1,127 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_CALLBACK_INVOKER_H
#define CPPKAFKA_CALLBACK_INVOKER_H

#include <sstream>
#include <assert.h>
#include "../logging.h"
#include "../kafka_handle_base.h"

namespace cppkafka {

// Error values
template <typename T>
T error_value() { return T{}; }

template<> inline
void error_value<void>() {};

template<> inline
bool error_value<bool>() { return false; }

template<> inline
int error_value<int>() { return -1; }

/**
 * \brief Wraps an std::function object and runs it while preventing all exceptions from escaping
 * \tparam Func An std::function object
 */
template <typename Func>
class CallbackInvoker
{
public:
    using RetType = typename Func::result_type;
    using LogCallback = std::function<void(KafkaHandleBase& handle,
                                           int level,
                                           const std::string& facility,
                                           const std::string& message)>;
    CallbackInvoker(const char* callback_name,
                    const Func& callback,
                    KafkaHandleBase* handle)
    : callback_name_(callback_name),
      callback_(callback),
      handle_(handle) {
    }

    explicit operator bool() const {
        return (bool)callback_;
    }

    template <typename ...Args>
    RetType operator()(Args&&... args) const {
        static const char* library_name = "cppkafka";
        std::ostringstream error_msg;
        try {
            if (callback_) {
                return callback_(std::forward<Args>(args)...);
            }
            return error_value<RetType>();
        }
        catch (const std::exception& ex) {
            if (handle_) {
                error_msg << "Caught exception in " << callback_name_ << " callback: " << ex.what();
            }
        }
        catch (...) {
            if (handle_) {
                error_msg << "Caught unknown exception in " << callback_name_ << " callback";
            }
        }
        // Log error
        if (handle_) {
            if (handle_->get_configuration().get_log_callback()) {
                try {
                    // Log it
                    handle_->get_configuration().get_log_callback()(*handle_,
                                                                    static_cast<int>(LogLevel::LogErr),
                                                                    library_name,
                                                                    error_msg.str());
                }
                catch (...) {} // sink everything
            }
            else {
                rd_kafka_log_print(handle_->get_handle(),
                                   static_cast<int>(LogLevel::LogErr),
                                   library_name,
                                   error_msg.str().c_str());
            }
        }
        return error_value<RetType>();
    }
private:
    const char* callback_name_;
    const Func& callback_;
    KafkaHandleBase* handle_;
};

}

#endif
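The invoker degrades to error_value<RetType>() when the wrapped callback throws; with a null handle nothing is logged. A minimal standalone sketch of that behavior (the callback body and values are illustrative):

#include <functional>
#include <iostream>
#include <stdexcept>
#include <cppkafka/detail/callback_invoker.h>

int main() {
    std::function<int(int)> cb = [](int x) -> int {
        if (x < 0) {
            throw std::runtime_error("negative input");
        }
        return x * 2;
    };
    // No KafkaHandleBase is given, so a caught exception is swallowed silently
    cppkafka::CallbackInvoker<std::function<int(int)>> invoker("example", cb, nullptr);
    std::cout << invoker(21) << "\n";  // prints 42
    std::cout << invoker(-1) << "\n";  // exception caught, prints error_value<int>(), i.e. -1
}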
include/cppkafka/exceptions.h
@@ -75,7 +75,7 @@ public:
     /**
      * Writes this error's string representation into a stream
      */
-    friend std::ostream& operator<<(std::ostream& output, const Error& rhs);
+    CPPKAFKA_API friend std::ostream& operator<<(std::ostream& output, const Error& rhs);
 private:
     rd_kafka_resp_err_t error_;
 };
@@ -110,6 +110,30 @@ private:
     Error error_;
 };
+
+/**
+ * Consumer exception
+ */
+class CPPKAFKA_API ConsumerException : public Exception {
+public:
+    ConsumerException(Error error);
+
+    Error get_error() const;
+private:
+    Error error_;
+};
+
+/**
+ * Queue exception for rd_kafka_queue_t errors
+ */
+class CPPKAFKA_API QueueException : public Exception {
+public:
+    QueueException(Error error);
+
+    Error get_error() const;
+private:
+    Error error_;
+};
+
 } // cppkafka
 
 #endif // CPPKAFKA_EXCEPTIONS_H
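A sketch of handling the new exception types; the assumption here is that consumer-side lookups such as get_assignment report failures via ConsumerException, as the upstream consumer implementation does:

#include <cppkafka/cppkafka.h>
#include <iostream>

void print_assignment(cppkafka::Consumer& consumer) {
    try {
        for (const cppkafka::TopicPartition& partition : consumer.get_assignment()) {
            std::cout << partition << "\n";
        }
    }
    catch (const cppkafka::ConsumerException& ex) {
        // Error's stream operator (exported above) prints the rdkafka error string
        std::cerr << "consumer error: " << ex.get_error() << "\n";
    }
}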
include/cppkafka/group_information.h
@@ -136,6 +136,8 @@ private:
     std::vector<GroupMemberInformation> members_;
 };
 
+using GroupInformationList = std::vector<GroupInformation>;
+
 } // cppkafka
 
 #endif // CPPKAFKA_GROUP_INFORMATION_H
include/cppkafka/kafka_handle_base.h
@@ -39,6 +39,7 @@
 #include <tuple>
 #include <chrono>
 #include <librdkafka/rdkafka.h>
+#include "group_information.h"
 #include "topic_partition.h"
 #include "topic_partition_list.h"
 #include "topic_configuration.h"
@@ -75,15 +76,29 @@ public:
      */
     void pause_partitions(const TopicPartitionList& topic_partitions);
+
+    /**
+     * \brief Pauses consumption/production for this topic
+     *
+     * \param topic The topic name
+     */
+    void pause(const std::string& topic);
+
     /**
      * \brief Resumes consumption/production from the given topic/partition list
      *
      * This translates into a call to rd_kafka_resume_partitions
      *
      * \param topic_partitions The topic/partition list to resume consuming/producing from/to
      */
     void resume_partitions(const TopicPartitionList& topic_partitions);
+
+    /**
+     * \brief Resumes consumption/production for this topic
+     *
+     * \param topic The topic name
+     */
+    void resume(const std::string& topic);
+
     /**
      * \brief Sets the timeout for operations that require a timeout
      *
@@ -108,11 +123,15 @@ public:
      * This translates into a call to rd_kafka_query_watermark_offsets
      *
      * \param topic_partition The topic/partition to be queried
+     *
+     * \return A pair of watermark offsets {low, high}
      */
     OffsetTuple query_offsets(const TopicPartition& topic_partition) const;
 
     /**
-     * Gets the rdkafka handle
+     * \brief Gets the rdkafka handle
+     *
+     * \return The rdkafka handle
      */
     rd_kafka_t* get_handle() const;
 
@@ -124,6 +143,8 @@ public:
      * if any.
      *
      * \param name The name of the topic to be created
+     *
+     * \return A topic
      */
     Topic get_topic(const std::string& name);
 
@@ -134,15 +155,19 @@ public:
      *
      * \param name The name of the topic to be created
      * \param config The configuration to be used for the new topic
+     *
+     * \return A topic
      */
     Topic get_topic(const std::string& name, TopicConfiguration config);
 
     /**
      * \brief Gets metadata for brokers, topics, partitions, etc
      *
+     * This translates into a call to rd_kafka_metadata
+     *
      * \param all_topics Whether to fetch metadata about all topics or only locally known ones
      *
-     * This translates into a call to rd_kafka_metadata
+     * \return The metadata
      */
     Metadata get_metadata(bool all_topics = true) const;
 
@@ -153,20 +178,26 @@ public:
      * This translates into a call to rd_kafka_metadata
      *
      * \param topic The topic to fetch information for
+     *
+     * \return The topic metadata
      */
     TopicMetadata get_metadata(const Topic& topic) const;
 
     /**
-     * Gets the consumer group information
+     * \brief Gets the consumer group information
      *
      * \param name The name of the consumer group to look up
+     *
+     * \return The group information
      */
     GroupInformation get_consumer_group(const std::string& name);
 
     /**
-     * Gets all consumer groups
+     * \brief Gets all consumer groups
+     *
+     * \return A list of consumer groups
      */
-    std::vector<GroupInformation> get_consumer_groups();
+    GroupInformationList get_consumer_groups();
 
     /**
      * \brief Gets topic/partition offsets based on timestamps
@@ -174,23 +205,31 @@ public:
      * This translates into a call to rd_kafka_offsets_for_times
      *
      * \param queries A map from topic/partition to the timestamp to be used
+     *
+     * \return A topic partition list
      */
     TopicPartitionList get_offsets_for_times(const TopicPartitionsTimestampsMap& queries) const;
 
     /**
-     * Returns the kafka handle name
+     * \brief Get the kafka handle name
+     *
+     * \return The handle name
      */
     std::string get_name() const;
 
     /**
-     * Gets the configured timeout.
+     * \brief Gets the configured timeout.
+     *
+     * \return The configured timeout
      *
      * \sa KafkaHandleBase::set_timeout
      */
     std::chrono::milliseconds get_timeout() const;
 
     /**
-     * Gets the handle's configuration
+     * \brief Gets the handle's configuration
+     *
+     * \return A reference to the configuration object
      */
     const Configuration& get_configuration() const;
 
@@ -198,13 +237,24 @@ public:
     * \brief Gets the length of the out queue
     *
     * This calls rd_kafka_outq_len
+    *
+    * \return The length of the queue
     */
    int get_out_queue_length() const;
+
+    /**
+     * \brief Cancels the current callback dispatcher
+     *
+     * This calls rd_kafka_yield
+     */
+    void yield() const;
 protected:
     KafkaHandleBase(Configuration config);
 
     void set_handle(rd_kafka_t* handle);
     void check_error(rd_kafka_resp_err_t error) const;
+    void check_error(rd_kafka_resp_err_t error,
+                     const rd_kafka_topic_partition_list_t* list_ptr) const;
     rd_kafka_conf_t* get_configuration_handle();
 private:
     static const std::chrono::milliseconds DEFAULT_TIMEOUT;
@@ -214,14 +264,14 @@ private:
 
     Topic get_topic(const std::string& name, rd_kafka_topic_conf_t* conf);
     Metadata get_metadata(bool all_topics, rd_kafka_topic_t* topic_ptr) const;
-    std::vector<GroupInformation> fetch_consumer_groups(const char* name);
+    GroupInformationList fetch_consumer_groups(const char* name);
     void save_topic_config(const std::string& topic_name, TopicConfiguration config);
 
-    HandlePtr handle_;
     std::chrono::milliseconds timeout_ms_;
     Configuration config_;
     TopicConfigurationMap topic_configurations_;
     std::mutex topic_configurations_mutex_;
+    HandlePtr handle_;
 };
 
 } // cppkafka
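A sketch of the new per-topic pause/resume overloads, usable on any handle (the topic name is an assumption):

#include <cppkafka/cppkafka.h>

void throttle(cppkafka::Consumer& consumer) {
    consumer.pause("some_topic");    // pauses every assigned partition of that topic
    // ... drain whatever local work made the pause necessary ...
    consumer.resume("some_topic");   // resumes consumption for the topic
}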
49 include/cppkafka/logging.h Normal file
@@ -0,0 +1,49 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_LOGGING_H
#define CPPKAFKA_LOGGING_H

namespace cppkafka {

// Based on syslog.h levels
enum class LogLevel : int {
    LogEmerg = 0,   /* system is unusable */
    LogAlert = 1,   /* action must be taken immediately */
    LogCrit = 2,    /* critical conditions */
    LogErr = 3,     /* error conditions */
    LogWarning = 4, /* warning conditions */
    LogNotice = 5,  /* normal but significant condition */
    LogInfo = 6,    /* informational */
    LogDebug = 7    /* debug-level messages */
};

} //cppkafka

#endif //CPPKAFKA_LOGGING_H
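These values plug directly into the configuration's log callback, whose signature matches the LogCallback alias used by CallbackInvoker above. A sketch (the broker address is an assumption):

#include <cppkafka/cppkafka.h>
#include <iostream>
#include <string>

cppkafka::Configuration make_config() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" }  // broker address is an assumption
    };
    config.set_log_callback([](cppkafka::KafkaHandleBase& /*handle*/, int level,
                               const std::string& facility, const std::string& message) {
        // Forward anything at LogWarning severity or worse
        if (level <= static_cast<int>(cppkafka::LogLevel::LogWarning)) {
            std::cerr << "[" << facility << "] " << message << "\n";
        }
    });
    return config;
}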
include/cppkafka/message.h
@@ -33,6 +33,7 @@
 #include <memory>
 #include <cstdint>
 #include <chrono>
+#include <cassert>
 #include <boost/optional.hpp>
 #include <librdkafka/rdkafka.h>
 #include "buffer.h"
@@ -42,6 +43,7 @@
 namespace cppkafka {
 
 class MessageTimestamp;
+class Internal;
 
 /**
  * \brief Thin wrapper over a rdkafka message handle
@@ -55,6 +57,8 @@ class MessageTimestamp;
  */
 class CPPKAFKA_API Message {
 public:
+    friend class MessageInternal;
+    using InternalPtr = std::shared_ptr<Internal>;
     /**
      * Constructs a message that won't take ownership of the given pointer
      */
@@ -82,62 +86,93 @@ public:
     /**
      * Gets the error attribute
      */
-    Error get_error() const;
+    Error get_error() const {
+        assert(handle_);
+        return handle_->err;
+    }
 
     /**
      * Utility function to check for get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF
      */
-    bool is_eof() const;
+    bool is_eof() const {
+        return get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF;
+    }
 
     /**
      * Gets the topic that this message belongs to
      */
-    std::string get_topic() const;
+    std::string get_topic() const {
+        assert(handle_);
+        return rd_kafka_topic_name(handle_->rkt);
+    }
 
     /**
      * Gets the partition that this message belongs to
     */
-    int get_partition() const;
+    int get_partition() const {
+        assert(handle_);
+        return handle_->partition;
+    }
 
     /**
      * Gets the message's payload
      */
-    const Buffer& get_payload() const;
+    const Buffer& get_payload() const {
+        return payload_;
+    }
 
     /**
      * Gets the message's key
      */
-    const Buffer& get_key() const;
+    const Buffer& get_key() const {
+        return key_;
+    }
 
     /**
      * Gets the message offset
      */
-    int64_t get_offset() const;
+    int64_t get_offset() const {
+        assert(handle_);
+        return handle_->offset;
+    }
 
     /**
-     * \brief Gets the private data.
+     * \brief Gets the private user data.
      *
      * This should only be used on messages produced by a Producer that were set a private data
      * attribute
      */
-    void* get_private_data() const;
+    void* get_user_data() const {
+        return user_data_;
+    }
 
     /**
      * \brief Gets this Message's timestamp
     *
      * If calling rd_kafka_message_timestamp returns -1, then boost::none_t will be returned.
      */
-    boost::optional<MessageTimestamp> get_timestamp() const;
+    inline boost::optional<MessageTimestamp> get_timestamp() const;
 
     /**
      * Indicates whether this message is valid (not null)
      */
-    explicit operator bool() const;
+    explicit operator bool() const {
+        return handle_ != nullptr;
+    }
 
     /**
      * Gets the rdkafka message handle
      */
-    rd_kafka_message_t* get_handle() const;
+    rd_kafka_message_t* get_handle() const {
+        return handle_.get();
+    }
+
+    /**
+     * Internal private const data accessor (internal use only)
+     */
+    InternalPtr internal() const {
+        return internal_;
+    }
 private:
     using HandlePtr = std::unique_ptr<rd_kafka_message_t, decltype(&rd_kafka_message_destroy)>;
 
@@ -145,12 +180,17 @@ private:
 
     Message(rd_kafka_message_t* handle, NonOwningTag);
     Message(HandlePtr handle);
+    Message& load_internal();
 
     HandlePtr handle_;
     Buffer payload_;
     Buffer key_;
+    void* user_data_;
+    InternalPtr internal_;
 };
+
+using MessageList = std::vector<Message>;
 
 /**
  * Represents a message's timestamp
 */
@@ -183,6 +223,16 @@ private:
     TimestampType type_;
 };
+
+boost::optional<MessageTimestamp> Message::get_timestamp() const {
+    rd_kafka_timestamp_type_t type = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
+    int64_t timestamp = rd_kafka_message_timestamp(handle_.get(), &type);
+    if (timestamp == -1 || type == RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
+        return {};
+    }
+    return MessageTimestamp(std::chrono::milliseconds(timestamp),
+                            static_cast<MessageTimestamp::TimestampType>(type));
+}
 
 } // cppkafka
 
 #endif // CPPKAFKA_MESSAGE_H
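With the accessors now defined inline, a poll loop needs nothing beyond the declarations above. A sketch of the usual inspection sequence (the printing format is illustrative):

#include <cppkafka/cppkafka.h>
#include <iostream>

void poll_once(cppkafka::Consumer& consumer) {
    cppkafka::Message msg = consumer.poll();
    if (!msg) {
        return;                      // operator bool: nothing was received
    }
    if (msg.get_error()) {
        if (!msg.is_eof()) {         // partition EOF is not a real error
            std::cerr << "error: " << msg.get_error() << "\n";
        }
        return;
    }
    std::cout << msg.get_topic() << "/" << msg.get_partition()
              << "@" << msg.get_offset() << ": " << msg.get_payload() << "\n";
}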
include/cppkafka/message_builder.h
@@ -34,6 +34,7 @@
 #include "buffer.h"
 #include "topic.h"
 #include "macros.h"
+#include "message.h"
 
 namespace cppkafka {
 
@@ -41,7 +42,7 @@ namespace cppkafka {
  * \brief Base template class for message construction
  */
 template <typename BufferType, typename Concrete>
-class CPPKAFKA_API BasicMessageBuilder {
+class BasicMessageBuilder {
 public:
     /**
      * Construct a BasicMessageBuilder
@@ -50,6 +51,11 @@ public:
      */
     BasicMessageBuilder(std::string topic);
+
+    /**
+     * Construct a BasicMessageBuilder from a Message object
+     */
+    BasicMessageBuilder(const Message& message);
 
     /**
      * \brief Construct a message builder from another one that uses a different buffer type
      *
@@ -160,6 +166,13 @@ public:
      * Gets the message's user data pointer
      */
     void* user_data() const;
+
+    /**
+     * Private data accessor (internal use only)
+     */
+    Message::InternalPtr internal() const;
+    Concrete& internal(Message::InternalPtr internal);
+
 private:
     void construct_buffer(BufferType& lhs, const BufferType& rhs);
     Concrete& get_concrete();
@@ -170,18 +183,32 @@ private:
     BufferType payload_;
     std::chrono::milliseconds timestamp_{0};
     void* user_data_;
+    Message::InternalPtr internal_;
 };
 
 template <typename T, typename C>
 BasicMessageBuilder<T, C>::BasicMessageBuilder(std::string topic)
-: topic_(std::move(topic)) {
+: topic_(std::move(topic)),
+  user_data_(nullptr) {
+}
+
+template <typename T, typename C>
+BasicMessageBuilder<T, C>::BasicMessageBuilder(const Message& message)
+: topic_(message.get_topic()),
+  key_(Buffer(message.get_key().get_data(), message.get_key().get_size())),
+  payload_(Buffer(message.get_payload().get_data(), message.get_payload().get_size())),
+  timestamp_(message.get_timestamp() ? message.get_timestamp().get().get_timestamp() :
+                                       std::chrono::milliseconds(0)),
+  user_data_(message.get_user_data()),
+  internal_(message.internal()) {
 }
 
 template <typename T, typename C>
 template <typename U, typename V>
 BasicMessageBuilder<T, C>::BasicMessageBuilder(const BasicMessageBuilder<U, V>& rhs)
 : topic_(rhs.topic()), partition_(rhs.partition()), timestamp_(rhs.timestamp()),
-  user_data_(rhs.user_data()) {
+  user_data_(rhs.user_data()),
+  internal_(rhs.internal()) {
     get_concrete().construct_buffer(key_, rhs.key());
     get_concrete().construct_buffer(payload_, rhs.payload());
 }
@@ -274,6 +301,17 @@ void* BasicMessageBuilder<T, C>::user_data() const {
     return user_data_;
 }
+
+template <typename T, typename C>
+Message::InternalPtr BasicMessageBuilder<T, C>::internal() const {
+    return internal_;
+}
+
+template <typename T, typename C>
+C& BasicMessageBuilder<T, C>::internal(Message::InternalPtr internal) {
+    internal_ = internal;
+    return get_concrete();
+}
 
 template <typename T, typename C>
 void BasicMessageBuilder<T, C>::construct_buffer(T& lhs, const T& rhs) {
     lhs = rhs;
@@ -310,6 +348,15 @@ public:
     void construct_buffer(Buffer& lhs, const T& rhs) {
         lhs = Buffer(rhs);
     }
+
+    MessageBuilder clone() const {
+        return std::move(MessageBuilder(topic()).
+            key(Buffer(key().get_data(), key().get_size())).
+            payload(Buffer(payload().get_data(), payload().get_size())).
+            timestamp(timestamp()).
+            user_data(user_data()).
+            internal(internal()));
+    }
 };
 
 /**
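A sketch of the new builder-from-Message constructor, e.g. for re-producing a consumed message to another topic; this assumes MessageBuilder inherits the base constructors, as the concrete builder does upstream (the topic name is an assumption):

#include <cppkafka/cppkafka.h>

void forward(cppkafka::Producer& producer, const cppkafka::Message& msg) {
    cppkafka::MessageBuilder builder(msg);  // copies topic, key, payload, timestamp and user data
    builder.topic("mirror_topic");          // override the destination
    producer.produce(builder);
}

MessageBuilder::clone() above serves the related case of deep-copying an existing builder, key and payload buffers included.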
86 include/cppkafka/message_internal.h Normal file
@@ -0,0 +1,86 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_MESSAGE_INTERNAL_H
#define CPPKAFKA_MESSAGE_INTERNAL_H

#include <memory>

namespace cppkafka {

class Message;

class Internal {
public:
    virtual ~Internal() = default;
};
using InternalPtr = std::shared_ptr<Internal>;

/**
 * \brief Private message data structure
 */
class MessageInternal {
public:
    MessageInternal(void* user_data, std::shared_ptr<Internal> internal);
    static std::unique_ptr<MessageInternal> load(Message& message);
    void* get_user_data() const;
    InternalPtr get_internal() const;
private:
    void* user_data_;
    InternalPtr internal_;
};

template <typename BuilderType>
class MessageInternalGuard {
public:
    MessageInternalGuard(BuilderType& builder)
    : builder_(builder),
      user_data_(builder.user_data()) {
        if (builder_.internal()) {
            // Swap contents with user_data
            ptr_.reset(new MessageInternal(user_data_, builder_.internal()));
            builder_.user_data(ptr_.get()); //overwrite user data
        }
    }
    ~MessageInternalGuard() {
        //Restore user data
        builder_.user_data(user_data_);
    }
    void release() {
        ptr_.release();
    }
private:
    BuilderType& builder_;
    std::unique_ptr<MessageInternal> ptr_;
    void* user_data_;
};

}

#endif //CPPKAFKA_MESSAGE_INTERNAL_H
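MessageInternal and its guard are internal-use helpers. A hypothetical sketch (not the library's actual produce path) of the pattern the guard enables:

#include <cppkafka/message_builder.h>
#include <cppkafka/message_internal.h>

void produce_guarded(cppkafka::MessageBuilder& builder) {
    // While the guard is alive, builder.user_data() points at a heap-allocated
    // MessageInternal wrapping the original user data plus the internal pointer
    cppkafka::MessageInternalGuard<cppkafka::MessageBuilder> guard(builder);
    bool enqueued = true;  // stand-in for handing builder.user_data() to rdkafka
    if (enqueued) {
        guard.release();   // the delivery path now owns the MessageInternal
    }
    // On scope exit the guard restores the builder's original user data pointer
}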
include/cppkafka/metadata.h
@@ -140,7 +140,24 @@ private:
  */
 class CPPKAFKA_API Metadata {
 public:
-    Metadata(const rd_kafka_metadata_t* ptr);
+    /**
+     * \brief Creates a Metadata object that doesn't take ownership of the handle
+     *
+     * \param handle The handle to be used
+     */
+    static Metadata make_non_owning(const rd_kafka_metadata_t* handle);
+
+    /**
+     * \brief Constructs an empty metadata object
+     *
+     * \remark Using any methods except Metadata::get_handle on an empty metadata is undefined behavior
+     */
+    Metadata();
+
+    /**
+     * Constructor
+     */
+    Metadata(const rd_kafka_metadata_t* handle);
 
     /**
      * Gets the brokers' metadata
@@ -165,9 +182,23 @@ public:
      * \param prefix The prefix to be looked up
      */
     std::vector<TopicMetadata> get_topics_prefixed(const std::string& prefix) const;
+
+    /**
+     * Indicates whether this metadata is valid (not null)
+     */
+    explicit operator bool() const;
+
+    /**
+     * Returns the rdkakfa handle
+     */
+    const rd_kafka_metadata_t* get_handle() const;
 private:
     using HandlePtr = std::unique_ptr<const rd_kafka_metadata_t, decltype(&rd_kafka_metadata_destroy)>;
+
+    struct NonOwningTag { };
+
+    Metadata(const rd_kafka_metadata_t* handle, NonOwningTag);
 
     HandlePtr handle_;
 };
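A sketch combining Metadata with the KafkaHandleBase::get_metadata accessor shown earlier in this change set:

#include <cppkafka/cppkafka.h>
#include <iostream>

void list_topics(const cppkafka::KafkaHandleBase& handle) {
    cppkafka::Metadata metadata = handle.get_metadata();
    if (!metadata) {
        return;  // the new operator bool guards against an empty object
    }
    for (const cppkafka::TopicMetadata& topic : metadata.get_topics()) {
        std::cout << topic.get_name() << "\n";
    }
}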
include/cppkafka/producer.h
@@ -43,6 +43,7 @@ namespace cppkafka {
 class Topic;
 class Buffer;
 class TopicConfiguration;
+class Message;
 
 /**
  * \brief Producer class
@@ -77,48 +78,55 @@ class TopicConfiguration;
  */
 class CPPKAFKA_API Producer : public KafkaHandleBase {
 public:
+    using KafkaHandleBase::pause;
     /**
      * The policy to use for the payload. The default policy is COPY_PAYLOAD
      */
     enum class PayloadPolicy {
+        PASSTHROUGH_PAYLOAD = 0,            ///< Rdkafka will not copy nor free the payload.
         COPY_PAYLOAD = RD_KAFKA_MSG_F_COPY, ///< Means RD_KAFKA_MSG_F_COPY
         FREE_PAYLOAD = RD_KAFKA_MSG_F_FREE  ///< Means RD_KAFKA_MSG_F_FREE
     };
 
     /**
-     * Constructs a producer using the given configuration
+     * \brief Constructs a producer using the given configuration
      *
      * \param config The configuration to use
      */
     Producer(Configuration config);
 
     /**
-     * Sets the payload policy
+     * \brief Sets the payload policy
      *
      * \param policy The payload policy to be used
      */
     void set_payload_policy(PayloadPolicy policy);
 
     /**
-     * Returns the current payload policy
+     * \brief Returns the current payload policy
      */
     PayloadPolicy get_payload_policy() const;
 
     /**
-     * Produces a message
+     * \brief Produces a message
      *
-     * \param topic The topic to write the message to
-     * \param partition The partition to write the message to
-     * \param payload The message payload
+     * \param builder The builder class used to compose a message
      */
     void produce(const MessageBuilder& builder);
+
+    /**
+     * \brief Produces a message
+     *
+     * \param message The message to be produced
+     */
+    void produce(const Message& message);
 
     /**
      * \brief Polls on this handle
      *
      * This translates into a call to rd_kafka_poll.
      *
-     * The timeout used on this call is the one configured via Producer::set_timeout.
+     * \remark The timeout used on this call is the one configured via Producer::set_timeout.
      */
     int poll();
 
@@ -136,7 +144,7 @@ public:
      *
      * This translates into a call to rd_kafka_flush.
      *
-     * The timeout used on this call is the one configured via Producer::set_timeout.
+     * \remark The timeout used on this call is the one configured via Producer::set_timeout.
      */
     void flush();
 
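A sketch of producing through the builder overload (broker address and topic name are assumptions); the new produce(const Message&) overload can re-emit a consumed message unchanged:

#include <cppkafka/cppkafka.h>
#include <string>

int main() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" }  // broker address is an assumption
    };
    cppkafka::Producer producer(config);

    // COPY_PAYLOAD (the default) copies the buffer; PASSTHROUGH_PAYLOAD would
    // require the payload to outlive delivery, since rdkafka neither copies nor frees it
    std::string payload = "hello";
    producer.produce(cppkafka::MessageBuilder("test_topic").partition(0).payload(payload));
    producer.flush();
}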
183 include/cppkafka/queue.h Normal file
@@ -0,0 +1,183 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <vector>
#include <memory>
#include <boost/optional.hpp>
#include <librdkafka/rdkafka.h>
#include "macros.h"
#include "message.h"

#ifndef CPPKAFKA_QUEUE_H
#define CPPKAFKA_QUEUE_H

namespace cppkafka {
/**
 * \brief Represents a rdkafka queue
 *
 * This is a simple wrapper over a rd_kafka_queue_t*
 */
class CPPKAFKA_API Queue {
public:
    /**
     * \brief Creates a Queue object that doesn't take ownership of the handle
     *
     * \param handle The handle to be used
     */
    static Queue make_non_owning(rd_kafka_queue_t* handle);

    /**
     * \brief Constructs an empty queue
     *
     * Note that using any methods except Queue::get_handle on an empty queue is undefined
     * behavior
     */
    Queue();

    /**
     * \brief Constructs a queue using a handle
     *
     * This will take ownership of the handle
     *
     * \param handle The handle to be used
     */
    Queue(rd_kafka_queue_t* handle);

    /**
     * Returns the rdkakfa handle
     */
    rd_kafka_queue_t* get_handle() const;

    /**
     * \brief Returns the length of the queue
     *
     * This translates to a call to rd_kafka_queue_length
     */
    size_t get_length() const;

    /**
     * \brief Forward to another queue
     *
     * This translates to a call to rd_kafka_queue_forward
     */
    void forward_to_queue(const Queue& forward_queue) const;

    /**
     * \brief Disable forwarding to another queue
     *
     * This translates to a call to rd_kafka_queue_forward(NULL)
     */
    void disable_queue_forwarding() const;

    /**
     * \brief Sets the timeout for consume operations
     *
     * This timeout is applied when calling consume()
     *
     * \param timeout The timeout to be set
     */
    void set_timeout(std::chrono::milliseconds timeout);

    /**
     * Gets the configured timeout.
     *
     * \sa Queue::set_timeout
     */
    std::chrono::milliseconds get_timeout() const;

    /**
     * \brief Consume a message from this queue
     *
     * This translates to a call to rd_kafka_consume_queue using the configured timeout for this object
     *
     * \return A message
     */
    Message consume() const;

    /**
     * \brief Consume a message from this queue
     *
     * Same as consume() but the specified timeout will be used instead of the configured one
     *
     * \param timeout The timeout to be used on this call
     *
     * \return A message
     */
    Message consume(std::chrono::milliseconds timeout) const;

    /**
     * \brief Consumes a batch of messages from this queue
     *
     * This translates to a call to rd_kafka_consume_batch_queue using the configured timeout for this object
     *
     * \param max_batch_size The max number of messages to consume if available
     *
     * \return A list of messages. Could be empty if there's nothing to consume
     */
    MessageList consume_batch(size_t max_batch_size) const;

    /**
     * \brief Consumes a batch of messages from this queue
     *
     * Same as Queue::consume_batch(size_t) but the specified timeout will be used instead of the configured one
     *
     * \param max_batch_size The max number of messages to consume if available
     *
     * \param timeout The timeout to be used on this call
     *
     * \return A list of messages. Could be empty if there's nothing to consume
     */
    MessageList consume_batch(size_t max_batch_size, std::chrono::milliseconds timeout) const;

    /**
     * Indicates whether this queue is valid (not null)
     */
    explicit operator bool() const {
        return handle_ != nullptr;
    }

private:
    static const std::chrono::milliseconds DEFAULT_TIMEOUT;

    using HandlePtr = std::unique_ptr<rd_kafka_queue_t, decltype(&rd_kafka_queue_destroy)>;

    struct NonOwningTag { };

    Queue(rd_kafka_queue_t* handle, NonOwningTag);

    // Members
    HandlePtr handle_;
    std::chrono::milliseconds timeout_ms_;
};

using QueueList = std::vector<Queue>;

} // cppkafka

#endif //CPPKAFKA_QUEUE_H
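A sketch against the Queue API; how the queue handle is obtained is left out here, since the accessors that hand out queues live in other files of this change set:

#include <cppkafka/cppkafka.h>
#include <chrono>

void drain(cppkafka::Queue& queue) {
    queue.set_timeout(std::chrono::milliseconds(100));
    // consume_batch honours the timeout configured above
    cppkafka::MessageList messages = queue.consume_batch(32);
    for (cppkafka::Message& msg : messages) {
        if (msg && !msg.get_error()) {
            // ... process msg.get_payload() ...
        }
    }
}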
include/cppkafka/topic.h
@@ -83,6 +83,13 @@ public:
      */
     bool is_partition_available(int partition) const;
 
+    /**
+     * Indicates whether this topic is valid (not null)
+     */
+    explicit operator bool() const {
+        return handle_ != nullptr;
+    }
+
     /**
      * Returns the rdkakfa handle
      */
include/cppkafka/topic_partition.h
@@ -130,7 +130,7 @@ public:
     /**
      * Print to a stream
      */
-    friend std::ostream& operator<<(std::ostream& output, const TopicPartition& rhs);
+    CPPKAFKA_API friend std::ostream& operator<<(std::ostream& output, const TopicPartition& rhs);
 private:
     std::string topic_;
     int partition_;
include/cppkafka/topic_partition_list.h
@@ -34,12 +34,14 @@
 #include <iosfwd>
 #include <algorithm>
 #include <vector>
+#include <set>
 #include <librdkafka/rdkafka.h>
 #include "macros.h"
 
 namespace cppkafka {
 
 class TopicPartition;
+class PartitionMetadata;
 
 using TopicPartitionsListPtr = std::unique_ptr<rd_kafka_topic_partition_list_t,
                                                decltype(&rd_kafka_topic_partition_list_destroy)>;
@@ -49,11 +51,23 @@ using TopicPartitionsListPtr = std::unique_ptr<rd_kafka_topic_partition_list_t,
 using TopicPartitionList = std::vector<TopicPartition>;
 
 // Conversions between rdkafka handles and TopicPartitionList
-CPPKAFKA_API TopicPartitionsListPtr convert(const std::vector<TopicPartition>& topic_partitions);
-CPPKAFKA_API std::vector<TopicPartition> convert(const TopicPartitionsListPtr& topic_partitions);
-CPPKAFKA_API std::vector<TopicPartition> convert(rd_kafka_topic_partition_list_t* topic_partitions);
+CPPKAFKA_API TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions);
+CPPKAFKA_API TopicPartitionList convert(const TopicPartitionsListPtr& topic_partitions);
+CPPKAFKA_API TopicPartitionList convert(rd_kafka_topic_partition_list_t* topic_partitions);
+CPPKAFKA_API TopicPartitionList convert(const std::string& topic,
+                                        const std::vector<PartitionMetadata>& partition_metadata);
 CPPKAFKA_API TopicPartitionsListPtr make_handle(rd_kafka_topic_partition_list_t* handle);
+
+// Extracts a partition list subset belonging to the provided topics (case-insensitive)
+CPPKAFKA_API TopicPartitionList find_matches(const TopicPartitionList& partitions,
+                                             const std::set<std::string>& topics);
+
+// Extracts a partition list subset belonging to the provided partition ids
+// Note: this assumes that all topic partitions in the original list belong to the same topic
+// otherwise the partition ids may not be unique
+CPPKAFKA_API TopicPartitionList find_matches(const TopicPartitionList& partitions,
+                                             const std::set<int>& ids);
 
 CPPKAFKA_API std::ostream& operator<<(std::ostream& output, const TopicPartitionList& rhs);
 
 } // cppkafka
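A sketch of the two find_matches overloads on an assignment list (topic name and partition ids are illustrative):

#include <cppkafka/cppkafka.h>
#include <set>
#include <string>

cppkafka::TopicPartitionList topic_subset(const cppkafka::TopicPartitionList& assignment) {
    // Case-insensitive match on topic names
    return cppkafka::find_matches(assignment, std::set<std::string>{ "Test_Topic" });
}

cppkafka::TopicPartitionList id_subset(const cppkafka::TopicPartitionList& assignment) {
    // Assumes all entries belong to one topic, as the note above warns
    return cppkafka::find_matches(assignment, std::set<int>{ 0, 2, 4 });
}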
include/cppkafka/utils/backoff_committer.h
@@ -33,7 +33,10 @@
 #include <chrono>
 #include <functional>
 #include <thread>
+#include <string>
 #include "../consumer.h"
+#include "backoff_performer.h"
+#include "../detail/callback_invoker.h"
 
 namespace cppkafka {
 
@@ -68,30 +71,17 @@ namespace cppkafka {
  * committer.commit(some_message);
  * \endcode
  */
-class BackoffCommitter {
+class BackoffCommitter : public BackoffPerformer {
 public:
-    using TimeUnit = std::chrono::milliseconds;
-    static constexpr TimeUnit DEFAULT_INITIAL_BACKOFF{100};
-    static constexpr TimeUnit DEFAULT_BACKOFF_STEP{50};
-    static constexpr TimeUnit DEFAULT_MAXIMUM_BACKOFF{1000};
-
     /**
      * \brief The error callback.
      *
-     * Whenever an error occurs comitting an offset, this callback will be executed using
+     * Whenever an error occurs committing an offset, this callback will be executed using
      * the generated error. While the function returns true, then this is offset will be
      * committed again until it either succeeds or the function returns false.
      */
     using ErrorCallback = std::function<bool(Error)>;
 
-    /**
-     * The backoff policy to use
-     */
-    enum class BackoffPolicy {
-        LINEAR,
-        EXPONENTIAL
-    };
-
     /**
      * \brief Constructs an instance using default values
      *
@@ -101,42 +91,6 @@ public:
      */
     BackoffCommitter(Consumer& consumer);
 
-    /**
-     * \brief Sets the backoff policy
-     *
-     * \param policy The backoff policy to be used
-     */
-    void set_backoff_policy(BackoffPolicy policy);
-
-    /**
-     * \brief Sets the initial backoff
-     *
-     * The first time a commit fails, this will be the delay between the request is sent
-     * and we re-try doing so
-     *
-     * \param value The value to be used
-     */
-    void set_initial_backoff(TimeUnit value);
-
-    /**
-     * \brief Sets the backoff step
-     *
-     * When using the linear backoff policy, this will be the delay between sending a request
-     * that fails and re-trying it
-     *
-     * \param value The value to be used
-     */
-    void set_backoff_step(TimeUnit value);
-
-    /**
-     * \brief Sets the maximum backoff
-     *
-     * The backoff used will never be larger than this number
-     *
-     * \param value The value to be used
-     */
-    void set_maximum_backoff(TimeUnit value);
-
     /**
      * \brief Sets the error callback
      *
@@ -164,43 +118,37 @@ public:
      * \param topic_partitions The topic/partition list to be committed
      */
     void commit(const TopicPartitionList& topic_partitions);
+
+    /**
+     * \brief Get the internal Consumer object
+     *
+     * \return A reference to the Consumer
+     */
+    Consumer& get_consumer();
 private:
-    TimeUnit increase_backoff(TimeUnit backoff);
-
+    // Return true to abort and false to continue committing
     template <typename T>
-    void do_commit(const T& object) {
-        TimeUnit backoff = initial_backoff_;
-        while (true) {
-            auto start = std::chrono::steady_clock::now();
-            try {
-                consumer_.commit(object);
-                // If the commit succeeds, we're done
-                return;
-            }
-            catch (const HandleException& ex) {
-                // If there's a callback and it returns false for this message, abort
-                if (callback_ && !callback_(ex.get_error())) {
-                    return;
-                }
-            }
-
-            auto end = std::chrono::steady_clock::now();
-            auto time_elapsed = end - start;
-            // If we still have time left, then sleep
-            if (time_elapsed < backoff) {
-                std::this_thread::sleep_for(backoff - time_elapsed);
-            }
-            // Increase out backoff depending on the policy being used
-            backoff = increase_backoff(backoff);
-        }
-    }
+    bool do_commit(const T& object) {
+        try {
+            consumer_.commit(object);
+            // If the commit succeeds, we're done
+            return true;
+        }
+        catch (const HandleException& ex) {
+            // If there were actually no offsets to commit, return. Retrying won't solve
+            // anything here
+            if (ex.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET) {
+                return true;
+            }
+            // If there's a callback and it returns false for this message, abort.
+            // Otherwise keep committing.
+            CallbackInvoker<ErrorCallback> callback("backoff committer", callback_, &consumer_);
+            return callback && !callback(ex.get_error());
+        }
+    }
 
     Consumer& consumer_;
-    TimeUnit initial_backoff_;
-    TimeUnit backoff_step_;
-    TimeUnit maximum_backoff_;
     ErrorCallback callback_;
-    BackoffPolicy policy_;
 };
 
 } // cppkafka
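A sketch wiring the committer to the knobs it now inherits from BackoffPerformer (the values are illustrative):

#include <cppkafka/cppkafka.h>
#include <chrono>
#include <iostream>

void commit_with_backoff(cppkafka::Consumer& consumer, const cppkafka::Message& msg) {
    cppkafka::BackoffCommitter committer(consumer);
    // BackoffPolicy and the backoff setters come from the BackoffPerformer base
    committer.set_backoff_policy(cppkafka::BackoffCommitter::BackoffPolicy::EXPONENTIAL);
    committer.set_initial_backoff(std::chrono::milliseconds(100));
    committer.set_error_callback([](cppkafka::Error error) {
        std::cerr << "commit failed: " << error << ", retrying\n";
        return true;  // keep retrying; returning false would give up
    });
    committer.commit(msg);  // retries with backoff until it succeeds or the callback aborts
}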
150
include/cppkafka/utils/backoff_performer.h
Normal file
150
include/cppkafka/utils/backoff_performer.h
Normal file
@@ -0,0 +1,150 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_BACKOFF_PERFORMER_H
#define CPPKAFKA_BACKOFF_PERFORMER_H

#include <chrono>
#include <functional>
#include <thread>
#include "../consumer.h"

namespace cppkafka {

/**
 * \brief Performs an action and backs off (linearly or exponentially) while it fails
 */
class CPPKAFKA_API BackoffPerformer {
public:
    using TimeUnit = std::chrono::milliseconds;
    static const TimeUnit DEFAULT_INITIAL_BACKOFF;
    static const TimeUnit DEFAULT_BACKOFF_STEP;
    static const TimeUnit DEFAULT_MAXIMUM_BACKOFF;
    static const size_t DEFAULT_MAXIMUM_RETRIES;

    /**
     * The backoff policy to use
     */
    enum class BackoffPolicy {
        LINEAR,
        EXPONENTIAL
    };

    /**
     * Constructs an instance of backoff performer
     *
     * By default, the linear backoff policy is used
     */
    BackoffPerformer();

    /**
     * \brief Sets the backoff policy
     *
     * \param policy The backoff policy to be used
     */
    void set_backoff_policy(BackoffPolicy policy);

    /**
     * \brief Sets the initial backoff
     *
     * The first time a commit fails, this will be the delay between sending the request
     * and re-trying it
     *
     * \param value The value to be used
     */
    void set_initial_backoff(TimeUnit value);

    /**
     * \brief Sets the backoff step
     *
     * When using the linear backoff policy, this will be the delay between sending a request
     * that fails and re-trying it
     *
     * \param value The value to be used
     */
    void set_backoff_step(TimeUnit value);

    /**
     * \brief Sets the maximum backoff
     *
     * The backoff used will never be larger than this number
     *
     * \param value The value to be used
     */
    void set_maximum_backoff(TimeUnit value);

    /**
     * \brief Sets the maximum number of retries for the commit operation
     *
     * \param value The number of retries before giving up
     *
     * \remark Setting value to 0 is equivalent to 1, i.e. it will try at least once
     */
    void set_maximum_retries(size_t value);

    /**
     * \brief Executes an action and backs off if it fails
     *
     * This will call the functor and will retry in case it returns false
     *
     * \param callback The action to be executed
     */
    template <typename Functor>
    void perform(const Functor& callback) {
        TimeUnit backoff = initial_backoff_;
        size_t retries = maximum_retries_;
        while (retries--) {
            auto start = std::chrono::steady_clock::now();
            // If the callback returns true, we're done
            if (callback()) {
                return;
            }
            auto end = std::chrono::steady_clock::now();
            auto time_elapsed = end - start;
            // If we still have time left, then sleep
            if (time_elapsed < backoff) {
                std::this_thread::sleep_for(backoff - time_elapsed);
            }
            // Increase our backoff depending on the policy being used
            backoff = increase_backoff(backoff);
        }
    }
private:
    TimeUnit increase_backoff(TimeUnit backoff);

    TimeUnit initial_backoff_;
    TimeUnit backoff_step_;
    TimeUnit maximum_backoff_;
    BackoffPolicy policy_;
    size_t maximum_retries_;
};

} // cppkafka

#endif // CPPKAFKA_BACKOFF_PERFORMER_H
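As a sketch of what perform() gives you, it can wrap any fallible action; the helper called inside the lambda below is hypothetical:

    BackoffPerformer performer;
    performer.set_backoff_policy(BackoffPerformer::BackoffPolicy::LINEAR);
    performer.set_initial_backoff(std::chrono::milliseconds(100));
    performer.set_backoff_step(std::chrono::milliseconds(200));
    performer.set_maximum_backoff(std::chrono::seconds(2));
    performer.perform([&]() {
        // Return true on success to stop retrying, false to back off and try again
        return try_send_heartbeat(); // hypothetical action
    });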
@@ -31,15 +31,19 @@
 #define CPPKAFKA_BUFFERED_PRODUCER_H

 #include <string>
-#include <queue>
+#include <deque>
 #include <cstdint>
 #include <algorithm>
 #include <unordered_set>
 #include <unordered_map>
 #include <map>
+#include <mutex>
+#include <atomic>
+#include <future>
 #include <boost/optional.hpp>
 #include "../producer.h"
-#include "../message.h"
+#include "../detail/callback_invoker.h"
+#include "../message_internal.h"

 namespace cppkafka {

@@ -50,27 +54,63 @@ namespace cppkafka {
 * to produce them just as you would using the Producer class.
 *
 * When calling either flush or wait_for_acks, the buffered producer will block until all
-* produced messages (either in a buffer or non buffered way) are acknowledged by the kafka
-* brokers.
+* produced messages (either buffered or sent directly) are acknowledged by the kafka brokers.
 *
 * When producing messages, this class will handle cases where the producer's queue is full so it
 * will poll until the production is successful.
 *
-* This class is not thread safe.
+* \remark This class is thread safe.
+*
+* \remark Releasing buffers: For high-performance applications preferring a zero-copy approach
+* (using PayloadPolicy::PASSTHROUGH_PAYLOAD - see warning below) it is very important to know when
+* to safely release owned message buffers. One way is to perform individual cleanup when
+* ProduceSuccessCallback is called. If the application produces messages in batches or has a
+* bursty behavior another way is to check when flush operations have fully completed with
+* get_buffer_size()==0 && get_flushes_in_progress()==0. Note that get_pending_acks()==0
+* is not always a guarantee as there is a very small window when flush() starts where
+* get_buffer_size()==0 && get_pending_acks()==0 but messages have not yet been sent to the
+* remote broker. For applications producing messages w/o buffering, get_pending_acks()==0
+* is sufficient.
+*
+* \warning Delivery Report Callback: This class makes internal use of this function and will
+* overwrite anything the user has supplied as part of the configuration options. Instead the user
+* should call set_produce_success_callback() and set_produce_failure_callback() respectively.
+*
+* \warning Payload Policy: For payload-owning BufferTypes such as std::string or std::vector<char>
+* the default policy is set to Producer::PayloadPolicy::COPY_PAYLOAD. For the specific non-payload owning type
+* cppkafka::Buffer the policy is Producer::PayloadPolicy::PASSTHROUGH_PAYLOAD. In this case, librdkafka
+* shall not make any internal copies of the message and it is the application's responsibility to free
+* the messages *after* the ProduceSuccessCallback has reported a successful delivery to avoid memory
+* corruption.
 */
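A hedged sketch of the buffer-release pattern the remark above describes; the pool object and its methods are hypothetical, while the PASSTHROUGH_PAYLOAD default for cppkafka::Buffer is stated in the warning above:

    BufferedProducer<Buffer> producer(config); // Buffer => PASSTHROUGH_PAYLOAD by default
    producer.set_produce_success_callback([&](const Message& msg) {
        // Safe point to release the application-owned bytes backing this message
        pool.release(msg.get_payload().get_data()); // hypothetical pool
    });
    producer.flush();
    // For batch or bursty producers, reclaiming in bulk is also safe once:
    if (producer.get_buffer_size() == 0 && producer.get_flushes_in_progress() == 0) {
        pool.release_all(); // hypothetical
    }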
 template <typename BufferType>
-class BufferedProducer {
+class CPPKAFKA_API BufferedProducer {
 public:
+    enum class FlushMethod {
+        Sync,   ///< Empty the buffer and wait for acks from the broker
+        Async   ///< Empty the buffer and don't wait for acks
+    };
     /**
      * Concrete builder
      */
     using Builder = ConcreteMessageBuilder<BufferType>;

     /**
-     * Callback to indicate a message failed to be produced.
+     * Callback to indicate a message was delivered to the broker
+     */
+    using ProduceSuccessCallback = std::function<void(const Message&)>;
+
+    /**
+     * Callback to indicate a message failed to be produced by the broker.
+     *
+     * The returned bool indicates whether the BufferedProducer should try to produce
+     * the message again after each failure.
      */
     using ProduceFailureCallback = std::function<bool(const Message&)>;

+    /**
+     * Callback to indicate a message failed to be flushed
+     */
+    using FlushFailureCallback = std::function<bool(const MessageBuilder&, Error error)>;
+
     /**
      * \brief Constructs a buffered producer using the provided configuration
      *
@@ -100,33 +140,182 @@ public:
     void add_message(Builder builder);

     /**
-     * \brief Produces a message without buffering it
+     * \brief Produces a message asynchronously without buffering it
      *
      * The message will still be tracked so that a call to flush or wait_for_acks will actually
      * wait for it to be acknowledged.
      *
      * \param builder The builder that contains the message to be produced
+     *
+     * \remark This method throws cppkafka::HandleException on failure
      */
     void produce(const MessageBuilder& builder);

+    /**
+     * \brief Produces a message synchronously without buffering it
+     *
+     * In case of failure, the message will be replayed until 'max_number_retries' is reached
+     * or until the user ProduceFailureCallback returns false.
+     *
+     * \param builder The builder that contains the message to be produced
+     *
+     * \remark This method throws cppkafka::HandleException on failure
+     */
+    void sync_produce(const MessageBuilder& builder);
+
+    /**
+     * \brief Produces a message asynchronously without buffering it
+     *
+     * The message will still be tracked so that a call to flush or wait_for_acks will actually
+     * wait for it to be acknowledged.
+     *
+     * \param message The message to be produced
+     *
+     * \remark This method throws cppkafka::HandleException on failure
+     */
+    void produce(const Message& message);
+
+    /**
+     * \brief Flushes all buffered messages and returns immediately.
+     *
+     * Similar to flush, it will send all messages but will not wait for acks to complete.
+     */
+    void async_flush();
+
     /**
      * \brief Flushes the buffered messages.
      *
      * This will send all messages and keep waiting until all of them are acknowledged (this is
      * done by calling wait_for_acks).
+     *
+     * \param preserve_order If set to True, each message in the queue will be flushed only when the
+     *                       previous message ack is received. This may result in performance degradation
+     *                       as messages are sent one at a time. This calls sync_produce() on each message
+     *                       in the buffer. If set to False, all messages are flushed in one batch before
+     *                       waiting for acks, however message reordering may occur if the librdkafka
+     *                       setting 'message.send.max.retries' is greater than 0.
+     *
+     * \remark Although it is possible to call flush from multiple threads concurrently, better
+     *         performance is achieved when called from the same thread or when serialized
+     *         with respect to other threads.
      */
-    void flush();
+    void flush(bool preserve_order = false);
+
+    /**
+     * \brief Flushes the buffered messages and waits up to 'timeout'
+     *
+     * \param timeout The maximum time to wait until all acks are received
+     *
+     * \param preserve_order True to preserve message ordering, False otherwise. See flush above for more details.
+     *
+     * \return True if the operation completes and all acks have been received.
+     */
+    bool flush(std::chrono::milliseconds timeout, bool preserve_order = false);

     /**
      * Waits for produced messages' acknowledgements from the brokers
      */
     void wait_for_acks();

+    /**
+     * Waits for produced messages' acknowledgements from the brokers up to 'timeout'.
+     *
+     * \return True if the operation completes and all acks have been received.
+     */
+    bool wait_for_acks(std::chrono::milliseconds timeout);
+
     /**
      * Clears any buffered messages
      */
     void clear();

+    /**
+     * \brief Get the number of messages in the buffer
+     *
+     * \return The number of messages
+     */
+    size_t get_buffer_size() const;
+
+    /**
+     * \brief Sets the maximum number of messages to be enqueued in the buffer.
+     *
+     * After 'max_buffer_size' is reached, flush() will be called automatically.
+     *
+     * \param max_buffer_size The max size of the internal buffer. Allowed values are:
+     *                        -1 : Unlimited buffer size. Must be flushed manually (default value)
+     *                         0 : Don't buffer anything. add_message() behaves like produce()
+     *                       > 0 : Max number of messages before flush() is called.
+     *
+     * \remark add_message() will block when 'max_buffer_size' is reached due to flush()
+     */
+    void set_max_buffer_size(ssize_t max_buffer_size);
+
+    /**
+     * \brief Return the maximum allowed buffer size.
+     *
+     * \return The max buffer size. A value of -1 indicates an unbounded buffer.
+     */
+    ssize_t get_max_buffer_size() const;
+
+    /**
+     * \brief Sets the method used to flush the internal buffer when 'max_buffer_size' is reached.
+     *        Default is 'Sync'
+     *
+     * \param method The method
+     */
+    void set_flush_method(FlushMethod method);
+
+    /**
+     * \brief Gets the method used to flush the internal buffer.
+     *
+     * \return The method
+     */
+    FlushMethod get_flush_method() const;
+
+    /**
+     * \brief Get the number of messages not yet acked by the broker
+     *
+     * \return The number of messages
+     */
+    size_t get_pending_acks() const;
+
+    /**
+     * \brief Get the total number of messages successfully produced since the beginning
+     *
+     * \return The number of messages
+     */
+    size_t get_total_messages_produced() const;
+
+    /**
+     * \brief Get the total number of messages dropped since the beginning
+     *
+     * \return The number of messages
+     */
+    size_t get_total_messages_dropped() const;
+
+    /**
+     * \brief Get the total outstanding flush operations in progress
+     *
+     * Since flush can be called from multiple threads concurrently, this counter indicates
+     * how many operations are currently in progress.
+     *
+     * \return The number of outstanding flush operations.
+     */
+    size_t get_flushes_in_progress() const;
+
+    /**
+     * \brief Sets the maximum number of retries per message until giving up
+     *
+     * Default is 5
+     */
+    void set_max_number_retries(size_t max_number_retries);
+
+    /**
+     * \brief Gets the max number of retries
+     *
+     * \return The number of retries
+     */
+    size_t get_max_number_retries() const;
+
     /**
      * Gets the Producer object
      */
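Putting the buffering knobs above together in a hedged sketch; the broker address, topic name and sizes are illustrative:

    Configuration config = {{ "metadata.broker.list", "127.0.0.1:9092" }};
    BufferedProducer<std::string> producer(config);
    producer.set_max_buffer_size(100);       // auto-flush every 100 buffered messages
    producer.set_flush_method(BufferedProducer<std::string>::FlushMethod::Async);
    for (int i = 0; i < 1000; ++i) {
        producer.add_message(producer.make_builder("my_topic").payload(std::to_string(i)));
    }
    producer.flush(true); // drain what's left, preserving per-message ordering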
@@ -150,59 +339,252 @@ public:
      * false. Note that if the callback returns false, then the message will be discarded.
      *
      * \param callback The callback to be set
+     *
+     * \remark It is *highly* recommended to set this callback as your message may be produced
+     *         indefinitely if there's a remote error.
+     *
+     * \warning Do not call any method on the BufferedProducer while inside this callback.
      */
     void set_produce_failure_callback(ProduceFailureCallback callback);
-private:
-    using QueueType = std::queue<Builder>;

+    /**
+     * \brief Sets the successful delivery callback
+     *
+     * The user can use this function to cleanup any application-owned message buffers.
+     *
+     * \param callback The callback to be set
+     */
+    void set_produce_success_callback(ProduceSuccessCallback callback);
+
+    /**
+     * \brief Sets the local message produce failure callback
+     *
+     * This callback will be called when local message production fails during a flush() operation.
+     * Failure errors are typically payload too large, unknown topic or unknown partition.
+     * Note that if the callback returns false, the message will be dropped from the buffer,
+     * otherwise it will be re-enqueued for later retry.
+     *
+     * \param callback
+     *
+     * \warning Do not call any method on the BufferedProducer while inside this callback
+     */
+    void set_flush_failure_callback(FlushFailureCallback callback);
+
+    struct TestParameters {
+        bool force_delivery_error_;
+        bool force_produce_error_;
+    };
+protected:
+    //For testing purposes only
+#ifdef KAFKA_TEST_INSTANCE
+    void set_test_parameters(TestParameters *test_params) {
+        test_params_ = test_params;
+    }
+    TestParameters* get_test_parameters() {
+        return test_params_;
+    }
+#else
+    TestParameters* get_test_parameters() {
+        return nullptr;
+    }
+#endif
+
+private:
+    using QueueType = std::deque<Builder>;
+    enum class MessagePriority { Low, High };
+    enum class SenderType { Sync, Async };
+
+    template <typename T>
+    struct CounterGuard {
+        CounterGuard(std::atomic<T>& counter) : counter_(counter) { ++counter_; }
+        ~CounterGuard() { --counter_; }
+        std::atomic<T>& counter_;
+    };
+
+    struct Tracker : public Internal {
+        Tracker(SenderType sender, size_t num_retries)
+        : sender_(sender), num_retries_(num_retries)
+        {}
+        std::future<bool> get_new_future() {
+            should_retry_ = std::promise<bool>(); //reset shared data
+            return should_retry_.get_future(); //issue new future
+        }
+        SenderType sender_;
+        std::promise<bool> should_retry_;
+        size_t num_retries_;
+    };
+    using TrackerPtr = std::shared_ptr<Tracker>;
+
+    // Returns existing tracker or creates new one
     template <typename BuilderType>
-    void do_add_message(BuilderType&& builder);
-    void produce_message(const MessageBuilder& message);
+    TrackerPtr add_tracker(SenderType sender, BuilderType& builder) {
+        if (has_internal_data_) {
+            if (!builder.internal()) {
+                // Add message tracker only if it hasn't been added before
+                builder.internal(std::make_shared<Tracker>(sender, max_number_retries_));
+            }
+            return std::static_pointer_cast<Tracker>(builder.internal());
+        }
+        return nullptr;
+    }
+    template <typename BuilderType>
+    void do_add_message(BuilderType&& builder, MessagePriority priority, bool do_flush);
+    template <typename BuilderType>
+    void produce_message(BuilderType&& builder);
     Configuration prepare_configuration(Configuration config);
     void on_delivery_report(const Message& message);
+    template <typename BuilderType>
+    void async_produce(BuilderType&& message, bool throw_on_error);

+    // Members
     Producer producer_;
     QueueType messages_;
+    mutable std::mutex mutex_;
+    ProduceSuccessCallback produce_success_callback_;
     ProduceFailureCallback produce_failure_callback_;
-    size_t expected_acks_{0};
-    size_t messages_acked_{0};
+    FlushFailureCallback flush_failure_callback_;
+    ssize_t max_buffer_size_{-1};
+    FlushMethod flush_method_{FlushMethod::Sync};
+    std::atomic<size_t> pending_acks_{0};
+    std::atomic<size_t> flushes_in_progress_{0};
+    std::atomic<size_t> total_messages_produced_{0};
+    std::atomic<size_t> total_messages_dropped_{0};
+    int max_number_retries_{0};
+    bool has_internal_data_{false};
+#ifdef KAFKA_TEST_INSTANCE
+    TestParameters* test_params_;
+#endif
 };

+template <typename BufferType>
+Producer::PayloadPolicy get_default_payload_policy() {
+    return Producer::PayloadPolicy::COPY_PAYLOAD;
+}
+
+template <> inline
+Producer::PayloadPolicy get_default_payload_policy<Buffer>() {
+    return Producer::PayloadPolicy::PASSTHROUGH_PAYLOAD;
+}
+
 template <typename BufferType>
 BufferedProducer<BufferType>::BufferedProducer(Configuration config)
 : producer_(prepare_configuration(std::move(config))) {
+    producer_.set_payload_policy(get_default_payload_policy<BufferType>());
+#ifdef KAFKA_TEST_INSTANCE
+    test_params_ = nullptr;
+#endif
 }

 template <typename BufferType>
 void BufferedProducer<BufferType>::add_message(const MessageBuilder& builder) {
-    do_add_message(builder);
+    add_message(Builder(builder)); //make ConcreteBuilder
 }

 template <typename BufferType>
 void BufferedProducer<BufferType>::add_message(Builder builder) {
-    do_add_message(move(builder));
+    add_tracker(SenderType::Async, builder);
+    do_add_message(move(builder), MessagePriority::Low, true);
 }

 template <typename BufferType>
 void BufferedProducer<BufferType>::produce(const MessageBuilder& builder) {
-    expected_acks_++;
-    produce_message(builder);
+    if (has_internal_data_) {
+        MessageBuilder builder_clone(builder.clone());
+        add_tracker(SenderType::Async, builder_clone);
+        async_produce(builder_clone, true);
+    }
+    else {
+        async_produce(builder, true);
+    }
 }

 template <typename BufferType>
-void BufferedProducer<BufferType>::flush() {
-    while (!messages_.empty()) {
-        produce_message(messages_.front());
-        messages_.pop();
-    }
-
-    wait_for_acks();
+void BufferedProducer<BufferType>::sync_produce(const MessageBuilder& builder) {
+    if (has_internal_data_) {
+        MessageBuilder builder_clone(builder.clone());
+        TrackerPtr tracker = add_tracker(SenderType::Sync, builder_clone);
+        // produce until we succeed or we reach max retry limit
+        std::future<bool> should_retry;
+        do {
+            should_retry = tracker->get_new_future();
+            produce_message(builder_clone);
+            wait_for_acks();
+        }
+        while (should_retry.get());
+    }
+    else {
+        // produce once
+        produce_message(builder);
+        wait_for_acks();
+    }
+}
+
+template <typename BufferType>
+void BufferedProducer<BufferType>::produce(const Message& message) {
+    async_produce(MessageBuilder(message), true);
+}
+
+template <typename BufferType>
+void BufferedProducer<BufferType>::async_flush() {
+    CounterGuard<size_t> counter_guard(flushes_in_progress_);
+    QueueType flush_queue; // flush from temporary queue
+    {
+        std::lock_guard<std::mutex> lock(mutex_);
+        std::swap(messages_, flush_queue);
+    }
+    while (!flush_queue.empty()) {
+        async_produce(std::move(flush_queue.front()), false);
+        flush_queue.pop_front();
+    }
+}
+
+template <typename BufferType>
+void BufferedProducer<BufferType>::flush(bool preserve_order) {
+    if (preserve_order) {
+        CounterGuard<size_t> counter_guard(flushes_in_progress_);
+        QueueType flush_queue; // flush from temporary queue
+        {
+            std::lock_guard<std::mutex> lock(mutex_);
+            std::swap(messages_, flush_queue);
+        }
+        while (!flush_queue.empty()) {
+            sync_produce(flush_queue.front());
+            flush_queue.pop_front();
+        }
+    }
+    else {
+        async_flush();
+        wait_for_acks();
+    }
+}
+
+template <typename BufferType>
+bool BufferedProducer<BufferType>::flush(std::chrono::milliseconds timeout,
+                                         bool preserve_order) {
+    if (preserve_order) {
+        CounterGuard<size_t> counter_guard(flushes_in_progress_);
+        QueueType flush_queue; // flush from temporary queue
+        {
+            std::lock_guard<std::mutex> lock(mutex_);
+            std::swap(messages_, flush_queue);
+        }
+        auto start_time = std::chrono::high_resolution_clock::now();
+        while (!flush_queue.empty() &&
+               (std::chrono::duration_cast<std::chrono::milliseconds>
+                   (std::chrono::high_resolution_clock::now() - start_time) < timeout)) {
+            sync_produce(flush_queue.front());
+            flush_queue.pop_front();
+        }
+        return flush_queue.empty(); // report whether everything was flushed in time
+    }
+    else {
+        async_flush();
+        return wait_for_acks(timeout);
+    }
 }

 template <typename BufferType>
 void BufferedProducer<BufferType>::wait_for_acks() {
-    while (messages_acked_ < expected_acks_) {
+    while (pending_acks_ > 0) {
         try {
             producer_.flush();
         }
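The timed variants above make it possible to bound shutdown latency; a small sketch, with an illustrative timeout value:

    producer.async_flush(); // hand everything to librdkafka without blocking on acks
    if (!producer.wait_for_acks(std::chrono::milliseconds(500))) {
        // Some acks are still outstanding; get_pending_acks() says how many
    }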
@@ -216,21 +598,91 @@ void BufferedProducer<BufferType>::wait_for_acks() {
         }
     }
-    expected_acks_ = 0;
-    messages_acked_ = 0;
 }

+template <typename BufferType>
+bool BufferedProducer<BufferType>::wait_for_acks(std::chrono::milliseconds timeout) {
+    auto remaining = timeout;
+    auto start_time = std::chrono::high_resolution_clock::now();
+    while ((pending_acks_ > 0) && (remaining.count() > 0)) {
+        try {
+            producer_.flush(remaining);
+        }
+        catch (const HandleException& ex) {
+            // If we just hit the timeout, keep going, otherwise re-throw
+            if (ex.get_error() == RD_KAFKA_RESP_ERR__TIMED_OUT) {
+                // There is no time remaining
+                return (pending_acks_ == 0);
+            }
+            else {
+                throw;
+            }
+        }
+        // calculate remaining time
+        remaining = timeout - std::chrono::duration_cast<std::chrono::milliseconds>
+            (std::chrono::high_resolution_clock::now() - start_time);
+    }
+    return (pending_acks_ == 0);
+}
+
 template <typename BufferType>
 void BufferedProducer<BufferType>::clear() {
+    std::lock_guard<std::mutex> lock(mutex_);
     QueueType tmp;
     std::swap(tmp, messages_);
 }

+template <typename BufferType>
+size_t BufferedProducer<BufferType>::get_buffer_size() const {
+    return messages_.size();
+}
+
+template <typename BufferType>
+void BufferedProducer<BufferType>::set_max_buffer_size(ssize_t max_buffer_size) {
+    if (max_buffer_size < -1) {
+        throw Exception("Invalid buffer size.");
+    }
+    max_buffer_size_ = max_buffer_size;
+}
+
+template <typename BufferType>
+ssize_t BufferedProducer<BufferType>::get_max_buffer_size() const {
+    return max_buffer_size_;
+}
+
+template <typename BufferType>
+void BufferedProducer<BufferType>::set_flush_method(FlushMethod method) {
+    flush_method_ = method;
+}
+
+template <typename BufferType>
+typename BufferedProducer<BufferType>::FlushMethod
+BufferedProducer<BufferType>::get_flush_method() const {
+    return flush_method_;
+}
+
 template <typename BufferType>
 template <typename BuilderType>
-void BufferedProducer<BufferType>::do_add_message(BuilderType&& builder) {
-    expected_acks_++;
-    messages_.push(std::move(builder));
+void BufferedProducer<BufferType>::do_add_message(BuilderType&& builder,
+                                                  MessagePriority priority,
+                                                  bool do_flush) {
+    {
+        std::lock_guard<std::mutex> lock(mutex_);
+        if (priority == MessagePriority::High) {
+            messages_.emplace_front(std::forward<BuilderType>(builder));
+        }
+        else {
+            messages_.emplace_back(std::forward<BuilderType>(builder));
+        }
+    }
+    if (do_flush && (max_buffer_size_ >= 0) && (max_buffer_size_ <= (ssize_t)messages_.size())) {
+        if (flush_method_ == FlushMethod::Sync) {
+            flush();
+        }
+        else {
+            async_flush();
+        }
+    }
 }

 template <typename BufferType>
@@ -243,6 +695,39 @@ const Producer& BufferedProducer<BufferType>::get_producer() const {
     return producer_;
 }

+template <typename BufferType>
+size_t BufferedProducer<BufferType>::get_pending_acks() const {
+    return pending_acks_;
+}
+
+template <typename BufferType>
+size_t BufferedProducer<BufferType>::get_total_messages_produced() const {
+    return total_messages_produced_;
+}
+
+template <typename BufferType>
+size_t BufferedProducer<BufferType>::get_total_messages_dropped() const {
+    return total_messages_dropped_;
+}
+
+template <typename BufferType>
+size_t BufferedProducer<BufferType>::get_flushes_in_progress() const {
+    return flushes_in_progress_;
+}
+
+template <typename BufferType>
+void BufferedProducer<BufferType>::set_max_number_retries(size_t max_number_retries) {
+    if (!has_internal_data_ && (max_number_retries > 0)) {
+        has_internal_data_ = true; //enable once
+    }
+    max_number_retries_ = max_number_retries;
+}
+
+template <typename BufferType>
+size_t BufferedProducer<BufferType>::get_max_number_retries() const {
+    return max_number_retries_;
+}
+
 template <typename BufferType>
 typename BufferedProducer<BufferType>::Builder
 BufferedProducer<BufferType>::make_builder(std::string topic) {
@@ -255,16 +740,30 @@ void BufferedProducer<BufferType>::set_produce_failure_callback(ProduceFailureCa
 }

 template <typename BufferType>
-void BufferedProducer<BufferType>::produce_message(const MessageBuilder& builder) {
-    bool sent = false;
-    while (!sent) {
+void BufferedProducer<BufferType>::set_produce_success_callback(ProduceSuccessCallback callback) {
+    produce_success_callback_ = std::move(callback);
+}
+
+template <typename BufferType>
+void BufferedProducer<BufferType>::set_flush_failure_callback(FlushFailureCallback callback) {
+    flush_failure_callback_ = std::move(callback);
+}
+
+template <typename BufferType>
+template <typename BuilderType>
+void BufferedProducer<BufferType>::produce_message(BuilderType&& builder) {
+    using builder_type = typename std::decay<BuilderType>::type;
+    while (true) {
         try {
+            MessageInternalGuard<builder_type> internal_guard(const_cast<builder_type&>(builder));
             producer_.produce(builder);
-            sent = true;
+            internal_guard.release();
+            // Sent successfully
+            ++pending_acks_;
+            break;
         }
         catch (const HandleException& ex) {
-            const Error error = ex.get_error();
-            if (error == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+            if (ex.get_error() == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
                 // If the output queue is full, then just poll
                 producer_.poll();
             }
@@ -275,6 +774,34 @@ void BufferedProducer<BufferType>::produce_message(const MessageBuilder& builder
         }
     }

+template <typename BufferType>
+template <typename BuilderType>
+void BufferedProducer<BufferType>::async_produce(BuilderType&& builder, bool throw_on_error) {
+    try {
+        TestParameters* test_params = get_test_parameters();
+        if (test_params && test_params->force_produce_error_) {
+            throw HandleException(Error(RD_KAFKA_RESP_ERR_UNKNOWN));
+        }
+        produce_message(builder);
+    }
+    catch (const HandleException& ex) {
+        // If we have a flush failure callback and it returns true, we retry producing this message later
+        CallbackInvoker<FlushFailureCallback> callback("flush failure", flush_failure_callback_, &producer_);
+        if (!callback || callback(builder, ex.get_error())) {
+            TrackerPtr tracker = std::static_pointer_cast<Tracker>(builder.internal());
+            if (tracker && tracker->num_retries_ > 0) {
+                --tracker->num_retries_;
+                do_add_message(std::forward<BuilderType>(builder), MessagePriority::High, false);
+                return;
+            }
+        }
+        ++total_messages_dropped_;
+        if (throw_on_error) {
+            throw;
+        }
+    }
+}
+
 template <typename BufferType>
 Configuration BufferedProducer<BufferType>::prepare_configuration(Configuration config) {
     using std::placeholders::_2;
@@ -285,26 +812,46 @@ Configuration BufferedProducer<BufferType>::prepare_configuration(Configuration

 template <typename BufferType>
 void BufferedProducer<BufferType>::on_delivery_report(const Message& message) {
-    // We should produce this message again if it has an error and we either don't have a
-    // produce failure callback or we have one but it returns true
-    bool should_produce = message.get_error() &&
-                          (!produce_failure_callback_ || produce_failure_callback_(message));
-    if (should_produce) {
-        MessageBuilder builder(message.get_topic());
-        const auto& key = message.get_key();
-        const auto& payload = message.get_payload();
-        builder.partition(message.get_partition())
-               .key(Buffer(key.get_data(), key.get_size()))
-               .payload(Buffer(payload.get_data(), payload.get_size()));
-        if (message.get_timestamp()) {
-            builder.timestamp(message.get_timestamp()->get_timestamp());
-        }
-        produce_message(builder);
-        return;
-    }
-    // If production was successful or the produce failure callback returned false, then
-    // let's consider it to be acked
-    messages_acked_++;
+    // Get tracker data
+    TestParameters* test_params = get_test_parameters();
+    TrackerPtr tracker = has_internal_data_ ?
+        std::static_pointer_cast<Tracker>(MessageInternal::load(const_cast<Message&>(message))->get_internal()) : nullptr;
+    bool should_retry = false;
+    if (message.get_error() || (test_params && test_params->force_delivery_error_)) {
+        // We should produce this message again if we don't have a produce failure callback
+        // or we have one but it returns true
+        CallbackInvoker<ProduceFailureCallback> callback("produce failure", produce_failure_callback_, &producer_);
+        if (!callback || callback(message)) {
+            // Check if we have reached the maximum retry limit
+            if (tracker && tracker->num_retries_ > 0) {
+                --tracker->num_retries_;
+                if (tracker->sender_ == SenderType::Async) {
+                    // Re-enqueue for later retransmission with higher priority (i.e. front of the queue)
+                    do_add_message(Builder(message), MessagePriority::High, false);
+                }
+                should_retry = true;
+            }
+            else {
+                ++total_messages_dropped_;
+            }
+        }
+        else {
+            ++total_messages_dropped_;
+        }
+    }
+    else {
+        // Successful delivery
+        CallbackInvoker<ProduceSuccessCallback>("delivery success", produce_success_callback_, &producer_)(message);
+        // Increment the total successful transmissions
+        ++total_messages_produced_;
+    }
+    // Signal producers
+    if (tracker) {
+        tracker->should_retry_.set_value(should_retry);
+    }
+    // Decrement the expected acks
+    --pending_acks_;
+    assert(pending_acks_ != (size_t)-1); // Prevent underflow
 }

 } // cppkafka
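A sketch of how the retry machinery above is driven from user code; the topic, payload, retry limit and error-code policy are all illustrative:

    producer.set_max_number_retries(3); // enables the per-message Tracker internally
    producer.set_produce_failure_callback([](const Message& msg) {
        // Return true to retry this message until its retry budget runs out,
        // false to drop it immediately
        return msg.get_error().get_error() != RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE;
    });
    std::string payload = "important";
    MessageBuilder builder("my_topic");
    builder.payload(Buffer(payload.data(), payload.size()));
    producer.sync_produce(builder); // replays until acked or retries are exhausted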
@@ -43,7 +43,7 @@ namespace cppkafka {
  * \brief Events generated by a CompactedTopicProcessor
  */
 template <typename Key, typename Value>
-class CompactedTopicEvent {
+class CPPKAFKA_API CompactedTopicEvent {
 public:
     /**
      * \brief Event type enum

373
include/cppkafka/utils/consumer_dispatcher.h
Normal file
@@ -0,0 +1,373 @@
|
/*
|
||||||
|
* Copyright (c) 2017, Matias Fontanini
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CPPKAFKA_CONSUMER_DISPATCHER_H
|
||||||
|
#define CPPKAFKA_CONSUMER_DISPATCHER_H
|
||||||
|
|
||||||
|
#include <tuple>
|
||||||
|
#include "../consumer.h"
|
||||||
|
#include "backoff_performer.h"
|
||||||
|
|
||||||
|
namespace cppkafka {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Helper to perform pattern matching when consuming messages
|
||||||
|
*
|
||||||
|
* As the way to consume messages requires you to:
|
||||||
|
*
|
||||||
|
* * Poll for a message
|
||||||
|
* * Check if it's not null
|
||||||
|
* * Check if it's an error (optionally handling EOF as a non error)
|
||||||
|
* * Process the message
|
||||||
|
*
|
||||||
|
* This class introduces a pattern matching based approach to consuming messages
|
||||||
|
* so the usual loop is simplified away and you can process messages without
|
||||||
|
* having to check for all those cases.
|
||||||
|
*
|
||||||
|
* When calling BasicConsumerDispatcher::run, a list of callbacks has to be provided.
|
||||||
|
* These will handle each case (message, timeout, error, eof), allowing you to
|
||||||
|
* only provide what you need. The only callback that is required is the message one.
|
||||||
|
* For the rest, the following actions will be performed as defaults:
|
||||||
|
*
|
||||||
|
* * Timeout: ignore
|
||||||
|
* * EOF: ignore
|
||||||
|
* * Error (not an EOF error): throw a ConsumerException exception
|
||||||
|
*
|
||||||
|
* The signature for each callback should be as following (or compatible)
|
||||||
|
*
|
||||||
|
* * Message callback, either:
|
||||||
|
* - void(Message)
|
||||||
|
* - Message(Message). In this case if the message is returned, it will be buffered
|
||||||
|
* while calling the throttle callback until the message is actually processed.
|
||||||
|
* * Timeout: void(BasicConsumerDispatcher::Timeout)
|
||||||
|
* * Error: void(Error)
|
||||||
|
* * EOF: void(BasicConsumerDispatcher::EndOfFile, TopicPartition)
|
||||||
|
*/
|
||||||
|
template <typename ConsumerType>
|
||||||
|
class CPPKAFKA_API BasicConsumerDispatcher {
|
||||||
|
public:
|
||||||
|
/**
|
||||||
|
* Tag to indicate a timeout occurred
|
||||||
|
*/
|
||||||
|
struct Timeout {};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tag to indicate end of file was reached on a partition being consumed
|
||||||
|
*/
|
||||||
|
struct EndOfFile {};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Tag to indicate end of file was reached on a partition being consumed
|
||||||
|
*/
|
||||||
|
struct Throttle {};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tag to indicate there was some event processed (message, timeout, error, etc)
|
||||||
|
*/
|
||||||
|
struct Event {};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Constructs a consumer dispatcher over the given consumer
|
||||||
|
*
|
||||||
|
* \param consumer The consumer to be used
|
||||||
|
*/
|
||||||
|
BasicConsumerDispatcher(ConsumerType& consumer);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Consumes messages dispatching events to the appropriate callack
|
||||||
|
*
|
||||||
|
* This will loop until BasicConsumerDispatcher::stop is called
|
||||||
|
*
|
||||||
|
* \param args The list of callbacks to be executed
|
||||||
|
*/
|
||||||
|
template <typename... Args>
|
||||||
|
void run(const Args&... args);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Stops consumption
|
||||||
|
*
|
||||||
|
* Note that as this is synchronous, if there's any poll operations currently in
|
||||||
|
* progress, then this will stop after the current call returns
|
||||||
|
*/
|
||||||
|
void stop();
|
||||||
|
private:
|
||||||
|
// Define the types we need for each type of callback
|
||||||
|
using OnMessageArgs = std::tuple<Message>;
|
||||||
|
using OnErrorArgs = std::tuple<Error>;
|
||||||
|
using OnEofArgs = std::tuple<EndOfFile, TopicPartition>;
|
||||||
|
using OnTimeoutArgs = std::tuple<Timeout>;
|
||||||
|
using OnEventArgs = std::tuple<Event>;
|
||||||
|
|
||||||
|
static void handle_error(Error error);
|
||||||
|
static void handle_eof(EndOfFile, const TopicPartition& /*topic_partition*/) { }
|
||||||
|
static void handle_timeout(Timeout) { }
|
||||||
|
static void handle_event(Event) { }
|
||||||
|
|
||||||
|
template <typename Functor>
|
||||||
|
void handle_throttle(Throttle, const Functor& callback, Message msg) {
|
||||||
|
BackoffPerformer{}.perform([&]() {
|
||||||
|
if (!running_) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
msg = callback(std::move(msg));
|
||||||
|
if (msg) {
|
||||||
|
// Poll so we send heartbeats to the brokers
|
||||||
|
consumer_.poll();
|
||||||
|
}
|
||||||
|
return !msg;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simple RAII wrapper for pausing/resuming
|
||||||
|
template <typename C>
|
||||||
|
class Pauser {
|
||||||
|
public:
|
||||||
|
Pauser(C& consumer, const TopicPartitionList& topic_partitions)
|
||||||
|
: consumer_(consumer), topic_partitions_(topic_partitions) {
|
||||||
|
consumer_.pause_partitions(topic_partitions_);
|
||||||
|
}
|
||||||
|
|
||||||
|
~Pauser() {
|
||||||
|
consumer_.resume_partitions(topic_partitions_);
|
||||||
|
}
|
||||||
|
|
||||||
|
Pauser(const Pauser&) = delete;
|
||||||
|
Pauser& operator=(const Pauser&) = delete;
|
||||||
|
private:
|
||||||
|
C& consumer_;
|
||||||
|
TopicPartitionList topic_partitions_;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Traits and template helpers
|
||||||
|
|
||||||
|
// Finds whether type T accepts arguments of types Args...
|
||||||
|
template <typename T, typename... Args>
|
||||||
|
struct takes_arguments {
|
||||||
|
using yes = double;
|
||||||
|
using no = bool;
|
||||||
|
|
||||||
|
template <typename Functor>
|
||||||
|
static yes test(decltype(std::declval<Functor&>()(std::declval<Args>()...))*);
|
||||||
|
template <typename Functor>
|
||||||
|
static no test(...);
|
||||||
|
|
||||||
|
static constexpr bool value = sizeof(test<T>(nullptr)) == sizeof(yes);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Specialization for tuple
|
||||||
|
template <typename T, typename... Args>
|
||||||
|
struct takes_arguments<T, std::tuple<Args...>> : takes_arguments<T, Args...> {
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
struct identity {
|
||||||
|
using type = T;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Placeholder to indicate a type wasn't found
|
||||||
|
struct type_not_found {
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
// find_type: given a tuple of types and a list of functors, finds the functor
|
||||||
|
// type that accepts the given tuple types as parameters
|
||||||
|
template <typename Tuple, typename Functor, typename... Functors>
|
||||||
|
struct find_type_helper {
|
||||||
|
using type = typename std::conditional<takes_arguments<Functor, Tuple>::value,
|
||||||
|
identity<Functor>,
|
||||||
|
find_type_helper<Tuple, Functors...>
|
||||||
|
>::type::type;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename Tuple>
|
||||||
|
struct find_type_helper<Tuple, type_not_found> {
|
||||||
|
using type = type_not_found;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename Tuple, typename... Functors>
|
||||||
|
struct find_type {
|
||||||
|
using type = typename find_type_helper<Tuple, Functors..., type_not_found>::type;
|
||||||
|
};
|
||||||
|
|
||||||
|
// find_functor: given a Functor and a template parameter pack of functors, finds
|
||||||
|
// the one that matches the given type
|
||||||
|
template <typename Functor>
|
||||||
|
struct find_functor_helper {
|
||||||
|
template <typename... Functors>
|
||||||
|
static const Functor& find(const Functor& arg, Functors&&...) {
|
||||||
|
return arg;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Head, typename... Functors>
|
||||||
|
static typename std::enable_if<!std::is_same<Head, Functor>::value, const Functor&>::type
|
||||||
|
find(const Head&, const Functors&... functors) {
|
||||||
|
return find(functors...);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename Functor, typename... Args>
|
||||||
|
const Functor& find_functor(const Args&... args) {
|
||||||
|
return find_functor_helper<Functor>::find(args...);
|
||||||
|
}
// Finds the first functor that accepts the parameters in a tuple and returns it. If no
// such functor is found, a static assertion will occur
template <typename Tuple, typename... Functors>
const typename find_type<Tuple, Functors...>::type&
find_matching_functor(const Functors&... functors) {
    using type = typename find_type<Tuple, Functors...>::type;
    static_assert(!std::is_same<type_not_found, type>::value, "Valid functor not found");
    return find_functor<type>(functors...);
}

// Check that a given functor matches at least one of the expected signatures
template <typename Functor>
void check_callback_matches(const Functor& functor) {
    static_assert(
        !std::is_same<type_not_found,
                      typename find_type<OnMessageArgs, Functor>::type>::value ||
        !std::is_same<type_not_found,
                      typename find_type<OnEofArgs, Functor>::type>::value ||
        !std::is_same<type_not_found,
                      typename find_type<OnTimeoutArgs, Functor>::type>::value ||
        !std::is_same<type_not_found,
                      typename find_type<OnErrorArgs, Functor>::type>::value ||
        !std::is_same<type_not_found,
                      typename find_type<OnEventArgs, Functor>::type>::value,
        "Callback doesn't match any of the expected signatures"
    );
}

// Base case for the recursion
void check_callbacks_match() {

}

// Check that all given functors match at least one of the expected signatures
template <typename Functor, typename... Functors>
void check_callbacks_match(const Functor& functor, const Functors&... functors) {
    check_callback_matches(functor);
    check_callbacks_match(functors...);
}

template <typename Functor, typename... Functors>
auto process_message(const Functor& callback, Message msg, const Functors&...)
-> typename std::enable_if<std::is_same<void, decltype(callback(std::move(msg)))>::value,
                           void>::type {
    callback(std::move(msg));
}

template <typename Functor, typename... Functors>
auto process_message(const Functor& callback, Message msg, const Functors&... functors)
-> typename std::enable_if<std::is_same<Message, decltype(callback(std::move(msg)))>::value,
                           void>::type {
    const auto throttle_ptr = &BasicConsumerDispatcher::handle_throttle<Functor>;
    const auto default_throttler = std::bind(throttle_ptr, this, std::placeholders::_1,
                                             std::placeholders::_2, std::placeholders::_3);

    using OnThrottleArgs = std::tuple<Throttle, const Functor&, Message>;
    const auto on_throttle = find_matching_functor<OnThrottleArgs>(functors...,
                                                                   default_throttler);

    msg = callback(std::move(msg));
    // The callback rejected the message: start throttling
    if (msg) {
        // Pause consumption. When the pauser goes out of scope, it will resume it
        Pauser<ConsumerType> pauser(consumer_, consumer_.get_assignment());

        // Handle throttling on this message
        on_throttle(Throttle{}, callback, std::move(msg));
    }
}

ConsumerType& consumer_;
bool running_;
};

using ConsumerDispatcher = BasicConsumerDispatcher<Consumer>;

template <typename ConsumerType>
BasicConsumerDispatcher<ConsumerType>::BasicConsumerDispatcher(ConsumerType& consumer)
: consumer_(consumer) {

}

template <typename ConsumerType>
void BasicConsumerDispatcher<ConsumerType>::stop() {
    running_ = false;
}

template <typename ConsumerType>
void BasicConsumerDispatcher<ConsumerType>::handle_error(Error error) {
    throw ConsumerException(error);
}

template <typename ConsumerType>
template <typename... Args>
void BasicConsumerDispatcher<ConsumerType>::run(const Args&... args) {
    using self = BasicConsumerDispatcher<ConsumerType>;

    // Make sure all callbacks match one of the signatures. Otherwise users could provide
    // bogus callbacks that would never be executed
    check_callbacks_match(args...);

    // This one is required
    const auto on_message = find_matching_functor<OnMessageArgs>(args...);

    // For the rest, append our own implementation at the end as a fallback
    const auto on_error = find_matching_functor<OnErrorArgs>(args..., &self::handle_error);
    const auto on_eof = find_matching_functor<OnEofArgs>(args..., &self::handle_eof);
    const auto on_timeout = find_matching_functor<OnTimeoutArgs>(args..., &self::handle_timeout);
    const auto on_event = find_matching_functor<OnEventArgs>(args..., &self::handle_event);

    running_ = true;
    while (running_) {
        Message msg = consumer_.poll();
        if (!msg) {
            on_timeout(Timeout{});
        }
        else if (msg.get_error()) {
            if (msg.is_eof()) {
                on_eof(EndOfFile{}, { msg.get_topic(), msg.get_partition(), msg.get_offset() });
            }
            else {
                on_error(msg.get_error());
            }
        }
        else {
            process_message(on_message, std::move(msg), args...);
        }
        on_event(Event{});
    }
}

} // cppkafka

#endif // CPPKAFKA_CONSUMER_DISPATCHER_H
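The dispatch logic above selects each handler purely by its signature, so run() accepts any subset of callbacks in any order; only the message handler is mandatory. A minimal usage sketch, assuming the cppkafka.h umbrella header; the broker address, group id and topic name are placeholders:

#include <iostream>
#include <cppkafka/cppkafka.h>

int main() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" }, // placeholder broker
        { "group.id", "example_group" }
    };
    cppkafka::Consumer consumer(config);
    consumer.subscribe({ "my_topic" });

    cppkafka::ConsumerDispatcher dispatcher(consumer);
    dispatcher.run(
        // Matches OnMessageArgs (required)
        [&](cppkafka::Message msg) {
            std::cout << "payload: " << msg.get_payload() << "\n";
        },
        // Matches OnErrorArgs; overrides the default handle_error, which throws
        [](cppkafka::Error error) {
            std::cout << "error: " << error << "\n";
        },
        // Matches OnEofArgs
        [](cppkafka::ConsumerDispatcher::EndOfFile, const cppkafka::TopicPartition&) {
            std::cout << "reached EOF on a partition\n";
        }
    );
}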
130  include/cppkafka/utils/poll_interface.h  Normal file
@@ -0,0 +1,130 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_POLL_INTERFACE_H
#define CPPKAFKA_POLL_INTERFACE_H

#include "../consumer.h"

namespace cppkafka {

/**
 * \interface PollInterface
 *
 * \brief Interface defining polling methods for the Consumer class
 */
struct PollInterface {
    virtual ~PollInterface() = default;

    /**
     * \brief Get the underlying consumer controlled by this strategy
     *
     * \return A reference to the consumer instance
     */
    virtual Consumer& get_consumer() = 0;

    /**
     * \brief Sets the timeout for polling functions
     *
     * This calls Consumer::set_timeout
     *
     * \param timeout The timeout to be set
     */
    virtual void set_timeout(std::chrono::milliseconds timeout) = 0;

    /**
     * \brief Gets the timeout for polling functions
     *
     * This calls Consumer::get_timeout
     *
     * \return The timeout
     */
    virtual std::chrono::milliseconds get_timeout() = 0;

    /**
     * \brief Polls all assigned partitions for new messages in round-robin fashion
     *
     * Each call to poll() will first consume from the global event queue and, if there are
     * no pending events, will attempt to consume from all partitions until a valid message is found.
     * The timeout used on this call will be the one configured via PollInterface::set_timeout.
     *
     * \return A message. The returned message *might* be empty. It's necessary to check
     *         that it's valid before using it.
     *
     * \remark You need to call poll() or poll_batch() periodically as a keep-alive mechanism,
     *         otherwise the broker will think this consumer is down and will trigger a rebalance
     *         (if using dynamic subscription)
     */
    virtual Message poll() = 0;

    /**
     * \brief Polls for new messages
     *
     * Same as the other overload of PollInterface::poll but the provided
     * timeout will be used instead of the one configured on this Consumer.
     *
     * \param timeout The timeout to be used on this call
     */
    virtual Message poll(std::chrono::milliseconds timeout) = 0;

    /**
     * \brief Polls all assigned partitions for a batch of new messages in round-robin fashion
     *
     * Each call to poll_batch() will first attempt to consume from the global event queue
     * and, if the maximum batch number has not yet been filled, will attempt to fill it by
     * reading the remaining messages from each partition.
     *
     * \param max_batch_size The maximum amount of messages expected
     *
     * \return A list of messages
     *
     * \remark You need to call poll() or poll_batch() periodically as a keep-alive mechanism,
     *         otherwise the broker will think this consumer is down and will trigger a rebalance
     *         (if using dynamic subscription)
     */
    virtual MessageList poll_batch(size_t max_batch_size) = 0;

    /**
     * \brief Polls all assigned partitions for a batch of new messages in round-robin fashion
     *
     * Same as the other overload of PollInterface::poll_batch but the provided
     * timeout will be used instead of the one configured on this Consumer.
     *
     * \param max_batch_size The maximum amount of messages expected
     *
     * \param timeout The timeout for this operation
     *
     * \return A list of messages
     */
    virtual MessageList poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout) = 0;
};

} // cppkafka

#endif // CPPKAFKA_POLL_INTERFACE_H
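Because every strategy adapter implements this interface, polling code can be written once against PollInterface and reused unchanged. A minimal sketch under that assumption (the iteration count and processing body are placeholders); the concrete strategy passed in could be the RoundRobinPollStrategy added later in this changeset:

#include <chrono>
#include <cppkafka/utils/poll_interface.h>

// Works with any strategy implementing PollInterface
void drain(cppkafka::PollInterface& poller) {
    poller.set_timeout(std::chrono::milliseconds(100));
    for (int i = 0; i < 1000; ++i) { // polling also acts as the keep-alive
        cppkafka::Message msg = poller.poll();
        if (msg && !msg.get_error()) {
            // process msg.get_payload() here
        }
    }
}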
150  include/cppkafka/utils/poll_strategy_base.h  Normal file
@@ -0,0 +1,150 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_POLL_STRATEGY_BASE_H
#define CPPKAFKA_POLL_STRATEGY_BASE_H

#include <map>
#include <boost/any.hpp>
#include "../queue.h"
#include "../topic_partition_list.h"
#include "poll_interface.h"

namespace cppkafka {

/**
 * \brief Contains a partition queue and generic metadata which can be used to store
 *        related (user-specific) information.
 */
struct QueueData {
    Queue queue;
    boost::any metadata;
};

/**
 * \class PollStrategyBase
 *
 * \brief Base implementation of the PollInterface
 */
class PollStrategyBase : public PollInterface {
public:
    using QueueMap = std::map<TopicPartition, QueueData>;

    /**
     * \brief Constructor
     *
     * \param consumer A reference to the polled consumer instance
     */
    explicit PollStrategyBase(Consumer& consumer);

    /**
     * \brief Destructor
     */
    ~PollStrategyBase();

    /**
     * \sa PollInterface::set_timeout
     */
    void set_timeout(std::chrono::milliseconds timeout) override;

    /**
     * \sa PollInterface::get_timeout
     */
    std::chrono::milliseconds get_timeout() override;

    /**
     * \sa PollInterface::get_consumer
     */
    Consumer& get_consumer() final;

protected:
    /**
     * \brief Get the queues from all assigned partitions
     *
     * \return A map of queues indexed by partition
     */
    QueueMap& get_partition_queues();

    /**
     * \brief Get the main consumer queue which services the underlying Consumer object
     *
     * \return The consumer queue
     */
    QueueData& get_consumer_queue();

    /**
     * \brief Reset the internal state of the queues.
     *
     * Use this function to reset the state of any polling strategy or algorithm.
     *
     * \remark This function gets called by on_assignment(), on_revocation() and on_rebalance_error()
     */
    virtual void reset_state();

    /**
     * \brief Function to be called when a new partition assignment takes place
     *
     * This method contains a default implementation. It adds all the new queues belonging
     * to the provided partition list and calls reset_state().
     *
     * \param partitions Assigned topic partitions
     */
    virtual void on_assignment(TopicPartitionList& partitions);

    /**
     * \brief Function to be called when an old partition assignment gets revoked
     *
     * This method contains a default implementation. It removes all the queues
     * belonging to the provided partition list and calls reset_state().
     *
     * \param partitions Revoked topic partitions
     */
    virtual void on_revocation(const TopicPartitionList& partitions);

    /**
     * \brief Function to be called when a topic rebalance error happens
     *
     * This method contains a default implementation. Calls reset_state().
     *
     * \param error The rebalance error
     */
    virtual void on_rebalance_error(Error error);

private:
    Consumer& consumer_;
    QueueData consumer_queue_;
    QueueMap partition_queues_;
    Consumer::AssignmentCallback assignment_callback_;
    Consumer::RevocationCallback revocation_callback_;
    Consumer::RebalanceErrorCallback rebalance_error_callback_;
};

} // cppkafka

#endif // CPPKAFKA_POLL_STRATEGY_BASE_H
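QueueData::metadata is a boost::any, so a strategy can hang arbitrary per-partition state off each queue. A small sketch of storing and reading such state; the counter type and its meaning are assumptions for illustration:

#include <cstddef>
#include <boost/any.hpp>
#include <cppkafka/utils/poll_strategy_base.h>

// The stored type must match the any_cast exactly; a mismatch throws
// boost::bad_any_cast.
void bump(cppkafka::QueueData& data) {
    if (data.metadata.empty()) {
        data.metadata = std::size_t(0);  // first use: initialize the counter
    }
    data.metadata = boost::any_cast<std::size_t>(data.metadata) + 1;
}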
135  include/cppkafka/utils/roundrobin_poll_strategy.h  Normal file
@@ -0,0 +1,135 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_ROUNDROBIN_POLL_STRATEGY_H
#define CPPKAFKA_ROUNDROBIN_POLL_STRATEGY_H

#include <map>
#include <string>
#include "../exceptions.h"
#include "../consumer.h"
#include "../queue.h"
#include "poll_strategy_base.h"

namespace cppkafka {

/**
 * \brief This adapter changes the default polling strategy of the Consumer into a fair round-robin
 *        polling mechanism.
 *
 * The default librdkafka (and cppkafka) poll() and poll_batch() behavior is to consume batches of
 * messages from each partition in turn. For performance reasons, librdkafka pre-fetches batches
 * of messages from the kafka broker (one batch from each partition), and stores them locally in
 * partition queues. Since all the internal partition queues are forwarded by default onto the
 * group consumer queue (one per consumer), these batches end up being polled and consumed in the
 * same sequence order.
 * This adapter allows fair round-robin polling of all assigned partitions, one message at a time
 * (or one batch at a time if poll_batch() is used). Note that poll_batch() has nothing to do with
 * the internal batching mechanism of librdkafka.
 *
 * Example code on how to use this:
 *
 * \code
 * // Create a consumer
 * Consumer consumer(...);
 * consumer.subscribe({ "my_topic" });
 *
 * // Optionally set the callbacks. This must be done *BEFORE* creating the strategy adapter
 * consumer.set_assignment_callback(...);
 * consumer.set_revocation_callback(...);
 * consumer.set_rebalance_error_callback(...);
 *
 * // Create the adapter and use it for polling
 * RoundRobinPollStrategy poll_strategy(consumer);
 *
 * while (true) {
 *     // Poll each partition in turn
 *     Message msg = poll_strategy.poll();
 *     if (msg) {
 *         // process valid message
 *     }
 * }
 * \endcode
 *
 * \warning Calling poll() or poll_batch() directly on the Consumer object while using this
 *          adapter will lead to undesired results, since the RoundRobinPollStrategy modifies
 *          the internal queuing mechanism of the Consumer instance it owns.
 */
class RoundRobinPollStrategy : public PollStrategyBase {
public:
    RoundRobinPollStrategy(Consumer& consumer);

    ~RoundRobinPollStrategy();

    /**
     * \sa PollInterface::poll
     */
    Message poll() override;

    /**
     * \sa PollInterface::poll
     */
    Message poll(std::chrono::milliseconds timeout) override;

    /**
     * \sa PollInterface::poll_batch
     */
    MessageList poll_batch(size_t max_batch_size) override;

    /**
     * \sa PollInterface::poll_batch
     */
    MessageList poll_batch(size_t max_batch_size,
                           std::chrono::milliseconds timeout) override;

protected:
    /**
     * \sa PollStrategyBase::reset_state
     */
    void reset_state() final;

    QueueData& get_next_queue();

private:
    void consume_batch(Queue& queue,
                       MessageList& messages,
                       ssize_t& count,
                       std::chrono::milliseconds timeout);

    void restore_forwarding();

    // Members
    QueueMap::iterator queue_iter_;
};

} // cppkafka

#endif // CPPKAFKA_ROUNDROBIN_POLL_STRATEGY_H
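Custom strategies can also be layered on top of the round-robin one. A hypothetical sketch that seeds per-partition state whenever a new assignment arrives; the class, its name and the counter are illustrative and not part of cppkafka:

#include <cstddef>
#include <cppkafka/utils/roundrobin_poll_strategy.h>

class CountingPollStrategy : public cppkafka::RoundRobinPollStrategy {
public:
    using RoundRobinPollStrategy::RoundRobinPollStrategy; // inherit the Consumer& ctor

protected:
    void on_assignment(cppkafka::TopicPartitionList& partitions) override {
        // Let the base class create the partition queues and reset its iterator
        RoundRobinPollStrategy::on_assignment(partitions);
        for (auto& entry : get_partition_queues()) {
            entry.second.metadata = std::size_t(0); // messages seen, per partition
        }
    }
};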
@@ -5,7 +5,9 @@ set(SOURCES
     exceptions.cpp
     topic.cpp
     buffer.cpp
+    queue.cpp
     message.cpp
+    message_internal.cpp
     topic_partition.cpp
     topic_partition_list.cpp
     metadata.cpp
@@ -16,7 +18,10 @@ set(SOURCES
     producer.cpp
     consumer.cpp
+
+    utils/backoff_performer.cpp
     utils/backoff_committer.cpp
+    utils/poll_strategy_base.cpp
+    utils/roundrobin_poll_strategy.cpp
 )

 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/cppkafka)
@@ -25,7 +30,14 @@ include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${RDKAFKA_INCLUDE_DIR})
 add_library(cppkafka ${CPPKAFKA_LIBRARY_TYPE} ${SOURCES})
 set_target_properties(cppkafka PROPERTIES VERSION ${CPPKAFKA_VERSION}
                                           SOVERSION ${CPPKAFKA_VERSION})
-target_link_libraries(cppkafka ${RDKAFKA_LIBRARY})
+
+set(DEPENDENCIES ${RDKAFKA_LIBRARY})
+if (WIN32)
+    # On windows ntohs and related are in ws2_32
+    set(DEPENDENCIES ${DEPENDENCIES} ws2_32.lib)
+endif()
+target_link_libraries(cppkafka ${DEPENDENCIES})
+target_include_directories(cppkafka PUBLIC ${PROJECT_SOURCE_DIR}/include)
+
 install(
     TARGETS cppkafka
@@ -40,10 +40,8 @@ using std::map;
 using std::move;
 using std::vector;
 using std::initializer_list;
-
-using boost::optional;
-
 using std::chrono::milliseconds;
+using boost::optional;

 namespace cppkafka {
@@ -52,66 +50,56 @@ namespace cppkafka {
 void delivery_report_callback_proxy(rd_kafka_t*, const rd_kafka_message_t* msg, void *opaque) {
     Producer* handle = static_cast<Producer*>(opaque);
     Message message = Message::make_non_owning((rd_kafka_message_t*)msg);
-    const auto& callback = handle->get_configuration().get_delivery_report_callback();
-    if (callback) {
-        callback(*handle, message);
-    }
+    CallbackInvoker<Configuration::DeliveryReportCallback>
+        ("delivery report", handle->get_configuration().get_delivery_report_callback(), handle)
+        (*handle, message);
 }

 void offset_commit_callback_proxy(rd_kafka_t*, rd_kafka_resp_err_t err,
                                   rd_kafka_topic_partition_list_t *offsets, void *opaque) {
     Consumer* handle = static_cast<Consumer*>(opaque);
     TopicPartitionList list = offsets ? convert(offsets) : TopicPartitionList{};
-    const auto& callback = handle->get_configuration().get_offset_commit_callback();
-    if (callback) {
-        callback(*handle, err, list);
-    }
+    CallbackInvoker<Configuration::OffsetCommitCallback>
+        ("offset commit", handle->get_configuration().get_offset_commit_callback(), handle)
+        (*handle, err, list);
 }

 void error_callback_proxy(rd_kafka_t*, int err, const char *reason, void *opaque) {
     KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
-    const auto& callback = handle->get_configuration().get_error_callback();
-    if (callback) {
-        callback(*handle, err, reason);
-    }
+    CallbackInvoker<Configuration::ErrorCallback>
+        ("error", handle->get_configuration().get_error_callback(), handle)
+        (*handle, err, reason);
 }

 void throttle_callback_proxy(rd_kafka_t*, const char* broker_name,
                              int32_t broker_id, int throttle_time_ms, void *opaque) {
     KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
-    const auto& callback = handle->get_configuration().get_throttle_callback();
-    if (callback) {
-        callback(*handle, broker_name, broker_id, milliseconds(throttle_time_ms));
-    }
+    CallbackInvoker<Configuration::ThrottleCallback>
+        ("throttle", handle->get_configuration().get_throttle_callback(), handle)
+        (*handle, broker_name, broker_id, milliseconds(throttle_time_ms));
 }

 void log_callback_proxy(const rd_kafka_t* h, int level,
                         const char* facility, const char* message) {
     KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(rd_kafka_opaque(h));
-    const auto& callback = handle->get_configuration().get_log_callback();
-    if (callback) {
-        callback(*handle, level, facility, message);
-    }
+    CallbackInvoker<Configuration::LogCallback>
+        ("log", handle->get_configuration().get_log_callback(), nullptr)
+        (*handle, level, facility, message);
 }

 int stats_callback_proxy(rd_kafka_t*, char *json, size_t json_len, void *opaque) {
     KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
-    const auto& callback = handle->get_configuration().get_stats_callback();
-    if (callback) {
-        callback(*handle, string(json, json + json_len));
-    }
+    CallbackInvoker<Configuration::StatsCallback>
+        ("statistics", handle->get_configuration().get_stats_callback(), handle)
+        (*handle, string(json, json + json_len));
     return 0;
 }

 int socket_callback_proxy(int domain, int type, int protocol, void* opaque) {
     KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
-    const auto& callback = handle->get_configuration().get_socket_callback();
-    if (callback) {
-        return callback(domain, type, protocol);
-    }
-    else {
-        return -1;
-    }
+    return CallbackInvoker<Configuration::SocketCallback>
+        ("socket", handle->get_configuration().get_socket_callback(), handle)
+        (domain, type, protocol);
 }

 // Configuration
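The diff above routes every user callback through a CallbackInvoker helper from detail/callback_invoker.h, whose definition is not part of this compare view. A minimal sketch of the pattern under stated assumptions: the real helper also catches any exception the user callback throws and reports it through the handle's error and log callbacks, and its member and parameter types may differ; when no callback is set, this sketch simply returns a value-initialized result.

#include <functional>
#include <utility>

template <typename Func>
class CallbackInvoker {
public:
    using RetType = typename Func::result_type;

    CallbackInvoker(const char* name, const Func& callback, void* handle)
    : name_(name), callback_(callback), handle_(handle) {}

    // True when a user callback is actually set
    explicit operator bool() const { return static_cast<bool>(callback_); }

    template <typename... Args>
    RetType operator()(Args&&... args) const {
        if (callback_) {
            return callback_(std::forward<Args>(args)...);
        }
        return RetType(); // no callback set: fall back to a default value
    }

private:
    const char* name_;  // used for diagnostics when a callback throws
    const Func& callback_;
    void* handle_;      // where errors get reported in the real helper
};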
153  src/consumer.cpp
@@ -26,21 +26,38 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
+#include <sstream>
+#include <algorithm>
+#include <cctype>
 #include "consumer.h"
 #include "exceptions.h"
+#include "logging.h"
 #include "configuration.h"
 #include "topic_partition_list.h"
+#include "detail/callback_invoker.h"

 using std::vector;
 using std::string;
 using std::move;
 using std::make_tuple;
+using std::ostringstream;
 using std::chrono::milliseconds;
+using std::toupper;
+using std::equal;

 namespace cppkafka {

+// See: https://github.com/edenhill/librdkafka/issues/1792
+const int rd_kafka_queue_refcount_bug_version = 0x000b0500;
+Queue get_queue(rd_kafka_queue_t* handle) {
+    if (rd_kafka_version() <= rd_kafka_queue_refcount_bug_version) {
+        return Queue::make_non_owning(handle);
+    }
+    else {
+        return Queue(handle);
+    }
+}
+
 void Consumer::rebalance_proxy(rd_kafka_t*, rd_kafka_resp_err_t error,
                                rd_kafka_topic_partition_list_t *partitions, void *opaque) {
     TopicPartitionList list = convert(partitions);
@@ -65,7 +82,29 @@ Consumer::Consumer(Configuration config)
 }

 Consumer::~Consumer() {
-    close();
+    try {
+        // make sure to destroy the function closures. in case they hold kafka
+        // objects, they will need to be destroyed before we destroy the handle
+        assignment_callback_ = nullptr;
+        revocation_callback_ = nullptr;
+        rebalance_error_callback_ = nullptr;
+        close();
+    }
+    catch (const HandleException& ex) {
+        ostringstream error_msg;
+        error_msg << "Failed to close consumer [" << get_name() << "]: " << ex.what();
+        CallbackInvoker<Configuration::ErrorCallback> error_cb("error", get_configuration().get_error_callback(), this);
+        CallbackInvoker<Configuration::LogCallback> logger_cb("log", get_configuration().get_log_callback(), nullptr);
+        if (error_cb) {
+            error_cb(*this, static_cast<int>(ex.get_error().get_error()), error_msg.str());
+        }
+        else if (logger_cb) {
+            logger_cb(*this, static_cast<int>(LogLevel::LogErr), "cppkafka", error_msg.str());
+        }
+        else {
+            rd_kafka_log_print(get_handle(), static_cast<int>(LogLevel::LogErr), "cppkafka", error_msg.str().c_str());
+        }
+    }
 }

 void Consumer::set_assignment_callback(AssignmentCallback callback) {
@@ -93,11 +132,16 @@ void Consumer::unsubscribe() {
 }

 void Consumer::assign(const TopicPartitionList& topic_partitions) {
-    TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
+    rd_kafka_resp_err_t error;
     // If the list is empty, then we need to use a null pointer
-    auto handle = topic_partitions.empty() ? nullptr : topic_list_handle.get();
-    rd_kafka_resp_err_t error = rd_kafka_assign(get_handle(), handle);
-    check_error(error);
+    if (topic_partitions.empty()) {
+        error = rd_kafka_assign(get_handle(), nullptr);
+        check_error(error);
+    }
+    else {
+        TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
+        error = rd_kafka_assign(get_handle(), topic_list_handle.get());
+        check_error(error, topic_list_handle.get());
+    }
 }

 void Consumer::unassign() {
@@ -105,6 +149,22 @@ void Consumer::unassign() {
     check_error(error);
 }

+void Consumer::pause() {
+    pause_partitions(get_assignment());
+}
+
+void Consumer::resume() {
+    resume_partitions(get_assignment());
+}
+
+void Consumer::commit() {
+    commit(nullptr, false);
+}
+
+void Consumer::async_commit() {
+    commit(nullptr, true);
+}
+
 void Consumer::commit(const Message& msg) {
     commit(msg, false);
 }
@@ -114,11 +174,11 @@ void Consumer::async_commit(const Message& msg) {
 }

 void Consumer::commit(const TopicPartitionList& topic_partitions) {
-    commit(topic_partitions, false);
+    commit(&topic_partitions, false);
 }

 void Consumer::async_commit(const TopicPartitionList& topic_partitions) {
-    commit(topic_partitions, true);
+    commit(&topic_partitions, true);
 }

 KafkaHandleBase::OffsetTuple Consumer::get_offsets(const TopicPartition& topic_partition) const {
@@ -137,7 +197,7 @@ Consumer::get_offsets_committed(const TopicPartitionList& topic_partitions) const {
     TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
     rd_kafka_resp_err_t error = rd_kafka_committed(get_handle(), topic_list_handle.get(),
                                                    static_cast<int>(get_timeout().count()));
-    check_error(error);
+    check_error(error, topic_list_handle.get());
     return convert(topic_list_handle);
 }

@@ -145,7 +205,7 @@ TopicPartitionList
 Consumer::get_offsets_position(const TopicPartitionList& topic_partitions) const {
     TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
     rd_kafka_resp_err_t error = rd_kafka_position(get_handle(), topic_list_handle.get());
-    check_error(error);
+    check_error(error, topic_list_handle.get());
     return convert(topic_list_handle);
 }
@@ -192,9 +252,43 @@ Message Consumer::poll() {
 }

 Message Consumer::poll(milliseconds timeout) {
-    rd_kafka_message_t* message = rd_kafka_consumer_poll(get_handle(),
-                                                         static_cast<int>(timeout.count()));
-    return message ? Message(message) : Message();
+    return rd_kafka_consumer_poll(get_handle(), static_cast<int>(timeout.count()));
 }
+
+MessageList Consumer::poll_batch(size_t max_batch_size) {
+    return poll_batch(max_batch_size, get_timeout());
+}
+
+MessageList Consumer::poll_batch(size_t max_batch_size, milliseconds timeout) {
+    vector<rd_kafka_message_t*> raw_messages(max_batch_size);
+    // Note that this will leak the queue when using rdkafka < 0.11.5 (see get_queue comment)
+    Queue queue(get_queue(rd_kafka_queue_get_consumer(get_handle())));
+    ssize_t result = rd_kafka_consume_batch_queue(queue.get_handle(), timeout.count(),
+                                                  raw_messages.data(), raw_messages.size());
+    if (result == -1) {
+        check_error(rd_kafka_last_error());
+        // on the off-chance that check_error() does not throw an error
+        return MessageList();
+    }
+    return MessageList(raw_messages.begin(), raw_messages.begin() + result);
+}
+
+Queue Consumer::get_main_queue() const {
+    Queue queue(get_queue(rd_kafka_queue_get_main(get_handle())));
+    queue.disable_queue_forwarding();
+    return queue;
+}
+
+Queue Consumer::get_consumer_queue() const {
+    return get_queue(rd_kafka_queue_get_consumer(get_handle()));
+}
+
+Queue Consumer::get_partition_queue(const TopicPartition& partition) const {
+    Queue queue(get_queue(rd_kafka_queue_get_partition(get_handle(),
+                                                       partition.get_topic().c_str(),
+                                                       partition.get_partition())));
+    queue.disable_queue_forwarding();
+    return queue;
+}

 void Consumer::close() {
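A short usage sketch of the new batch API; the topic name, batch size and timeout below are placeholders:

#include <chrono>
#include <cppkafka/consumer.h>

void consume_batches(cppkafka::Consumer& consumer) {
    consumer.subscribe({ "my_topic" });
    while (true) {
        // Up to 100 messages per call; an empty list just means the call timed out
        cppkafka::MessageList batch =
            consumer.poll_batch(100, std::chrono::milliseconds(500));
        for (cppkafka::Message& msg : batch) {
            if (!msg.get_error()) {
                // process msg.get_payload() here
            }
        }
    }
}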
@@ -204,36 +298,35 @@ void Consumer::close() {

 void Consumer::commit(const Message& msg, bool async) {
     rd_kafka_resp_err_t error;
-    error = rd_kafka_commit_message(get_handle(), msg.get_handle(),
-                                    async ? 1 : 0);
+    error = rd_kafka_commit_message(get_handle(), msg.get_handle(), async ? 1 : 0);
     check_error(error);
 }

-void Consumer::commit(const TopicPartitionList& topic_partitions, bool async) {
-    TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
+void Consumer::commit(const TopicPartitionList* topic_partitions, bool async) {
     rd_kafka_resp_err_t error;
-    error = rd_kafka_commit(get_handle(), topic_list_handle.get(), async ? 1 : 0);
-    check_error(error);
+    if (topic_partitions == nullptr) {
+        error = rd_kafka_commit(get_handle(), nullptr, async ? 1 : 0);
+        check_error(error);
+    }
+    else {
+        TopicPartitionsListPtr topic_list_handle = convert(*topic_partitions);
+        error = rd_kafka_commit(get_handle(), topic_list_handle.get(), async ? 1 : 0);
+        check_error(error, topic_list_handle.get());
+    }
 }

 void Consumer::handle_rebalance(rd_kafka_resp_err_t error,
                                 TopicPartitionList& topic_partitions) {
     if (error == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
-        if (assignment_callback_) {
-            assignment_callback_(topic_partitions);
-        }
+        CallbackInvoker<AssignmentCallback>("assignment", assignment_callback_, this)(topic_partitions);
         assign(topic_partitions);
     }
     else if (error == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) {
-        if (revocation_callback_) {
-            revocation_callback_(topic_partitions);
-        }
+        CallbackInvoker<RevocationCallback>("revocation", revocation_callback_, this)(topic_partitions);
         unassign();
     }
     else {
-        if (rebalance_error_callback_) {
-            rebalance_error_callback_(error);
-        }
+        CallbackInvoker<RebalanceErrorCallback>("rebalance error", rebalance_error_callback_, this)(error);
         unassign();
     }
 }
@@ -97,4 +97,26 @@ Error HandleException::get_error() const {
     return error_;
 }

+// ConsumerException
+
+ConsumerException::ConsumerException(Error error)
+: Exception(error.to_string()), error_(error) {
+
+}
+
+Error ConsumerException::get_error() const {
+    return error_;
+}
+
+// QueueException
+
+QueueException::QueueException(Error error)
+: Exception(error.to_string()), error_(error) {
+
+}
+
+Error QueueException::get_error() const {
+    return error_;
+}
+
 } // cppkafka
@@ -48,7 +48,7 @@ namespace cppkafka {
 const milliseconds KafkaHandleBase::DEFAULT_TIMEOUT{1000};

 KafkaHandleBase::KafkaHandleBase(Configuration config)
-: handle_(nullptr, nullptr), timeout_ms_(DEFAULT_TIMEOUT), config_(move(config)) {
+: timeout_ms_(DEFAULT_TIMEOUT), config_(move(config)), handle_(nullptr, nullptr) {
     auto& maybe_config = config_.get_default_topic_configuration();
     if (maybe_config) {
         maybe_config->set_as_opaque();
@@ -61,14 +61,22 @@ void KafkaHandleBase::pause_partitions(const TopicPartitionList& topic_partitions) {
     TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
     rd_kafka_resp_err_t error = rd_kafka_pause_partitions(get_handle(),
                                                           topic_list_handle.get());
-    check_error(error);
+    check_error(error, topic_list_handle.get());
+}
+
+void KafkaHandleBase::pause(const std::string& topic) {
+    pause_partitions(convert(topic, get_metadata(get_topic(topic)).get_partitions()));
 }

 void KafkaHandleBase::resume_partitions(const TopicPartitionList& topic_partitions) {
     TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
     rd_kafka_resp_err_t error = rd_kafka_resume_partitions(get_handle(),
                                                            topic_list_handle.get());
-    check_error(error);
+    check_error(error, topic_list_handle.get());
+}
+
+void KafkaHandleBase::resume(const std::string& topic) {
+    resume_partitions(convert(topic, get_metadata(get_topic(topic)).get_partitions()));
 }

 void KafkaHandleBase::set_timeout(milliseconds timeout) {
@@ -145,7 +153,7 @@ KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queries) const {
     const int timeout = static_cast<int>(timeout_ms_.count());
     rd_kafka_resp_err_t result = rd_kafka_offsets_for_times(handle_.get(), topic_list_handle.get(),
                                                             timeout);
-    check_error(result);
+    check_error(result, topic_list_handle.get());
     return convert(topic_list_handle);
 }

@@ -165,6 +173,10 @@ int KafkaHandleBase::get_out_queue_length() const {
     return rd_kafka_outq_len(handle_.get());
 }

+void KafkaHandleBase::yield() const {
+    rd_kafka_yield(handle_.get());
+}
+
 void KafkaHandleBase::set_handle(rd_kafka_t* handle) {
     handle_ = HandlePtr(handle, &rd_kafka_destroy);
 }
@@ -172,7 +184,7 @@ void KafkaHandleBase::set_handle(rd_kafka_t* handle) {
 Topic KafkaHandleBase::get_topic(const string& name, rd_kafka_topic_conf_t* conf) {
     rd_kafka_topic_t* topic = rd_kafka_topic_new(get_handle(), name.data(), conf);
     if (!topic) {
-        throw HandleException(rd_kafka_errno2err(errno));
+        throw HandleException(rd_kafka_last_error());
     }
     return Topic(topic);
 }
@@ -216,6 +228,21 @@ void KafkaHandleBase::check_error(rd_kafka_resp_err_t error) const {
     }
 }

+void KafkaHandleBase::check_error(rd_kafka_resp_err_t error,
+                                  const rd_kafka_topic_partition_list_t* list_ptr) const {
+    if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
+        throw HandleException(error);
+    }
+    if (list_ptr) {
+        // check if any partition has errors; throw that partition's error
+        for (int i = 0; i < list_ptr->cnt; ++i) {
+            if (list_ptr->elems[i].err != RD_KAFKA_RESP_ERR_NO_ERROR) {
+                throw HandleException(list_ptr->elems[i].err);
+            }
+        }
+    }
+}
+
 rd_kafka_conf_t* KafkaHandleBase::get_configuration_handle() {
     return config_.get_handle();
 }
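A sketch of the new per-topic pause/resume convenience calls, which resolve the topic's partitions through the cluster metadata; "my_topic" is a placeholder:

#include <cppkafka/consumer.h>

void throttle_topic(cppkafka::Consumer& consumer) {
    consumer.pause("my_topic");
    // ... catch up without being fed more messages from this topic ...
    consumer.resume("my_topic");
}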
@@ -28,14 +28,10 @@
 */

 #include "message.h"
+#include "message_internal.h"

-using std::string;
-
 using std::chrono::milliseconds;

-using boost::optional;
-using boost::none_t;
-
 namespace cppkafka {

 void dummy_deleter(rd_kafka_message_t*) {
@@ -47,7 +43,8 @@ Message Message::make_non_owning(rd_kafka_message_t* handle) {
 }

 Message::Message()
-: handle_(nullptr, nullptr) {
+: handle_(nullptr, nullptr),
+  user_data_(nullptr) {

 }

@@ -63,59 +60,18 @@ Message::Message(rd_kafka_message_t* handle, NonOwningTag)

 Message::Message(HandlePtr handle)
 : handle_(move(handle)),
-  payload_((const Buffer::DataType*)handle_->payload, handle_->len),
-  key_((const Buffer::DataType*)handle_->key, handle_->key_len) {
+  payload_(handle_ ? Buffer((const Buffer::DataType*)handle_->payload, handle_->len) : Buffer()),
+  key_(handle_ ? Buffer((const Buffer::DataType*)handle_->key, handle_->key_len) : Buffer()),
+  user_data_(handle_ ? handle_->_private : nullptr) {
 }

-Error Message::get_error() const {
-    return handle_->err;
-}
-
-bool Message::is_eof() const {
-    return get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF;
-}
-
-int Message::get_partition() const {
-    return handle_->partition;
-}
-
-string Message::get_topic() const {
-    return rd_kafka_topic_name(handle_->rkt);
-}
-
-const Buffer& Message::get_payload() const {
-    return payload_;
-}
-
-const Buffer& Message::get_key() const {
-    return key_;
-}
-
-int64_t Message::get_offset() const {
-    return handle_->offset;
-}
-
-void* Message::get_private_data() const {
-    return handle_->_private;
-}
-
-optional<MessageTimestamp> Message::get_timestamp() const {
-    rd_kafka_timestamp_type_t type = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
-    int64_t timestamp = rd_kafka_message_timestamp(handle_.get(), &type);
-    if (timestamp == -1 || type == RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
-        return {};
-    }
-    return MessageTimestamp(milliseconds(timestamp),
-                            static_cast<MessageTimestamp::TimestampType>(type));
-}
-
-Message::operator bool() const {
-    return handle_ != nullptr;
-}
-
-rd_kafka_message_t* Message::get_handle() const {
-    return handle_.get();
+Message& Message::load_internal() {
+    if (user_data_) {
+        MessageInternal* mi = static_cast<MessageInternal*>(user_data_);
+        user_data_ = mi->get_user_data();
+        internal_ = mi->get_internal();
+    }
+    return *this;
 }

 // MessageTimestamp
56  src/message_internal.cpp  Normal file
@@ -0,0 +1,56 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include "message_internal.h"
#include "message.h"
#include "message_builder.h"

namespace cppkafka {

// MessageInternal

MessageInternal::MessageInternal(void* user_data,
                                 std::shared_ptr<Internal> internal)
: user_data_(user_data),
  internal_(internal) {
}

std::unique_ptr<MessageInternal> MessageInternal::load(Message& message) {
    return std::unique_ptr<MessageInternal>(message.load_internal().get_handle() ?
        static_cast<MessageInternal*>(message.get_handle()->_private) : nullptr);
}

void* MessageInternal::get_user_data() const {
    return user_data_;
}

InternalPtr MessageInternal::get_internal() const {
    return internal_;
}

}
@@ -27,6 +27,7 @@
 *
 */

+#include <assert.h>
 #include "metadata.h"
 #include "error.h"

@@ -110,12 +111,31 @@ uint16_t BrokerMetadata::get_port() const {

 // Metadata

-Metadata::Metadata(const rd_kafka_metadata_t* ptr)
-: handle_(ptr, &rd_kafka_metadata_destroy) {
+void dummy_metadata_destroyer(const rd_kafka_metadata_t*) {
+
+}
+
+Metadata Metadata::make_non_owning(const rd_kafka_metadata_t* handle) {
+    return Metadata(handle, NonOwningTag{});
+}
+
+Metadata::Metadata()
+: handle_(nullptr, nullptr) {
+
+}
+
+Metadata::Metadata(const rd_kafka_metadata_t* handle)
+: handle_(handle, &rd_kafka_metadata_destroy) {
+
+}
+
+Metadata::Metadata(const rd_kafka_metadata_t* handle, NonOwningTag)
+: handle_(handle, &dummy_metadata_destroyer) {

 }

 vector<BrokerMetadata> Metadata::get_brokers() const {
+    assert(handle_);
     vector<BrokerMetadata> output;
     for (int i = 0; i < handle_->broker_cnt; ++i) {
         const rd_kafka_metadata_broker_t& broker = handle_->brokers[i];
@@ -125,6 +145,7 @@ vector<BrokerMetadata> Metadata::get_brokers() const {
 }

 vector<TopicMetadata> Metadata::get_topics() const {
+    assert(handle_);
     vector<TopicMetadata> output;
     for (int i = 0; i < handle_->topic_cnt; ++i) {
         const rd_kafka_metadata_topic_t& topic = handle_->topics[i];
@@ -134,6 +155,7 @@ vector<TopicMetadata> Metadata::get_topics() const {
 }

 vector<TopicMetadata> Metadata::get_topics(const unordered_set<string>& topics) const {
+    assert(handle_);
     vector<TopicMetadata> output;
     for (int i = 0; i < handle_->topic_cnt; ++i) {
         const rd_kafka_metadata_topic_t& topic = handle_->topics[i];
@@ -145,6 +167,7 @@ vector<TopicMetadata> Metadata::get_topics(const unordered_set<string>& topics) const {
 }

 vector<TopicMetadata> Metadata::get_topics_prefixed(const string& prefix) const {
+    assert(handle_);
     vector<TopicMetadata> output;
     for (int i = 0; i < handle_->topic_cnt; ++i) {
         const rd_kafka_metadata_topic_t& topic = handle_->topics[i];
@@ -156,4 +179,13 @@ vector<TopicMetadata> Metadata::get_topics_prefixed(const string& prefix) const {
     return output;
 }

+Metadata::operator bool() const {
+    return handle_ != nullptr;
+}
+
+const rd_kafka_metadata_t* Metadata::get_handle() const {
+    return handle_.get();
+}
+
 } // cppkafka
src/producer.cpp
@@ -28,13 +28,16 @@
  */
 
 #include <errno.h>
+#include <memory>
 #include "producer.h"
 #include "exceptions.h"
+#include "message_internal.h"
 
 using std::move;
 using std::string;
 
 using std::chrono::milliseconds;
+using std::unique_ptr;
+using std::get;
 
 namespace cppkafka {
 
@@ -77,6 +80,23 @@ void Producer::produce(const MessageBuilder& builder) {
     check_error(result);
 }
 
+void Producer::produce(const Message& message) {
+    const Buffer& payload = message.get_payload();
+    const Buffer& key = message.get_key();
+    const int policy = static_cast<int>(message_payload_policy_);
+    int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0;
+    auto result = rd_kafka_producev(get_handle(),
+                                    RD_KAFKA_V_TOPIC(message.get_topic().data()),
+                                    RD_KAFKA_V_PARTITION(message.get_partition()),
+                                    RD_KAFKA_V_MSGFLAGS(policy),
+                                    RD_KAFKA_V_TIMESTAMP(duration),
+                                    RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
+                                    RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
+                                    RD_KAFKA_V_OPAQUE(message.get_user_data()),
+                                    RD_KAFKA_V_END);
+    check_error(result);
+}
+
 int Producer::poll() {
     return poll(get_timeout());
 }
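A usage sketch for the new overload (illustrative, not part of the diff): producing an already-built Message is useful for re-sending a message that came back through a delivery report. The broker address and topic name below are placeholders:

    #include <string>
    #include <cppkafka/producer.h>

    using namespace cppkafka;

    int main() {
        Producer producer(Configuration{{ "metadata.broker.list", "localhost:9092" }});
        std::string payload = "hello";
        producer.produce(MessageBuilder("test_topic").payload(payload));
        // New overload: re-produce a full Message, preserving its topic,
        // partition, key, payload, timestamp and user data:
        //   producer.produce(failed_message);
        producer.poll(); // serve delivery report callbacks
    }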
src/queue.cpp (new file, 118 lines)
@@ -0,0 +1,118 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include "queue.h"
#include "exceptions.h"

using std::vector;
using std::exception;
using std::chrono::milliseconds;

namespace cppkafka {

void dummy_deleter(rd_kafka_queue_t*) {

}

const milliseconds Queue::DEFAULT_TIMEOUT{1000};

Queue Queue::make_non_owning(rd_kafka_queue_t* handle) {
    return Queue(handle, NonOwningTag{});
}

Queue::Queue()
: handle_(nullptr, nullptr),
  timeout_ms_(DEFAULT_TIMEOUT) {

}

Queue::Queue(rd_kafka_queue_t* handle)
: handle_(handle, &rd_kafka_queue_destroy),
  timeout_ms_(DEFAULT_TIMEOUT) {

}

Queue::Queue(rd_kafka_queue_t* handle, NonOwningTag)
: handle_(handle, &dummy_deleter) {

}

rd_kafka_queue_t* Queue::get_handle() const {
    return handle_.get();
}

size_t Queue::get_length() const {
    return rd_kafka_queue_length(handle_.get());
}

void Queue::forward_to_queue(const Queue& forward_queue) const {
    return rd_kafka_queue_forward(handle_.get(), forward_queue.handle_.get());
}

void Queue::disable_queue_forwarding() const {
    return rd_kafka_queue_forward(handle_.get(), nullptr);
}

void Queue::set_timeout(milliseconds timeout) {
    timeout_ms_ = timeout;
}

milliseconds Queue::get_timeout() const {
    return timeout_ms_;
}

Message Queue::consume() const {
    return consume(timeout_ms_);
}

Message Queue::consume(milliseconds timeout) const {
    return Message(rd_kafka_consume_queue(handle_.get(), static_cast<int>(timeout.count())));
}

MessageList Queue::consume_batch(size_t max_batch_size) const {
    return consume_batch(max_batch_size, timeout_ms_);
}

MessageList Queue::consume_batch(size_t max_batch_size, milliseconds timeout) const {
    vector<rd_kafka_message_t*> raw_messages(max_batch_size);
    ssize_t result = rd_kafka_consume_batch_queue(handle_.get(),
                                                  static_cast<int>(timeout.count()),
                                                  raw_messages.data(),
                                                  raw_messages.size());
    if (result == -1) {
        rd_kafka_resp_err_t error = rd_kafka_last_error();
        if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
            throw QueueException(error);
        }
        return MessageList();
    }
    // Build message list
    return MessageList(raw_messages.begin(), raw_messages.begin() + result);
}

} //cppkafka
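A short usage sketch for the new Queue API (illustrative, not part of the diff), assuming the consumer's partition queue is obtained through Consumer::get_partition_queue as done elsewhere in this change set:

    #include <chrono>
    #include <cppkafka/queue.h>
    #include <cppkafka/consumer.h>

    using namespace cppkafka;
    using std::chrono::milliseconds;

    void drain(Consumer& consumer, const TopicPartition& partition) {
        Queue queue = consumer.get_partition_queue(partition);
        queue.set_timeout(milliseconds(100)); // default is 1000ms, see above
        // consume_batch throws QueueException on rd_kafka errors and may
        // return fewer than 10 messages if the timeout expires first
        MessageList messages = queue.consume_batch(10);
        for (Message& msg : messages) {
            (void)msg; // process...
        }
    }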
src/topic.cpp
@@ -34,7 +34,7 @@ using std::string;
 
 namespace cppkafka {
 
-void dummy_topic_destroyer(rd_kafka_topic_t*) {
+void dummy_deleter(rd_kafka_topic_t*) {
 
 }
 
@@ -53,7 +53,7 @@ Topic::Topic(rd_kafka_topic_t* handle)
 }
 
 Topic::Topic(rd_kafka_topic_t* handle, NonOwningTag)
-: handle_(handle, &dummy_topic_destroyer) {
+: handle_(handle, &dummy_deleter) {
 
 }
src/topic_configuration.cpp
@@ -33,6 +33,7 @@
 #include "exceptions.h"
 #include "topic.h"
 #include "buffer.h"
+#include "detail/callback_invoker.h"
 
 using std::string;
 using std::map;
@@ -49,7 +50,8 @@ int32_t partitioner_callback_proxy(const rd_kafka_topic_t* handle, const void *k
     if (callback) {
         Topic topic = Topic::make_non_owning(const_cast<rd_kafka_topic_t*>(handle));
         Buffer key(static_cast<const char*>(key_ptr), key_size);
-        return callback(topic, key, partition_count);
+        return CallbackInvoker<TopicConfiguration::PartitionerCallback>("topic partitioner", callback, nullptr)
+            (topic, key, partition_count);
     }
     else {
         return rd_kafka_msg_partitioner_consistent_random(handle, key_ptr, key_size,
src/topic_partition.cpp
@@ -33,6 +33,7 @@
 #include "topic_partition.h"
 
 using std::string;
+using std::to_string;
 using std::ostream;
 using std::tie;
 
@@ -92,7 +93,10 @@ bool TopicPartition::operator!=(const TopicPartition& rhs) const {
 }
 
 ostream& operator<<(ostream& output, const TopicPartition& rhs) {
-    return output << rhs.get_topic() << "[" << rhs.get_partition() << "]";
+    return output << rhs.get_topic() << "["
+                  << rhs.get_partition() << ":"
+                  << (rhs.get_offset() == RD_KAFKA_OFFSET_INVALID ? "#" : to_string(rhs.get_offset()))
+                  << "]";
 }
 
 } // cppkafka
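The stream output now includes the offset, with '#' standing in for RD_KAFKA_OFFSET_INVALID. A small sketch of the resulting format ("events" is a placeholder topic name):

    #include <iostream>
    #include <cppkafka/topic_partition.h>

    int main() {
        cppkafka::TopicPartition without_offset("events", 3);       // offset defaults to invalid
        cppkafka::TopicPartition with_offset("events", 3, 1000);
        // Prints "events[3:#]" then "events[3:1000]"
        std::cout << without_offset << "\n" << with_offset << "\n";
    }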
src/topic_partition_list.cpp
@@ -28,16 +28,20 @@
 */
 
 #include <iostream>
+#include <string>
 #include "topic_partition_list.h"
 #include "topic_partition.h"
 #include "exceptions.h"
+#include "metadata.h"
 
 using std::vector;
+using std::set;
 using std::ostream;
+using std::string;
 
 namespace cppkafka {
 
-TopicPartitionsListPtr convert(const vector<TopicPartition>& topic_partitions) {
+TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions) {
     TopicPartitionsListPtr handle(rd_kafka_topic_partition_list_new(topic_partitions.size()),
                                   &rd_kafka_topic_partition_list_destroy);
     for (const auto& item : topic_partitions) {
@@ -50,12 +54,12 @@ TopicPartitionsListPtr convert(const vector<TopicPartition>& topic_partitions) {
     return handle;
 }
 
-vector<TopicPartition> convert(const TopicPartitionsListPtr& topic_partitions) {
+TopicPartitionList convert(const TopicPartitionsListPtr& topic_partitions) {
     return convert(topic_partitions.get());
 }
 
-vector<TopicPartition> convert(rd_kafka_topic_partition_list_t* topic_partitions) {
-    vector<TopicPartition> output;
+TopicPartitionList convert(rd_kafka_topic_partition_list_t* topic_partitions) {
+    TopicPartitionList output;
     for (int i = 0; i < topic_partitions->cnt; ++i) {
         const auto& elem = topic_partitions->elems[i];
         output.emplace_back(elem.topic, elem.partition, elem.offset);
@@ -63,10 +67,51 @@ vector<TopicPartition> convert(rd_kafka_topic_partition_list_t* topic_partitions
     return output;
 }
 
+TopicPartitionList convert(const std::string& topic,
+                           const std::vector<PartitionMetadata>& partition_metadata)
+{
+    TopicPartitionList output;
+    for (const auto& meta : partition_metadata) {
+        output.emplace_back(topic, meta.get_id());
+    }
+    return output;
+}
+
 TopicPartitionsListPtr make_handle(rd_kafka_topic_partition_list_t* handle) {
     return TopicPartitionsListPtr(handle, &rd_kafka_topic_partition_list_destroy);
 }
 
+TopicPartitionList find_matches(const TopicPartitionList& partitions,
+                                const set<string>& topics) {
+    TopicPartitionList subset;
+    for (const auto& partition : partitions) {
+        for (const auto& topic : topics) {
+            if (topic.size() == partition.get_topic().size()) {
+                // compare both strings
+                bool match = equal(topic.begin(), topic.end(), partition.get_topic().begin(),
+                                   [](char c1, char c2)->bool {
+                                       return toupper(c1) == toupper(c2);
+                                   });
+                if (match) {
+                    subset.emplace_back(partition);
+                }
+            }
+        }
+    }
+    return subset;
+}
+
+TopicPartitionList find_matches(const TopicPartitionList& partitions,
+                                const set<int>& ids) {
+    TopicPartitionList subset;
+    for (const auto& partition : partitions) {
+        if (ids.count(partition.get_partition()) > 0) {
+            subset.emplace_back(partition);
+        }
+    }
+    return subset;
+}
+
 ostream& operator<<(ostream& output, const TopicPartitionList& rhs) {
     output << "[ ";
     for (auto iter = rhs.begin(); iter != rhs.end(); ++iter) {
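A sketch of the new find_matches helpers (illustrative, not part of the diff): the topic overload matches case-insensitively, the id overload filters by partition number. The topic name and ids are placeholders:

    #include <set>
    #include <string>
    #include <cppkafka/topic_partition_list.h>

    using namespace cppkafka;

    TopicPartitionList select(const TopicPartitionList& assignment) {
        // keep only partitions of topic "events" (case-insensitive match)
        TopicPartitionList by_topic = find_matches(assignment, std::set<std::string>{ "events" });
        // then keep only partitions 0 and 1
        return find_matches(by_topic, std::set<int>{ 0, 1 });
    }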
src/utils/backoff_committer.cpp
@@ -35,48 +35,28 @@ using std::min;
 namespace cppkafka {
 
 BackoffCommitter::BackoffCommitter(Consumer& consumer)
-: consumer_(consumer), initial_backoff_(DEFAULT_INITIAL_BACKOFF),
-  backoff_step_(DEFAULT_BACKOFF_STEP), maximum_backoff_(DEFAULT_MAXIMUM_BACKOFF),
-  policy_(BackoffPolicy::LINEAR) {
+: consumer_(consumer) {
 
 }
 
-void BackoffCommitter::set_backoff_policy(BackoffPolicy policy) {
-    policy_ = policy;
-}
-
-void BackoffCommitter::set_initial_backoff(TimeUnit value) {
-    initial_backoff_ = value;
-}
-
-void BackoffCommitter::set_backoff_step(TimeUnit value) {
-    backoff_step_ = value;
-}
-
-void BackoffCommitter::set_maximum_backoff(TimeUnit value) {
-    maximum_backoff_ = value;
-}
-
 void BackoffCommitter::set_error_callback(ErrorCallback callback) {
     callback_ = move(callback);
 }
 
 void BackoffCommitter::commit(const Message& msg) {
-    do_commit(msg);
+    perform([&] {
+        return do_commit(msg);
+    });
 }
 
 void BackoffCommitter::commit(const TopicPartitionList& topic_partitions) {
-    do_commit(topic_partitions);
+    perform([&] {
+        return do_commit(topic_partitions);
+    });
 }
 
-BackoffCommitter::TimeUnit BackoffCommitter::increase_backoff(TimeUnit backoff) {
-    if (policy_ == BackoffPolicy::LINEAR) {
-        backoff = backoff + backoff_step_;
-    }
-    else {
-        backoff = backoff * 2;
-    }
-    return min(backoff, maximum_backoff_);
+Consumer& BackoffCommitter::get_consumer() {
+    return consumer_;
 }
 
 } // cppkafka
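For orientation: commit() now funnels through perform(), which appears to come from the new BackoffPerformer base class below (its header is not shown in this diff) and presumably retries the functor, growing the wait via increase_backoff between attempts. A hedged usage sketch; the inherited setters and the error-callback semantics (return true to keep retrying) are assumptions, not shown here:

    #include <chrono>
    #include <cppkafka/consumer.h>
    #include <cppkafka/utils/backoff_committer.h>

    using namespace cppkafka;

    void commit_with_retries(Consumer& consumer, const Message& msg) {
        BackoffCommitter committer(consumer);
        // Assumed: backoff knobs are inherited from BackoffPerformer
        committer.set_initial_backoff(std::chrono::milliseconds(100));
        committer.set_backoff_step(std::chrono::milliseconds(50));
        committer.set_maximum_retries(5);
        committer.set_error_callback([](Error) {
            return true; // assumed: true means "keep retrying this commit"
        });
        committer.commit(msg); // retried internally via perform()
    }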
src/utils/backoff_performer.cpp (new file, 81 lines)
@@ -0,0 +1,81 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 * (BSD license header identical to src/queue.cpp above)
 */

#include <algorithm>
#include <limits>
#include "utils/backoff_performer.h"

using std::min;
using std::numeric_limits;

namespace cppkafka {

const BackoffPerformer::TimeUnit BackoffPerformer::DEFAULT_INITIAL_BACKOFF{100};
const BackoffPerformer::TimeUnit BackoffPerformer::DEFAULT_BACKOFF_STEP{50};
const BackoffPerformer::TimeUnit BackoffPerformer::DEFAULT_MAXIMUM_BACKOFF{1000};
const size_t BackoffPerformer::DEFAULT_MAXIMUM_RETRIES{numeric_limits<size_t>::max()};

BackoffPerformer::BackoffPerformer()
: initial_backoff_(DEFAULT_INITIAL_BACKOFF),
  backoff_step_(DEFAULT_BACKOFF_STEP), maximum_backoff_(DEFAULT_MAXIMUM_BACKOFF),
  policy_(BackoffPolicy::LINEAR), maximum_retries_(DEFAULT_MAXIMUM_RETRIES) {

}

void BackoffPerformer::set_backoff_policy(BackoffPolicy policy) {
    policy_ = policy;
}

void BackoffPerformer::set_initial_backoff(TimeUnit value) {
    initial_backoff_ = value;
}

void BackoffPerformer::set_backoff_step(TimeUnit value) {
    backoff_step_ = value;
}

void BackoffPerformer::set_maximum_backoff(TimeUnit value) {
    maximum_backoff_ = value;
}

void BackoffPerformer::set_maximum_retries(size_t value) {
    maximum_retries_ = value == 0 ? 1 : value;
}

BackoffPerformer::TimeUnit BackoffPerformer::increase_backoff(TimeUnit backoff) {
    if (policy_ == BackoffPolicy::LINEAR) {
        backoff = backoff + backoff_step_;
    }
    else {
        backoff = backoff * 2;
    }
    return min(backoff, maximum_backoff_);
}

} // cppkafka
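To make the defaults concrete (a standalone illustration, not part of the diff): with the LINEAR policy the wait grows by backoff_step_ per retry, with the other branch it doubles, and both are clamped to maximum_backoff_. Starting from the 100ms default, linear gives 150, 200, 250, ... while doubling gives 200, 400, 800, then stays at 1000:

    #include <algorithm>
    #include <chrono>
    #include <cstdio>

    int main() {
        using std::chrono::milliseconds;
        const milliseconds step(50), maximum(1000);
        milliseconds linear(100), doubling(100);
        for (int retry = 0; retry < 6; ++retry) {
            // mirrors BackoffPerformer::increase_backoff for both policies
            linear = std::min(linear + step, maximum);
            doubling = std::min(doubling * 2, maximum);
            std::printf("retry %d: linear=%ldms doubling=%ldms\n",
                        retry, (long)linear.count(), (long)doubling.count());
        }
    }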
src/utils/poll_strategy_base.cpp (new file, 129 lines)
@@ -0,0 +1,129 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 * (BSD license header identical to src/queue.cpp above)
 */

#include "utils/poll_strategy_base.h"
#include "consumer.h"

using std::chrono::milliseconds;

namespace cppkafka {

PollStrategyBase::PollStrategyBase(Consumer& consumer)
: consumer_(consumer),
  consumer_queue_(QueueData{consumer.get_consumer_queue(), boost::any()}) {
    // get all currently active partition assignments
    TopicPartitionList assignment = consumer_.get_assignment();
    on_assignment(assignment);

    // take over the assignment callback
    assignment_callback_ = consumer.get_assignment_callback();
    consumer_.set_assignment_callback([this](TopicPartitionList& partitions) {
        on_assignment(partitions);
    });
    // take over the revocation callback
    revocation_callback_ = consumer.get_revocation_callback();
    consumer_.set_revocation_callback([this](const TopicPartitionList& partitions) {
        on_revocation(partitions);
    });
    // take over the rebalance error callback
    rebalance_error_callback_ = consumer.get_rebalance_error_callback();
    consumer_.set_rebalance_error_callback([this](Error error) {
        on_rebalance_error(error);
    });
}

PollStrategyBase::~PollStrategyBase() {
    //reset the original callbacks
    consumer_.set_assignment_callback(assignment_callback_);
    consumer_.set_revocation_callback(revocation_callback_);
    consumer_.set_rebalance_error_callback(rebalance_error_callback_);
}

void PollStrategyBase::set_timeout(milliseconds timeout) {
    consumer_.set_timeout(timeout);
}

milliseconds PollStrategyBase::get_timeout() {
    return consumer_.get_timeout();
}

Consumer& PollStrategyBase::get_consumer() {
    return consumer_;
}

QueueData& PollStrategyBase::get_consumer_queue() {
    return consumer_queue_;
}

PollStrategyBase::QueueMap& PollStrategyBase::get_partition_queues() {
    return partition_queues_;
}

void PollStrategyBase::reset_state() {

}

void PollStrategyBase::on_assignment(TopicPartitionList& partitions) {
    // populate partition queues
    for (const auto& partition : partitions) {
        // get the queue associated with this partition
        partition_queues_.emplace(partition, QueueData{consumer_.get_partition_queue(partition), boost::any()});
    }
    reset_state();
    // call original consumer callback if any
    if (assignment_callback_) {
        assignment_callback_(partitions);
    }
}

void PollStrategyBase::on_revocation(const TopicPartitionList& partitions) {
    for (const auto& partition : partitions) {
        // get the queue associated with this partition
        auto toppar_it = partition_queues_.find(partition);
        if (toppar_it != partition_queues_.end()) {
            // remove this queue from the list
            partition_queues_.erase(toppar_it);
        }
    }
    reset_state();
    // call original consumer callback if any
    if (revocation_callback_) {
        revocation_callback_(partitions);
    }
}

void PollStrategyBase::on_rebalance_error(Error error) {
    reset_state();
    // call original consumer callback if any
    if (rebalance_error_callback_) {
        rebalance_error_callback_(error);
    }
}

} //cppkafka
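The base class leaves the actual poll()/poll_batch() logic to subclasses; their signatures can be inferred from RoundRobinPollStrategy below. A purely illustrative subclass sketch, assuming those members are declared virtual in poll_strategy_base.h (which this diff does not show): it always drains the consumer's main event queue and ignores per-partition queues.

    #include <cppkafka/consumer.h>
    #include <cppkafka/utils/poll_strategy_base.h>

    using namespace cppkafka;
    using std::chrono::milliseconds;

    class GreedyPollStrategy : public PollStrategyBase {  // hypothetical
    public:
        using PollStrategyBase::PollStrategyBase;

        Message poll() override {
            return poll(get_timeout());
        }
        Message poll(milliseconds timeout) override {
            return get_consumer_queue().queue.consume(timeout);
        }
        MessageList poll_batch(size_t max_batch_size) override {
            return poll_batch(max_batch_size, get_timeout());
        }
        MessageList poll_batch(size_t max_batch_size, milliseconds timeout) override {
            return get_consumer_queue().queue.consume_batch(max_batch_size, timeout);
        }
    };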
src/utils/roundrobin_poll_strategy.cpp (new file, 131 lines)
@@ -0,0 +1,131 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 * (BSD license header identical to src/queue.cpp above)
 */

#include "utils/roundrobin_poll_strategy.h"

using std::string;
using std::chrono::milliseconds;
using std::make_move_iterator;

namespace cppkafka {

RoundRobinPollStrategy::RoundRobinPollStrategy(Consumer& consumer)
: PollStrategyBase(consumer) {
    reset_state();
}

RoundRobinPollStrategy::~RoundRobinPollStrategy() {
    restore_forwarding();
}

Message RoundRobinPollStrategy::poll() {
    return poll(get_consumer().get_timeout());
}

Message RoundRobinPollStrategy::poll(milliseconds timeout) {
    // Always give priority to group and global events
    Message message = get_consumer_queue().queue.consume(milliseconds(0));
    if (message) {
        return message;
    }
    size_t num_queues = get_partition_queues().size();
    while (num_queues--) {
        //consume the next partition (non-blocking)
        message = get_next_queue().queue.consume(milliseconds(0));
        if (message) {
            return message;
        }
    }
    // We still don't have a valid message so we block on the event queue
    return get_consumer_queue().queue.consume(timeout);
}

MessageList RoundRobinPollStrategy::poll_batch(size_t max_batch_size) {
    return poll_batch(max_batch_size, get_consumer().get_timeout());
}

MessageList RoundRobinPollStrategy::poll_batch(size_t max_batch_size, milliseconds timeout) {
    MessageList messages;
    ssize_t count = max_batch_size;

    // batch from the group event queue first (non-blocking)
    consume_batch(get_consumer_queue().queue, messages, count, milliseconds(0));
    size_t num_queues = get_partition_queues().size();
    while ((count > 0) && (num_queues--)) {
        // batch from the next partition (non-blocking)
        consume_batch(get_next_queue().queue, messages, count, milliseconds(0));
    }
    // we still have space left in the buffer
    if (count > 0) {
        // wait on the event queue until timeout
        consume_batch(get_consumer_queue().queue, messages, count, timeout);
    }
    return messages;
}

void RoundRobinPollStrategy::consume_batch(Queue& queue,
                                           MessageList& messages,
                                           ssize_t& count,
                                           milliseconds timeout) {
    MessageList queue_messages = queue.consume_batch(count, timeout);
    if (queue_messages.empty()) {
        return;
    }
    // concatenate both lists
    messages.insert(messages.end(),
                    make_move_iterator(queue_messages.begin()),
                    make_move_iterator(queue_messages.end()));
    // reduce total batch count
    count -= queue_messages.size();
}

void RoundRobinPollStrategy::restore_forwarding() {
    // forward all partition queues
    for (const auto& toppar : get_partition_queues()) {
        toppar.second.queue.forward_to_queue(get_consumer_queue().queue);
    }
}

QueueData& RoundRobinPollStrategy::get_next_queue() {
    if (get_partition_queues().empty()) {
        throw QueueException(RD_KAFKA_RESP_ERR__STATE);
    }
    if (++queue_iter_ == get_partition_queues().end()) {
        queue_iter_ = get_partition_queues().begin();
    }
    return queue_iter_->second;
}

void RoundRobinPollStrategy::reset_state() {
    queue_iter_ = get_partition_queues().begin();
}

} //cppkafka
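A hedged usage sketch (not part of the diff), assuming a Consumer that is already subscribed: polling through the strategy gives every assigned partition a fair turn, with group/global events always served first, and the destructor restores queue forwarding and the original rebalance callbacks.

    #include <cppkafka/consumer.h>
    #include <cppkafka/utils/roundrobin_poll_strategy.h>

    using namespace cppkafka;

    void poll_fairly(Consumer& consumer) {
        RoundRobinPollStrategy strategy(consumer); // takes over rebalance callbacks
        for (int i = 0; i < 100; ++i) {
            Message msg = strategy.poll(); // serves partitions round-robin
            if (msg && !msg.get_error()) {
                // process msg...
            }
        }
    } // strategy destructor restores forwarding here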
tests/CMakeLists.txt
@@ -1,31 +1,28 @@
-include_directories(${GOOGLETEST_INCLUDE})
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)
-include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${RDKAFKA_INCLUDE_DIR})
-link_directories(${GOOGLETEST_LIBRARY})
-link_libraries(cppkafka ${RDKAFKA_LIBRARY} gtest gtest_main pthread)
+include_directories(SYSTEM ${CATCH_INCLUDE})
+include_directories(SYSTEM ${RDKAFKA_INCLUDE_DIR})
 
 set(KAFKA_TEST_INSTANCE "kafka-vm:9092"
     CACHE STRING "The kafka instance to which to connect to run tests")
 add_custom_target(tests)
 
-macro(create_test test_name)
-    add_executable(${test_name}_test EXCLUDE_FROM_ALL "${test_name}_test.cpp")
-    add_test(${test_name} ${test_name}_test)
-    add_dependencies(tests ${test_name}_test)
-    add_dependencies(${test_name}_test cppkafka)
-    target_link_libraries(${test_name}_test cppkafka-test)
-endmacro()
-
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-add_library(cppkafka-test EXCLUDE_FROM_ALL test_utils.cpp)
-add_dependencies(cppkafka-test cppkafka)
 
 add_definitions("-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\"")
-create_test(consumer)
-create_test(producer)
-create_test(kafka_handle_base)
-create_test(topic_partition_list)
-create_test(configuration)
-create_test(buffer)
-create_test(compacted_topic_processor)
+add_executable(
+    cppkafka_tests
+    buffer_test.cpp
+    compacted_topic_processor_test.cpp
+    configuration_test.cpp
+    topic_partition_list_test.cpp
+    kafka_handle_base_test.cpp
+    producer_test.cpp
+    consumer_test.cpp
+    roundrobin_poll_test.cpp
+
+    # Main file
+    test_main.cpp
+)
+target_link_libraries(cppkafka_tests cppkafka ${RDKAFKA_LIBRARY} pthread rt ssl crypto dl z)
+add_dependencies(tests cppkafka_tests)
+add_test(cppkafka cppkafka_tests)
tests/buffer_test.cpp
@@ -1,7 +1,7 @@
 #include <string>
 #include <vector>
 #include <sstream>
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/buffer.h"
 
 using std::string;
@@ -10,69 +10,62 @@ using std::ostringstream;
 
 using namespace cppkafka;
 
-class BufferTest : public testing::Test {
-public:
-
-};
-
-TEST_F(BufferTest, OperatorBool) {
-    string data = "Hello world!";
-    Buffer buffer1(data);
-    Buffer buffer2;
-
-    EXPECT_TRUE(buffer1);
-    EXPECT_FALSE(buffer2);
-}
-
-TEST_F(BufferTest, StringConversion) {
-    string data = "Hello world!";
-    Buffer buffer(data);
-    string buffer_as_string = buffer;
-    EXPECT_EQ(data, buffer_as_string);
-}
-
-TEST_F(BufferTest, StringConversionOnEmptyBuffer) {
-    Buffer buffer;
-    EXPECT_EQ("", static_cast<string>(buffer));
-}
-
-TEST_F(BufferTest, VectorConversion) {
-    string data = "Hello world!";
-    Buffer buffer(data);
-    vector<char> buffer_as_vector = buffer;
-    EXPECT_EQ(data, string(buffer_as_vector.begin(), buffer_as_vector.end()));
-}
-
-TEST_F(BufferTest, VectorConstruction) {
+TEST_CASE("conversions", "[buffer]") {
+    const string data = "Hello world!";
+    const Buffer buffer(data);
+    const Buffer empty_buffer;
+
+    SECTION("construction") {
+        CHECK_THROWS_AS(Buffer((const char*)nullptr, 5), Exception);
+    }
+
+    SECTION("bool conversion") {
+        CHECK(!!buffer == true);
+        CHECK(!!empty_buffer == false);
+    }
+
+    SECTION("string conversion") {
+        CHECK(static_cast<string>(buffer) == data);
+        CHECK(static_cast<string>(empty_buffer).empty());
+    }
+
+    SECTION("vector conversion") {
+        const vector<char> buffer_as_vector = buffer;
+        CHECK(string(buffer_as_vector.begin(), buffer_as_vector.end()) == data);
+    }
+}
+
+TEST_CASE("construction", "[buffer]") {
     const string str_data = "Hello world!";
     const vector<uint8_t> data(str_data.begin(), str_data.end());
-    Buffer buffer(data);
-    EXPECT_EQ(str_data, buffer);
+    const Buffer buffer(data);
+    CHECK(str_data == buffer);
 }
 
-TEST_F(BufferTest, Equality) {
-    string data = "Hello world!";
-    Buffer buffer1(data);
-    Buffer buffer2(data);
-
-    EXPECT_EQ(buffer1, buffer2);
-}
-
-TEST_F(BufferTest, InEquality) {
-    string data1 = "Hello world!";
-    string data2 = "Hello worldz";
-    Buffer buffer1(data1);
-    Buffer buffer2(data2);
-
-    EXPECT_NE(buffer1, buffer2);
-}
-
-TEST_F(BufferTest, OutputOperator) {
-    string data = "Hello \x7fwor\x03ld!";
-    string pretty_string = "Hello \\x7fwor\\x03ld!";
-    Buffer buffer(data);
+TEST_CASE("comparison", "[buffer]") {
+    const string data = "Hello world!";
+    const Buffer buffer1(data);
+    const Buffer buffer2(data);
+    const Buffer empty_buffer;
+
+    SECTION("equality") {
+        CHECK(buffer1 == buffer2);
+        CHECK(buffer2 == buffer1);
+    }
+
+    SECTION("inequality") {
+        CHECK(buffer1 != empty_buffer);
+        CHECK(empty_buffer != buffer1);
+    }
+}
+
+TEST_CASE("stream extraction", "[buffer]") {
+    const string data = "Hello \x7fwor\x03ld!";
+    const string pretty_string = "Hello \\x7fwor\\x03ld!";
+    const Buffer buffer(data);
 
     ostringstream output;
     output << buffer;
-    EXPECT_EQ(pretty_string, output.str());
+    CHECK(output.str() == pretty_string);
 }
tests/compacted_topic_processor_test.cpp
@@ -4,10 +4,11 @@
 #include <set>
 #include <map>
 #include <condition_variable>
-#include <gtest/gtest.h>
-#include "cppkafka/producer.h"
+#include <catch.hpp>
+#include "cppkafka/utils/buffered_producer.h"
 #include "cppkafka/consumer.h"
 #include "cppkafka/utils/compacted_topic_processor.h"
+#include "test_utils.h"
 
 using std::string;
 using std::to_string;
@@ -29,28 +30,21 @@ using std::chrono::milliseconds;
 
 using namespace cppkafka;
 
-class CompactedTopicProcessorTest : public testing::Test {
-public:
-    static const string KAFKA_TOPIC;
-
-    Configuration make_producer_config() {
-        Configuration config;
-        config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
-        return config;
-    }
-
-    Configuration make_consumer_config() {
-        Configuration config;
-        config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
-        config.set("enable.auto.commit", false);
-        config.set("group.id", "compacted_topic_test");
-        return config;
-    }
-};
-
-const string CompactedTopicProcessorTest::KAFKA_TOPIC = "cppkafka_test1";
-
-TEST_F(CompactedTopicProcessorTest, Consume) {
+static Configuration make_producer_config() {
+    Configuration config;
+    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    return config;
+}
+
+static Configuration make_consumer_config() {
+    Configuration config;
+    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    config.set("enable.auto.commit", false);
+    config.set("group.id", "compacted_topic_test");
+    return config;
+}
+
+TEST_CASE("consumption", "[consumer][compacted]") {
     Consumer consumer(make_consumer_config());
     // We'll use ints as the key, strings as the value
     using CompactedConsumer = CompactedTopicProcessor<int, string>;
@@ -70,12 +64,16 @@ TEST_F(CompactedTopicProcessorTest, Consume) {
     compacted_consumer.set_event_handler([&](const Event& event) {
         events.push_back(event);
     });
-    consumer.subscribe({ KAFKA_TOPIC });
-    consumer.poll();
-    consumer.poll();
-    consumer.poll();
+    consumer.subscribe({ KAFKA_TOPICS[0] });
+    set<int> eof_partitions;
+    while (eof_partitions.size() != static_cast<size_t>(KAFKA_NUM_PARTITIONS)) {
+        Message msg = consumer.poll();
+        if (msg && msg.is_eof()) {
+            eof_partitions.insert(msg.get_partition());
+        }
+    }
 
-    Producer producer(make_producer_config());
+    BufferedProducer<string> producer(make_producer_config());
 
     struct ElementType {
         string value;
@@ -87,13 +85,14 @@ TEST_F(CompactedTopicProcessorTest, Consume) {
     };
     for (const auto& element_pair : elements) {
         const ElementType& element = element_pair.second;
-        MessageBuilder builder(KAFKA_TOPIC);
+        MessageBuilder builder(KAFKA_TOPICS[0]);
         builder.partition(element.partition).key(element_pair.first).payload(element.value);
         producer.produce(builder);
     }
     // Now erase the first element
     string deleted_key = "42";
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(0).key(deleted_key));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(0).key(deleted_key));
+    producer.flush();
 
     for (size_t i = 0; i < 10; ++i) {
         compacted_consumer.process_event();
@@ -101,27 +100,27 @@ TEST_F(CompactedTopicProcessorTest, Consume) {
 
     size_t set_count = 0;
     size_t delete_count = 0;
-    ASSERT_FALSE(events.empty());
+    CHECK(events.empty() == false);
     for (const Event& event : events) {
         switch (event.get_type()) {
             case Event::SET_ELEMENT:
             {
                 auto iter = elements.find(to_string(event.get_key()));
-                ASSERT_NE(iter, elements.end());
-                EXPECT_EQ(iter->second.value, event.get_value());
-                EXPECT_EQ(iter->second.partition, event.get_partition());
+                REQUIRE(iter != elements.end());
+                CHECK(iter->second.value == event.get_value());
+                CHECK(iter->second.partition == event.get_partition());
                 set_count++;
             }
             break;
            case Event::DELETE_ELEMENT:
-                EXPECT_EQ(0, event.get_partition());
-                EXPECT_EQ(42, event.get_key());
+                CHECK(event.get_partition() == 0);
+                CHECK(event.get_key() == 42);
                delete_count++;
                break;
            default:
                break;
        }
    }
-    EXPECT_EQ(2, set_count);
-    EXPECT_EQ(1, delete_count);
+    CHECK(set_count == 2);
+    CHECK(delete_count == 1);
 }
tests/configuration_test.cpp
@@ -1,4 +1,4 @@
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/configuration.h"
 #include "cppkafka/exceptions.h"
 
@@ -6,86 +6,88 @@ using namespace cppkafka;
 
 using std::string;
 
-class ConfigurationTest : public testing::Test {
-public:
-
-};
-
-TEST_F(ConfigurationTest, GetSetConfig) {
-    Configuration config;
-    config.set("group.id", "foo").set("metadata.broker.list", "asd:9092");
-    EXPECT_EQ("foo", config.get("group.id"));
-    EXPECT_EQ("asd:9092", config.get("metadata.broker.list"));
-    EXPECT_EQ("foo", config.get<string>("group.id"));
-
-    EXPECT_THROW(config.get("asd"), ConfigOptionNotFound);
-}
-
-TEST_F(ConfigurationTest, ConfigSetMultiple) {
-    Configuration config = {
-        { "group.id", "foo" },
-        { "metadata.broker.list", string("asd:9092") },
-        { "message.max.bytes", 2000 },
-        { "topic.metadata.refresh.sparse", true }
-    };
-    EXPECT_EQ("foo", config.get("group.id"));
-    EXPECT_EQ("asd:9092", config.get("metadata.broker.list"));
-    EXPECT_EQ(2000, config.get<int>("message.max.bytes"));
-    EXPECT_EQ(true, config.get<bool>("topic.metadata.refresh.sparse"));
-}
-
-TEST_F(ConfigurationTest, SetDefaultTopicConfiguration) {
-    Configuration config;
-    config.set_default_topic_configuration({{ "request.required.acks", 2 }});
-
-    const auto& topic_config = config.get_default_topic_configuration();
-    EXPECT_TRUE(topic_config);
-    EXPECT_EQ(2, topic_config->get<int>("request.required.acks"));
-}
-
-TEST_F(ConfigurationTest, SetOverloads) {
-    Configuration config;
-    config.set("enable.auto.commit", true);
-    config.set("auto.commit.interval.ms", 100);
-
-    EXPECT_EQ("true", config.get("enable.auto.commit"));
-    EXPECT_EQ("100", config.get("auto.commit.interval.ms"));
-    EXPECT_EQ(100, config.get<int>("auto.commit.interval.ms"));
-}
-
-TEST_F(ConfigurationTest, GetAll) {
-    Configuration config;
-    config.set("enable.auto.commit", false);
-    auto option_map = config.get_all();
-    EXPECT_EQ("false", option_map.at("enable.auto.commit"));
-}
-
+TEST_CASE("normal config", "[config]") {
+    Configuration config;
+
+    SECTION("get existing") {
+        config.set("group.id", "foo").set("metadata.broker.list", "asd:9092");
+        CHECK(config.get("group.id") == "foo");
+        CHECK(config.get("metadata.broker.list") == "asd:9092");
+        CHECK(config.get<string>("group.id") == "foo");
+    }
+
+    SECTION("get non existent") {
+        REQUIRE_THROWS_AS(config.get("asd"), ConfigOptionNotFound);
+    }
+
+    SECTION("set overloads") {
+        config.set("enable.auto.commit", true);
+        config.set("auto.commit.interval.ms", 100);
+
+        CHECK(config.get("enable.auto.commit") == "true");
+        CHECK(config.get("auto.commit.interval.ms") == "100");
+        CHECK(config.get<int>("auto.commit.interval.ms") == 100);
+    }
+
+    SECTION("set multiple") {
+        config = {
+            { "group.id", "foo" },
+            { "metadata.broker.list", string("asd:9092") },
+            { "message.max.bytes", 2000 },
+            { "topic.metadata.refresh.sparse", true }
+        };
+
+        CHECK(config.get("group.id") == "foo");
+        CHECK(config.get("metadata.broker.list") == "asd:9092");
+        CHECK(config.get<int>("message.max.bytes") == 2000);
+        CHECK(config.get<bool>("topic.metadata.refresh.sparse") == true);
+    }
+
+    SECTION("default topic config") {
+        config.set_default_topic_configuration({{ "request.required.acks", 2 }});
+
+        const auto& topic_config = config.get_default_topic_configuration();
+        CHECK(!!topic_config == true);
+        CHECK(topic_config->get<int>("request.required.acks") == 2);
+    }
+
+    SECTION("get all") {
+        config.set("enable.auto.commit", false);
+        auto option_map = config.get_all();
+        CHECK(option_map.at("enable.auto.commit") == "false");
+    }
+}
 
-TEST_F(ConfigurationTest, GetSetTopicConfig) {
-    TopicConfiguration config;
-    config.set("auto.commit.enable", true).set("offset.store.method", "broker");
-    EXPECT_EQ("true", config.get("auto.commit.enable"));
-    EXPECT_EQ("broker", config.get("offset.store.method"));
-    EXPECT_EQ(true, config.get<bool>("auto.commit.enable"));
-
-    EXPECT_THROW(config.get("asd"), ConfigOptionNotFound);
-}
-
-TEST_F(ConfigurationTest, TopicConfigSetMultiple) {
-    TopicConfiguration config = {
-        { "compression.codec", "none" },
-        { "offset.store.method", string("file") },
-        { "request.required.acks", 2 },
-        { "produce.offset.report", true }
-    };
-    EXPECT_EQ("none", config.get("compression.codec"));
-    EXPECT_EQ("file", config.get("offset.store.method"));
-    EXPECT_EQ(2, config.get<int>("request.required.acks"));
-    EXPECT_EQ(true, config.get<bool>("produce.offset.report"));
-}
-
-TEST_F(ConfigurationTest, TopicGetAll) {
-    TopicConfiguration config;
-    config.set("auto.commit.enable", false);
-    auto option_map = config.get_all();
-    EXPECT_EQ("false", option_map.at("auto.commit.enable"));
-}
+TEST_CASE("topic config", "[config]") {
+    TopicConfiguration config;
+
+    SECTION("get existing") {
+        config.set("auto.commit.enable", true).set("offset.store.method", "broker");
+        CHECK(config.get("auto.commit.enable") == "true");
+        CHECK(config.get("offset.store.method") == "broker");
+        CHECK(config.get<bool>("auto.commit.enable") == true);
+    }
+
+    SECTION("get non existent") {
+        REQUIRE_THROWS_AS(config.get("asd"), ConfigOptionNotFound);
+    }
+
+    SECTION("set multiple") {
+        config = {
+            { "compression.codec", "none" },
+            { "offset.store.method", string("file") },
+            { "request.required.acks", 2 },
+            { "produce.offset.report", true }
+        };
+        CHECK(config.get("compression.codec") == "none");
+        CHECK(config.get("offset.store.method") == "file");
+        CHECK(config.get<int>("request.required.acks") == 2);
+        CHECK(config.get<bool>("produce.offset.report") == true);
+    }
+
+    SECTION("get all") {
+        config.set("auto.commit.enable", false);
+        auto option_map = config.get_all();
+        CHECK(option_map.at("auto.commit.enable") == "false");
+    }
+}
|||||||
@@ -3,10 +3,13 @@
|
|||||||
#include <set>
|
#include <set>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
|
#include <iterator>
|
||||||
#include <condition_variable>
|
#include <condition_variable>
|
||||||
#include <gtest/gtest.h>
|
#include <catch.hpp>
|
||||||
#include "cppkafka/consumer.h"
|
#include "cppkafka/consumer.h"
|
||||||
#include "cppkafka/producer.h"
|
#include "cppkafka/producer.h"
|
||||||
|
#include "cppkafka/utils/consumer_dispatcher.h"
|
||||||
|
#include "cppkafka/utils/buffered_producer.h"
|
||||||
#include "test_utils.h"
|
#include "test_utils.h"
|
||||||
|
|
||||||
using std::vector;
|
using std::vector;
|
||||||
@@ -19,121 +22,116 @@ using std::tie;
|
|||||||
using std::condition_variable;
|
using std::condition_variable;
|
||||||
using std::lock_guard;
|
using std::lock_guard;
|
||||||
using std::unique_lock;
|
using std::unique_lock;
|
||||||
|
using std::make_move_iterator;
|
||||||
using std::chrono::seconds;
|
using std::chrono::seconds;
|
||||||
using std::chrono::milliseconds;
|
using std::chrono::milliseconds;
|
||||||
using std::chrono::system_clock;
|
using std::chrono::system_clock;
|
||||||
|
|
||||||
using namespace cppkafka;
|
using namespace cppkafka;
|
||||||
|
|
||||||
class ConsumerTest : public testing::Test {
|
static Configuration make_producer_config() {
|
||||||
public:
|
Configuration config;
|
||||||
static const string KAFKA_TOPIC;
|
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
|
||||||
|
return config;
|
||||||
|
}
|
||||||
|
|
||||||
Configuration make_producer_config() {
|
static Configuration make_consumer_config(const string& group_id = "consumer_test") {
|
||||||
Configuration config;
|
Configuration config;
|
||||||
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
|
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
|
||||||
return config;
|
config.set("enable.auto.commit", false);
|
||||||
}
|
config.set("group.id", group_id);
|
||||||
|
return config;
|
||||||
|
}
|
||||||
|
|
||||||
Configuration make_consumer_config(const string& group_id = "consumer_test") {
|
TEST_CASE("message consumption", "[consumer]") {
|
||||||
Configuration config;
|
TopicPartitionList assignment;
|
||||||
config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
|
|
||||||
config.set("enable.auto.commit", false);
|
|
||||||
config.set("group.id", group_id);
|
|
||||||
return config;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const string ConsumerTest::KAFKA_TOPIC = "cppkafka_test1";
|
|
||||||
|
|
||||||
TEST_F(ConsumerTest, AssignmentCallback) {
|
|
||||||
vector<TopicPartition> assignment;
|
|
||||||
int partition = 0;
|
int partition = 0;
|
||||||
|
|
||||||
// Create a consumer and subscribe to the topic
|
// Create a consumer and subscribe to the topic
|
||||||
Consumer consumer(make_consumer_config());
|
Consumer consumer(make_consumer_config());
|
||||||
consumer.set_assignment_callback([&](const vector<TopicPartition>& topic_partitions) {
|
consumer.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
|
||||||
assignment = topic_partitions;
|
assignment = topic_partitions;
|
||||||
});
|
});
|
||||||
consumer.subscribe({ KAFKA_TOPIC });
|
consumer.subscribe({ KAFKA_TOPICS[0] });
|
||||||
ConsumerRunner runner(consumer, 1, 3);
|
ConsumerRunner runner(consumer, 1, KAFKA_NUM_PARTITIONS);
|
||||||
|
|
||||||
// Produce a message just so we stop the consumer
|
// Produce a message just so we stop the consumer
|
||||||
Producer producer(make_producer_config());
|
Producer producer(make_producer_config());
|
||||||
string payload = "Hello world!";
|
string payload = "Hello world!";
|
||||||
producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
|
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
|
||||||
runner.try_join();
|
runner.try_join();
|
||||||
|
|
||||||
// All 3 partitions should be ours
|
// All partitions should be ours
|
||||||
EXPECT_EQ(3, assignment.size());
|
REQUIRE(assignment.size() == KAFKA_NUM_PARTITIONS);
|
||||||
set<int> partitions = { 0, 1, 2 };
|
set<int> partitions;
|
||||||
|
for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace(i++));
|
||||||
for (const auto& topic_partition : assignment) {
|
for (const auto& topic_partition : assignment) {
|
||||||
EXPECT_EQ(KAFKA_TOPIC, topic_partition.get_topic());
|
CHECK(topic_partition.get_topic() == KAFKA_TOPICS[0]);
|
||||||
EXPECT_TRUE(partitions.erase(topic_partition.get_partition()));
|
CHECK(partitions.erase(topic_partition.get_partition()) == true);
|
||||||
}
|
}
|
||||||
EXPECT_EQ(1, runner.get_messages().size());
|
REQUIRE(runner.get_messages().size() == 1);
|
||||||
|
CHECK(consumer.get_subscription() == vector<string>{ KAFKA_TOPICS[0] });
|
||||||
EXPECT_EQ(vector<string>{ KAFKA_TOPIC }, consumer.get_subscription());
|
|
||||||
|
|
||||||
assignment = consumer.get_assignment();
|
assignment = consumer.get_assignment();
|
||||||
EXPECT_EQ(3, assignment.size());
|
CHECK(assignment.size() == KAFKA_NUM_PARTITIONS);
|
||||||
|
|
||||||
int64_t low;
|
int64_t low;
|
||||||
int64_t high;
|
int64_t high;
|
||||||
tie(low, high) = consumer.get_offsets({ KAFKA_TOPIC, partition });
|
tie(low, high) = consumer.get_offsets({ KAFKA_TOPICS[0], partition });
|
||||||
EXPECT_GT(high, low);
|
CHECK(high > low);
|
||||||
EXPECT_EQ(high, runner.get_messages().back().get_offset() + 1);
|
CHECK(runner.get_messages().back().get_offset() + 1 == high);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(ConsumerTest, Rebalance) {
|
TEST_CASE("consumer rebalance", "[consumer]") {
|
||||||
vector<TopicPartition> assignment1;
|
TopicPartitionList assignment1;
|
||||||
vector<TopicPartition> assignment2;
|
TopicPartitionList assignment2;
|
||||||
bool revocation_called = false;
|
bool revocation_called = false;
|
||||||
int partition = 0;
|
int partition = 0;
|
||||||
|
|
||||||
// Create a consumer and subscribe to the topic
|
// Create a consumer and subscribe to the topic
|
||||||
Consumer consumer1(make_consumer_config());
|
Consumer consumer1(make_consumer_config());
|
||||||
consumer1.set_assignment_callback([&](const vector<TopicPartition>& topic_partitions) {
|
consumer1.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
|
||||||
assignment1 = topic_partitions;
|
assignment1 = topic_partitions;
|
||||||
});
|
});
|
||||||
consumer1.set_revocation_callback([&](const vector<TopicPartition>&) {
|
consumer1.set_revocation_callback([&](const TopicPartitionList&) {
|
||||||
revocation_called = true;
|
revocation_called = true;
|
||||||
});
|
});
|
||||||
consumer1.subscribe({ KAFKA_TOPIC });
|
consumer1.subscribe({ KAFKA_TOPICS[0] });
|
||||||
ConsumerRunner runner1(consumer1, 1, 3);
|
ConsumerRunner runner1(consumer1, 1, KAFKA_NUM_PARTITIONS);
|
||||||
|
|
||||||
// Create a second consumer and subscribe to the topic
|
// Create a second consumer and subscribe to the topic
|
||||||
Consumer consumer2(make_consumer_config());
|
Consumer consumer2(make_consumer_config());
|
||||||
consumer2.set_assignment_callback([&](const vector<TopicPartition>& topic_partitions) {
|
consumer2.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
|
||||||
assignment2 = topic_partitions;
|
assignment2 = topic_partitions;
|
||||||
});
|
});
|
||||||
consumer2.subscribe({ KAFKA_TOPIC });
|
consumer2.subscribe({ KAFKA_TOPICS[0] });
|
||||||
ConsumerRunner runner2(consumer2, 1, 1);
|
ConsumerRunner runner2(consumer2, 1, 1);
|
||||||
|
|
||||||
EXPECT_TRUE(revocation_called);
|
CHECK(revocation_called == true);
|
||||||
|
|
||||||
// Produce a message just so we stop the consumer
|
// Produce a message just so we stop the consumer
|
||||||
Producer producer(make_producer_config());
|
Producer producer(make_producer_config());
|
||||||
string payload = "Hello world!";
|
string payload = "Hello world!";
|
||||||
producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
|
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
|
||||||
runner1.try_join();
|
runner1.try_join();
|
||||||
runner2.try_join();
|
runner2.try_join();
|
||||||
|
|
||||||
// All 3 partitions should be assigned
|
// All partitions should be assigned
|
||||||
EXPECT_EQ(3, assignment1.size() + assignment2.size());
|
CHECK(assignment1.size() + assignment2.size() == KAFKA_NUM_PARTITIONS);
|
||||||
set<int> partitions = { 0, 1, 2 };
|
set<int> partitions;
|
||||||
|
for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace(i++));
|
||||||
for (const auto& topic_partition : assignment1) {
|
for (const auto& topic_partition : assignment1) {
|
||||||
EXPECT_EQ(KAFKA_TOPIC, topic_partition.get_topic());
|
CHECK(topic_partition.get_topic() == KAFKA_TOPICS[0]);
|
||||||
EXPECT_TRUE(partitions.erase(topic_partition.get_partition()));
|
CHECK(partitions.erase(topic_partition.get_partition()) == true);
|
||||||
}
|
}
|
||||||
for (const auto& topic_partition : assignment2) {
|
for (const auto& topic_partition : assignment2) {
|
||||||
EXPECT_EQ(KAFKA_TOPIC, topic_partition.get_topic());
|
CHECK(topic_partition.get_topic() == KAFKA_TOPICS[0]);
|
||||||
EXPECT_TRUE(partitions.erase(topic_partition.get_partition()));
|
CHECK(partitions.erase(topic_partition.get_partition()) == true);
|
||||||
}
|
}
|
||||||
EXPECT_EQ(1, runner1.get_messages().size() + runner2.get_messages().size());
|
CHECK(runner1.get_messages().size() + runner2.get_messages().size() == 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(ConsumerTest, OffsetCommit) {
|
TEST_CASE("consumer offset commit", "[consumer]") {
|
||||||
int partition = 0;
|
int partition = 0;
|
||||||
int64_t message_offset = 0;
|
int64_t message_offset = 0;
|
||||||
bool offset_commit_called = false;
|
bool offset_commit_called = false;
|
||||||
@@ -143,28 +141,102 @@ TEST_F(ConsumerTest, OffsetCommit) {
|
|||||||
     config.set_offset_commit_callback([&](Consumer&, Error error,
                                           const TopicPartitionList& topic_partitions) {
         offset_commit_called = true;
-        EXPECT_FALSE(error);
-        ASSERT_EQ(1, topic_partitions.size());
-        EXPECT_EQ(KAFKA_TOPIC, topic_partitions[0].get_topic());
-        EXPECT_EQ(0, topic_partitions[0].get_partition());
-        EXPECT_EQ(message_offset + 1, topic_partitions[0].get_offset());
+        CHECK(!!error == false);
+        REQUIRE(topic_partitions.size() == 1);
+        CHECK(topic_partitions[0].get_topic() == KAFKA_TOPICS[0]);
+        CHECK(topic_partitions[0].get_partition() == 0);
+        CHECK(topic_partitions[0].get_offset() == message_offset + 1);
     });
     Consumer consumer(config);
-    consumer.assign({ { KAFKA_TOPIC, 0 } });
+    consumer.assign({ { KAFKA_TOPICS[0], 0 } });
     ConsumerRunner runner(consumer, 1, 1);

     // Produce a message just so we stop the consumer
     Producer producer(make_producer_config());
     string payload = "Hello world!";
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     runner.try_join();

-    ASSERT_EQ(1, runner.get_messages().size());
+    REQUIRE(runner.get_messages().size() == 1);
     const Message& msg = runner.get_messages()[0];
     message_offset = msg.get_offset();
     consumer.commit(msg);
     for (size_t i = 0; i < 3 && !offset_commit_called; ++i) {
         consumer.poll();
     }
-    EXPECT_TRUE(offset_commit_called);
+    CHECK(offset_commit_called == true);
+}
+
+TEST_CASE("consumer throttle", "[consumer]") {
+    int partition = 0;
+
+    // Create a consumer and subscribe to the topic
+    Configuration config = make_consumer_config("offset_commit");
+    Consumer consumer(config);
+    consumer.assign({ { KAFKA_TOPICS[0], 0 } });
+
+    {
+        ConsumerRunner runner(consumer, 0, 1);
+        runner.try_join();
+    }
+
+    // Produce a message just so we stop the consumer
+    BufferedProducer<string> producer(make_producer_config());
+    string payload = "Hello world!";
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
+    producer.flush();
+
+    size_t callback_executed_count = 0;
+
+    ConsumerDispatcher dispatcher(consumer);
+    dispatcher.run(
+        [&](Message msg) {
+            callback_executed_count++;
+            if (callback_executed_count == 3) {
+                return Message();
+            }
+            return move(msg);
+        },
+        [&](ConsumerDispatcher::Timeout) {
+            if (callback_executed_count == 3) {
+                dispatcher.stop();
+            }
+        }
+    );
+
+    CHECK(callback_executed_count == 3);
+}
+
+TEST_CASE("consume batch", "[consumer]") {
+    int partition = 0;
+
+    // Create a consumer and subscribe to the topic
+    Configuration config = make_consumer_config("test");
+    Consumer consumer(config);
+    consumer.assign({ { KAFKA_TOPICS[0], 0 } });
+
+    {
+        ConsumerRunner runner(consumer, 0, 1);
+        runner.try_join();
+    }
+
+    // Produce a message just so we stop the consumer
+    BufferedProducer<string> producer(make_producer_config());
+    string payload = "Hello world!";
+    // Produce it twice
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
+    producer.flush();
+
+    MessageList all_messages;
+    int i = 0;
+    while (i < 5 && all_messages.size() != 2) {
+        MessageList messages = consumer.poll_batch(2);
+        all_messages.insert(all_messages.end(), make_move_iterator(messages.begin()),
+                            make_move_iterator(messages.end()));
+        ++i;
+    }
+    REQUIRE(all_messages.size() == 2);
+    CHECK(all_messages[0].get_payload() == payload);
+    CHECK(all_messages[1].get_payload() == payload);
 }
@@ -1,6 +1,6 @@
 #include <set>
 #include <unordered_set>
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/consumer.h"
 #include "cppkafka/producer.h"
 #include "cppkafka/metadata.h"
@@ -14,94 +14,90 @@ using std::string;

 using namespace cppkafka;

-class KafkaHandleBaseTest : public testing::Test {
-public:
-    static const string KAFKA_TOPIC;
-
-    Configuration make_config() {
-        Configuration config;
-        config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
-        return config;
-    }
-
-    string get_kafka_host() {
-        string uri = KAFKA_TEST_INSTANCE;
-        size_t index = uri.find(':');
-        if (index == string::npos) {
-            return uri;
-        }
-        else {
-            return uri.substr(0, index);
-        }
-    }
-
-    uint16_t get_kafka_port() {
-        string uri = KAFKA_TEST_INSTANCE;
-        size_t index = uri.find(':');
-        if (index == string::npos) {
-            return 9092;
-        }
-        else {
-            return stoul(uri.substr(index + 1));
-        }
-    }
-};
-
-const string KafkaHandleBaseTest::KAFKA_TOPIC = "cppkafka_test1";
+Configuration make_config() {
+    Configuration config;
+    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    return config;
+}
+
+string get_kafka_host() {
+    string uri = KAFKA_TEST_INSTANCE;
+    size_t index = uri.find(':');
+    if (index == string::npos) {
+        return uri;
+    }
+    else {
+        return uri.substr(0, index);
+    }
+}
+
+uint16_t get_kafka_port() {
+    string uri = KAFKA_TEST_INSTANCE;
+    size_t index = uri.find(':');
+    if (index == string::npos) {
+        return 9092;
+    }
+    else {
+        return stoul(uri.substr(index + 1));
+    }
+}

-TEST_F(KafkaHandleBaseTest, BrokersMetadata) {
+TEST_CASE("metadata", "[handle_base]") {
+    if (KAFKA_TOPICS.size() < 2) {
+        return; //skip test
+    }
     Producer producer({});
     producer.add_brokers(KAFKA_TEST_INSTANCE);
     Metadata metadata = producer.get_metadata();

-    vector<BrokerMetadata> brokers = metadata.get_brokers();
-    ASSERT_EQ(1, brokers.size());
-    const auto& broker = brokers[0];
-    // TODO: resolve this
-    //EXPECT_EQ(get_kafka_host(), broker.get_host());
-    EXPECT_EQ(get_kafka_port(), broker.get_port());
-}
+    SECTION("brokers") {
+        vector<BrokerMetadata> brokers = metadata.get_brokers();
+        REQUIRE(brokers.size() == 1);
+        const auto& broker = brokers[0];
+        // TODO: resolve this
+        //REQUIRE(broker.get_host() == get_kafka_host());
+        CHECK(broker.get_port() == get_kafka_port());
+    }

-TEST_F(KafkaHandleBaseTest, TopicsMetadata) {
-    unordered_set<string> topic_names = { "cppkafka_test1", "cppkafka_test2" };
-    size_t found_topics = 0;
-
-    Producer producer(make_config());
-    Metadata metadata = producer.get_metadata();
-
-    const vector<TopicMetadata>& topics = metadata.get_topics();
-    ASSERT_GE(topics.size(), 2);
-
-    for (const auto& topic : topics) {
-        if (topic_names.count(topic.get_name()) == 1) {
-            const vector<PartitionMetadata>& partitions = topic.get_partitions();
-            EXPECT_EQ(3, partitions.size());
-            set<int32_t> expected_ids = { 0, 1, 2 };
-            for (const PartitionMetadata& partition : partitions) {
-                EXPECT_EQ(1, expected_ids.erase(partition.get_id()));
-                for (int32_t replica : partition.get_replicas()) {
-                    EXPECT_EQ(0, replica);
-                }
-                for (int32_t isr : partition.get_in_sync_replica_brokers()) {
-                    EXPECT_EQ(0, isr);
-                }
-            }
-            found_topics++;
-        }
-    }
-    EXPECT_EQ(topic_names.size(), found_topics);
-
-    // Find by names
-    EXPECT_EQ(topic_names.size(), metadata.get_topics(topic_names).size());
-    // Find by prefix
-    EXPECT_EQ(topic_names.size(), metadata.get_topics_prefixed("cppkafka_").size());
-
-    // Now get the whole metadata only for this topic
-    Topic topic = producer.get_topic(KAFKA_TOPIC);
-    EXPECT_EQ(KAFKA_TOPIC, producer.get_metadata(topic).get_name());
-}
+    SECTION("topics") {
+        unordered_set<string> topic_names = { KAFKA_TOPICS[0], KAFKA_TOPICS[1] };
+        size_t found_topics = 0;
+
+        const vector<TopicMetadata>& topics = metadata.get_topics();
+        CHECK(topics.size() >= 2);
+
+        for (const auto& topic : topics) {
+            if (topic_names.count(topic.get_name()) == 1) {
+                const vector<PartitionMetadata>& partitions = topic.get_partitions();
+                REQUIRE(partitions.size() == KAFKA_NUM_PARTITIONS);
+                set<int32_t> expected_ids;
+                for (int i = 0; i < KAFKA_NUM_PARTITIONS; expected_ids.emplace(i++));
+                for (const PartitionMetadata& partition : partitions) {
+                    REQUIRE(expected_ids.erase(partition.get_id()) == 1);
+                    for (int32_t replica : partition.get_replicas()) {
+                        REQUIRE(replica == 0);
+                    }
+                    for (int32_t isr : partition.get_in_sync_replica_brokers()) {
+                        REQUIRE(isr == 0);
+                    }
+                }
+                found_topics++;
+            }
+        }
+        CHECK(found_topics == topic_names.size());
+
+        // Find by names
+        CHECK(metadata.get_topics(topic_names).size() == topic_names.size());
+        // Find by prefix
+        CHECK(metadata.get_topics_prefixed("cppkafka_").size() == topic_names.size());
+
+        // Now get the whole metadata only for this topic
+        Topic topic = producer.get_topic(KAFKA_TOPICS[0]);
+        CHECK(producer.get_metadata(topic).get_name() == KAFKA_TOPICS[0]);
+    }
 }

-TEST_F(KafkaHandleBaseTest, ConsumerGroups) {
+TEST_CASE("consumer groups", "[handle_base]") {
     string consumer_group = "kafka_handle_test";
     string client_id = "my_client_id";

@@ -112,30 +108,23 @@ TEST_F(KafkaHandleBaseTest, ConsumerGroups) {

     // Build consumer
     Consumer consumer(config);
-    consumer.subscribe({ KAFKA_TOPIC });
+    consumer.subscribe({ KAFKA_TOPICS[0] });
     ConsumerRunner runner(consumer, 0, 3);
     runner.try_join();

     GroupInformation information = consumer.get_consumer_group(consumer_group);
-    EXPECT_EQ(consumer_group, information.get_name());
-    EXPECT_EQ("consumer", information.get_protocol_type());
-    ASSERT_EQ(1, information.get_members().size());
+    CHECK(information.get_name() == consumer_group);
+    CHECK(information.get_protocol_type() == "consumer");
+    CHECK(information.get_members().size() == 1);

     auto member = information.get_members()[0];
-    EXPECT_EQ(client_id, member.get_client_id());
+    CHECK(member.get_client_id() == client_id);

     MemberAssignmentInformation assignment = member.get_member_assignment();
-    EXPECT_EQ(0, assignment.get_version());
-    vector<TopicPartition> expected_topic_partitions = {
-        { KAFKA_TOPIC, 0 },
-        { KAFKA_TOPIC, 1 },
-        { KAFKA_TOPIC, 2 }
-    };
-    vector<TopicPartition> topic_partitions = assignment.get_topic_partitions();
+    CHECK(assignment.get_version() == 0);
+    TopicPartitionList expected_topic_partitions;
+    for (int i = 0; i < KAFKA_NUM_PARTITIONS; expected_topic_partitions.emplace_back(KAFKA_TOPICS[0], i++));
+    TopicPartitionList topic_partitions = assignment.get_topic_partitions();
     sort(topic_partitions.begin(), topic_partitions.end());
-    EXPECT_EQ(expected_topic_partitions, topic_partitions);
-    /*for (const auto c : ) {
-        printf("%0d,", (int)c & 0xff);
-    }
-    std::cout << std::endl;*/
+    CHECK(topic_partitions == expected_topic_partitions);
 }
@@ -3,7 +3,7 @@
 #include <chrono>
 #include <set>
 #include <condition_variable>
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/producer.h"
 #include "cppkafka/consumer.h"
 #include "cppkafka/utils/buffered_producer.h"
@@ -12,248 +12,514 @@
 using std::string;
 using std::to_string;
 using std::set;
+using std::vector;
 using std::tie;
 using std::move;
 using std::thread;
+namespace this_thread = std::this_thread;
 using std::mutex;
 using std::unique_lock;
 using std::lock_guard;
 using std::condition_variable;

 using std::chrono::system_clock;
 using std::chrono::seconds;
 using std::chrono::milliseconds;
+using std::ref;

 using namespace cppkafka;

-class ProducerTest : public testing::Test {
+static Configuration make_producer_config() {
+    Configuration config = {
+        { "metadata.broker.list", KAFKA_TEST_INSTANCE },
+        { "queue.buffering.max.ms", 0 },
+        { "api.version.request", true },
+        { "queue.buffering.max.ms", 50 }
+    };
+    return config;
+}
+
+static Configuration make_consumer_config() {
+    Configuration config = {
+        { "metadata.broker.list", KAFKA_TEST_INSTANCE },
+        { "enable.auto.commit", false },
+        { "group.id", "producer_test" },
+        { "api.version.request", true }
+    };
+    return config;
+}
+
+void producer_run(BufferedProducer<string>& producer,
+                  int& exit_flag, condition_variable& clear,
+                  int num_messages,
+                  int partition) {
+    MessageBuilder builder(KAFKA_TOPICS[0]);
+    string key("wassup?");
+    string payload("nothing much!");
+
+    builder.partition(partition).key(key).payload(payload);
+    for (int i = 0; i < num_messages; ++i) {
+        if (i == num_messages/2) {
+            clear.notify_one();
+        }
+        producer.add_message(builder);
+        this_thread::sleep_for(milliseconds(10));
+    }
+    exit_flag = 1;
+}
+
+void flusher_run(BufferedProducer<string>& producer,
+                 int& exit_flag,
+                 int num_flush) {
+    while (!exit_flag) {
+        if (producer.get_buffer_size() >= (size_t)num_flush) {
+            producer.flush();
+        }
+        this_thread::sleep_for(milliseconds(10));
+    }
+    producer.flush();
+}
+
+void async_flusher_run(BufferedProducer<string>& producer,
+                       int& exit_flag,
+                       int num_flush) {
+    while (!exit_flag) {
+        if (producer.get_buffer_size() >= (size_t)num_flush) {
+            producer.async_flush();
+        }
+        this_thread::sleep_for(milliseconds(10));
+    }
+    producer.async_flush();
+    producer.wait_for_acks();
+}
+
+void clear_run(BufferedProducer<string>& producer,
+               condition_variable& clear) {
+    mutex m;
+    unique_lock<mutex> lock(m);
+    clear.wait(lock);
+    producer.clear();
+}
+
+vector<int> dr_data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+void dr_callback(const Message& message) {
+    static int i = 0;
+    if (!message || message.is_eof()) return;
+    CHECK(message.get_user_data() == &dr_data[i]);
+    CHECK(*static_cast<int*>(message.get_user_data()) == dr_data[i]);
+    ++i;
+}
+
+bool dr_failure_callback(const Message& message) {
+    if (!message || message.is_eof()) return true;
+    CHECK(message.get_user_data() == &dr_data[0]);
+    CHECK(*static_cast<int*>(message.get_user_data()) == dr_data[0]);
+    return true; //always retry
+}
+
+template <typename B>
+class ErrorProducer : public BufferedProducer<B>
+{
 public:
-    static const string KAFKA_TOPIC;
-
-    Configuration make_producer_config() {
-        Configuration config = {
-            { "metadata.broker.list", KAFKA_TEST_INSTANCE },
-            { "queue.buffering.max.ms", 0 }
-        };
-        return config;
-    }
-
-    Configuration make_consumer_config() {
-        Configuration config = {
-            { "metadata.broker.list", KAFKA_TEST_INSTANCE },
-            { "enable.auto.commit", false },
-            { "group.id", "producer_test" }
-        };
-        return config;
+    ErrorProducer(Configuration config,
+                  typename BufferedProducer<B>::TestParameters params) :
+        BufferedProducer<B>(config),
+        params_(params) {
+        this->set_test_parameters(&params_);
     }
+private:
+    typename BufferedProducer<B>::TestParameters params_;
 };

-const string ProducerTest::KAFKA_TOPIC = "cppkafka_test1";
+TEST_CASE("simple production", "[producer]") {

-TEST_F(ProducerTest, OneMessageOnFixedPartition) {
     int partition = 0;

     // Create a consumer and assign this topic/partition
     Consumer consumer(make_consumer_config());
-    consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) });
+    consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
     ConsumerRunner runner(consumer, 1, 1);

-    // Now create a producer and produce a message
-    Producer producer(make_producer_config());
-    string payload = "Hello world! 1";
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
-    runner.try_join();
+    Configuration config = make_producer_config();
+    SECTION("message with no key") {
+        // Now create a producer and produce a message
+        const string payload = "Hello world! 1";
+        Producer producer(config);
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
+        runner.try_join();

         const auto& messages = runner.get_messages();
-    ASSERT_EQ(1, messages.size());
+        REQUIRE(messages.size() == 1);
         const auto& message = messages[0];
-    EXPECT_EQ(Buffer(payload), message.get_payload());
-    EXPECT_FALSE(message.get_key());
-    EXPECT_EQ(KAFKA_TOPIC, message.get_topic());
-    EXPECT_EQ(partition, message.get_partition());
-    EXPECT_FALSE(message.get_error());
+        CHECK(message.get_payload() == payload);
+        CHECK(!!message.get_key() == false);
+        CHECK(message.get_topic() == KAFKA_TOPICS[0]);
+        CHECK(message.get_partition() == partition);
+        CHECK(!!message.get_error() == false);

         int64_t low;
         int64_t high;
-    tie(low, high) = producer.query_offsets({ KAFKA_TOPIC, partition });
-    EXPECT_GT(high, low);
+        tie(low, high) = producer.query_offsets({ KAFKA_TOPICS[0], partition });
+        CHECK(high > low);
+    }
+
+    SECTION("message with key") {
+        const string payload = "Hello world! 2";
+        const string key = "such key";
+        const milliseconds timestamp{15};
+        Producer producer(config);
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
+                                                        .key(key)
+                                                        .payload(payload)
+                                                        .timestamp(timestamp));
+        runner.try_join();
+
+        const auto& messages = runner.get_messages();
+        REQUIRE(messages.size() == 1);
+        const auto& message = messages[0];
+        CHECK(message.get_payload() == payload);
+        CHECK(message.get_key() == key);
+        CHECK(message.get_topic() == KAFKA_TOPICS[0]);
+        CHECK(message.get_partition() == partition);
+        CHECK(!!message.get_error() == false);
+        REQUIRE(!!message.get_timestamp() == true);
+        CHECK(message.get_timestamp()->get_timestamp() == timestamp);
+    }
+
+    SECTION("message without message builder") {
+        const string payload = "Goodbye cruel world!";
+        const string key = "replay key";
+        const milliseconds timestamp{15};
+        Producer producer(config);
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
+                                                        .key(key)
+                                                        .payload(payload)
+                                                        .timestamp(timestamp));
+        runner.try_join();
+        ConsumerRunner runner2(consumer, 1, 1);
+
+        const auto& replay_messages = runner.get_messages();
+        REQUIRE(replay_messages.size() == 1);
+        const auto& replay_message = replay_messages[0];
+
+        //produce the same message again
+        producer.produce(replay_message);
+        runner2.try_join();
+
+        const auto& messages = runner2.get_messages();
+        REQUIRE(messages.size() == 1);
+        const auto& message = messages[0];
+        CHECK(message.get_payload() == payload);
+        CHECK(message.get_key() == key);
+        CHECK(message.get_topic() == KAFKA_TOPICS[0]);
+        CHECK(message.get_partition() == partition);
+        CHECK(!!message.get_error() == false);
+        REQUIRE(!!message.get_timestamp() == true);
+        CHECK(message.get_timestamp()->get_timestamp() == timestamp);
+    }
+
+    SECTION("callbacks") {
+        // Now create a producer and produce a message
+        const string payload = "Hello world! 3";
+        const string key = "hehe";
+        bool delivery_report_called = false;
+        config.set_delivery_report_callback([&](Producer&, const Message& msg) {
+            CHECK(msg.get_payload() == payload);
+            delivery_report_called = true;
+        });
+
+        TopicConfiguration topic_config;
+        topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key,
+                                                  int32_t partition_count) {
+            CHECK(msg_key == key);
+            CHECK(partition_count == KAFKA_NUM_PARTITIONS);
+            CHECK(topic.get_name() == KAFKA_TOPICS[0]);
+            return 0;
+        });
+        config.set_default_topic_configuration(topic_config);
+
+        Producer producer(config);
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).key(key).payload(payload));
+        while (producer.get_out_queue_length() > 0) {
+            producer.poll();
+        }
+        runner.try_join();
+
+        const auto& messages = runner.get_messages();
+        REQUIRE(messages.size() == 1);
+        const auto& message = messages[0];
+        CHECK(message.get_payload() == payload);
+        CHECK(message.get_key() == key);
+        CHECK(message.get_topic() == KAFKA_TOPICS[0]);
+        CHECK(message.get_partition() == partition);
+        CHECK(!!message.get_error() == false);
+        CHECK(delivery_report_called == true);
+    }
+
+    SECTION("partitioner callback") {
+        // Now create a producer and produce a message
+        const string payload = "Hello world! 4";
+        const string key = "hehe";
+        bool callback_called = false;
+
+        TopicConfiguration topic_config;
+        topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key,
+                                                  int32_t partition_count) {
+            CHECK(msg_key == key);
+            CHECK(partition_count == KAFKA_NUM_PARTITIONS);
+            CHECK(topic.get_name() == KAFKA_TOPICS[0]);
+            callback_called = true;
+            return 0;
+        });
+        config.set_default_topic_configuration(topic_config);
+        Producer producer(config);
+
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).key(key).payload(payload));
+        producer.poll();
+        runner.try_join();
+
+        const auto& messages = runner.get_messages();
+        REQUIRE(messages.size() == 1);
+        const auto& message = messages[0];
+        CHECK(message.get_partition() == partition);
+        CHECK(callback_called == true);
+    }
 }

-TEST_F(ProducerTest, OneMessageUsingKey) {
-    int partition = 0;
-
-    // Create a consumer and assign this topic/partition
-    Consumer consumer(make_consumer_config());
-    consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) });
-    ConsumerRunner runner(consumer, 1, 1);
-
-    // Now create a producer and produce a message
-    Producer producer(make_producer_config());
-    string payload = "Hello world! 2";
-    string key = "such key";
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).key(key).payload(payload));
-    runner.try_join();
-
-    const auto& messages = runner.get_messages();
-    ASSERT_EQ(1, messages.size());
-    const auto& message = messages[0];
-    EXPECT_EQ(Buffer(payload), message.get_payload());
-    EXPECT_EQ(Buffer(key), message.get_key());
-    EXPECT_EQ(KAFKA_TOPIC, message.get_topic());
-    EXPECT_EQ(partition, message.get_partition());
-    EXPECT_FALSE(message.get_error());
-    // NOTE: if this line fails, then you're using kafka 0.10+ and that's okay
-    EXPECT_FALSE(message.get_timestamp());
-}
-
-TEST_F(ProducerTest, MultipleMessagesUnassignedPartitions) {
+TEST_CASE("multiple messages", "[producer]") {
     size_t message_count = 10;
-    int partitions = 3;
     set<string> payloads;

     // Create a consumer and subscribe to this topic
     Consumer consumer(make_consumer_config());
-    consumer.subscribe({ KAFKA_TOPIC });
-    ConsumerRunner runner(consumer, message_count, partitions);
+    consumer.subscribe({ KAFKA_TOPICS[0] });
+    ConsumerRunner runner(consumer, message_count, KAFKA_NUM_PARTITIONS);

     // Now create a producer and produce a message
     Producer producer(make_producer_config());
-    string payload_base = "Hello world ";
+    const string payload_base = "Hello world ";
     for (size_t i = 0; i < message_count; ++i) {
-        string payload = payload_base + to_string(i);
+        const string payload = payload_base + to_string(i);
         payloads.insert(payload);
-        producer.produce(MessageBuilder(KAFKA_TOPIC).payload(payload));
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).payload(payload));
     }
     runner.try_join();

     const auto& messages = runner.get_messages();
-    ASSERT_EQ(message_count, messages.size());
+    REQUIRE(messages.size() == message_count);
     for (const auto& message : messages) {
-        EXPECT_EQ(KAFKA_TOPIC, message.get_topic());
-        EXPECT_EQ(1, payloads.erase(message.get_payload()));
-        EXPECT_FALSE(message.get_error());
-        EXPECT_FALSE(message.get_key());
-        EXPECT_GE(message.get_partition(), 0);
-        EXPECT_LT(message.get_partition(), 3);
+        CHECK(message.get_topic() == KAFKA_TOPICS[0]);
+        CHECK(payloads.erase(message.get_payload()) == 1);
+        CHECK(!!message.get_error() == false);
+        CHECK(!!message.get_key() == false);
+        CHECK(message.get_partition() >= 0);
+        CHECK(message.get_partition() < KAFKA_NUM_PARTITIONS);
     }
 }

-TEST_F(ProducerTest, Callbacks) {
-    int partition = 0;
-
-    // Create a consumer and assign this topic/partition
-    Consumer consumer(make_consumer_config());
-    consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) });
-    ConsumerRunner runner(consumer, 1, 1);
-
-    // Now create a producer and produce a message
-    string payload = "Hello world! 3";
-    string key = "hehe";
-    bool delivery_report_called = false;
-    Configuration config = make_producer_config();
-    config.set_delivery_report_callback([&](Producer&, const Message& msg) {
-        EXPECT_EQ(Buffer(payload), msg.get_payload());
-        delivery_report_called = true;
-    });
-
-    TopicConfiguration topic_config;
-    topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key,
-                                              int32_t partition_count) {
-        EXPECT_EQ(Buffer(key), msg_key);
-        EXPECT_EQ(3, partition_count);
-        EXPECT_EQ(KAFKA_TOPIC, topic.get_name());
-        return 0;
-    });
-    config.set_default_topic_configuration(topic_config);
-
-    Producer producer(move(config));
-    producer.produce(MessageBuilder(KAFKA_TOPIC).key(key).payload(payload));
-    producer.poll();
+TEST_CASE("multiple sync messages", "[producer][buffered_producer][sync]") {
+    size_t message_count = 10;
+    set<string> payloads;
+
+    // Create a consumer and subscribe to this topic
+    Consumer consumer(make_consumer_config());
+    consumer.subscribe({ KAFKA_TOPICS[0] });
+    ConsumerRunner runner(consumer, message_count, KAFKA_NUM_PARTITIONS);
+
+    // Now create a producer and produce a message
+    BufferedProducer<string> producer(make_producer_config());
+    producer.set_produce_success_callback(dr_callback);
+    const string payload_base = "Hello world ";
+    for (size_t i = 0; i < message_count; ++i) {
+        const string payload = payload_base + to_string(i);
+        payloads.insert(payload);
+        producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0]).payload(payload).user_data(&dr_data[i]));
+    }
     runner.try_join();

     const auto& messages = runner.get_messages();
-    ASSERT_EQ(1, messages.size());
-    const auto& message = messages[0];
-    EXPECT_EQ(Buffer(payload), message.get_payload());
-    EXPECT_EQ(Buffer(key), message.get_key());
-    EXPECT_EQ(KAFKA_TOPIC, message.get_topic());
-    EXPECT_EQ(partition, message.get_partition());
-    EXPECT_FALSE(message.get_error());
-    EXPECT_TRUE(delivery_report_called);
+    REQUIRE(messages.size() == message_count);
+    for (size_t i = 0; i < messages.size(); ++i) {
+        const auto& message = messages[i];
+        CHECK(message.get_topic() == KAFKA_TOPICS[0]);
+        CHECK(payloads.erase(message.get_payload()) == 1);
+        CHECK(!!message.get_error() == false);
+        CHECK(!!message.get_key() == false);
+        CHECK(message.get_partition() >= 0);
+        CHECK(message.get_partition() < KAFKA_NUM_PARTITIONS);
+    }
 }

-TEST_F(ProducerTest, PartitionerCallbackOnDefaultTopicConfig) {
-    int partition = 0;
-
-    // Create a consumer and assign this topic/partition
-    Consumer consumer(make_consumer_config());
-    consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) });
-    ConsumerRunner runner(consumer, 1, 1);
-
-    // Now create a producer and produce a message
-    string payload = "Hello world! 4";
-    string key = "hehe";
-    bool callback_called = false;
-
-    Configuration config = make_producer_config();
-    TopicConfiguration topic_config;
-    topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key,
-                                              int32_t partition_count) {
-        EXPECT_EQ(Buffer(key), msg_key);
-        EXPECT_EQ(3, partition_count);
-        EXPECT_EQ(KAFKA_TOPIC, topic.get_name());
-        callback_called = true;
-        return 0;
-    });
-    config.set_default_topic_configuration(topic_config);
-
-    Producer producer(move(config));
-    producer.produce(MessageBuilder(KAFKA_TOPIC).key(key).payload(payload));
-    producer.poll();
+TEST_CASE("replay sync messages with errors", "[producer][buffered_producer][sync]") {
+    size_t num_retries = 4;
+
+    // Create a consumer and subscribe to this topic
+    Consumer consumer(make_consumer_config());
+    consumer.subscribe({ KAFKA_TOPICS[0] });
+    ConsumerRunner runner(consumer, 2*(num_retries+1), KAFKA_NUM_PARTITIONS);
+
+    // Now create a producer and produce a message
+    ErrorProducer<string> producer(make_producer_config(), BufferedProducer<string>::TestParameters{true, false});
+    producer.set_produce_failure_callback(dr_failure_callback);
+    producer.set_max_number_retries(num_retries);
+    string payload = "Hello world";
+    MessageBuilder builder(KAFKA_TOPICS[0]);
+    builder.payload(payload).user_data(&dr_data[0]);
+
+    //Produce the same message twice
+    producer.sync_produce(builder);
+    producer.sync_produce(builder);
     runner.try_join();

     const auto& messages = runner.get_messages();
-    ASSERT_EQ(1, messages.size());
-    const auto& message = messages[0];
-    EXPECT_EQ(partition, message.get_partition());
-    EXPECT_TRUE(callback_called);
+    REQUIRE(messages.size() == 2*(num_retries+1));
+    for (size_t i = 0; i < messages.size(); ++i) {
+        const auto& message = messages[i];
+        CHECK(message.get_topic() == KAFKA_TOPICS[0]);
+        CHECK(message.get_payload() == payload);
+        CHECK(!!message.get_error() == false);
+        CHECK(!!message.get_key() == false);
+        CHECK(message.get_partition() >= 0);
+        CHECK(message.get_partition() < KAFKA_NUM_PARTITIONS);
+    }
 }

-TEST_F(ProducerTest, BufferedProducer) {
+TEST_CASE("replay async messages with errors", "[producer][buffered_producer][async]") {
+    size_t num_retries = 4;
+    int exit_flag = 0;
+
+    // Now create a producer and produce a message
+    ErrorProducer<string> producer(make_producer_config(),
+                                   BufferedProducer<string>::TestParameters{false, true});
+    producer.set_max_number_retries(num_retries);
+    thread flusher_thread(async_flusher_run, ref(producer), ref(exit_flag), 0);
+    string payload = "Hello world";
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).payload(payload));
+    this_thread::sleep_for(milliseconds(2000));
+    exit_flag = 1;
+    flusher_thread.join();
+    REQUIRE(producer.get_total_messages_produced() == 0);
+    CHECK(producer.get_total_messages_dropped() == 1);
+}
+
+TEST_CASE("buffered producer", "[producer][buffered_producer]") {
     int partition = 0;

     // Create a consumer and assign this topic/partition
     Consumer consumer(make_consumer_config());
-    consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) });
+    consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
     ConsumerRunner runner(consumer, 3, 1);

     // Now create a buffered producer and produce two messages
     BufferedProducer<string> producer(make_producer_config());
-    string payload = "Hello world! 2";
-    string key = "such key";
-    producer.add_message(MessageBuilder(KAFKA_TOPIC).partition(partition)
+    const string payload = "Hello world! 2";
+    const string key = "such key";
+    producer.add_message(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
                                                      .key(key)
                                                      .payload(payload));
-    producer.add_message(producer.make_builder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.add_message(producer.make_builder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     producer.flush();
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     producer.wait_for_acks();
     // Add another one but then clear it
-    producer.add_message(producer.make_builder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.add_message(producer.make_builder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     producer.clear();
     runner.try_join();

     const auto& messages = runner.get_messages();
-    ASSERT_EQ(3, messages.size());
+    REQUIRE(messages.size() == 3);
     const auto& message = messages[0];
-    EXPECT_EQ(Buffer(key), message.get_key());
-    EXPECT_EQ(KAFKA_TOPIC, message.get_topic());
-    EXPECT_EQ(partition, message.get_partition());
-    EXPECT_FALSE(message.get_error());
+    CHECK(message.get_key() == key);
+    CHECK(message.get_topic() == KAFKA_TOPICS[0]);
+    CHECK(message.get_partition() == partition);
+    CHECK(!!message.get_error() == false);

-    EXPECT_FALSE(messages[1].get_key());
-    EXPECT_FALSE(messages[2].get_key());
+    CHECK(!!messages[1].get_key() == false);
+    CHECK(!!messages[2].get_key() == false);
     for (const auto& message : messages) {
-        EXPECT_EQ(Buffer(payload), message.get_payload());
+        CHECK(message.get_payload() == payload);
     }
 }

+TEST_CASE("buffered producer with limited buffer", "[producer]") {
+    int partition = 0;
+    int num_messages = 4;
+
+    // Create a consumer and assign this topic/partition
+    Consumer consumer(make_consumer_config());
+    consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
+    ConsumerRunner runner(consumer, 3, 1);
+
+    // Now create a buffered producer and produce two messages
+    BufferedProducer<string> producer(make_producer_config());
+    const string payload = "Hello world! 2";
+    const string key = "such key";
+    REQUIRE(producer.get_buffer_size() == 0);
+    REQUIRE(producer.get_max_buffer_size() == -1);
+
+    // Limit the size of the internal buffer
+    producer.set_max_buffer_size(num_messages-1);
+    while (num_messages--) {
+        producer.add_message(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).key(key).payload(payload));
+    }
+    REQUIRE(producer.get_buffer_size() == 1);
+
+    // Finish the runner
+    runner.try_join();
+
+    // Validate messages received
+    const auto& messages = runner.get_messages();
+    REQUIRE(messages.size() == producer.get_max_buffer_size());
+}
+
+TEST_CASE("multi-threaded buffered producer", "[producer][buffered_producer]") {
+    int partition = 0;
+    vector<thread> threads;
+    int num_messages = 50;
+    int num_flush = 10;
+    int exit_flag = 0;
+    condition_variable clear;
+
+    // Create a consumer and assign this topic/partition
+    Consumer consumer(make_consumer_config());
+    consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
+    ConsumerRunner runner(consumer, num_messages, 1);
+
+    BufferedProducer<string> producer(make_producer_config());
+
+    threads.push_back(thread(producer_run, ref(producer), ref(exit_flag), ref(clear), num_messages, partition));
+    threads.push_back(thread(flusher_run, ref(producer), ref(exit_flag), num_flush));
+
+    // Wait for completion
+    runner.try_join();
+    for (auto&& thread : threads) {
+        thread.join();
+    }
+    const auto& messages = runner.get_messages();
+    REQUIRE(messages.size() == num_messages);
+    REQUIRE(producer.get_flushes_in_progress() == 0);
+    REQUIRE(producer.get_pending_acks() == 0);
+    REQUIRE(producer.get_total_messages_produced() == num_messages);
+    REQUIRE(producer.get_buffer_size() == 0);
+}
+
+TEST_CASE("clear multi-threaded buffered producer", "[producer][buffered_producer]") {
+    int partition = 0;
+    vector<thread> threads;
+    int num_messages = 50;
+    int exit_flag = 0;
+    condition_variable clear;
+
+    BufferedProducer<string> producer(make_producer_config());
+
+    threads.push_back(thread(producer_run, ref(producer), ref(exit_flag), ref(clear), num_messages, partition));
+    threads.push_back(thread(clear_run, ref(producer), ref(clear)));
+
+    // Wait for completion
+    for (auto&& thread : threads) {
+        thread.join();
+    }
+
+    REQUIRE(producer.get_total_messages_produced() == 0);
+    REQUIRE(producer.get_flushes_in_progress() == 0);
+    REQUIRE(producer.get_pending_acks() == 0);
+    REQUIRE(producer.get_buffer_size() < num_messages);
+}
tests/roundrobin_poll_test.cpp (new file, 164 lines)
@@ -0,0 +1,164 @@
+#include <vector>
+#include <thread>
+#include <set>
+#include <mutex>
+#include <chrono>
+#include <iterator>
+#include <condition_variable>
+#include <catch.hpp>
+#include <memory>
+#include <iostream>
+#include "cppkafka/cppkafka.h"
+#include "test_utils.h"
+
+using std::vector;
+using std::move;
+using std::string;
+using std::thread;
+using std::set;
+using std::mutex;
+using std::tie;
+using std::condition_variable;
+using std::lock_guard;
+using std::unique_lock;
+using std::unique_ptr;
+using std::make_move_iterator;
+using std::chrono::seconds;
+using std::chrono::milliseconds;
+using std::chrono::system_clock;
+
+using namespace cppkafka;
+
+//==================================================================================
+// Helper functions
+//==================================================================================
+static Configuration make_producer_config() {
+    Configuration config;
+    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    return config;
+}
+
+static Configuration make_consumer_config(const string& group_id = "rr_consumer_test") {
+    Configuration config;
+    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    config.set("enable.auto.commit", true);
+    config.set("enable.auto.offset.store", true );
+    config.set("auto.commit.interval.ms", 100);
+    config.set("group.id", group_id);
+    return config;
+}
+
+static vector<int> make_roundrobin_partition_vector(int total_messages) {
+    vector<int> partition_order;
+    for (int i = 0, partition = 0; i < total_messages+1; ++i) {
+        if ((i % KAFKA_NUM_PARTITIONS) == 0) {
+            partition = 0;
+        }
+        partition_order.push_back(partition++);
+    }
+    return partition_order;
+}
+
+//========================================================================
+// TESTS
+//========================================================================
+
+TEST_CASE("serial consumer test", "[roundrobin consumer]") {
+    int messages_per_partition = 3;
+    int total_messages = KAFKA_NUM_PARTITIONS * messages_per_partition;
+
+    // Create a consumer and subscribe to the topic
+    Consumer consumer(make_consumer_config());
+    TopicPartitionList partitions;
+    for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace_back(KAFKA_TOPICS[0], i++));
+    consumer.assign(partitions);
+
+    // Start the runner with the original consumer
+    ConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);
+
+    // Produce messages so we stop the consumer
+    Producer producer(make_producer_config());
+    string payload = "Serial";
+
+    // push 3 messages in each partition
+    for (int i = 0; i < total_messages; ++i) {
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
+    }
+    producer.flush();
+    runner.try_join();
+
+    // Check that we have all messages
+    REQUIRE(runner.get_messages().size() == total_messages);
+
+    // messages should have sequential identical partition ids in groups of <messages_per_partition>
+    int expected_partition;
+    for (int i = 0; i < total_messages; ++i) {
+        if ((i % messages_per_partition) == 0) {
+            expected_partition = runner.get_messages()[i].get_partition();
+        }
+        REQUIRE(runner.get_messages()[i].get_partition() == expected_partition);
+        REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
+    }
+}
+
+TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
+    TopicPartitionList assignment;
+    int messages_per_partition = 3;
+    int total_messages = KAFKA_NUM_PARTITIONS * messages_per_partition;
+
+    // Create a consumer and subscribe to the topic
+    PollStrategyAdapter consumer(make_consumer_config());
+    consumer.subscribe({ KAFKA_TOPICS[0] });
+    consumer.add_polling_strategy(unique_ptr<PollInterface>(new RoundRobinPollStrategy(consumer)));
+
+    PollConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);
+
+    // Produce messages so we stop the consumer
+    Producer producer(make_producer_config());
+    string payload = "RoundRobin";
+
+    // push 3 messages in each partition
+    for (int i = 0; i < total_messages; ++i) {
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
+    }
+    producer.flush();
+    runner.try_join();
+
+    // Check that we have all messages
+    REQUIRE(runner.get_messages().size() == total_messages);
+
+    // Check that we have one message from each partition in desired order
+    vector<int> partition_order = make_roundrobin_partition_vector(total_messages+KAFKA_NUM_PARTITIONS);
+    int partition_idx;
+    for (int i = 0; i < total_messages; ++i) {
+        if (i == 0) {
+            // find first polled partition index
+            partition_idx = runner.get_messages()[i].get_partition();
+        }
+        REQUIRE(runner.get_messages()[i].get_partition() == partition_order[i+partition_idx]);
+        REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
+    }
+
+    //============ resume original poll strategy =============//
+
+    //validate that once the round robin strategy is deleted, normal poll works as before
+    consumer.delete_polling_strategy();
+
+    ConsumerRunner serial_runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);
+
+    payload = "SerialPolling";
+    // push 3 messages in each partition
+    for (int i = 0; i < total_messages; ++i) {
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
+    }
+    producer.flush();
+    serial_runner.try_join();
+
+    // Check that we have all messages
+    REQUIRE(serial_runner.get_messages().size() == total_messages);
+
+    for (int i = 0; i < total_messages; ++i) {
+        REQUIRE((string)serial_runner.get_messages()[i].get_payload() == payload);
+    }
+}
tests/test_main.cpp (new file, 77 lines)
@@ -0,0 +1,77 @@
#include <chrono>
|
||||||
|
#define CATCH_CONFIG_RUNNER
|
||||||
|
#include <catch.hpp>
|
||||||
|
|
||||||
|
using std::string;
|
||||||
|
using std::chrono::steady_clock;
|
||||||
|
using std::chrono::milliseconds;
|
||||||
|
using std::chrono::duration_cast;
|
||||||
|
|
||||||
|
using Catch::ConsoleReporter;
|
||||||
|
using Catch::ReporterConfig;
|
||||||
|
using Catch::ReporterPreferences;
|
||||||
|
using Catch::TestCaseInfo;
|
||||||
|
using Catch::TestCaseStats;
|
||||||
|
using Catch::Totals;
|
||||||
|
using Catch::Session;
|
||||||
|
|
||||||
|
std::vector<std::string> KAFKA_TOPICS = {"cppkafka_test1", "cppkafka_test2"};
|
||||||
|
int KAFKA_NUM_PARTITIONS = 3;
|
||||||
|
|
||||||
|
namespace cppkafka {
|
||||||
|
|
||||||
|
class InstantTestReporter : public ConsoleReporter {
|
||||||
|
public:
|
||||||
|
using ClockType = steady_clock;
|
||||||
|
|
||||||
|
InstantTestReporter(const ReporterConfig& config)
|
||||||
|
: ConsoleReporter(config) {
|
||||||
|
}
|
||||||
|
|
||||||
|
static string getDescription() {
|
||||||
|
return "Reports the tests' progress as they run";
|
||||||
|
}
|
||||||
|
|
||||||
|
ReporterPreferences getPreferences() const override {
|
||||||
|
ReporterPreferences output;
|
||||||
|
output.shouldRedirectStdOut = false;
|
||||||
|
return output;
|
||||||
|
}
|
||||||
|
|
||||||
|
void testCaseStarting(const TestCaseInfo& info) override {
|
||||||
|
ConsoleReporter::testCaseStarting(info);
|
||||||
|
stream << "Running test \"" << info.name << "\" @ " << info.lineInfo << "\n";
|
||||||
|
test_start_ts_ = ClockType::now();
|
||||||
|
}
|
||||||
|
|
||||||
|
void testCaseEnded(const TestCaseStats& stats) override {
|
||||||
|
const Totals& totals = stats.totals;
|
||||||
|
const size_t totalTestCases = totals.assertions.passed + totals.assertions.failed;
|
||||||
|
const auto elapsed = ClockType::now() - test_start_ts_;
|
||||||
|
stream << "Done. " << totals.assertions.passed << "/" << totalTestCases
|
||||||
|
<< " assertions succeeded in " << duration_cast<milliseconds>(elapsed).count()
|
||||||
|
<< "ms\n";
|
||||||
|
}
|
||||||
|
private:
|
||||||
|
ClockType::time_point test_start_ts_;
|
||||||
|
};
|
||||||
|
|
||||||
|
CATCH_REGISTER_REPORTER("instant", InstantTestReporter)
|
||||||
|
|
||||||
|
} // cppkafka
|
||||||
|
|
||||||
|
int main(int argc, char* argv[]) {
|
||||||
|
Session session;
|
||||||
|
|
||||||
|
int returnCode = session.applyCommandLine( argc, argv );
|
||||||
|
if (returnCode != 0) {
|
||||||
|
return returnCode;
|
||||||
|
}
|
||||||
|
if (session.configData().reporterNames.empty()) {
|
||||||
|
// Set our reporter as the default one
|
||||||
|
session.configData().reporterNames.emplace_back("instant");
|
||||||
|
}
|
||||||
|
|
||||||
|
int numFailed = session.run();
|
||||||
|
return numFailed;
|
||||||
|
}
|
||||||
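For illustration, here is a minimal, hypothetical Catch test case (the names are made up; it is not part of this diff) showing what the "instant" reporter registered above prints. The reporter is the default set in main, and can also be selected explicitly with Catch's -r/--reporter command-line option:

#include <catch.hpp>

// Hypothetical example; under the "instant" reporter the output is roughly:
//   Running test "sanity check" @ example_test.cpp:5
//   Done. 1/1 assertions succeeded in 0ms
TEST_CASE("sanity check", "[example]") {
    CHECK(1 + 1 == 2);
}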
@@ -1,77 +0,0 @@
#include <mutex>
#include <chrono>
#include <condition_variable>
#include "test_utils.h"

using std::vector;
using std::move;
using std::thread;
using std::mutex;
using std::lock_guard;
using std::unique_lock;
using std::condition_variable;

using std::chrono::system_clock;
using std::chrono::milliseconds;
using std::chrono::seconds;

using cppkafka::Consumer;
using cppkafka::Message;

ConsumerRunner::ConsumerRunner(Consumer& consumer, size_t expected, size_t partitions)
: consumer_(consumer) {
    bool booted = false;
    mutex mtx;
    condition_variable cond;
    thread_ = thread([&, expected, partitions]() {
        consumer_.set_timeout(milliseconds(500));
        size_t number_eofs = 0;
        auto start = system_clock::now();
        while (system_clock::now() - start < seconds(20)) {
            if (expected > 0 && messages_.size() == expected) {
                break;
            }
            if (expected == 0 && number_eofs >= partitions) {
                break;
            }
            Message msg = consumer_.poll();
            if (msg && number_eofs != partitions &&
                msg.get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                number_eofs++;
                if (number_eofs == partitions) {
                    lock_guard<mutex> _(mtx);
                    booted = true;
                    cond.notify_one();
                }
            }
            else if (msg && !msg.get_error() && number_eofs == partitions) {
                messages_.push_back(move(msg));
            }
        }
        if (number_eofs < partitions) {
            lock_guard<mutex> _(mtx);
            booted = true;
            cond.notify_one();
        }
    });

    unique_lock<mutex> lock(mtx);
    while (!booted) {
        cond.wait(lock);
    }
}

ConsumerRunner::~ConsumerRunner() {
    try_join();
}

const vector<Message>& ConsumerRunner::get_messages() const {
    return messages_;
}

void ConsumerRunner::try_join() {
    if (thread_.joinable()) {
        thread_.join();
    }
}
@@ -4,21 +4,62 @@
 #include <thread>
 #include <vector>
 #include "cppkafka/consumer.h"
+#include "cppkafka/utils/roundrobin_poll_strategy.h"
+#include "cppkafka/utils/consumer_dispatcher.h"
+
-class ConsumerRunner {
+extern const std::vector<std::string> KAFKA_TOPICS;
+extern const int KAFKA_NUM_PARTITIONS;
+
+using namespace cppkafka;
+
+//==================================================================================
+// BasicConsumerRunner
+//==================================================================================
+template <typename ConsumerType>
+class BasicConsumerRunner {
 public:
-    ConsumerRunner(cppkafka::Consumer& consumer, size_t expected, size_t partitions);
-    ConsumerRunner(const ConsumerRunner&) = delete;
-    ConsumerRunner& operator=(const ConsumerRunner&) = delete;
-    ~ConsumerRunner();
+    BasicConsumerRunner(ConsumerType& consumer,
+                        size_t expected,
+                        size_t partitions);
+    BasicConsumerRunner(const BasicConsumerRunner&) = delete;
+    BasicConsumerRunner& operator=(const BasicConsumerRunner&) = delete;
+    ~BasicConsumerRunner();
 
     const std::vector<cppkafka::Message>& get_messages() const;
 
     void try_join();
 private:
-    cppkafka::Consumer& consumer_;
+    ConsumerType& consumer_;
     std::thread thread_;
     std::vector<cppkafka::Message> messages_;
 };
+
+//==================================================================================
+// PollStrategyAdapter
+//==================================================================================
+/**
+ * \brief Specific implementation which can be used with other
+ *        util classes such as BasicConsumerDispatcher.
+ */
+class PollStrategyAdapter : public Consumer {
+public:
+    PollStrategyAdapter(Configuration config);
+    void add_polling_strategy(std::unique_ptr<PollInterface> poll_strategy);
+    void delete_polling_strategy();
+    Message poll();
+    Message poll(std::chrono::milliseconds timeout);
+    MessageList poll_batch(size_t max_batch_size);
+    MessageList poll_batch(size_t max_batch_size,
+                           std::chrono::milliseconds timeout);
+    void set_timeout(std::chrono::milliseconds timeout);
+    std::chrono::milliseconds get_timeout();
+private:
+    std::unique_ptr<PollInterface> strategy_;
+};
+
+using PollConsumerRunner = BasicConsumerRunner<PollStrategyAdapter>;
+using ConsumerRunner = BasicConsumerRunner<Consumer>;
+
+#include "test_utils_impl.h"
 
 #endif // CPPKAFKA_TEST_UTILS_H
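The adapter and the aliases above let the same templated runner drive either a plain Consumer or a strategy-driven one. A minimal usage sketch, with a placeholder broker address and group id (neither is taken from this diff):

// Minimal sketch, assuming a reachable broker; "localhost:9092" and
// "test_group" are placeholders. Polls go through the installed strategy
// until it is deleted, then fall back to Consumer::poll().
#include "test_utils.h"

void poll_strategy_demo() {
    Configuration config = {
        { "metadata.broker.list", "localhost:9092" },
        { "group.id", "test_group" }
    };
    PollStrategyAdapter consumer(config);
    consumer.subscribe({ KAFKA_TOPICS[0] });
    consumer.add_polling_strategy(
        std::unique_ptr<PollInterface>(new RoundRobinPollStrategy(consumer)));
    Message round_robin_msg = consumer.poll();  // rotates across assigned partitions
    consumer.delete_polling_strategy();
    Message serial_msg = consumer.poll();       // plain serial polling again
}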
172 tests/test_utils_impl.h Normal file
@@ -0,0 +1,172 @@
#include <mutex>
#include <chrono>
#include <condition_variable>
#include "test_utils.h"
#include "cppkafka/utils/consumer_dispatcher.h"

using std::vector;
using std::move;
using std::thread;
using std::mutex;
using std::lock_guard;
using std::unique_lock;
using std::condition_variable;
using std::chrono::system_clock;
using std::chrono::milliseconds;
using std::chrono::seconds;

using cppkafka::Consumer;
using cppkafka::BasicConsumerDispatcher;

using cppkafka::Message;
using cppkafka::MessageList;
using cppkafka::TopicPartition;

//==================================================================================
// BasicConsumerRunner
//==================================================================================
template <typename ConsumerType>
BasicConsumerRunner<ConsumerType>::BasicConsumerRunner(ConsumerType& consumer,
                                                       size_t expected,
                                                       size_t partitions)
: consumer_(consumer) {
    bool booted = false;
    mutex mtx;
    condition_variable cond;
    thread_ = thread([&, expected, partitions]() {
        consumer_.set_timeout(milliseconds(500));
        size_t number_eofs = 0;
        auto start = system_clock::now();
        BasicConsumerDispatcher<ConsumerType> dispatcher(consumer_);
        dispatcher.run(
            // Message callback
            [&](Message msg) {
                if (number_eofs == partitions) {
                    messages_.push_back(move(msg));
                }
            },
            // EOF callback
            [&](typename BasicConsumerDispatcher<ConsumerType>::EndOfFile,
                const TopicPartition& topic_partition) {
                if (number_eofs != partitions) {
                    number_eofs++;
                    if (number_eofs == partitions) {
                        lock_guard<mutex> _(mtx);
                        booted = true;
                        cond.notify_one();
                    }
                }
            },
            // Event callback, invoked on every dispatcher iteration
            [&](typename BasicConsumerDispatcher<ConsumerType>::Event) {
                if (expected > 0 && messages_.size() == expected) {
                    dispatcher.stop();
                }
                if (expected == 0 && number_eofs >= partitions) {
                    dispatcher.stop();
                }
                if (system_clock::now() - start >= seconds(20)) {
                    dispatcher.stop();
                }
            }
        );
        // The dispatcher has stopped; make sure the constructor unblocks even
        // if we never saw an EOF on every partition
        if (number_eofs < partitions) {
            lock_guard<mutex> _(mtx);
            booted = true;
            cond.notify_one();
        }
    });

    unique_lock<mutex> lock(mtx);
    while (!booted) {
        cond.wait(lock);
    }
}

template <typename ConsumerType>
BasicConsumerRunner<ConsumerType>::~BasicConsumerRunner() {
    try_join();
}

template <typename ConsumerType>
const MessageList& BasicConsumerRunner<ConsumerType>::get_messages() const {
    return messages_;
}

template <typename ConsumerType>
void BasicConsumerRunner<ConsumerType>::try_join() {
    if (thread_.joinable()) {
        thread_.join();
    }
}

//==================================================================================
// PollStrategyAdapter
//==================================================================================
inline
PollStrategyAdapter::PollStrategyAdapter(Configuration config)
: Consumer(config) {
}

inline
void PollStrategyAdapter::add_polling_strategy(std::unique_ptr<PollInterface> poll_strategy) {
    strategy_ = std::move(poll_strategy);
}

inline
void PollStrategyAdapter::delete_polling_strategy() {
    strategy_.reset();
}

inline
Message PollStrategyAdapter::poll() {
    if (strategy_) {
        return strategy_->poll();
    }
    return Consumer::poll();
}

inline
Message PollStrategyAdapter::poll(milliseconds timeout) {
    if (strategy_) {
        return strategy_->poll(timeout);
    }
    return Consumer::poll(timeout);
}

inline
MessageList PollStrategyAdapter::poll_batch(size_t max_batch_size) {
    if (strategy_) {
        return strategy_->poll_batch(max_batch_size);
    }
    return Consumer::poll_batch(max_batch_size);
}

inline
MessageList PollStrategyAdapter::poll_batch(size_t max_batch_size,
                                            milliseconds timeout) {
    if (strategy_) {
        return strategy_->poll_batch(max_batch_size, timeout);
    }
    return Consumer::poll_batch(max_batch_size, timeout);
}

inline
void PollStrategyAdapter::set_timeout(milliseconds timeout) {
    if (strategy_) {
        strategy_->set_timeout(timeout);
    }
    else {
        Consumer::set_timeout(timeout);
    }
}

inline
milliseconds PollStrategyAdapter::get_timeout() {
    if (strategy_) {
        return strategy_->get_timeout();
    }
    return Consumer::get_timeout();
}
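A note on the structure of this header: PollStrategyAdapter is a small strategy-pattern wrapper. Every polling entry point first consults the optional strategy_ and only falls back to the base Consumer implementation when no strategy is installed, which is what lets the round-robin test install RoundRobinPollStrategy, exercise it, and then delete it to recover the stock behavior. The non-template definitions are marked inline because this implementation header is pulled in by test_utils.h and may therefore end up in several translation units.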
@@ -1,18 +1,15 @@
 #include <sstream>
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/topic_partition_list.h"
 #include "cppkafka/topic_partition.h"
 
 using std::ostringstream;
+using std::set;
+using std::string;
 
 using namespace cppkafka;
 
-class TopicPartitionListTest : public testing::Test {
-public:
-
-};
-
-TEST_F(TopicPartitionListTest, Conversion) {
+TEST_CASE("rdkafka conversion", "[topic_partition]") {
     TopicPartitionList list1;
     list1.push_back("foo");
     list1.push_back({ "bar", 2 });
@@ -20,29 +17,71 @@ TEST_F(TopicPartitionListTest, Conversion) {
 
     TopicPartitionList list2 = convert(convert(list1));
 
-    EXPECT_EQ(list1.size(), list2.size());
+    CHECK(list1.size() == list2.size());
     for (size_t i = 0; i < list1.size(); ++i) {
         const auto& item1 = list1[i];
         const auto& item2 = list2[i];
-        EXPECT_EQ(item1.get_topic(), item2.get_topic());
-        EXPECT_EQ(item1.get_partition(), item2.get_partition());
-        EXPECT_EQ(item1.get_offset(), item2.get_offset());
+        CHECK(item1.get_topic() == item2.get_topic());
+        CHECK(item1.get_partition() == item2.get_partition());
+        CHECK(item1.get_offset() == item2.get_offset());
     }
 }
 
-TEST_F(TopicPartitionListTest, AsString) {
+TEST_CASE("topic partition to string", "[topic_partition]") {
     ostringstream output;
     TopicPartition topic_partition("foo", 5);
     output << topic_partition;
-    EXPECT_EQ("foo[5]", output.str());
+    CHECK(output.str() == "foo[5:#]");
 }
 
-TEST_F(TopicPartitionListTest, ListAsString) {
+TEST_CASE("topic partition list to string", "[topic_partition]") {
     ostringstream output;
     TopicPartitionList list;
     list.push_back("foo");
     list.push_back({ "bar", 2 });
+    list.push_back({ "foobar", 3, 4 });
 
     output << list;
-    EXPECT_EQ("[ foo[-1], bar[2] ]", output.str());
+    CHECK(output.str() == "[ foo[-1:#], bar[2:#], foobar[3:4] ]");
 }
+
+TEST_CASE("find matches by topic", "[topic_partition]") {
+    const TopicPartitionList list = {
+        { "foo", 0 },
+        { "bar", 3 },
+        { "fb", 1 },
+        { "foo", 1 },
+        { "fb", 2 },
+        { "other", 1 },
+        { "a", 1 }
+    };
+
+    const TopicPartitionList expected = {
+        { "foo", 0 },
+        { "fb", 1 },
+        { "foo", 1 },
+        { "fb", 2 },
+    };
+    const TopicPartitionList subset = find_matches(list, set<string>{"foo", "fb"});
+    CHECK(subset == expected);
+}
+
+TEST_CASE("find matches by id", "[topic_partition]") {
+    const TopicPartitionList list = {
+        { "foo", 2 },
+        { "foo", 3 },
+        { "foo", 4 },
+        { "foo", 5 },
+        { "foo", 6 },
+        { "foo", 7 },
+        { "foo", 8 }
+    };
+
+    const TopicPartitionList expected = {
+        { "foo", 2 },
+        { "foo", 5 },
+        { "foo", 8 },
+    };
+    const TopicPartitionList subset = find_matches(list, set<int>{2,5,8});
+    CHECK(subset == expected);
+}
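The expected strings in these assertions use the format topic[partition:offset], where "#" marks an offset that was never set. A small hypothetical snippet (not part of the diff) showing where those strings come from:

#include <iostream>
#include "cppkafka/topic_partition.h"

// Hypothetical demo of the operator<< format the tests assert on.
int main() {
    cppkafka::TopicPartition no_offset("foo", 5);          // prints foo[5:#]
    cppkafka::TopicPartition with_offset("foobar", 3, 4);  // prints foobar[3:4]
    std::cout << no_offset << "\n" << with_offset << "\n";
    return 0;
}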
1 third_party/Catch2 vendored Submodule
Submodule third_party/Catch2 added at d2d8455b57
1 third_party/googletest vendored
Submodule third_party/googletest deleted from 0a439623f7