Mirror of https://github.com/Telecominfraproject/wlan-cloud-lib-cppkafka.git (synced 2025-11-05 13:07:56 +00:00)
## Compare commits — 200 commits

```
f128c1764b 03b12561af 14f5261806 d7e08fa69c 76d175e354 93893e1386 fc97759d93 025d8ed7e1 5a119f689f dd3966fb45
dabb2d3aa8 57a599d99c 544972e48f 847f530e6e 2c24b6e6b9 01bd066d57 8fd0ef8fc5 85f1c0fcb1 62ec1d82c1 5e4b350806
e932d9567f a2056c36bf 8cfd4595f6 f117720f66 e5dfd5085c a032f9a1e8 b06e64ef5b a48bf89292 31a58d433e 4467743340
e8b4f5a8e9 eb1105d839 098735413b 91a3be0a8f e2000b0741 ca3a1321ec 244726c251 7aa60a1409 487585fd17 3b67ba072a
14423bba40 006642cdb2 679f58dee3 b2b0d16fee 2ce0ae4a62 935a34238b 5a057e4c99 ffcf8956bd 2287e0994b 92e46aa6cb
a4532ed336 68ae525eba e401e97b40 7d097df34d fbbd5bc5a6 bda2f4156d f1de729d4e 81ce56a1bd a2a46f0ec8 c3b4580fef
de06b7ad4e 9a0f196d34 e5aec82ddf 58111bdf62 fd19648d5a 4a3ec91f87 a85a87bb9b a357529cc0 dd6ec44c27 20b806037b
ad800a5765 4bddb2241c 097184c648 bbc78f8dbb 18d0b0c00b 591e8abe4f ee30fabc2a 0d2356f7dd 11a6e4213b c4b6a95438
40e8559158 d20cab69f3 c733e0b8d8 07b3c4957d 107cff7ed7 1a981f2674 8eb7751ff3 4b25f928a1 6adf1e82c9 bb0beb6db6
bbc3af67d9 a0530d79a9 d148fe18d5 6499ef9869 24e94fbfbc b91350d6a4 bd43d3c767 40d0221052 6e076810a0 81a131ff16
effdf7fb95 d84b75ca9d 0c1119727b e8c4397b66 470a5b6857 df4eaa0735 de85a329cb a17a6f3b55 a935d1cb2e ca729ef6f0
c9c46d7a1f ace18d5d7b 5bfc047263 4a887607b3 9bf535ac49 8ae5e9d573 e19d84b839 4f4c9e9c91 284e1c57a9 7bc03185a8
872ee0442b 63327461bd efa4e95a18 755e9f10c2 fb4c5edc8e dc732445f7 5a34955fae 05cc8304df 2c6a47d68d 85b7e579e2
93c2edf6ba 71c4e02143 00370c981d 97229ebfd9 4ba6b38b6e 4a6b6779ad 97d1bb9434 ed81ce446d 520465510e 40ee64c5c1
3ffb0f1fa8 7c5616da07 f14a4b9e8c ccc6738265 8b431c5421 4a24971d3f 8dd5428c49 0b9b7bab11 ab002fe119 06ddd79a29
d89840b5f0 25c2eaa998 1c80af9b68 fe0c7e7dd5 93e066a1c1 6bbddcd5d5 e96dc6d1fc 0b7931bfb8 57bddabfd0 c7ba478582
a9a0693e2a 5aa4bc08a3 5a4481dc28 d06cd222fe 74acf65fa6 4ad2685d61 248d1b0638 b48036fe62 757d2b623f 4b7a10ec90
b366cf4bf6 7b4c3e163f 70aef6681d 29cb02b756 9859e54522 9f6556da0c 46481d879f 25e3aacf4a 1f1f1c253b 24960c0a49
4ac837d831 b242e2c35c 19baa03cea 8dc94869fd 71fb76b8e1 c7e1dcb60a e73c997a0c b46991db7e b0ddceda1f 451d60295a
57268e666c ad9a1e4a49 416a7d43ce a2d17a6f45 0d54acbc64 b2ba4cbfa3 2b66fd3a22 fbe3759fed 9af4330c6d d77e7466b8
```
`.travis.yml` — 12 lines changed

```diff
@@ -1,13 +1,14 @@
 language: cpp
 
-sudo: false
+sudo: required
 
 compiler:
 - gcc
 - clang
 
 env:
-- RDKAFKA_VERSION=v0.11.0
+- RDKAFKA_VERSION=v0.9.4
+- RDKAFKA_VERSION=v0.11.6
 
 os:
 - linux
@@ -21,9 +22,8 @@ addons:
 - zookeeperd
 
 before_script:
-- service zookeeper start
-- KAFKA_VERSION=2.11-1.0.0
-- wget http://apache.cs.utah.edu/kafka/1.0.0/kafka_$KAFKA_VERSION.tgz
+- KAFKA_VERSION=2.11-2.2.0
+- wget https://archive.apache.org/dist/kafka/2.2.0/kafka_$KAFKA_VERSION.tgz
 - tar xvzf kafka_$KAFKA_VERSION.tgz
 - ./kafka_$KAFKA_VERSION/bin/kafka-server-start.sh ./kafka_$KAFKA_VERSION/config/server.properties > /dev/null 2> /dev/null &
 - git clone https://github.com/edenhill/librdkafka.git
@@ -37,7 +37,7 @@ script:
 - ./configure --prefix=./install && make libs && make install
 - cd ..
 - mkdir build && cd build
-- cmake .. -DRDKAFKA_ROOT_DIR=../librdkafka/install/ -DKAFKA_TEST_INSTANCE=localhost:9092
+- cmake .. -DCPPKAFKA_CMAKE_VERBOSE=ON -DRDKAFKA_ROOT=./librdkafka/install -DKAFKA_TEST_INSTANCE=localhost:9092
 - make examples
 - make tests
 - ./tests/cppkafka_tests
```
`CMakeLists.txt` — 100 lines changed

```diff
@@ -1,25 +1,36 @@
-cmake_minimum_required(VERSION 2.8.1)
-project(cppkafka)
+cmake_minimum_required(VERSION 3.9.2)
+project(CppKafka)
+if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0")
+    # Use <package>_ROOT variable to find configuration files
+    cmake_policy(SET CMP0074 NEW)
+endif()
+
+include(GNUInstallDirs)
+include(CMakePackageConfigHelpers)
+
 # Set the version number.
 set(CPPKAFKA_VERSION_MAJOR 0)
-set(CPPKAFKA_VERSION_MINOR 2)
-set(CPPKAFKA_VERSION "${CPPKAFKA_VERSION_MAJOR}.${CPPKAFKA_VERSION_MINOR}")
-set(RDKAFKA_MIN_VERSION 0x00090400)
+set(CPPKAFKA_VERSION_MINOR 4)
+set(CPPKAFKA_VERSION_REVISION 0)
+set(CPPKAFKA_VERSION "${CPPKAFKA_VERSION_MAJOR}.${CPPKAFKA_VERSION_MINOR}.${CPPKAFKA_VERSION_REVISION}")
+set(RDKAFKA_MIN_VERSION "0.9.4")
+set(RDKAFKA_MIN_VERSION_HEX 0x00090400)
 
-if(MSVC)
-    # Don't always use Wall, since VC's /Wall is ridiculously verbose.
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W3")
+if (NOT CMAKE_CXX_FLAGS)
+    # Set default compile flags for the project
+    if(MSVC)
+        # Don't always use Wall, since VC's /Wall is ridiculously verbose.
+        set(CMAKE_CXX_FLAGS "/W3")
 
     # Disable VC secure checks, since these are not really issues
     add_definitions("-D_CRT_SECURE_NO_WARNINGS=1")
     add_definitions("-D_SCL_SECURE_NO_WARNINGS=1")
     add_definitions("-DNOGDI=1")
     add_definitions("-DNOMINMAX=1")
 else()
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall")
+    set(CMAKE_CXX_FLAGS "-Wall")
+    endif()
 endif()
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
 
 # Set output directories
 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
@@ -33,6 +44,35 @@ option(CPPKAFKA_DISABLE_EXAMPLES "Disable build of cppkafka examples." OFF)
 option(CPPKAFKA_BOOST_STATIC_LIBS "Link with Boost static libraries." ON)
 option(CPPKAFKA_BOOST_USE_MULTITHREADED "Use Boost multithreaded libraries." ON)
 option(CPPKAFKA_RDKAFKA_STATIC_LIB "Link with Rdkafka static library." OFF)
+option(CPPKAFKA_EXPORT_PKGCONFIG "Generate 'cppkafka.pc' file" ON)
+option(CPPKAFKA_EXPORT_CMAKE_CONFIG "Generate CMake config, target and version files." ON)
+
+# Add FindRdKafka.cmake
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
+
+if (NOT CPPKAFKA_CONFIG_DIR)
+    set(CPPKAFKA_CONFIG_DIR lib/cmake/${PROJECT_NAME})
+endif()
+
+# Maintain previous compatibility
+if (RDKAFKA_ROOT_DIR)
+    set(RdKafka_ROOT ${RDKAFKA_ROOT_DIR})
+elseif (RDKAFKA_ROOT)
+    set(RdKafka_ROOT ${RDKAFKA_ROOT})
+endif()
+
+if (RdKafka_ROOT)
+    if (NOT IS_ABSOLUTE ${RdKafka_ROOT})
+        set(RdKafka_ROOT "${CMAKE_SOURCE_DIR}/${RdKafka_ROOT}")
+    endif()
+endif()
+
+if (RDKAFKA_DIR)
+    set(RdKafka_DIR ${RDKAFKA_DIR}) # For older versions of find_package
+    if (NOT IS_ABSOLUTE ${RdKafka_ROOT})
+        set(RdKafka_DIR "${CMAKE_SOURCE_DIR}/${RdKafka_DIR}")
+    endif()
+endif()
+
 # Disable output from find_package macro
 if (NOT CPPKAFKA_CMAKE_VERBOSE)
@@ -47,15 +87,23 @@ else()
     message(STATUS "Build will generate a static library.")
     set(CPPKAFKA_LIBRARY_TYPE STATIC)
     add_definitions("-DCPPKAFKA_STATIC=1")
+    set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 endif()
 
 if (CPPKAFKA_RDKAFKA_STATIC_LIB)
     add_definitions("-DLIBRDKAFKA_STATICLIB")
 endif()
+
+if (NOT CPPKAFKA_CONFIG_DIR)
+    set(CPPKAFKA_CONFIG_DIR lib/cmake/${PROJECT_NAME})
+endif()
+
+if (NOT CPPKAFKA_PKGCONFIG_DIR)
+    set(CPPKAFKA_PKGCONFIG_DIR share/pkgconfig)
+endif()
+
 # Look for Boost (just need boost.optional headers here)
 find_package(Boost REQUIRED ${FIND_PACKAGE_QUIET})
-find_package(RdKafka REQUIRED ${FIND_PACKAGE_QUIET})
 
 if (Boost_FOUND)
     find_package(Boost COMPONENTS program_options ${FIND_PACKAGE_QUIET})
@@ -72,8 +120,24 @@ if (Boost_FOUND)
     endif()
 endif()
+
+# Try to find the RdKafka configuration file if present.
+# This will search default system locations as well as RdKafka_ROOT and RdKafka_Dir paths if specified.
+find_package(RdKafka ${FIND_PACKAGE_QUIET} CONFIG)
+set(RDKAFKA_TARGET_IMPORTS ${RdKafka_FOUND})
+if (NOT RdKafka_FOUND)
+    message(STATUS "RdKafkaConfig.cmake not found. Attempting to find module instead...")
+    find_package(RdKafka REQUIRED ${FIND_PACKAGE_QUIET} MODULE)
+    if (NOT RdKafka_FOUND)
+        message(FATAL_ERROR "RdKafka module not found. Please set RDKAFKA_ROOT to the install path or RDKAFKA_DIR pointing to the RdKafka configuration file location.")
+    else()
+        message(STATUS "RdKafka module found.")
+    endif()
+else()
+    message(STATUS "RdKafka configuration file found: ${RdKafka_CONFIG}")
+endif()
+
 add_subdirectory(src)
-add_subdirectory(include)
+add_subdirectory(include/cppkafka)
 
 # Examples target
 if (NOT CPPKAFKA_DISABLE_EXAMPLES AND Boost_PROGRAM_OPTIONS_FOUND)
```
`README.md` — 40 lines changed

````diff
@@ -17,6 +17,8 @@ only supported via the high level consumer API. _cppkafka_ requires **rdkafka >=
 order to use it. Other wrapped functionalities are also provided, like fetching metadata,
 offsets, etc.
 
+* _cppkafka_ provides message header support. This feature requires **rdkafka >= 0.11.4**.
+
 * _cppkafka_ tries to add minimal overhead over _librdkafka_. A very thin wrapper for _librdkafka_
 messages is used for consumption so there's virtually no overhead at all.
 
@@ -52,10 +54,9 @@ int main() {
 In order to compile _cppkafka_ you need:
 
 * _librdkafka >= 0.9.4_
-* _CMake_
-* A compiler with good C++11 support (e.g. gcc >= 4.8). This was tested successfully on
-_g++ 4.8.3_.
-* The boost library.
+* _CMake >= 3.9.2_
+* A compiler with good C++11 support (e.g. gcc >= 4.8). This was tested successfully on _g++ 4.8.3_.
+* The boost library (for boost::optional)
 
 Now, in order to build, just run:
 
@@ -64,12 +65,14 @@ mkdir build
 cd build
 cmake <OPTIONS> ..
 make
+make install
 ```
 
 ## CMake options
 
 The following cmake options can be specified:
-* `RDKAFKA_ROOT_DIR` : Specify a different librdkafka install directory.
+* `RDKAFKA_ROOT` : Specify a different librdkafka install directory.
+* `RDKAFKA_DIR` : Specify a different directory where the RdKafkaConfig.cmake is installed.
 * `BOOST_ROOT` : Specify a different Boost install directory.
 * `CPPKAFKA_CMAKE_VERBOSE` : Generate verbose output. Default is `OFF`.
 * `CPPKAFKA_BUILD_SHARED` : Build cppkafka as a shared library. Default is `ON`.
@@ -78,24 +81,14 @@ The following cmake options can be specified:
 * `CPPKAFKA_BOOST_STATIC_LIBS` : Link with Boost static libraries. Default is `ON`.
 * `CPPKAFKA_BOOST_USE_MULTITHREADED` : Use Boost multi-threaded libraries. Default is `ON`.
 * `CPPKAFKA_RDKAFKA_STATIC_LIB` : Link to Rdkafka static library. Default is `OFF`.
+* `CPPKAFKA_CONFIG_DIR` : Install location of the cmake configuration files. Default is `lib/cmake/cppkafka`.
+* `CPPKAFKA_PKGCONFIG_DIR` : Install location of the .pc file. Default is `share/pkgconfig`.
+* `CPPKAFKA_EXPORT_PKGCONFIG` : Generate `cppkafka.pc` file. Default is `ON`.
+* `CPPKAFKA_EXPORT_CMAKE_CONFIG` : Generate CMake config, target and version files. Default is `ON`.
 
 Example:
 ```Shell
-cmake -DRDKAFKA_ROOT_DIR=/some/other/dir -DCPPKAFKA_BUILD_SHARED=OFF ...
-```
-
-The `RDKAFKA_ROOT_DIR` must contain the following structure. If the system
-architecture is 64-bit and both `lib` and `lib64` folders are available, the `lib64`
-folder location will be selected by cmake.
-
-```Shell
-${RDKAFKA_ROOT_DIR}/
- |
- + include/librdkafka/rdkafka.h
- |
- + lib/librdkafka.a
- |
- + lib64/librdkafka.a (optional)
+cmake -DRDKAFKA_ROOT=/some/other/dir -DCPPKAFKA_BUILD_SHARED=OFF ...
 ```
 
 # Using
@@ -105,6 +98,13 @@ If you want to use _cppkafka_, you'll need to link your application with:
 * _cppkafka_
 * _rdkafka_
 
+If using CMake, this is simplified by doing:
+```cmake
+find_package(CppKafka REQUIRED)
+
+target_link_libraries(<YourLibrary> CppKafka::cppkafka)
+```
+
 # Documentation
 
 You can generate the documentation by running `make docs` inside the build directory. This requires
````
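To make the updated linking and usage instructions concrete, here is a minimal producer sketch in the style of the README's own example; the broker address, topic name, and payload are placeholders:

```cpp
#include <string>
#include <cppkafka/cppkafka.h>

using namespace cppkafka;

int main() {
    // Broker address and topic are placeholders
    Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" }
    };
    Producer producer(config);
    // Build and produce a single message
    std::string message = "hey there!";
    producer.produce(MessageBuilder("my_topic").partition(0).payload(message));
    // Wait until all outstanding produce requests are completed
    producer.flush();
}
```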
`cmake/FindRdKafka.cmake`

```diff
@@ -1,60 +1,75 @@
-# Override default CMAKE_FIND_LIBRARY_SUFFIXES
+# This find module helps find the RdKafka module. It exports the following variables:
+# - RdKafka_INCLUDE_DIR : The directory where rdkafka.h is located.
+# - RdKafka_LIBNAME : The name of the library, i.e. librdkafka.a, librdkafka.so, etc.
+# - RdKafka_LIBRARY_PATH : The full library path i.e. <path_to_binaries>/${RdKafka_LIBNAME}
+# - RdKafka::rdkafka : Imported library containing all above properties set.
+
 if (CPPKAFKA_RDKAFKA_STATIC_LIB)
-    if (MSVC)
-        set(RDKAFKA_SUFFIX lib)
-    else()
-        set(RDKAFKA_SUFFIX a)
-    endif()
+    set(RDKAFKA_PREFIX ${CMAKE_STATIC_LIBRARY_PREFIX})
+    set(RDKAFKA_SUFFIX ${CMAKE_STATIC_LIBRARY_SUFFIX})
+    set(RDKAFKA_LIBRARY_TYPE STATIC)
 else()
-    if (MSVC)
-        set(RDKAFKA_SUFFIX dll)
-    else()
-        set(RDKAFKA_SUFFIX so)
-    endif()
+    set(RDKAFKA_PREFIX ${CMAKE_SHARED_LIBRARY_PREFIX})
+    set(RDKAFKA_SUFFIX ${CMAKE_SHARED_LIBRARY_SUFFIX})
+    set(RDKAFKA_LIBRARY_TYPE SHARED)
 endif()
 
-find_path(RDKAFKA_ROOT_DIR
-    NAMES include/librdkafka/rdkafka.h
+set(RdKafka_LIBNAME ${RDKAFKA_PREFIX}rdkafka${RDKAFKA_SUFFIX})
+
+find_path(RdKafka_INCLUDE_DIR
+    NAMES librdkafka/rdkafka.h
+    HINTS ${RdKafka_ROOT}/include
 )
 
-find_path(RDKAFKA_INCLUDE_DIR
-    NAMES librdkafka/rdkafka.h
-    HINTS ${RDKAFKA_ROOT_DIR}/include
+find_library(RdKafka_LIBRARY_PATH
+    NAMES ${RdKafka_LIBNAME} rdkafka
+    HINTS ${RdKafka_ROOT}/lib ${RdKafka_ROOT}/lib64
 )
 
 # Check lib paths
 if (CPPKAFKA_CMAKE_VERBOSE)
     get_property(FIND_LIBRARY_32 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB32_PATHS)
     get_property(FIND_LIBRARY_64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
-    MESSAGE(STATUS "RDKAFKA search 32-bit library paths: ${FIND_LIBRARY_32}")
-    MESSAGE(STATUS "RDKAFKA search 64-bit library paths: ${FIND_LIBRARY_64}")
+    message(STATUS "RDKAFKA search 32-bit library paths: ${FIND_LIBRARY_32}")
+    message(STATUS "RDKAFKA search 64-bit library paths: ${FIND_LIBRARY_64}")
+    message(STATUS "RdKafka_ROOT = ${RdKafka_ROOT}")
+    message(STATUS "RdKafka_INCLUDE_DIR = ${RdKafka_INCLUDE_DIR}")
+    message(STATUS "RdKafka_LIBNAME = ${RdKafka_LIBNAME}")
+    message(STATUS "RdKafka_LIBRARY_PATH = ${RdKafka_LIBRARY_PATH}")
 endif()
 
-find_library(RDKAFKA_LIBRARY
-    NAMES rdkafka.${RDKAFKA_SUFFIX} librdkafka.${RDKAFKA_SUFFIX} rdkafka
-    HINTS ${RDKAFKA_ROOT_DIR}/lib
-)
-
 include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(RDKAFKA DEFAULT_MSG
-    RDKAFKA_LIBRARY
-    RDKAFKA_INCLUDE_DIR
+find_package_handle_standard_args(RdKafka DEFAULT_MSG
+    RdKafka_LIBNAME
+    RdKafka_LIBRARY_PATH
+    RdKafka_INCLUDE_DIR
 )
 
-set(CONTENTS "#include <librdkafka/rdkafka.h>\n #if RD_KAFKA_VERSION >= ${RDKAFKA_MIN_VERSION}\n int main() { }\n #endif")
-set(FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/rdkafka_version_test.c)
+set(CONTENTS "#include <librdkafka/rdkafka.h>\n #if RD_KAFKA_VERSION >= ${RDKAFKA_MIN_VERSION_HEX}\n int main() { }\n #endif")
+set(FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/rdkafka_version_test.cpp)
 file(WRITE ${FILE_NAME} ${CONTENTS})
 
-try_compile(HAVE_VALID_KAFKA_VERSION ${CMAKE_CURRENT_BINARY_DIR}
+try_compile(RdKafka_FOUND ${CMAKE_CURRENT_BINARY_DIR}
     SOURCES ${FILE_NAME}
-    CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${RDKAFKA_INCLUDE_DIR}")
+    CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${RdKafka_INCLUDE_DIR}")
 
-if (HAVE_VALID_KAFKA_VERSION)
+if (RdKafka_FOUND)
+    add_library(RdKafka::rdkafka ${RDKAFKA_LIBRARY_TYPE} IMPORTED GLOBAL)
+    if (UNIX AND NOT APPLE)
+        set(RDKAFKA_DEPENDENCIES pthread rt ssl crypto dl z)
+    else()
+        set(RDKAFKA_DEPENDENCIES pthread ssl crypto dl z)
+    endif()
+    set_target_properties(RdKafka::rdkafka PROPERTIES
+        IMPORTED_NAME RdKafka
+        IMPORTED_LOCATION "${RdKafka_LIBRARY_PATH}"
+        INTERFACE_INCLUDE_DIRECTORIES "${RdKafka_INCLUDE_DIR}"
+        INTERFACE_LINK_LIBRARIES "${RDKAFKA_DEPENDENCIES}")
     message(STATUS "Found valid rdkafka version")
     mark_as_advanced(
-        RDKAFKA_ROOT_DIR
-        RDKAFKA_INCLUDE_DIR
         RDKAFKA_LIBRARY
+        RdKafka_INCLUDE_DIR
+        RdKafka_LIBRARY_PATH
     )
 else()
     message(FATAL_ERROR "Failed to find valid rdkafka version")
```
`cmake/config.cmake.in` — new file, 33 lines

```cmake
@PACKAGE_INIT@

include(CMakeFindDependencyMacro)

# Add FindRdKafka.cmake
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_LIST_DIR}")

set(RDKAFKA_MIN_VERSION_HEX "@RDKAFKA_MIN_VERSION_HEX@")

# Find boost optional
find_dependency(Boost REQUIRED)

# Try to find the RdKafka configuration file if present.
# This will search default system locations as well as RdKafka_ROOT and RdKafka_DIR paths if specified.
find_package(RdKafka QUIET CONFIG)
set(RDKAFKA_TARGET_IMPORTS ${RdKafka_FOUND})
if (NOT RdKafka_FOUND)
    find_dependency(RdKafka REQUIRED MODULE)
endif()

include("${CMAKE_CURRENT_LIST_DIR}/@TARGET_EXPORT_NAME@.cmake")

# Export 'CppKafka_ROOT'
set_and_check(@PROJECT_NAME@_ROOT "@PACKAGE_CMAKE_INSTALL_PREFIX@")

# Export 'CppKafka_INSTALL_INCLUDE_DIR'
set_and_check(@PROJECT_NAME@_INSTALL_INCLUDE_DIR "@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@")

# Export 'CppKafka_INSTALL_LIB_DIR'
set_and_check(@PROJECT_NAME@_INSTALL_LIB_DIR "@PACKAGE_CMAKE_INSTALL_LIBDIR@")

# Validate installed components
check_required_components("@PROJECT_NAME@")
```
`cmake/cppkafka.pc.in` — new file, 14 lines

```
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
sharedlibdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
includedir=${prefix}/include

Name: cppkafka
Url: https://github.com/mfontanini/cppkafka
Description: C++ wrapper library on top of RdKafka
Version: @CPPKAFKA_VERSION@
Requires:
Requires.private: rdkafka >= 0.9.4
Libs: -L${libdir} -L${sharedlibdir} -lcppkafka
Cflags: -I${includedir} -I${includedir}/cppkafka -I@Boost_INCLUDE_DIRS@
```
`examples/CMakeLists.txt`

```diff
@@ -1,11 +1,10 @@
-link_libraries(cppkafka ${RDKAFKA_LIBRARY} ${Boost_LIBRARIES} pthread rt ssl crypto dl z)
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
-include_directories(SYSTEM ${RDKAFKA_INCLUDE_DIR})
 
 add_custom_target(examples)
 macro(create_example example_name)
     string(REPLACE "_" "-" sanitized_name ${example_name})
     add_executable(${sanitized_name} EXCLUDE_FROM_ALL "${example_name}_example.cpp")
+    target_link_libraries(${sanitized_name} cppkafka RdKafka::rdkafka Boost::boost Boost::program_options)
     add_dependencies(examples ${sanitized_name})
 endmacro()
 
```
`examples/consumer_dispatcher_example.cpp`

```diff
@@ -115,7 +115,7 @@ int main(int argc, char* argv[]) {
         },
         // Whenever EOF is reached on a partition, print this
         [](ConsumerDispatcher::EndOfFile, const TopicPartition& topic_partition) {
-            cout << "Reched EOF on partition " << topic_partition << endl;
+            cout << "Reached EOF on partition " << topic_partition << endl;
         }
     );
 }
```
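For context, a sketch of how the `ConsumerDispatcher` used in this example is driven; the broker address, group id, and topic are placeholders, and `enable.partition.eof` is assumed to be enabled so the EOF handler actually fires:

```cpp
#include <iostream>
#include <cppkafka/cppkafka.h>
#include <cppkafka/utils/consumer_dispatcher.h>

int main() {
    // Placeholders: point these at a real broker/topic
    cppkafka::Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" },
        { "group.id", "example_group" },
        { "enable.partition.eof", true }
    };
    cppkafka::Consumer consumer(config);
    consumer.subscribe({ "my_topic" });

    cppkafka::ConsumerDispatcher dispatcher(consumer);
    dispatcher.run(
        // Called for every valid message
        [](cppkafka::Message msg) {
            std::cout << msg.get_payload() << std::endl;
        },
        // Called whenever a partition reaches EOF (the handler this commit fixes)
        [](cppkafka::ConsumerDispatcher::EndOfFile,
           const cppkafka::TopicPartition& topic_partition) {
            std::cout << "Reached EOF on partition " << topic_partition << std::endl;
        }
    );
}
```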
`examples/producer_example.cpp`

```diff
@@ -75,4 +75,7 @@ int main(int argc, char* argv[]) {
         // Actually produce the message we've built
         producer.produce(builder);
     }
+
+    // Flush all produced messages
+    producer.flush();
 }
```
`include/CMakeLists.txt` — deleted

```diff
@@ -1 +0,0 @@
-add_subdirectory(cppkafka)
```
`include/cppkafka/CMakeLists.txt`

```diff
@@ -1,7 +1,8 @@
+set(CPPKAFKA_HEADER "${CMAKE_CURRENT_BINARY_DIR}/cppkafka.h")
+
 # Local function to auto-generate main cppkafka.h header file
 function(make_cppkafka_header)
-    set(CPPKAFKA_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/cppkafka.h)
-    file(GLOB INCLUDE_HEADERS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.h" "utils/*.h")
+    file(GLOB INCLUDE_HEADERS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h" "utils/*.h")
     list(SORT INCLUDE_HEADERS)
     foreach(header ${INCLUDE_HEADERS})
         if (NOT ${header} MATCHES "cppkafka.h")
@@ -10,7 +11,7 @@ function(make_cppkafka_header)
     endforeach()
 
     #create file from template
-    configure_file(${PROJECT_SOURCE_DIR}/cppkafka.h.in ${CPPKAFKA_HEADER})
+    configure_file("${PROJECT_SOURCE_DIR}/cmake/cppkafka.h.in" "${CPPKAFKA_HEADER}" @ONLY)
 endfunction()
 
 # Run file generation function
@@ -22,7 +23,7 @@ file(GLOB UTILS_INCLUDE_FILES "utils/*.h")
 file(GLOB DETAIL_INCLUDE_FILES "detail/*.h")
 install(
     FILES ${INCLUDE_FILES}
-    DESTINATION include/cppkafka
+    DESTINATION include/cppkafka/
     COMPONENT Headers
 )
 install(
@@ -35,3 +36,8 @@ install(
     DESTINATION include/cppkafka/detail/
     COMPONENT Headers
 )
+install(
+    FILES "${CPPKAFKA_HEADER}"
+    DESTINATION include/cppkafka/
+    COMPONENT Headers
+)
```
`include/cppkafka/buffer.h`

```diff
@@ -31,6 +31,7 @@
 #define CPPKAFKA_BUFFER_H
 
 #include <cstddef>
+#include <array>
 #include <vector>
 #include <iosfwd>
 #include <algorithm>
@@ -81,6 +82,17 @@ public:
         }
     }
 
+    /**
+     * Constructs a buffer from two iterators in the range [first,last)
+     *
+     * \param first An iterator to the start of data
+     * \param last An iterator to the end of data (not included)
+     */
+    template <typename Iter>
+    Buffer(const Iter first, const Iter last)
+    : Buffer(&*first, std::distance(first, last)) {
+    }
+
     /**
      * Constructs a buffer from a vector
      *
@@ -92,10 +104,43 @@ public:
         static_assert(sizeof(T) == sizeof(DataType), "sizeof(T) != sizeof(DataType)");
     }
 
-    // Don't allow construction from temporary vectors
+    /**
+     * Don't allow construction from temporary vectors
+     */
     template <typename T>
     Buffer(std::vector<T>&& data) = delete;
 
+    /**
+     * Constructs a buffer from an array
+     *
+     * \param data The the array to be used as input
+     */
+    template <typename T, size_t N>
+    Buffer(const std::array<T, N>& data)
+    : data_(reinterpret_cast<const DataType*>(data.data())), size_(data.size()) {
+        static_assert(sizeof(T) == sizeof(DataType), "sizeof(T) != sizeof(DataType)");
+    }
+
+    /**
+     * Don't allow construction from temporary arrays
+     */
+    template <typename T, size_t N>
+    Buffer(std::array<T, N>&& data) = delete;
+
+    /**
+     * Constructs a buffer from a raw array
+     *
+     * \param data The the array to be used as input
+     */
+    template <typename T, size_t N>
+    Buffer(const T(&data)[N])
+    : Buffer(data, N) {
+    }
+
+    // Don't allow construction from temporary raw arrays
+    template <typename T, size_t N>
+    Buffer(T(&&data)[N]) = delete;
+
     /**
      * \brief Construct a buffer from a const string ref
      *
@@ -104,7 +149,9 @@ public:
      */
     Buffer(const std::string& data);
 
-    // Don't allow construction from temporary strings
+    /**
+     * Don't allow construction from temporary strings
+     */
     Buffer(std::string&&) = delete;
 
     Buffer(const Buffer&) = delete;
@@ -172,6 +219,14 @@ CPPKAFKA_API bool operator==(const Buffer& lhs, const Buffer& rhs);
  */
 CPPKAFKA_API bool operator!=(const Buffer& lhs, const Buffer& rhs);
 
+/**
+ * Compares Buffer objects lexicographically
+ */
+CPPKAFKA_API bool operator<(const Buffer& lhs, const Buffer& rhs);
+CPPKAFKA_API bool operator<=(const Buffer& lhs, const Buffer& rhs);
+CPPKAFKA_API bool operator>(const Buffer& lhs, const Buffer& rhs);
+CPPKAFKA_API bool operator>=(const Buffer& lhs, const Buffer& rhs);
+
 } // cppkafka
 
 #endif // CPPKAFKA_BUFFER_H
```
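A short sketch of the constructors and relational operators this change adds to `Buffer`. `Buffer` is a non-owning view, so every input below must outlive the buffer built from it:

```cpp
#include <array>
#include <string>
#include <vector>
#include <cppkafka/buffer.h>

using cppkafka::Buffer;

int main() {
    const std::string str_data = "hello";
    const std::vector<char> vec_data = {'a', 'b'};
    const std::array<char, 2> arr_data = {{'c', 'd'}};
    const char raw_data[] = {'e', 'f'};

    Buffer from_string(str_data);                         // existing string ctor
    Buffer from_vector(vec_data);                         // existing vector ctor
    Buffer from_array(arr_data);                          // new std::array ctor
    Buffer from_raw(raw_data);                            // new raw-array ctor
    Buffer from_iters(vec_data.begin(), vec_data.end());  // new iterator-range ctor

    // New relational operators compare buffers lexicographically
    bool less = from_vector < from_array;
    (void)less;

    // Buffer rejected(std::string("temp")); // would not compile: deleted rvalue overload
}
```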
`include/cppkafka/clonable_ptr.h`

```diff
@@ -41,7 +41,7 @@ template <typename T, typename Deleter, typename Cloner>
 class ClonablePtr {
 public:
     /**
-     * Creates an instance
+     * \brief Creates an instance
      *
      * \param ptr The pointer to be wrapped
      * \param deleter The deleter functor
@@ -60,17 +60,21 @@ public:
      * \param rhs The pointer to be copied
      */
     ClonablePtr(const ClonablePtr& rhs)
-    : handle_(rhs.cloner_(rhs.handle_.get()), rhs.handle_.get_deleter()), cloner_(rhs.cloner_) {
+    : handle_(std::unique_ptr<T, Deleter>(rhs.try_clone(), rhs.get_deleter())),
+      cloner_(rhs.get_cloner()) {
+
     }
 
     /**
-     * Copies and assigns the given pointer
+     * \brief Copies and assigns the given pointer
      *
      * \param rhs The pointer to be copied
      */
     ClonablePtr& operator=(const ClonablePtr& rhs) {
-        handle_.reset(cloner_(rhs.handle_.get()));
+        if (this != &rhs) {
+            handle_ = std::unique_ptr<T, Deleter>(rhs.try_clone(), rhs.get_deleter());
+            cloner_ = rhs.get_cloner();
+        }
         return *this;
     }
 
@@ -79,12 +83,51 @@ public:
     ~ClonablePtr() = default;
 
     /**
-     * Getter for the internal pointer
+     * \brief Getter for the internal pointer
      */
     T* get() const {
         return handle_.get();
     }
 
+    /**
+     * \brief Releases ownership of the internal pointer
+     */
+    T* release() {
+        return handle_.release();
+    }
+
+    /**
+     * \brief Reset the internal pointer to a new one
+     */
+    void reset(T* ptr) {
+        handle_.reset(ptr);
+    }
+
+    /**
+     * \brief Get the deleter
+     */
+    const Deleter& get_deleter() const {
+        return handle_.get_deleter();
+    }
+
+    /**
+     * \brief Get the cloner
+     */
+    const Cloner& get_cloner() const {
+        return cloner_;
+    }
+
+    /**
+     * \brief Indicates whether this ClonablePtr instance is valid (not null)
+     */
+    explicit operator bool() const {
+        return static_cast<bool>(handle_);
+    }
 private:
+    T* try_clone() const {
+        return cloner_ ? cloner_(get()) : get();
+    }
+
     std::unique_ptr<T, Deleter> handle_;
     Cloner cloner_;
 };
```
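A sketch of `ClonablePtr`'s copy semantics with the accessors added above. The C-string handle with `strdup`/`free` as cloner/deleter is a stand-in for the `rd_kafka_*` handles this class normally wraps, and the three-argument constructor is assumed from the docblock in the diff:

```cpp
#include <cstdlib>
#include <cstring>
#include <cppkafka/clonable_ptr.h>

using Deleter = void (*)(char*);
using Cloner = char* (*)(char*);

char* dup_string(char* ptr) { return strdup(ptr); }
void free_string(char* ptr) { free(ptr); }

int main() {
    // Wrap a heap-allocated "handle" with its deleter and cloner
    cppkafka::ClonablePtr<char, Deleter, Cloner>
        original(strdup("config"), &free_string, &dup_string);

    // Copying invokes the cloner, so each instance owns an independent handle
    cppkafka::ClonablePtr<char, Deleter, Cloner> copy(original);

    // New accessors introduced by this change
    bool valid = static_cast<bool>(copy);  // operator bool
    char* raw = copy.get();
    (void)valid; (void)raw;
}
```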
`include/cppkafka/configuration.h`

```diff
@@ -42,6 +42,7 @@
 #include "clonable_ptr.h"
 #include "configuration_base.h"
 #include "macros.h"
+#include "event.h"
 
 namespace cppkafka {
 
@@ -78,6 +79,7 @@ public:
                                              const std::string& message)>;
     using StatsCallback = std::function<void(KafkaHandleBase& handle, const std::string& json)>;
     using SocketCallback = std::function<int(int domain, int type, int protocol)>;
+    using BackgroundEventCallback = std::function<void(KafkaHandleBase& handle, Event)>;
 
     using ConfigurationBase<Configuration>::set;
     using ConfigurationBase<Configuration>::get;
@@ -142,6 +144,18 @@ public:
      */
     Configuration& set_socket_callback(SocketCallback callback);
 
+#if RD_KAFKA_VERSION >= RD_KAFKA_ADMIN_API_SUPPORT_VERSION
+    /**
+     * Sets the background event callback (invokes rd_kafka_conf_set_background_event_cb)
+     */
+    Configuration& set_background_event_callback(BackgroundEventCallback callback);
+
+    /**
+     * Sets the event mask (invokes rd_kafka_conf_set_events)
+     */
+    Configuration& set_events(int events);
+#endif
+
     /**
      * Sets the default topic configuration
      */
@@ -204,6 +218,11 @@ public:
      */
     const SocketCallback& get_socket_callback() const;
 
+    /**
+     * Gets the background event callback
+     */
+    const BackgroundEventCallback& get_background_event_callback() const;
+
     /**
      * Gets the default topic configuration
      */
@@ -229,6 +248,7 @@ private:
     LogCallback log_callback_;
     StatsCallback stats_callback_;
     SocketCallback socket_callback_;
+    BackgroundEventCallback background_event_callback_;
 };
 
 } // cppkafka
```
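A sketch of wiring the new background event callback. This assumes an rdkafka build recent enough that `RD_KAFKA_ADMIN_API_SUPPORT_VERSION` enables the feature; the broker address is a placeholder:

```cpp
#include <iostream>
#include <cppkafka/cppkafka.h>

int main() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" } // placeholder broker
    };
    // Invoked from rdkafka's background thread for each queued event
    config.set_background_event_callback(
        [](cppkafka::KafkaHandleBase& handle, cppkafka::Event event) {
            std::cout << "background event: " << event.get_name() << std::endl;
        });
    cppkafka::Producer producer(config);
}
```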
`include/cppkafka/consumer.h`

```diff
@@ -102,6 +102,7 @@ public:
     using RevocationCallback = std::function<void(const TopicPartitionList&)>;
     using RebalanceErrorCallback = std::function<void(Error)>;
     using KafkaHandleBase::pause;
+    using KafkaHandleBase::resume;
 
     /**
      * \brief Creates an instance of a consumer.
@@ -281,6 +282,20 @@ public:
      */
     TopicPartitionList get_offsets_committed(const TopicPartitionList& topic_partitions) const;
 
+    /**
+     * \brief Gets the offsets committed for the given topic/partition list with a timeout
+     *
+     * This translates into a call to rd_kafka_committed
+     *
+     * \param topic_partitions The topic/partition list to be queried
+     *
+     * \param timeout The timeout for this operation. Supersedes the default consumer timeout.
+     *
+     * \return The topic partition list
+     */
+    TopicPartitionList get_offsets_committed(const TopicPartitionList& topic_partitions,
+                                             std::chrono::milliseconds timeout) const;
+
     /**
      * \brief Gets the offset positions for the given topic/partition list
      *
@@ -291,6 +306,38 @@ public:
      * \return The topic partition list
      */
     TopicPartitionList get_offsets_position(const TopicPartitionList& topic_partitions) const;
+#if (RD_KAFKA_VERSION >= RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION)
+    /**
+     * \brief Stores the offsets on the currently assigned topic/partitions (legacy).
+     *
+     * This translates into a call to rd_kafka_offsets_store with the offsets prior to the current assignment positions.
+     * It is equivalent to calling rd_kafka_offsets_store(get_offsets_position(get_assignment())).
+     *
+     * \note When using this API it's recommended to set enable.auto.offset.store=false and enable.auto.commit=true.
+     */
+    void store_consumed_offsets() const;
+
+    /**
+     * \brief Stores the offsets on the given topic/partitions (legacy).
+     *
+     * This translates into a call to rd_kafka_offsets_store.
+     *
+     * \param topic_partitions The topic/partition list to be stored.
+     *
+     * \note When using this API it's recommended to set enable.auto.offset.store=false and enable.auto.commit=true.
+     */
+    void store_offsets(const TopicPartitionList& topic_partitions) const;
+#endif
+    /**
+     * \brief Stores the offset for this message (legacy).
+     *
+     * This translates into a call to rd_kafka_offset_store.
+     *
+     * \param msg The message whose offset will be stored.
+     *
+     * \note When using this API it's recommended to set enable.auto.offset.store=false and enable.auto.commit=true.
+     */
+    void store_offset(const Message& msg) const;
 
     /**
      * \brief Gets the current topic subscription
@@ -376,13 +423,43 @@ public:
     /**
      * \brief Polls for a batch of messages
      *
-     * This can return one or more messages
+     * This can return zero or more messages
+     *
+     * \param max_batch_size The maximum amount of messages expected
+     * \param alloc The optionally supplied allocator for allocating messages
+     *
+     * \return A list of messages
+     */
+    template <typename Allocator>
+    std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
+                                               const Allocator& alloc);
+
+    /**
+     * \brief Polls for a batch of messages
+     *
+     * This can return zero or more messages
      *
      * \param max_batch_size The maximum amount of messages expected
      *
      * \return A list of messages
      */
-    MessageList poll_batch(size_t max_batch_size);
+    std::vector<Message> poll_batch(size_t max_batch_size);
+
+    /**
+     * \brief Polls for a batch of messages
+     *
+     * This can return zero or more messages
+     *
+     * \param max_batch_size The maximum amount of messages expected
+     * \param timeout The timeout for this operation
+     * \param alloc The optionally supplied allocator for allocating messages
+     *
+     * \return A list of messages
+     */
+    template <typename Allocator>
+    std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
+                                               std::chrono::milliseconds timeout,
+                                               const Allocator& alloc);
 
     /**
      * \brief Polls for a batch of messages
@@ -394,7 +471,8 @@ public:
      *
      * \return A list of messages
      */
-    MessageList poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout);
+    std::vector<Message> poll_batch(size_t max_batch_size,
+                                    std::chrono::milliseconds timeout);
 
     /**
      * \brief Get the global event queue servicing this consumer corresponding to
@@ -440,6 +518,34 @@ private:
     RebalanceErrorCallback rebalance_error_callback_;
 };
 
+// Implementations
+template <typename Allocator>
+std::vector<Message, Allocator> Consumer::poll_batch(size_t max_batch_size,
+                                                     const Allocator& alloc) {
+    return poll_batch(max_batch_size, get_timeout(), alloc);
+}
+
+template <typename Allocator>
+std::vector<Message, Allocator> Consumer::poll_batch(size_t max_batch_size,
+                                                     std::chrono::milliseconds timeout,
+                                                     const Allocator& alloc) {
+    std::vector<rd_kafka_message_t*> raw_messages(max_batch_size);
+    // Note that this will leak the queue when using rdkafka < 0.11.5 (see get_queue comment)
+    Queue queue = Queue::make_queue(rd_kafka_queue_get_consumer(get_handle()));
+    ssize_t result = rd_kafka_consume_batch_queue(queue.get_handle(),
+                                                  timeout.count(),
+                                                  raw_messages.data(),
+                                                  raw_messages.size());
+    if (result == -1) {
+        check_error(rd_kafka_last_error());
+        // on the off-chance that check_error() does not throw an error
+        return std::vector<Message, Allocator>(alloc);
+    }
+    return std::vector<Message, Allocator>(raw_messages.begin(),
+                                           raw_messages.begin() + result,
+                                           alloc);
+}
+
 } // cppkafka
 
 #endif // CPP_KAFKA_CONSUMER_H
```
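A sketch of the batch polling API after this change, including the new allocator-aware overload; the broker, group id, and topic are placeholders:

```cpp
#include <chrono>
#include <memory>
#include <vector>
#include <cppkafka/cppkafka.h>

int main() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" }, // placeholder broker
        { "group.id", "example_group" }
    };
    cppkafka::Consumer consumer(config);
    consumer.subscribe({ "my_topic" });

    // Plain overload: now returns std::vector<Message> instead of MessageList
    std::vector<cppkafka::Message> batch =
        consumer.poll_batch(64, std::chrono::milliseconds(500));

    // New allocator-aware overload added by this change
    std::allocator<cppkafka::Message> alloc;
    auto batch2 = consumer.poll_batch(64, std::chrono::milliseconds(500), alloc);

    for (const auto& msg : batch) {
        if (msg && !msg.get_error()) {
            // process msg.get_payload()
        }
    }
}
```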
`include/cppkafka/cppkafka.h`

```diff
@@ -37,14 +37,19 @@
 #include <cppkafka/configuration_option.h>
 #include <cppkafka/consumer.h>
 #include <cppkafka/error.h>
+#include <cppkafka/event.h>
 #include <cppkafka/exceptions.h>
 #include <cppkafka/group_information.h>
+#include <cppkafka/header.h>
+#include <cppkafka/header_list.h>
+#include <cppkafka/header_list_iterator.h>
 #include <cppkafka/kafka_handle_base.h>
 #include <cppkafka/logging.h>
 #include <cppkafka/macros.h>
 #include <cppkafka/message.h>
 #include <cppkafka/message_builder.h>
 #include <cppkafka/message_internal.h>
+#include <cppkafka/message_timestamp.h>
 #include <cppkafka/metadata.h>
 #include <cppkafka/producer.h>
 #include <cppkafka/queue.h>
```
`include/cppkafka/detail/endianness.h`

```diff
@@ -14,7 +14,7 @@
 
 #endif
 
-#if defined(__linux__) || defined(__CYGWIN__)
+#if defined(__linux__) || defined(__CYGWIN__) || defined(__sun)
 
 # include <endian.h>
 
@@ -42,11 +42,11 @@
 # define __LITTLE_ENDIAN LITTLE_ENDIAN
 # define __PDP_ENDIAN PDP_ENDIAN
 
-#elif defined(__OpenBSD__)
+#elif defined(__OpenBSD__) || defined(__FreeBSD__)
 
 # include <sys/endian.h>
 
-#elif defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
+#elif defined(__NetBSD__) || defined(__DragonFly__)
 
 # include <sys/endian.h>
 
```
`include/cppkafka/error.h`

```diff
@@ -42,6 +42,10 @@ namespace cppkafka {
  */
 class CPPKAFKA_API Error {
 public:
+    /**
+     * @brief Constructs an error object with RD_KAFKA_RESP_ERR_NO_ERROR
+     */
+    Error() = default;
     /**
      * Constructs an error object
      */
@@ -77,7 +81,7 @@ public:
      */
     CPPKAFKA_API friend std::ostream& operator<<(std::ostream& output, const Error& rhs);
 private:
-    rd_kafka_resp_err_t error_;
+    rd_kafka_resp_err_t error_{RD_KAFKA_RESP_ERR_NO_ERROR};
 };
 
 } // cppkafka
```
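A tiny sketch of the new default constructor; it assumes `Error`'s existing boolean conversion, which treats `RD_KAFKA_RESP_ERR_NO_ERROR` as false:

```cpp
#include <iostream>
#include <cppkafka/error.h>

int main() {
    cppkafka::Error error; // now default-constructs to RD_KAFKA_RESP_ERR_NO_ERROR
    if (!error) {
        std::cout << "no error: " << error << std::endl;
    }
}
```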
180
include/cppkafka/event.h
Normal file
180
include/cppkafka/event.h
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2018, Matias Fontanini
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CPPKAFKA_EVENT_H
|
||||||
|
#define CPPKAFKA_EVENT_H
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include "error.h"
|
||||||
|
#include "message.h"
|
||||||
|
#include "topic_partition.h"
|
||||||
|
#include "topic_partition_list.h"
|
||||||
|
|
||||||
|
namespace cppkafka {
|
||||||
|
|
||||||
|
class Event {
|
||||||
|
public:
|
||||||
|
/**
|
||||||
|
* Construct an Event from a rdkafka event handle and take ownership of it
|
||||||
|
*
|
||||||
|
* /param handle The handle to construct this event from
|
||||||
|
*/
|
||||||
|
Event(rd_kafka_event_t* handle);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the name of this event
|
||||||
|
*/
|
||||||
|
std::string get_name() const;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the type of this event
|
||||||
|
*/
|
||||||
|
rd_kafka_event_type_t get_type() const;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Gets the next message contained in this event.
|
||||||
|
*
|
||||||
|
     * This call is only valid if the event type is one of:
     * * RD_KAFKA_EVENT_FETCH
     * * RD_KAFKA_EVENT_DR
     *
     * \note The returned message's lifetime *is tied to this Event*. That is, if the event
     * is freed, so are the contents of the message.
     */
    Message get_next_message() const;

    /**
     * \brief Gets all messages in this event (if any)
     *
     * This call is only valid if the event type is one of:
     * * RD_KAFKA_EVENT_FETCH
     * * RD_KAFKA_EVENT_DR
     *
     * \note The returned messages' lifetime *is tied to this Event*. That is, if the event
     * is freed, so are the contents of the messages.
     *
     * \return A vector containing 0 or more messages
     */
    std::vector<Message> get_messages();

    /**
     * \brief Gets all messages in this event (if any)
     *
     * This call is only valid if the event type is one of:
     * * RD_KAFKA_EVENT_FETCH
     * * RD_KAFKA_EVENT_DR
     *
     * \param allocator The allocator to use on the output vector
     *
     * \note The returned messages' lifetime *is tied to this Event*. That is, if the event
     * is freed, so are the contents of the messages.
     *
     * \return A vector containing 0 or more messages
     */
    template <typename Allocator>
    std::vector<Message, Allocator> get_messages(const Allocator allocator);

    /**
     * \brief Gets the number of messages contained in this event
     *
     * This call is only valid if the event type is one of:
     * * RD_KAFKA_EVENT_FETCH
     * * RD_KAFKA_EVENT_DR
     */
    size_t get_message_count() const;

    /**
     * \brief Returns the error in this event
     */
    Error get_error() const;

    /**
     * \brief Gets the opaque pointer in this event
     */
    void* get_opaque() const;

#if RD_KAFKA_VERSION >= RD_KAFKA_EVENT_STATS_SUPPORT_VERSION
    /**
     * \brief Gets the stats in this event
     *
     * This call is only valid if the event type is RD_KAFKA_EVENT_STATS
     */
    std::string get_stats() const {
        return rd_kafka_event_stats(handle_.get());
    }
#endif

    /**
     * \brief Gets the topic/partition for this event
     *
     * This call is only valid if the event type is RD_KAFKA_EVENT_ERROR
     */
    TopicPartition get_topic_partition() const;

    /**
     * \brief Gets the list of topic/partitions in this event
     *
     * This call is only valid if the event type is one of:
     * * RD_KAFKA_EVENT_REBALANCE
     * * RD_KAFKA_EVENT_OFFSET_COMMIT
     */
    TopicPartitionList get_topic_partition_list() const;

    /**
     * \brief Check whether this event is valid
     *
     * \return true iff this event has a valid (non-null) handle inside
     */
    operator bool() const;

private:
    using HandlePtr = std::unique_ptr<rd_kafka_event_t, decltype(&rd_kafka_event_destroy)>;

    HandlePtr handle_;
};

template <typename Allocator>
std::vector<Message, Allocator> Event::get_messages(const Allocator allocator) {
    const size_t total_messages = get_message_count();
    std::vector<const rd_kafka_message_t*> raw_messages(total_messages);
    const auto messages_read = rd_kafka_event_message_array(handle_.get(),
                                                            raw_messages.data(),
                                                            total_messages);
    std::vector<Message, Allocator> output(allocator);
    output.reserve(messages_read);
    // Only the first 'messages_read' entries are valid; the rest are null.
    for (size_t i = 0; i < messages_read; ++i) {
        output.emplace_back(Message::make_non_owning(const_cast<rd_kafka_message_t*>(raw_messages[i])));
    }
    return output;
}

} // cppkafka

#endif // CPPKAFKA_EVENT_H
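For orientation, a minimal usage sketch of the accessors above. It assumes an Event obtained elsewhere (for instance from a queue poll) whose type has already been confirmed as RD_KAFKA_EVENT_FETCH; the include path and the helper function itself are illustrative, not part of this header:

#include <iostream>
#include <cppkafka/event.h>

// The returned messages are non-owning views, so 'event' must outlive them.
void process_fetch_event(cppkafka::Event& event) {
    if (!event) {
        return; // null handle, nothing to process
    }
    for (const cppkafka::Message& message : event.get_messages()) {
        if (message.get_error()) {
            std::cerr << "message error: " << message.get_error().to_string() << "\n";
            continue;
        }
        std::cout << message.get_topic() << "[" << message.get_partition()
                  << "] @ " << message.get_offset() << "\n";
    }
}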
@@ -134,6 +134,14 @@ private:
     Error error_;
 };
 
+/**
+ * Backoff performer has no more retries left for a specific action.
+ */
+class CPPKAFKA_API ActionTerminatedException : public Exception {
+public:
+    ActionTerminatedException(const std::string& error);
+};
+
 } // cppkafka
 
 #endif // CPPKAFKA_EXCEPTIONS_H
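A compact sketch of how the new exception surfaces. BackoffPerformer itself lives elsewhere in cppkafka, so a hypothetical retry helper stands in for it here:

#include <iostream>
#include <cppkafka/exceptions.h>

// Hypothetical action that always fails, purely for demonstration.
static bool try_action() {
    return false;
}

static void run_with_retries(int max_attempts) {
    for (int attempt = 1; attempt <= max_attempts; ++attempt) {
        if (try_action()) {
            return; // succeeded
        }
    }
    // All retries exhausted: mirrors what BackoffPerformer reports.
    throw cppkafka::ActionTerminatedException("ran out of retries");
}

int main() {
    try {
        run_with_retries(3);
    }
    catch (const cppkafka::ActionTerminatedException& ex) {
        std::cerr << "gave up: " << ex.what() << "\n";
    }
    return 0;
}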
195 include/cppkafka/header.h Normal file
@@ -0,0 +1,195 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_HEADER_H
#define CPPKAFKA_HEADER_H

#include "macros.h"
#include "buffer.h"
#include <string>
#include <tuple>
#include <assert.h>

#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)

namespace cppkafka {

/**
 * \brief Class representing an rdkafka header.
 *
 * The template parameter 'BufferType' can represent a cppkafka::Buffer, std::string, std::vector, etc.
 * A valid header may contain an empty name as well as null data.
 */
template <typename BufferType>
class Header {
public:
    using ValueType = BufferType;

    /**
     * \brief Build an empty header with no data
     */
    Header() = default;

    /**
     * \brief Build a header instance
     * \param name The header name
     * \param value The non-modifiable header data
     */
    Header(std::string name,
           const BufferType& value);

    /**
     * \brief Build a header instance
     * \param name The header name
     * \param value The header data to be moved
     */
    Header(std::string name,
           BufferType&& value);

    /**
     * \brief Get the header name
     * \return A reference to the name
     */
    const std::string& get_name() const;

    /**
     * \brief Get the header value
     * \return A const reference to the underlying buffer
     */
    const BufferType& get_value() const;

    /**
     * \brief Get the header value
     * \return A non-const reference to the underlying buffer
     */
    BufferType& get_value();

    /**
     * \brief Check whether this header holds data
     * \return True if the header contains valid data, false otherwise.
     */
    operator bool() const;

private:
    template <typename T>
    T make_value(const T& other);

    Buffer make_value(const Buffer& other);

    std::string name_;
    BufferType value_;
};

// Comparison operators for Header type
template <typename BufferType>
bool operator==(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
    return std::tie(lhs.get_name(), lhs.get_value()) == std::tie(rhs.get_name(), rhs.get_value());
}

template <typename BufferType>
bool operator!=(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
    return !(lhs == rhs);
}

template <typename BufferType>
bool operator<(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
    return std::tie(lhs.get_name(), lhs.get_value()) < std::tie(rhs.get_name(), rhs.get_value());
}

template <typename BufferType>
bool operator>(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
    return std::tie(lhs.get_name(), lhs.get_value()) > std::tie(rhs.get_name(), rhs.get_value());
}

template <typename BufferType>
bool operator<=(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
    return !(lhs > rhs);
}

template <typename BufferType>
bool operator>=(const Header<BufferType>& lhs, const Header<BufferType>& rhs) {
    return !(lhs < rhs);
}

// Implementation
template <typename BufferType>
Header<BufferType>::Header(std::string name,
                           const BufferType& value)
: name_(std::move(name)),
  value_(make_value(value)) {
}

template <typename BufferType>
Header<BufferType>::Header(std::string name,
                           BufferType&& value)
: name_(std::move(name)),
  value_(std::move(value)) {
}

template <typename BufferType>
const std::string& Header<BufferType>::get_name() const {
    return name_;
}

template <typename BufferType>
const BufferType& Header<BufferType>::get_value() const {
    return value_;
}

template <typename BufferType>
BufferType& Header<BufferType>::get_value() {
    return value_;
}

template <typename BufferType>
Header<BufferType>::operator bool() const {
    return !value_.empty();
}

template <>
inline
Header<Buffer>::operator bool() const {
    return value_.get_size() > 0;
}

template <typename BufferType>
template <typename T>
T Header<BufferType>::make_value(const T& other) {
    return other;
}

template <typename BufferType>
Buffer Header<BufferType>::make_value(const Buffer& other) {
    return Buffer(other.get_data(), other.get_size());
}

} //namespace cppkafka

#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION

#endif //CPPKAFKA_HEADER_H
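A standalone usage sketch of the class above, assuming the installed include layout and a librdkafka recent enough for header support:

#include <cassert>
#include <string>
#include <cppkafka/header.h>

int main() {
    cppkafka::Header<std::string> retries("retry-count", std::string("3"));
    assert(retries);                         // holds data
    assert(retries.get_name() == "retry-count");
    assert(retries.get_value() == "3");

    cppkafka::Header<std::string> empty;     // default-constructed, no data
    assert(!empty);
    assert(retries != empty);                // (name, value) lexicographic comparison
    return 0;
}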
337 include/cppkafka/header_list.h Normal file
@@ -0,0 +1,337 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_HEADER_LIST_H
#define CPPKAFKA_HEADER_LIST_H

#include <algorithm>
#include <librdkafka/rdkafka.h>
#include "clonable_ptr.h"
#include "header.h"
#include "header_list_iterator.h"
#include "exceptions.h"

#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)

namespace cppkafka {

/**
 * \brief Thin wrapper over a rd_kafka_headers_t handle which optionally controls its lifetime.
 * \tparam HeaderType The header type
 *
 * This is a copyable and movable class that wraps a rd_kafka_headers_t*. When copying this class,
 * all associated headers are also copied via rd_kafka_headers_copy(). If this list owns the underlying handle,
 * its destructor will call rd_kafka_headers_destroy().
 */
template <typename HeaderType>
class HeaderList {
public:
    template <typename OtherHeaderType>
    friend class HeaderList;

    using BufferType = typename HeaderType::ValueType;
    using Iterator = HeaderIterator<HeaderType>;

    /**
     * Constructs a header list that won't take ownership of the given pointer.
     */
    static HeaderList<HeaderType> make_non_owning(rd_kafka_headers_t* handle);

    /**
     * \brief Create an empty header list with no handle.
     */
    HeaderList();

    /**
     * \brief Create an empty header list. This call translates to rd_kafka_headers_new().
     * \param reserve The number of headers to reserve space for.
     */
    explicit HeaderList(size_t reserve);

    /**
     * \brief Create a header list and assume ownership of the handle.
     * \param handle The header list handle.
     */
    explicit HeaderList(rd_kafka_headers_t* handle);

    /**
     * \brief Create a header list from another header list type
     * \param other The other list
     */
    template <typename OtherHeaderType>
    HeaderList(const HeaderList<OtherHeaderType>& other);

    template <typename OtherHeaderType>
    HeaderList(HeaderList<OtherHeaderType>&& other);

    /**
     * \brief Add a header to the list. This translates to rd_kafka_header_add().
     * \param header The header.
     * \return An Error indicating if the operation was successful or not.
     * \warning This operation shall invalidate all iterators.
     */
    Error add(const HeaderType& header);

    /**
     * \brief Remove all headers with 'name'. This translates to rd_kafka_header_remove().
     * \param name The name of the header(s) to remove.
     * \return An Error indicating if the operation was successful or not.
     * \warning This operation shall invalidate all iterators.
     */
    Error remove(const std::string& name);

    /**
     * \brief Return the header present at position 'index'. Throws on error.
     * This translates to rd_kafka_header_get(index).
     * \param index The header index in the list (0-based).
     * \return The header at that position.
     */
    HeaderType at(size_t index) const; //throws

    /**
     * \brief Return the first header in the list. Throws if the list is empty.
     * This translates to rd_kafka_header_get(0).
     * \return The first header.
     */
    HeaderType front() const; //throws

    /**
     * \brief Return the last header in the list. Throws if the list is empty.
     * This translates to rd_kafka_header_get(size-1).
     * \return The last header.
     */
    HeaderType back() const; //throws

    /**
     * \brief Returns the number of headers in the list. This translates to rd_kafka_header_cnt().
     * \return The number of headers.
     */
    size_t size() const;

    /**
     * \brief Indicates if this list is empty.
     * \return True if empty, false otherwise.
     */
    bool empty() const;

    /**
     * \brief Returns a HeaderIterator pointing to the first position if the list is not empty,
     * or pointing to end() otherwise.
     * \return An iterator.
     * \warning This iterator will be invalidated if add() or remove() is called.
     */
    Iterator begin() const;

    /**
     * \brief Returns a HeaderIterator pointing to one element past the end of the list.
     * \return An iterator.
     * \remark This iterator cannot be de-referenced.
     */
    Iterator end() const;

    /**
     * \brief Get the underlying header list handle.
     * \return The handle.
     */
    rd_kafka_headers_t* get_handle() const;

    /**
     * \brief Get the underlying header list handle and release its ownership.
     * \return The handle.
     * \warning After this call, the HeaderList becomes invalid.
     */
    rd_kafka_headers_t* release_handle();

    /**
     * \brief Indicates if this list is valid (contains a non-null handle) or not.
     * \return True if valid, false otherwise.
     */
    explicit operator bool() const;

private:
    struct NonOwningTag { };
    static void dummy_deleter(rd_kafka_headers_t*) {}

    using HandlePtr = ClonablePtr<rd_kafka_headers_t, decltype(&rd_kafka_headers_destroy),
                                  decltype(&rd_kafka_headers_copy)>;

    HeaderList(rd_kafka_headers_t* handle, NonOwningTag);

    HandlePtr handle_;
};

template <typename HeaderType>
bool operator==(const HeaderList<HeaderType>& lhs, const HeaderList<HeaderType>& rhs) {
    if (!lhs && !rhs) {
        return true;
    }
    if (!lhs || !rhs) {
        return false;
    }
    if (lhs.size() != rhs.size()) {
        return false;
    }
    return std::equal(lhs.begin(), lhs.end(), rhs.begin());
}

template <typename HeaderType>
bool operator!=(const HeaderList<HeaderType>& lhs, const HeaderList<HeaderType>& rhs) {
    return !(lhs == rhs);
}

template <typename HeaderType>
HeaderList<HeaderType> HeaderList<HeaderType>::make_non_owning(rd_kafka_headers_t* handle) {
    return HeaderList(handle, NonOwningTag());
}

template <typename HeaderType>
HeaderList<HeaderType>::HeaderList()
: handle_(nullptr, nullptr, nullptr) {
}

template <typename HeaderType>
HeaderList<HeaderType>::HeaderList(size_t reserve)
: handle_(rd_kafka_headers_new(reserve), &rd_kafka_headers_destroy, &rd_kafka_headers_copy) {
    assert(reserve);
}

template <typename HeaderType>
HeaderList<HeaderType>::HeaderList(rd_kafka_headers_t* handle)
: handle_(handle, &rd_kafka_headers_destroy, &rd_kafka_headers_copy) { //if we own the header list, we clone it on copy
    assert(handle);
}

template <typename HeaderType>
HeaderList<HeaderType>::HeaderList(rd_kafka_headers_t* handle, NonOwningTag)
: handle_(handle, &dummy_deleter, nullptr) { //if we don't own the header list, we forward the handle on copy
    assert(handle);
}

template <typename HeaderType>
template <typename OtherHeaderType>
HeaderList<HeaderType>::HeaderList(const HeaderList<OtherHeaderType>& other)
: handle_(other.handle_) {
}

template <typename HeaderType>
template <typename OtherHeaderType>
HeaderList<HeaderType>::HeaderList(HeaderList<OtherHeaderType>&& other)
: handle_(std::move(other.handle_)) {
}

// Methods
template <typename HeaderType>
Error HeaderList<HeaderType>::add(const HeaderType& header) {
    assert(handle_);
    return rd_kafka_header_add(handle_.get(),
                               header.get_name().data(), header.get_name().size(),
                               header.get_value().data(), header.get_value().size());
}

template <>
inline
Error HeaderList<Header<Buffer>>::add(const Header<Buffer>& header) {
    assert(handle_);
    return rd_kafka_header_add(handle_.get(),
                               header.get_name().data(), header.get_name().size(),
                               header.get_value().get_data(), header.get_value().get_size());
}

template <typename HeaderType>
Error HeaderList<HeaderType>::remove(const std::string& name) {
    assert(handle_);
    return rd_kafka_header_remove(handle_.get(), name.data());
}

template <typename HeaderType>
HeaderType HeaderList<HeaderType>::at(size_t index) const {
    assert(handle_);
    const char *name, *value;
    size_t size;
    Error error = rd_kafka_header_get_all(handle_.get(), index, &name, reinterpret_cast<const void**>(&value), &size);
    if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
        throw Exception(error.to_string());
    }
    return HeaderType(name, BufferType(value, value + size));
}

template <typename HeaderType>
HeaderType HeaderList<HeaderType>::front() const {
    return at(0);
}

template <typename HeaderType>
HeaderType HeaderList<HeaderType>::back() const {
    return at(size() - 1);
}

template <typename HeaderType>
size_t HeaderList<HeaderType>::size() const {
    return handle_ ? rd_kafka_header_cnt(handle_.get()) : 0;
}

template <typename HeaderType>
bool HeaderList<HeaderType>::empty() const {
    return size() == 0;
}

template <typename HeaderType>
typename HeaderList<HeaderType>::Iterator
HeaderList<HeaderType>::begin() const {
    return Iterator(*this, 0);
}

template <typename HeaderType>
typename HeaderList<HeaderType>::Iterator
HeaderList<HeaderType>::end() const {
    return Iterator(*this, size());
}

template <typename HeaderType>
rd_kafka_headers_t* HeaderList<HeaderType>::get_handle() const {
    return handle_.get();
}

template <typename HeaderType>
rd_kafka_headers_t* HeaderList<HeaderType>::release_handle() {
    return handle_.release();
}

template <typename HeaderType>
HeaderList<HeaderType>::operator bool() const {
    return static_cast<bool>(handle_);
}

} //namespace cppkafka

#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION

#endif //CPPKAFKA_HEADER_LIST_H
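A short sketch of the list in action, standalone and again assuming header support in the linked librdkafka:

#include <iostream>
#include <string>
#include <cppkafka/header_list.h>

int main() {
    using HeaderType = cppkafka::Header<std::string>;
    cppkafka::HeaderList<HeaderType> headers(2); // rd_kafka_headers_new(2) underneath
    headers.add(HeaderType("source", std::string("billing")));
    headers.add(HeaderType("trace-id", std::string("abc123")));

    // add()/remove() invalidate iterators, so finish mutating before iterating.
    for (const HeaderType& header : headers) {
        std::cout << header.get_name() << " = " << header.get_value() << "\n";
    }

    headers.remove("trace-id");
    std::cout << "remaining: " << headers.size() << "\n"; // 1
    return 0;
}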
193 include/cppkafka/header_list_iterator.h Normal file
@@ -0,0 +1,193 @@
/*
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_HEADER_LIST_ITERATOR_H
#define CPPKAFKA_HEADER_LIST_ITERATOR_H

#include <cstddef>
#include <utility>
#include <iterator>
#include "header.h"

#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)

namespace cppkafka {

template <typename HeaderType>
class HeaderList;

template <typename HeaderType>
class HeaderIterator;

template <typename HeaderType>
bool operator==(const HeaderIterator<HeaderType>& lhs, const HeaderIterator<HeaderType>& rhs);

/**
 * \brief Iterator over a HeaderList object.
 * \tparam HeaderType The type of header this iterator points to.
 */
template <typename HeaderType>
class HeaderIterator {
public:
    friend HeaderList<HeaderType>;
    using HeaderListType = HeaderList<HeaderType>;
    using BufferType = typename HeaderType::ValueType;
    //std::iterator_traits
    using difference_type = std::ptrdiff_t;
    using value_type = HeaderType;
    using pointer = value_type*;
    using reference = value_type&;
    using iterator_category = std::bidirectional_iterator_tag;
    friend bool operator==<HeaderType>(const HeaderIterator<HeaderType>& lhs,
                                       const HeaderIterator<HeaderType>& rhs);

    HeaderIterator(const HeaderIterator& other)
    : header_list_(other.header_list_),
      header_(make_header(other.header_)),
      index_(other.index_) {
    }
    HeaderIterator& operator=(const HeaderIterator& other) {
        if (this == &other) return *this;
        header_list_ = other.header_list_;
        header_ = make_header(other.header_);
        index_ = other.index_;
        return *this;
    }
    HeaderIterator(HeaderIterator&&) = default;
    HeaderIterator& operator=(HeaderIterator&&) = default;

    /**
     * \brief Prefix increment of the iterator.
     * \return Itself after being incremented.
     */
    HeaderIterator& operator++() {
        assert(index_ < header_list_->size());
        ++index_;
        return *this;
    }

    /**
     * \brief Postfix increment of the iterator.
     * \return A copy of itself before being incremented.
     */
    HeaderIterator operator++(int) {
        HeaderIterator tmp(*this);
        operator++();
        return tmp;
    }

    /**
     * \brief Prefix decrement of the iterator.
     * \return Itself after being decremented.
     */
    HeaderIterator& operator--() {
        assert(index_ > 0);
        --index_;
        return *this;
    }

    /**
     * \brief Postfix decrement of the iterator.
     * \return A copy of itself before being decremented.
     */
    HeaderIterator operator--(int) {
        HeaderIterator tmp(*this);
        operator--();
        return tmp;
    }

    /**
     * \brief Dereferences this iterator.
     * \return A reference to the header the iterator points to.
     * \warning Throws if invalid or if *this == end().
     */
    const HeaderType& operator*() const {
        header_ = header_list_->at(index_);
        return header_;
    }
    HeaderType& operator*() {
        header_ = header_list_->at(index_);
        return header_;
    }

    /**
     * \brief Dereferences this iterator.
     * \return The address of the header the iterator points to.
     * \warning Throws if invalid or if *this == end().
     */
    const HeaderType* operator->() const {
        header_ = header_list_->at(index_);
        return &header_;
    }
    HeaderType* operator->() {
        header_ = header_list_->at(index_);
        return &header_;
    }

private:
    HeaderIterator(const HeaderListType& headers,
                   size_t index)
    : header_list_(&headers),
      index_(index) {
    }

    template <typename T>
    T make_header(const T& other) {
        return other;
    }

    Header<Buffer> make_header(const Header<Buffer>& other) {
        return Header<Buffer>(other.get_name(),
                              Buffer(other.get_value().get_data(),
                                     other.get_value().get_size()));
    }

    // Stored as a pointer (rather than a reference) so iterators stay copy-assignable.
    const HeaderListType* header_list_;
    // Mutable so the const dereference operators can refresh the cached header.
    mutable HeaderType header_;
    size_t index_;
};

// Equality comparison operators
template <typename HeaderType>
bool operator==(const HeaderIterator<HeaderType>& lhs, const HeaderIterator<HeaderType>& rhs) {
    return (lhs.header_list_->get_handle() == rhs.header_list_->get_handle()) && (lhs.index_ == rhs.index_);
}

template <typename HeaderType>
bool operator!=(const HeaderIterator<HeaderType>& lhs, const HeaderIterator<HeaderType>& rhs) {
    return !(lhs == rhs);
}

} //namespace cppkafka

#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION

#endif //CPPKAFKA_HEADER_LIST_ITERATOR_H
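Since HeaderIterator models a bidirectional iterator, standard algorithms compose with HeaderList; a small sketch:

#include <algorithm>
#include <string>
#include <cppkafka/header_list.h>

// Returns true when a header named 'name' exists in 'headers'.
template <typename HeaderType>
bool has_header(const cppkafka::HeaderList<HeaderType>& headers, const std::string& name) {
    return std::find_if(headers.begin(), headers.end(),
                        [&name](const HeaderType& header) {
                            return header.get_name() == name;
                        }) != headers.end();
}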
@@ -45,6 +45,8 @@
 #include "topic_configuration.h"
 #include "configuration.h"
 #include "macros.h"
+#include "logging.h"
+#include "queue.h"
 
 namespace cppkafka {
 
@@ -108,6 +110,11 @@ public:
      */
     void set_timeout(std::chrono::milliseconds timeout);
 
+    /**
+     * \brief Sets the log level
+     */
+    void set_log_level(LogLevel level);
+
     /**
      * \brief Adds one or more brokers to this handle's broker list
      *
@@ -128,6 +135,20 @@ public:
      */
     OffsetTuple query_offsets(const TopicPartition& topic_partition) const;
 
+    /**
+     * \brief Queries the offset for the given topic/partition with a given timeout
+     *
+     * This translates into a call to rd_kafka_query_watermark_offsets
+     *
+     * \param topic_partition The topic/partition to be queried
+     *
+     * \param timeout The timeout for this operation. This supersedes the default handle timeout.
+     *
+     * \return A pair of watermark offsets {low, high}
+     */
+    OffsetTuple query_offsets(const TopicPartition& topic_partition,
+                              std::chrono::milliseconds timeout) const;
+
     /**
      * \brief Gets the rdkafka handle
      *
@@ -171,6 +192,20 @@ public:
      */
     Metadata get_metadata(bool all_topics = true) const;
 
+    /**
+     * \brief Gets metadata for brokers, topics, partitions, etc. with a timeout
+     *
+     * This translates into a call to rd_kafka_metadata
+     *
+     * \param all_topics Whether to fetch metadata about all topics or only locally known ones
+     *
+     * \param timeout The timeout for this operation. Supersedes the default handle timeout.
+     *
+     * \return The metadata
+     */
+    Metadata get_metadata(bool all_topics,
+                          std::chrono::milliseconds timeout) const;
+
     /**
      * \brief Gets general metadata but only fetches metadata for the given topic rather than
      * all of them
@@ -183,6 +218,21 @@ public:
      */
     TopicMetadata get_metadata(const Topic& topic) const;
 
+    /**
+     * \brief Gets general metadata but only fetches metadata for the given topic rather than
+     * all of them. Uses a timeout to limit the operation's execution time.
+     *
+     * This translates into a call to rd_kafka_metadata
+     *
+     * \param topic The topic to fetch information for
+     *
+     * \param timeout The timeout for this operation. Supersedes the default handle timeout.
+     *
+     * \return The topic metadata
+     */
+    TopicMetadata get_metadata(const Topic& topic,
+                               std::chrono::milliseconds timeout) const;
+
     /**
      * \brief Gets the consumer group information
      *
@@ -192,6 +242,18 @@ public:
      */
     GroupInformation get_consumer_group(const std::string& name);
 
+    /**
+     * \brief Gets the consumer group information with a timeout
+     *
+     * \param name The name of the consumer group to look up
+     *
+     * \param timeout The timeout for this operation. Supersedes the default handle timeout.
+     *
+     * \return The group information
+     */
+    GroupInformation get_consumer_group(const std::string& name,
+                                        std::chrono::milliseconds timeout);
+
     /**
      * \brief Gets all consumer groups
      *
@@ -199,6 +261,15 @@ public:
      */
     GroupInformationList get_consumer_groups();
 
+    /**
+     * \brief Gets all consumer groups with a timeout
+     *
+     * \param timeout The timeout for this operation. Supersedes the default handle timeout.
+     *
+     * \return A list of consumer groups
+     */
+    GroupInformationList get_consumer_groups(std::chrono::milliseconds timeout);
+
     /**
      * \brief Gets topic/partition offsets based on timestamps
      *
@@ -210,6 +281,20 @@ public:
      */
     TopicPartitionList get_offsets_for_times(const TopicPartitionsTimestampsMap& queries) const;
 
+    /**
+     * \brief Gets topic/partition offsets based on timestamps with a timeout
+     *
+     * This translates into a call to rd_kafka_offsets_for_times
+     *
+     * \param queries A map from topic/partition to the timestamp to be used
+     *
+     * \param timeout The timeout for this operation. This supersedes the default handle timeout.
+     *
+     * \return A topic partition list
+     */
+    TopicPartitionList get_offsets_for_times(const TopicPartitionsTimestampsMap& queries,
+                                             std::chrono::milliseconds timeout) const;
+
     /**
      * \brief Get the kafka handle name
      *
@@ -233,6 +318,19 @@ public:
      */
     const Configuration& get_configuration() const;
 
+#if RD_KAFKA_VERSION >= RD_KAFKA_ADMIN_API_SUPPORT_VERSION
+    /**
+     * \brief Gets the background queue
+     *
+     * This translates into a call to rd_kafka_queue_get_background
+     *
+     * \return The background queue
+     */
+    Queue get_background_queue() const {
+        return Queue::make_queue(rd_kafka_queue_get_background(handle_.get()));
+    }
+#endif
+
     /**
      * \brief Gets the length of the out queue
      *
@@ -242,6 +340,18 @@ public:
      */
     int get_out_queue_length() const;
 
+#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
+    /**
+     * \brief Sets flags for rd_kafka_destroy_flags()
+     */
+    void set_destroy_flags(int destroy_flags);
+
+    /**
+     * \brief Returns flags for rd_kafka_destroy_flags()
+     */
+    int get_destroy_flags() const;
+#endif
+
     /**
      * \brief Cancels the current callback dispatcher
      *
@@ -259,12 +369,22 @@ protected:
 private:
     static const std::chrono::milliseconds DEFAULT_TIMEOUT;
 
-    using HandlePtr = std::unique_ptr<rd_kafka_t, decltype(&rd_kafka_destroy)>;
+    struct HandleDeleter {
+        explicit HandleDeleter(const KafkaHandleBase* handle_base_ptr) : handle_base_ptr_{handle_base_ptr} {}
+        void operator()(rd_kafka_t* handle);
+    private:
+        const KafkaHandleBase* handle_base_ptr_;
+    };
+
+    using HandlePtr = std::unique_ptr<rd_kafka_t, HandleDeleter>;
     using TopicConfigurationMap = std::unordered_map<std::string, TopicConfiguration>;
 
     Topic get_topic(const std::string& name, rd_kafka_topic_conf_t* conf);
-    Metadata get_metadata(bool all_topics, rd_kafka_topic_t* topic_ptr) const;
-    GroupInformationList fetch_consumer_groups(const char* name);
+    Metadata get_metadata(bool all_topics,
+                          rd_kafka_topic_t* topic_ptr,
+                          std::chrono::milliseconds timeout) const;
+    GroupInformationList fetch_consumer_groups(const char* name,
+                                               std::chrono::milliseconds timeout);
    void save_topic_config(const std::string& topic_name, TopicConfiguration config);
 
     std::chrono::milliseconds timeout_ms_;
@@ -272,6 +392,7 @@ private:
     TopicConfigurationMap topic_configurations_;
     std::mutex topic_configurations_mutex_;
     HandlePtr handle_;
+    int destroy_flags_;
 };
 
 } // cppkafka
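The per-call timeout overloads added above can be exercised through any concrete handle; a sketch using a Consumer, with the broker address, group id, and topic name as placeholders:

#include <chrono>
#include <iostream>
#include <cppkafka/consumer.h>

int main() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "localhost:9092" }, // placeholder broker
        { "group.id", "offset-probe" }                // placeholder group
    };
    cppkafka::Consumer consumer(config);

    // The explicit timeout supersedes the handle-wide default set via set_timeout().
    auto offsets = consumer.query_offsets({ "my-topic", 0 },
                                          std::chrono::milliseconds(500));
    std::cout << "low=" << std::get<0>(offsets)
              << " high=" << std::get<1>(offsets) << "\n";
    return 0;
}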
@@ -43,4 +43,14 @@
 #define CPPKAFKA_API
 #endif // _WIN32 && !CPPKAFKA_STATIC
 
+// See: https://github.com/edenhill/librdkafka/issues/1792
+#define RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION      0x000b0500 //v0.11.5.00
+#define RD_KAFKA_HEADERS_SUPPORT_VERSION         0x000b0402 //v0.11.4.02
+#define RD_KAFKA_ADMIN_API_SUPPORT_VERSION       0x000b0500 //v0.11.5.00
+#define RD_KAFKA_MESSAGE_LATENCY_SUPPORT_VERSION 0x000b0000 //v0.11.0.00
+#define RD_KAFKA_EVENT_STATS_SUPPORT_VERSION     0x000b0000 //v0.11.0.00
+#define RD_KAFKA_MESSAGE_STATUS_SUPPORT_VERSION  0x01000002 //v1.0.0.02
+#define RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION   0x00090501 //v0.9.5.01
+#define RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION   0x000b0600 //v0.11.6
+
 #endif // CPPKAFKA_MACROS_H
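These macros let downstream code gate on librdkafka features at compile time; RD_KAFKA_VERSION itself comes from librdkafka's rdkafka.h. A sketch:

#include <librdkafka/rdkafka.h>
#include <cppkafka/macros.h>

#if RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION
// Safe to use cppkafka::Header / cppkafka::HeaderList here.
#else
// Headers unavailable: fall back to encoding metadata in the payload.
#endif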
@@ -39,10 +39,11 @@
 #include "buffer.h"
 #include "macros.h"
 #include "error.h"
+#include "header_list.h"
+#include "message_timestamp.h"
 
 namespace cppkafka {
 
-class MessageTimestamp;
 class Internal;
 
 /**
@@ -59,6 +60,10 @@ class CPPKAFKA_API Message {
 public:
     friend class MessageInternal;
     using InternalPtr = std::shared_ptr<Internal>;
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+    using HeaderType = Header<Buffer>;
+    using HeaderListType = HeaderList<HeaderType>;
+#endif
     /**
      * Constructs a message that won't take ownership of the given pointer
     */
@@ -84,7 +89,7 @@ public:
     Message& operator=(Message&& rhs) = default;
 
     /**
-     * Gets the error attribute
+     * \brief Gets the error attribute
      */
     Error get_error() const {
         assert(handle_);
@@ -92,22 +97,22 @@ public:
     }
 
     /**
-     * Utility function to check for get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF
+     * \brief Utility function to check for get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF
      */
     bool is_eof() const {
         return get_error() == RD_KAFKA_RESP_ERR__PARTITION_EOF;
     }
 
     /**
-     * Gets the topic that this message belongs to
+     * \brief Gets the topic that this message belongs to
      */
     std::string get_topic() const {
         assert(handle_);
-        return rd_kafka_topic_name(handle_->rkt);
+        return handle_->rkt ? rd_kafka_topic_name(handle_->rkt) : std::string{};
     }
 
     /**
-     * Gets the partition that this message belongs to
+     * \brief Gets the partition that this message belongs to
      */
     int get_partition() const {
         assert(handle_);
@@ -115,21 +120,54 @@ public:
     }
 
     /**
-     * Gets the message's payload
+     * \brief Gets the message's payload
      */
     const Buffer& get_payload() const {
         return payload_;
     }
 
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
     /**
-     * Gets the message's key
+     * \brief Sets the message's header list.
+     * \note This calls rd_kafka_message_set_headers.
+     */
+    void set_header_list(const HeaderListType& headers) {
+        assert(handle_);
+        if (!headers) {
+            return; //nothing to set
+        }
+        rd_kafka_headers_t* handle_copy = rd_kafka_headers_copy(headers.get_handle());
+        rd_kafka_message_set_headers(handle_.get(), handle_copy);
+        header_list_ = HeaderListType::make_non_owning(handle_copy);
+    }
+
+    /**
+     * \brief Gets the message's header list
+     */
+    const HeaderListType& get_header_list() const {
+        return header_list_;
+    }
+
+    /**
+     * \brief Detaches the message's header list
+     */
+    template <typename HeaderType>
+    HeaderList<HeaderType> detach_header_list() {
+        rd_kafka_headers_t* headers_handle;
+        Error error = rd_kafka_message_detach_headers(handle_.get(), &headers_handle);
+        return error ? HeaderList<HeaderType>() : HeaderList<HeaderType>(headers_handle);
+    }
+#endif
+
+    /**
+     * \brief Gets the message's key
      */
     const Buffer& get_key() const {
         return key_;
     }
 
     /**
-     * Gets the message offset
+     * \brief Gets the message offset
      */
     int64_t get_offset() const {
         assert(handle_);
@@ -151,24 +189,44 @@ public:
      *
      * If calling rd_kafka_message_timestamp returns -1, then boost::none_t will be returned.
      */
-    inline boost::optional<MessageTimestamp> get_timestamp() const;
+    boost::optional<MessageTimestamp> get_timestamp() const;
+
+#if RD_KAFKA_VERSION >= RD_KAFKA_MESSAGE_LATENCY_SUPPORT_VERSION
+    /**
+     * \brief Gets the message latency in microseconds as measured from the produce() call.
+     */
+    std::chrono::microseconds get_latency() const {
+        assert(handle_);
+        return std::chrono::microseconds(rd_kafka_message_latency(handle_.get()));
+    }
+#endif
+
+#if (RD_KAFKA_VERSION >= RD_KAFKA_MESSAGE_STATUS_SUPPORT_VERSION)
+    /**
+     * \brief Gets the message persistence status
+     */
+    rd_kafka_msg_status_t get_status() const {
+        assert(handle_);
+        return rd_kafka_message_status(handle_.get());
+    }
+#endif
 
     /**
-     * Indicates whether this message is valid (not null)
+     * \brief Indicates whether this message is valid (not null)
      */
     explicit operator bool() const {
         return handle_ != nullptr;
     }
 
     /**
-     * Gets the rdkafka message handle
+     * \brief Gets the rdkafka message handle
      */
     rd_kafka_message_t* get_handle() const {
         return handle_.get();
     }
 
     /**
-     * Internal private const data accessor (internal use only)
+     * \brief Internal private const data accessor (internal use only)
      */
     InternalPtr internal() const {
         return internal_;
@@ -185,54 +243,15 @@ private:
     HandlePtr handle_;
     Buffer payload_;
     Buffer key_;
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+    HeaderListType header_list_;
+#endif
     void* user_data_;
     InternalPtr internal_;
 };
 
 using MessageList = std::vector<Message>;
 
-/**
- * Represents a message's timestamp
- */
-class CPPKAFKA_API MessageTimestamp {
-public:
-    /**
-     * The timestamp type
-     */
-    enum TimestampType {
-        CREATE_TIME = RD_KAFKA_TIMESTAMP_CREATE_TIME,
-        LOG_APPEND_TIME = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME
-    };
-
-    /**
-     * Constructs a timestamp object
-     */
-    MessageTimestamp(std::chrono::milliseconds timestamp, TimestampType type);
-
-    /**
-     * Gets the timestamp value
-     */
-    std::chrono::milliseconds get_timestamp() const;
-
-    /**
-     * Gets the timestamp type
-     */
-    TimestampType get_type() const;
-private:
-    std::chrono::milliseconds timestamp_;
-    TimestampType type_;
-};
-
-boost::optional<MessageTimestamp> Message::get_timestamp() const {
-    rd_kafka_timestamp_type_t type = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
-    int64_t timestamp = rd_kafka_message_timestamp(handle_.get(), &type);
-    if (timestamp == -1 || type == RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
-        return {};
-    }
-    return MessageTimestamp(std::chrono::milliseconds(timestamp),
-                            static_cast<MessageTimestamp::TimestampType>(type));
-}
-
 } // cppkafka
 
 #endif // CPPKAFKA_MESSAGE_H
@@ -35,6 +35,7 @@
 #include "topic.h"
 #include "macros.h"
 #include "message.h"
+#include "header_list.h"
 
 namespace cppkafka {
 
@@ -44,6 +45,10 @@ namespace cppkafka {
 template <typename BufferType, typename Concrete>
 class BasicMessageBuilder {
 public:
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+    using HeaderType = Header<BufferType>;
+    using HeaderListType = HeaderList<HeaderType>;
+#endif
     /**
      * Construct a BasicMessageBuilder
      *
@@ -65,7 +70,12 @@ public:
      */
     template <typename OtherBufferType, typename OtherConcrete>
     BasicMessageBuilder(const BasicMessageBuilder<OtherBufferType, OtherConcrete>& rhs);
+
+    template <typename OtherBufferType, typename OtherConcrete>
+    BasicMessageBuilder(BasicMessageBuilder<OtherBufferType, OtherConcrete>&& rhs);
+
+    /**
+     * Default copy and move constructors and assignment operators
+     */
     BasicMessageBuilder(BasicMessageBuilder&&) = default;
     BasicMessageBuilder(const BasicMessageBuilder&) = default;
     BasicMessageBuilder& operator=(BasicMessageBuilder&&) = default;
@@ -99,6 +109,17 @@ public:
      */
     Concrete& key(BufferType&& value);
 
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+    /**
+     * Adds a header to the message
+     *
+     * \param header The header to be used
+     */
+    Concrete& header(const HeaderType& header);
+    Concrete& headers(const HeaderListType& headers);
+    Concrete& headers(HeaderListType&& headers);
+#endif
+
     /**
      * Sets the message's payload
      *
@@ -114,12 +135,20 @@ public:
     Concrete& payload(BufferType&& value);
 
     /**
-     * Sets the message's timestamp
+     * Sets the message's timestamp with a 'duration'
      *
      * \param value The timestamp to be used
      */
     Concrete& timestamp(std::chrono::milliseconds value);
 
+    /**
+     * Sets the message's timestamp with a 'time_point'.
+     *
+     * \param value The timestamp to be used
+     */
+    template <typename Clock, typename Duration = typename Clock::duration>
+    Concrete& timestamp(std::chrono::time_point<Clock, Duration> value);
+
     /**
      * Sets the message's user data pointer
      *
@@ -147,6 +176,18 @@ public:
      */
     BufferType& key();
 
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+    /**
+     * Gets the list of headers
+     */
+    const HeaderListType& header_list() const;
+
+    /**
+     * Gets the list of headers
+     */
+    HeaderListType& header_list();
+#endif
+
     /**
      * Gets the message's payload
      */
@@ -158,7 +199,8 @@ public:
     BufferType& payload();
 
     /**
-     * Gets the message's timestamp
+     * Gets the message's timestamp as a duration. If the timestamp was created with a 'time_point',
+     * the duration represents the number of milliseconds since epoch.
      */
     std::chrono::milliseconds timestamp() const;
 
@@ -173,13 +215,18 @@ public:
     Message::InternalPtr internal() const;
     Concrete& internal(Message::InternalPtr internal);
 
-private:
+protected:
     void construct_buffer(BufferType& lhs, const BufferType& rhs);
 
+private:
     Concrete& get_concrete();
 
     std::string topic_;
     int partition_{-1};
     BufferType key_;
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+    HeaderListType header_list_;
+#endif
     BufferType payload_;
     std::chrono::milliseconds timestamp_{0};
     void* user_data_;
@@ -196,23 +243,51 @@ template <typename T, typename C>
 BasicMessageBuilder<T, C>::BasicMessageBuilder(const Message& message)
 : topic_(message.get_topic()),
   key_(Buffer(message.get_key().get_data(), message.get_key().get_size())),
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+  //Here we must explicitly copy the Message's headers, since they are non-owning and this class
+  //assumes full ownership. Otherwise we would be holding an invalid handle once the Message goes
+  //out of scope and rdkafka frees its resource.
+  header_list_(message.get_header_list() ?
+      HeaderListType(rd_kafka_headers_copy(message.get_header_list().get_handle())) : HeaderListType()), //copy headers
+#endif
   payload_(Buffer(message.get_payload().get_data(), message.get_payload().get_size())),
   timestamp_(message.get_timestamp() ? message.get_timestamp().get().get_timestamp() :
                                        std::chrono::milliseconds(0)),
   user_data_(message.get_user_data()),
   internal_(message.internal()) {
 
 }
 
 template <typename T, typename C>
 template <typename U, typename V>
 BasicMessageBuilder<T, C>::BasicMessageBuilder(const BasicMessageBuilder<U, V>& rhs)
-: topic_(rhs.topic()), partition_(rhs.partition()), timestamp_(rhs.timestamp()),
+: topic_(rhs.topic()),
+  partition_(rhs.partition()),
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+  header_list_(rhs.header_list()), //copy headers
+#endif
+  timestamp_(rhs.timestamp()),
   user_data_(rhs.user_data()),
   internal_(rhs.internal()) {
     get_concrete().construct_buffer(key_, rhs.key());
     get_concrete().construct_buffer(payload_, rhs.payload());
 }
+
+template <typename T, typename C>
+template <typename U, typename V>
+BasicMessageBuilder<T, C>::BasicMessageBuilder(BasicMessageBuilder<U, V>&& rhs)
+: topic_(rhs.topic()),
+  partition_(rhs.partition()),
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+  header_list_(std::move(rhs.header_list())), //assume ownership of rhs's headers
+#endif
+  timestamp_(rhs.timestamp()),
+  user_data_(rhs.user_data()),
+  internal_(rhs.internal()) {
+    get_concrete().construct_buffer(key_, std::move(rhs.key()));
+    get_concrete().construct_buffer(payload_, std::move(rhs.payload()));
+}
 
 template <typename T, typename C>
 C& BasicMessageBuilder<T, C>::topic(std::string value) {
     topic_ = std::move(value);
@@ -237,6 +312,29 @@ C& BasicMessageBuilder<T, C>::key(T&& value) {
     return get_concrete();
 }
 
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+template <typename T, typename C>
+C& BasicMessageBuilder<T, C>::header(const HeaderType& header) {
+    if (!header_list_) {
+        header_list_ = HeaderListType(5); //reserve space for 5 headers
+    }
+    header_list_.add(header);
+    return get_concrete();
+}
+
+template <typename T, typename C>
+C& BasicMessageBuilder<T, C>::headers(const HeaderListType& headers) {
+    header_list_ = headers;
+    return get_concrete();
+}
+
+template <typename T, typename C>
+C& BasicMessageBuilder<T, C>::headers(HeaderListType&& headers) {
+    header_list_ = std::move(headers);
+    return get_concrete();
+}
+#endif
+
 template <typename T, typename C>
 C& BasicMessageBuilder<T, C>::payload(const T& value) {
     get_concrete().construct_buffer(payload_, value);
@@ -255,6 +353,14 @@ C& BasicMessageBuilder<T, C>::timestamp(std::chrono::milliseconds value) {
     return get_concrete();
 }
 
+template <typename T, typename C>
+template <typename Clock, typename Duration>
+C& BasicMessageBuilder<T, C>::timestamp(std::chrono::time_point<Clock, Duration> value) {
+    timestamp_ = std::chrono::duration_cast<std::chrono::milliseconds>(value.time_since_epoch());
+    return get_concrete();
+}
+
 template <typename T, typename C>
|
template <typename T, typename C>
|
||||||
C& BasicMessageBuilder<T, C>::user_data(void* value) {
|
C& BasicMessageBuilder<T, C>::user_data(void* value) {
|
||||||
user_data_ = value;
|
user_data_ = value;
|
||||||
@@ -281,6 +387,20 @@ T& BasicMessageBuilder<T, C>::key() {
|
|||||||
return key_;
|
return key_;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
|
||||||
|
template <typename T, typename C>
|
||||||
|
const typename BasicMessageBuilder<T, C>::HeaderListType&
|
||||||
|
BasicMessageBuilder<T, C>::header_list() const {
|
||||||
|
return header_list_;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename C>
|
||||||
|
typename BasicMessageBuilder<T, C>::HeaderListType&
|
||||||
|
BasicMessageBuilder<T, C>::header_list() {
|
||||||
|
return header_list_;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
template <typename T, typename C>
|
template <typename T, typename C>
|
||||||
const T& BasicMessageBuilder<T, C>::payload() const {
|
const T& BasicMessageBuilder<T, C>::payload() const {
|
||||||
return payload_;
|
return payload_;
|
||||||
@@ -338,24 +458,34 @@ C& BasicMessageBuilder<T, C>::get_concrete() {
|
|||||||
*/
|
*/
|
||||||
class MessageBuilder : public BasicMessageBuilder<Buffer, MessageBuilder> {
|
class MessageBuilder : public BasicMessageBuilder<Buffer, MessageBuilder> {
|
||||||
public:
|
public:
|
||||||
using BasicMessageBuilder::BasicMessageBuilder;
|
using Base = BasicMessageBuilder<Buffer, MessageBuilder>;
|
||||||
|
using BasicMessageBuilder<Buffer, MessageBuilder>::BasicMessageBuilder;
|
||||||
|
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
|
||||||
|
using HeaderType = Base::HeaderType;
|
||||||
|
using HeaderListType = Base::HeaderListType;
|
||||||
|
#endif
|
||||||
|
|
||||||
void construct_buffer(Buffer& lhs, const Buffer& rhs) {
|
void construct_buffer(Buffer& lhs, const Buffer& rhs) {
|
||||||
lhs = Buffer(rhs.get_data(), rhs.get_size());
|
lhs = Buffer(rhs.get_data(), rhs.get_size());
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
void construct_buffer(Buffer& lhs, const T& rhs) {
|
void construct_buffer(Buffer& lhs, T&& rhs) {
|
||||||
lhs = Buffer(rhs);
|
lhs = Buffer(std::forward<T>(rhs));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
MessageBuilder clone() const {
|
MessageBuilder clone() const {
|
||||||
return std::move(MessageBuilder(topic()).
|
MessageBuilder builder(topic());
|
||||||
key(Buffer(key().get_data(), key().get_size())).
|
builder.key(Buffer(key().get_data(), key().get_size())).
|
||||||
payload(Buffer(payload().get_data(), payload().get_size())).
|
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
|
||||||
timestamp(timestamp()).
|
headers(header_list()).
|
||||||
user_data(user_data()).
|
#endif
|
||||||
internal(internal()));
|
payload(Buffer(payload().get_data(), payload().get_size())).
|
||||||
|
timestamp(timestamp()).
|
||||||
|
user_data(user_data()).
|
||||||
|
internal(internal());
|
||||||
|
return builder;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -365,7 +495,12 @@ public:
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
class ConcreteMessageBuilder : public BasicMessageBuilder<T, ConcreteMessageBuilder<T>> {
|
class ConcreteMessageBuilder : public BasicMessageBuilder<T, ConcreteMessageBuilder<T>> {
|
||||||
public:
|
public:
|
||||||
|
using Base = BasicMessageBuilder<T, ConcreteMessageBuilder<T>>;
|
||||||
using BasicMessageBuilder<T, ConcreteMessageBuilder<T>>::BasicMessageBuilder;
|
using BasicMessageBuilder<T, ConcreteMessageBuilder<T>>::BasicMessageBuilder;
|
||||||
|
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
|
||||||
|
using HeaderType = typename Base::HeaderType;
|
||||||
|
using HeaderListType = typename Base::HeaderListType;
|
||||||
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
} // cppkafka
|
} // cppkafka
|
||||||
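Taken together, the owned header list, the `header()`/`headers()` setters, and the `time_point` timestamp overload let a producer compose all of this fluently. A minimal sketch, assuming headers support is compiled in and that `MessageBuilder::HeaderType` (aliased earlier in this header) is constructible from a name plus a string value:

```cpp
#include <chrono>
#include <string>
#include <cppkafka/producer.h>
#include <cppkafka/message_builder.h>

// Sketch: build and produce a message carrying a header and an explicit timestamp.
void produce_with_header(cppkafka::Producer& producer) {
    const std::string payload = "hello";
    const std::string trace_id = "abc-123";

    cppkafka::MessageBuilder builder("some_topic");
    builder.partition(0)
           .payload(payload)  // copied into the builder's Buffer via construct_buffer
           .timestamp(std::chrono::system_clock::now());  // stored as ms since epoch
#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
    builder.header({"trace-id", trace_id});  // appended to the owned header list
#endif
    producer.produce(builder);
}
```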
|
include/cppkafka/message_internal.h:

```diff
@@ -31,6 +31,7 @@
 #define CPPKAFKA_MESSAGE_INTERNAL_H

 #include <memory>
+#include "macros.h"

 namespace cppkafka {

@@ -45,7 +46,7 @@ using InternalPtr = std::shared_ptr<Internal>;
 /**
  * \brief Private message data structure
  */
-class MessageInternal {
+class CPPKAFKA_API MessageInternal {
 public:
     MessageInternal(void* user_data, std::shared_ptr<Internal> internal);
     static std::unique_ptr<MessageInternal> load(Message& message);
```
include/cppkafka/message_timestamp.h (new file, 72 lines):

```diff
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, Matias Fontanini
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following disclaimer
+ *   in the documentation and/or other materials provided with the
+ *   distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef CPPKAFKA_MESSAGE_TIMESTAMP_H
+#define CPPKAFKA_MESSAGE_TIMESTAMP_H
+
+#include <chrono>
+#include <librdkafka/rdkafka.h>
+#include "macros.h"
+
+namespace cppkafka {
+
+/**
+ * Represents a message's timestamp
+ */
+class CPPKAFKA_API MessageTimestamp {
+    friend class Message;
+public:
+    /**
+     * The timestamp type
+     */
+    enum TimestampType {
+        CREATE_TIME = RD_KAFKA_TIMESTAMP_CREATE_TIME,
+        LOG_APPEND_TIME = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME
+    };
+
+    /**
+     * Gets the timestamp value. If the timestamp was created with a 'time_point',
+     * the duration represents the number of milliseconds since epoch.
+     */
+    std::chrono::milliseconds get_timestamp() const;
+
+    /**
+     * Gets the timestamp type
+     */
+    TimestampType get_type() const;
+private:
+    MessageTimestamp(std::chrono::milliseconds timestamp, TimestampType type);
+
+    std::chrono::milliseconds timestamp_;
+    TimestampType type_;
+};
+
+} // cppkafka
+
+#endif //CPPKAFKA_MESSAGE_TIMESTAMP_H
```
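On the consumer side the timestamp comes back wrapped in an optional `MessageTimestamp`; the pattern below mirrors the access already used by the builder constructor above (`message.get_timestamp().get().get_timestamp()`). A minimal sketch:

```cpp
#include <iostream>
#include <cppkafka/message.h>
#include <cppkafka/message_timestamp.h>

// Sketch: distinguish producer-assigned from broker-assigned timestamps.
void print_timestamp(const cppkafka::Message& message) {
    const auto timestamp = message.get_timestamp();  // optional<MessageTimestamp>
    if (!timestamp) {
        return;  // broker or message format too old to carry a timestamp
    }
    const cppkafka::MessageTimestamp& ts = timestamp.get();
    const char* kind =
        ts.get_type() == cppkafka::MessageTimestamp::CREATE_TIME ? "created" : "log-appended";
    std::cout << kind << " at " << ts.get_timestamp().count() << " ms since epoch\n";
}
```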
include/cppkafka/producer.h:

```diff
@@ -74,6 +74,9 @@ class Message;
  * // Write using a key on a fixed partition (42)
  * producer.produce(MessageBuilder("some_topic").partition(42).key(key).payload(payload));
  *
+ * // Flush the produced messages
+ * producer.flush();
+ *
  * \endcode
  */
 class CPPKAFKA_API Producer : public KafkaHandleBase {
@@ -83,9 +86,10 @@ public:
      * The policy to use for the payload. The default policy is COPY_PAYLOAD
      */
     enum class PayloadPolicy {
         PASSTHROUGH_PAYLOAD = 0,            ///< Rdkafka will not copy nor free the payload.
         COPY_PAYLOAD = RD_KAFKA_MSG_F_COPY, ///< Means RD_KAFKA_MSG_F_COPY
-        FREE_PAYLOAD = RD_KAFKA_MSG_F_FREE  ///< Means RD_KAFKA_MSG_F_FREE
+        FREE_PAYLOAD = RD_KAFKA_MSG_F_FREE, ///< Means RD_KAFKA_MSG_F_FREE
+        BLOCK_ON_FULL_QUEUE = RD_KAFKA_MSG_F_BLOCK ///< Producer will block if the underlying queue is full
     };

     /**
@@ -113,6 +117,7 @@ public:
      * \param builder The builder class used to compose a message
      */
     void produce(const MessageBuilder& builder);
+    void produce(MessageBuilder&& builder);

     /**
      * \brief Produces a message
@@ -120,6 +125,7 @@ public:
      * \param message The message to be produced
      */
     void produce(const Message& message);
+    void produce(Message&& message);

     /**
      * \brief Polls on this handle
@@ -157,6 +163,15 @@ public:
      */
     void flush(std::chrono::milliseconds timeout);
 private:
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+    void do_produce(const MessageBuilder& builder, MessageBuilder::HeaderListType&& headers);
+    void do_produce(const Message& message, MessageBuilder::HeaderListType&& headers);
+#else
+    void do_produce(const MessageBuilder& builder);
+    void do_produce(const Message& message);
+#endif
+
+    // Members
     PayloadPolicy message_payload_policy_;
 };
```
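The new enumerator maps `RD_KAFKA_MSG_F_BLOCK`, turning queue-full failures into backpressure. A minimal sketch, assuming the `set_payload_policy()` setter from the unchanged portion of this header; whether the block flag combines with the copy flag is left to that setter's semantics:

```cpp
#include <chrono>
#include <cppkafka/producer.h>

// Sketch: block in produce() when librdkafka's internal queue is full,
// instead of failing with a queue-full error.
void produce_blocking(cppkafka::Producer& producer,
                      const cppkafka::MessageBuilder& builder) {
    producer.set_payload_policy(cppkafka::Producer::PayloadPolicy::BLOCK_ON_FULL_QUEUE);
    producer.produce(builder);
    producer.flush(std::chrono::milliseconds(5000));  // wait for outstanding deliveries
}
```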
include/cppkafka/queue.h:

```diff
@@ -29,8 +29,8 @@

 #include <vector>
 #include <memory>
-#include <boost/optional.hpp>
 #include <librdkafka/rdkafka.h>
+#include "event.h"
 #include "macros.h"
 #include "message.h"

@@ -52,6 +52,17 @@ public:
      */
     static Queue make_non_owning(rd_kafka_queue_t* handle);

+    /**
+     * \brief Creates a Queue object out of a handle.
+     *
+     * This will check what the rdkafka version is and will return either an owned
+     * queue handle or a non owned one, depending on whether the current version
+     * is >= RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION (see macros.h)
+     *
+     * \param handle The handle to be used
+     */
+    static Queue make_queue(rd_kafka_queue_t* handle);
+
     /**
      * \brief Constructs an empty queue
      *
@@ -134,26 +145,76 @@ public:
     /**
      * \brief Consumes a batch of messages from this queue
      *
-     * This translates to a call to rd_kafka_consume_batch_queue using the configured timeout for this object
+     * This translates to a call to rd_kafka_consume_batch_queue using the configured timeout
+     * for this object
      *
      * \param max_batch_size The max number of messages to consume if available
+     * \param alloc The optionally supplied allocator for the message list
      *
      * \return A list of messages. Could be empty if there's nothing to consume
      */
-    MessageList consume_batch(size_t max_batch_size) const;
+    template <typename Allocator>
+    std::vector<Message, Allocator> consume_batch(size_t max_batch_size,
+                                                  const Allocator& alloc) const;

     /**
      * \brief Consumes a batch of messages from this queue
      *
-     * Same as Queue::consume_batch(size_t) but the specified timeout will be used instead of the configured one
+     * This translates to a call to rd_kafka_consume_batch_queue using the configured timeout
+     * for this object
      *
      * \param max_batch_size The max number of messages to consume if available
      *
+     * \return A list of messages. Could be empty if there's nothing to consume
+     */
+    std::vector<Message> consume_batch(size_t max_batch_size) const;
+
+    /**
+     * \brief Consumes a batch of messages from this queue
+     *
+     * Same as Queue::consume_batch(size_t) but the specified timeout will be used instead of the
+     * configured one
+     *
+     * \param max_batch_size The max number of messages to consume if available
+     * \param timeout The timeout to be used on this call
+     * \param alloc The optionally supplied allocator for the message list
+     *
+     * \return A list of messages. Could be empty if there's nothing to consume
+     */
+    template <typename Allocator>
+    std::vector<Message, Allocator> consume_batch(size_t max_batch_size,
+                                                  std::chrono::milliseconds timeout,
+                                                  const Allocator& alloc) const;
+
+    /**
+     * \brief Consumes a batch of messages from this queue
+     *
+     * Same as Queue::consume_batch(size_t) but the specified timeout will be used instead of the
+     * configured one
+     *
+     * \param max_batch_size The max number of messages to consume if available
      * \param timeout The timeout to be used on this call
      *
      * \return A list of messages. Could be empty if there's nothing to consume
      */
-    MessageList consume_batch(size_t max_batch_size, std::chrono::milliseconds timeout) const;
+    std::vector<Message> consume_batch(size_t max_batch_size,
+                                       std::chrono::milliseconds timeout) const;
+
+    /**
+     * \brief Extracts the next message in this Queue
+     *
+     * \return The latest event, if any
+     */
+    Event next_event() const;
+
+    /**
+     * \brief Extracts the next message in this Queue
+     *
+     * \param timeout The amount of time to wait for this operation to complete
+     *
+     * \return The latest event, if any
+     */
+    Event next_event(std::chrono::milliseconds timeout) const;

     /**
      * Indicates whether this queue is valid (not null)
@@ -178,6 +239,32 @@ private:

 using QueueList = std::vector<Queue>;

+template <typename Allocator>
+std::vector<Message, Allocator> Queue::consume_batch(size_t max_batch_size,
+                                                     const Allocator& alloc) const {
+    return consume_batch(max_batch_size, timeout_ms_, alloc);
+}
+
+template <typename Allocator>
+std::vector<Message, Allocator> Queue::consume_batch(size_t max_batch_size,
+                                                     std::chrono::milliseconds timeout,
+                                                     const Allocator& alloc) const {
+    std::vector<rd_kafka_message_t*> raw_messages(max_batch_size);
+    ssize_t result = rd_kafka_consume_batch_queue(handle_.get(),
+                                                  static_cast<int>(timeout.count()),
+                                                  raw_messages.data(),
+                                                  raw_messages.size());
+    if (result == -1) {
+        rd_kafka_resp_err_t error = rd_kafka_last_error();
+        if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
+            throw QueueException(error);
+        }
+        return std::vector<Message, Allocator>(alloc);
+    }
+    // Build message list
+    return std::vector<Message, Allocator>(raw_messages.begin(), raw_messages.begin() + result, alloc);
+}
+
 } // cppkafka

 #endif //CPPKAFKA_QUEUE_H
```
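Because the batch overloads are now templated on the allocator, callers can route `Message` storage through a custom allocator. A minimal sketch with the default allocator spelled out (a pool or arena allocator would slot in the same way):

```cpp
#include <chrono>
#include <memory>
#include <vector>
#include <cppkafka/queue.h>

// Sketch: drain up to 100 messages, waiting at most one second.
std::vector<cppkafka::Message> drain(const cppkafka::Queue& queue) {
    return queue.consume_batch(100,
                               std::chrono::milliseconds(1000),
                               std::allocator<cppkafka::Message>());
}
```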
include/cppkafka/topic_partition.h:

```diff
@@ -32,7 +32,6 @@

 #include <string>
 #include <memory>
-#include <boost/optional.hpp>
 #include <librdkafka/rdkafka.h>
 #include "macros.h"

@@ -107,6 +107,11 @@ public:
      */
     int64_t get_offset() const;

+    /**
+     * @brief Sets the partition
+     */
+    void set_partition(int partition);
+
     /**
      * Sets the offset
      */
```
include/cppkafka/utils/backoff_committer.h:

```diff
@@ -37,6 +37,7 @@
 #include "../consumer.h"
 #include "backoff_performer.h"
 #include "../detail/callback_invoker.h"
+#include "../macros.h"

 namespace cppkafka {

@@ -71,7 +72,7 @@ namespace cppkafka {
  * committer.commit(some_message);
  * \endcode
  */
-class BackoffCommitter : public BackoffPerformer {
+class CPPKAFKA_API BackoffCommitter : public BackoffPerformer {
 public:
     /**
      * \brief The error callback.
@@ -99,10 +100,18 @@ public:
      */
     void set_error_callback(ErrorCallback callback);

+    /**
+     * \brief Commits the current partition assignment synchronously
+     *
+     * This will call Consumer::commit() until either the message is successfully
+     * committed or the error callback returns false (if any is set).
+     */
+    void commit();
+
     /**
      * \brief Commits the given message synchronously
      *
-     * This will call Consumer::commit until either the message is successfully
+     * This will call Consumer::commit(msg) until either the message is successfully
      * committed or the error callback returns false (if any is set).
      *
      * \param msg The message to be committed
@@ -112,7 +121,7 @@ public:
     /**
      * \brief Commits the offsets on the given topic/partitions synchronously
      *
-     * This will call Consumer::commit until either the offsets are successfully
+     * This will call Consumer::commit(topic_partitions) until either the offsets are successfully
      * committed or the error callback returns false (if any is set).
      *
      * \param topic_partitions The topic/partition list to be committed
@@ -126,25 +135,30 @@ public:
      */
     Consumer& get_consumer();
 private:
-    // Return true to abort and false to continue committing
-    template <typename T>
-    bool do_commit(const T& object) {
+    // If the ReturnType contains 'true', we abort committing. Otherwise we continue.
+    // The second member of the ReturnType contains the RdKafka error if any.
+    template <typename...Args>
+    bool do_commit(Args&&...args) {
         try {
-            consumer_.commit(object);
-            // If the commit succeeds, we're done
+            consumer_.commit(std::forward<Args>(args)...);
+            // If the commit succeeds, we're done.
             return true;
         }
         catch (const HandleException& ex) {
+            Error error = ex.get_error();
             // If there were actually no offsets to commit, return. Retrying won't solve
-            // anything here
-            if (ex.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET) {
-                return true;
+            // anything here.
+            if (error == RD_KAFKA_RESP_ERR__NO_OFFSET) {
+                return true; //not considered an error.
             }
             // If there's a callback and it returns false for this message, abort.
             // Otherwise keep committing.
             CallbackInvoker<ErrorCallback> callback("backoff committer", callback_, &consumer_);
-            return callback && !callback(ex.get_error());
+            if (callback && !callback(error)) {
+                throw ex; //abort
+            }
         }
+        return false; //continue
     }

     Consumer& consumer_;
```
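The `commit()` overloads all funnel into the variadic `do_commit`, whose callback decides whether a failed attempt should be retried. A minimal sketch, assuming the `bool(Error)` callback signature implied by `callback(error)` above and the `BackoffCommitter(Consumer&)` constructor from the unchanged part of this header:

```cpp
#include <cppkafka/consumer.h>
#include <cppkafka/utils/backoff_committer.h>

// Sketch: retry commits with backoff; only timeouts are considered worth retrying.
void commit_with_retries(cppkafka::Consumer& consumer, const cppkafka::Message& msg) {
    cppkafka::BackoffCommitter committer(consumer);
    committer.set_error_callback([](cppkafka::Error error) {
        // true => keep retrying; false => do_commit rethrows and the commit aborts
        return error == RD_KAFKA_RESP_ERR__TIMED_OUT;
    });
    committer.commit(msg);  // or committer.commit() for the whole assignment
}
```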
include/cppkafka/utils/backoff_performer.h:

```diff
@@ -34,6 +34,7 @@
 #include <functional>
 #include <thread>
 #include "../consumer.h"
+#include "../exceptions.h"

 namespace cppkafka {

@@ -123,7 +124,7 @@ public:
         auto start = std::chrono::steady_clock::now();
         // If the callback returns true, we're done
         if (callback()) {
-            return;
+            return; //success
         }
         auto end = std::chrono::steady_clock::now();
         auto time_elapsed = end - start;
@@ -134,6 +135,8 @@ public:
             // Increase out backoff depending on the policy being used
             backoff = increase_backoff(backoff);
         }
+        // No more retries left or we have a terminal error.
+        throw ActionTerminatedException("Commit failed: no more retries.");
     }
 private:
     TimeUnit increase_backoff(TimeUnit backoff);
```
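Because the retry loop now ends by throwing instead of silently returning, exhausting the retry budget becomes observable to the caller. A minimal sketch of catching it at the call site:

```cpp
#include <iostream>
#include <cppkafka/utils/backoff_committer.h>

// Sketch: surface commit exhaustion instead of losing it.
void commit_or_log(cppkafka::BackoffCommitter& committer, const cppkafka::Message& msg) {
    try {
        committer.commit(msg);
    }
    catch (const cppkafka::ActionTerminatedException& ex) {
        std::cerr << "giving up on commit: " << ex.what() << '\n';
    }
}
```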
|
(One file diff in this comparison was suppressed because it is too large.)
include/cppkafka/utils/compacted_topic_processor.h:

```diff
@@ -37,13 +37,14 @@
 #include <boost/optional.hpp>
 #include "../buffer.h"
 #include "../consumer.h"
+#include "../macros.h"

 namespace cppkafka {
 /**
  * \brief Events generated by a CompactedTopicProcessor
  */
 template <typename Key, typename Value>
-class CPPKAFKA_API CompactedTopicEvent {
+class CompactedTopicEvent {
 public:
     /**
      * \brief Event type enum
```
include/cppkafka/utils/consumer_dispatcher.h:

```diff
@@ -70,7 +70,7 @@ namespace cppkafka {
  * * EOF: void(BasicConsumerDispatcher::EndOfFile, TopicPartition)
  */
 template <typename ConsumerType>
-class CPPKAFKA_API BasicConsumerDispatcher {
+class BasicConsumerDispatcher {
 public:
     /**
      * Tag to indicate a timeout occurred
```
include/cppkafka/utils/poll_interface.h:

```diff
@@ -108,7 +108,7 @@ struct PollInterface {
      * otherwise the broker will think this consumer is down and will trigger a rebalance
      * (if using dynamic subscription)
      */
-    virtual MessageList poll_batch(size_t max_batch_size) = 0;
+    virtual std::vector<Message> poll_batch(size_t max_batch_size) = 0;

     /**
      * \brief Polls all assigned partitions for a batch of new messages in round-robin fashion
@@ -122,7 +122,7 @@ struct PollInterface {
      *
      * \return A list of messages
      */
-    virtual MessageList poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout) = 0;
+    virtual std::vector<Message> poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout) = 0;
 };

 } //cppkafka
```
include/cppkafka/utils/poll_strategy_base.h:

```diff
@@ -35,6 +35,7 @@
 #include "../queue.h"
 #include "../topic_partition_list.h"
 #include "poll_interface.h"
+#include "../macros.h"

 namespace cppkafka {

@@ -52,7 +53,7 @@ struct QueueData {
  *
  * \brief Base implementation of the PollInterface
  */
-class PollStrategyBase : public PollInterface {
+class CPPKAFKA_API PollStrategyBase : public PollInterface {
 public:
     using QueueMap = std::map<TopicPartition, QueueData>;

@@ -83,6 +84,36 @@ public:
      */
     Consumer& get_consumer() final;

+    /**
+     * \brief Creates partitions queues associated with the supplied partitions.
+     *
+     * This method contains a default implementation. It adds all the new queues belonging
+     * to the provided partition list and calls reset_state().
+     * To be used with static consumers.
+     *
+     * \param partitions Assigned topic partitions.
+     */
+    virtual void assign(TopicPartitionList& partitions);
+
+    /**
+     * \brief Removes partitions queues associated with the supplied partitions.
+     *
+     * This method contains a default implementation. It removes all the queues
+     * belonging to the provided partition list and calls reset_state().
+     * To be used with static consumers.
+     *
+     * \param partitions Revoked topic partitions.
+     */
+    virtual void revoke(const TopicPartitionList& partitions);
+
+    /**
+     * \brief Removes all partitions queues associated with the supplied partitions.
+     *
+     * This method contains a default implementation. It removes all the queues
+     * currently assigned and calls reset_state(). To be used with static consumers.
+     */
+    virtual void revoke();
+
 protected:
     /**
      * \brief Get the queues from all assigned partitions
@@ -110,8 +141,8 @@ protected:
     /**
      * \brief Function to be called when a new partition assignment takes place
      *
-     * This method contains a default implementation. It adds all the new queues belonging
-     * to the provided partition list and calls reset_state().
+     * This method contains a default implementation. It calls assign()
+     * and invokes the user assignment callback.
      *
      * \param partitions Assigned topic partitions
      */
@@ -120,8 +151,8 @@ protected:
     /**
      * \brief Function to be called when an old partition assignment gets revoked
      *
-     * This method contains a default implementation. It removes all the queues
-     * belonging to the provided partition list and calls reset_state().
+     * This method contains a default implementation. It calls revoke()
+     * and invokes the user revocation callback.
      *
      * \param partitions Revoked topic partitions
      */
```
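Exposing `assign()`/`revoke()` publicly lets statically-assigned consumers (where no group rebalance callbacks ever fire) keep the strategy's queue map in sync by hand. A minimal sketch using the `RoundRobinPollStrategy` subclass from the next file:

```cpp
#include <cppkafka/utils/roundrobin_poll_strategy.h>

// Sketch: mirror a manual (static) partition assignment into the poll strategy.
void assign_static(cppkafka::Consumer& consumer,
                   cppkafka::RoundRobinPollStrategy& strategy) {
    cppkafka::TopicPartitionList partitions = { {"some_topic", 0}, {"some_topic", 1} };
    consumer.assign(partitions);   // static assignment: no rebalance callback fires
    strategy.assign(partitions);   // so the strategy must be told explicitly
}
```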
include/cppkafka/utils/roundrobin_poll_strategy.h:

```diff
@@ -102,13 +102,20 @@ public:
     /**
      * \sa PollInterface::poll_batch
      */
-    MessageList poll_batch(size_t max_batch_size) override;
+    template <typename Allocator>
+    std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
+                                               const Allocator& alloc);
+    std::vector<Message> poll_batch(size_t max_batch_size) override;

     /**
      * \sa PollInterface::poll_batch
      */
-    MessageList poll_batch(size_t max_batch_size,
-                           std::chrono::milliseconds timeout) override;
+    template <typename Allocator>
+    std::vector<Message, Allocator> poll_batch(size_t max_batch_size,
+                                               std::chrono::milliseconds timeout,
+                                               const Allocator& alloc);
+    std::vector<Message> poll_batch(size_t max_batch_size,
+                                    std::chrono::milliseconds timeout) override;

 protected:
     /**
@@ -119,10 +126,12 @@ protected:
     QueueData& get_next_queue();

 private:
+    template <typename Allocator>
     void consume_batch(Queue& queue,
-                       MessageList& messages,
+                       std::vector<Message, Allocator>& messages,
                        ssize_t& count,
-                       std::chrono::milliseconds timeout);
+                       std::chrono::milliseconds timeout,
+                       const Allocator& alloc);

     void restore_forwarding();

@@ -130,6 +139,53 @@ private:
     QueueMap::iterator queue_iter_;
 };

+// Implementations
+template <typename Allocator>
+std::vector<Message, Allocator> RoundRobinPollStrategy::poll_batch(size_t max_batch_size,
+                                                                   const Allocator& alloc) {
+    return poll_batch(max_batch_size, get_consumer().get_timeout(), alloc);
+}
+
+template <typename Allocator>
+std::vector<Message, Allocator> RoundRobinPollStrategy::poll_batch(size_t max_batch_size,
+                                                                   std::chrono::milliseconds timeout,
+                                                                   const Allocator& alloc) {
+    std::vector<Message, Allocator> messages(alloc);
+    ssize_t count = max_batch_size;
+
+    // batch from the group event queue first (non-blocking)
+    consume_batch(get_consumer_queue().queue, messages, count, std::chrono::milliseconds(0), alloc);
+    size_t num_queues = get_partition_queues().size();
+    while ((count > 0) && (num_queues--)) {
+        // batch from the next partition (non-blocking)
+        consume_batch(get_next_queue().queue, messages, count, std::chrono::milliseconds(0), alloc);
+    }
+    // we still have space left in the buffer
+    if (count > 0) {
+        // wait on the event queue until timeout
+        consume_batch(get_consumer_queue().queue, messages, count, timeout, alloc);
+    }
+    return messages;
+}
+
+template <typename Allocator>
+void RoundRobinPollStrategy::consume_batch(Queue& queue,
+                                           std::vector<Message, Allocator>& messages,
+                                           ssize_t& count,
+                                           std::chrono::milliseconds timeout,
+                                           const Allocator& alloc) {
+    std::vector<Message, Allocator> queue_messages = queue.consume_batch(count, timeout, alloc);
+    if (queue_messages.empty()) {
+        return;
+    }
+    // concatenate both lists
+    messages.insert(messages.end(),
+                    make_move_iterator(queue_messages.begin()),
+                    make_move_iterator(queue_messages.end()));
+    // reduce total batch count
+    count -= queue_messages.size();
+}
+
 } //cppkafka

 #endif //CPPKAFKA_ROUNDROBIN_POLL_STRATEGY_H
```
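The poll order above (group event queue first, then each partition queue, then a blocking wait) is what gives the strategy its fairness across partitions. A minimal usage sketch, assuming the `RoundRobinPollStrategy(Consumer&)` constructor from the unchanged part of this header:

```cpp
#include <vector>
#include <cppkafka/utils/roundrobin_poll_strategy.h>

// Sketch: poll partitions fairly instead of whichever fills rdkafka's queue first.
void poll_fairly(cppkafka::Consumer& consumer) {
    cppkafka::RoundRobinPollStrategy strategy(consumer);
    while (true) {
        std::vector<cppkafka::Message> batch = strategy.poll_batch(50);
        for (const cppkafka::Message& message : batch) {
            if (message && !message.get_error()) {
                // handle the message
            }
        }
    }
}
```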
src/CMakeLists.txt:

```diff
@@ -7,12 +7,14 @@ set(SOURCES
     buffer.cpp
     queue.cpp
     message.cpp
+    message_timestamp.cpp
     message_internal.cpp
     topic_partition.cpp
     topic_partition_list.cpp
     metadata.cpp
     group_information.cpp
     error.cpp
+    event.cpp

     kafka_handle_base.cpp
     producer.cpp
@@ -24,24 +26,83 @@ set(SOURCES
     utils/roundrobin_poll_strategy.cpp
 )

-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/cppkafka)
-include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${RDKAFKA_INCLUDE_DIR})
+set(TARGET_NAME cppkafka)
+set(PKG_DIR "${CMAKE_BINARY_DIR}/package")
+set(PKG_CONFIG_FILE "${PKG_DIR}/${TARGET_NAME}.pc")
+set(CONFIG_FILE "${PKG_DIR}/${PROJECT_NAME}Config.cmake")
+set(VERSION_FILE "${PKG_DIR}/${PROJECT_NAME}ConfigVersion.cmake")
+set(FIND_RDKAFKA_FILE "${PROJECT_SOURCE_DIR}/cmake/FindRdKafka.cmake")
+set(NAMESPACE "${PROJECT_NAME}::")
+set(TARGET_EXPORT_NAME ${PROJECT_NAME}Targets)

-add_library(cppkafka ${CPPKAFKA_LIBRARY_TYPE} ${SOURCES})
-set_target_properties(cppkafka PROPERTIES VERSION ${CPPKAFKA_VERSION}
-                                          SOVERSION ${CPPKAFKA_VERSION})
-
-set(DEPENDENCIES ${RDKAFKA_LIBRARY})
+add_library(${TARGET_NAME} ${CPPKAFKA_LIBRARY_TYPE} ${SOURCES})
+target_compile_features(${TARGET_NAME} PUBLIC cxx_std_11)
+target_include_directories(${TARGET_NAME} PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include/cppkafka>)
+set_target_properties(${TARGET_NAME} PROPERTIES
+                      ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_INSTALL_LIBDIR}"
+                      ARCHIVE_OUTPUT_NAME "${TARGET_NAME}"
+                      LIBRARY_OUTPUT_DIRECTORY "${CMAKE_INSTALL_LIBDIR}"
+                      LIBRARY_OUTPUT_NAME "${TARGET_NAME}"
+                      INSTALL_RPATH "${CMAKE_INSTALL_LIBDIR}"
+                      INSTALL_RPATH_USE_LINK_PATH TRUE
+                      VERSION ${CPPKAFKA_VERSION}
+                      SOVERSION ${CPPKAFKA_VERSION})
+# In CMake >= 3.15 Boost::boost == Boost::headers
+target_link_libraries(${TARGET_NAME} PUBLIC RdKafka::rdkafka Boost::boost)
 if (WIN32)
     # On windows ntohs and related are in ws2_32
-    set(DEPENDENCIES ${DEPENDENCIES} ws2_32.lib)
+    target_link_libraries(${TARGET_NAME} PUBLIC ws2_32.lib)
 endif()
-target_link_libraries(cppkafka ${DEPENDENCIES})
-target_include_directories(cppkafka PUBLIC ${PROJECT_SOURCE_DIR}/include)

+# Install cppkafka target and specify all properties needed for the exported file
 install(
-    TARGETS cppkafka
-    LIBRARY DESTINATION lib
-    ARCHIVE DESTINATION lib
-    COMPONENT dev
+    TARGETS ${TARGET_NAME}
+    EXPORT ${TARGET_EXPORT_NAME}
+    COMPONENT binaries
+    LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+    ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+    RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+    INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
 )
+
+if (CPPKAFKA_EXPORT_PKGCONFIG)
+    # Generate and install pkgconfig file
+    configure_file(${PROJECT_SOURCE_DIR}/cmake/cppkafka.pc.in ${PKG_CONFIG_FILE} @ONLY)
+
+    install(
+        FILES ${PKG_CONFIG_FILE}
+        DESTINATION "${CPPKAFKA_PKGCONFIG_DIR}"
+        COMPONENT pkgconfig
+    )
+endif()
+
+if (CPPKAFKA_EXPORT_CMAKE_CONFIG)
+    # Install the exported file
+    install(
+        EXPORT "${TARGET_EXPORT_NAME}"
+        NAMESPACE "${NAMESPACE}"
+        COMPONENT config
+        DESTINATION "${CPPKAFKA_CONFIG_DIR}"
+    )
+
+    # Generate CMAKE configuration file and exported targets
+    configure_package_config_file(
+        "${PROJECT_SOURCE_DIR}/cmake/config.cmake.in"
+        "${CONFIG_FILE}"
+        INSTALL_DESTINATION "${CPPKAFKA_CONFIG_DIR}"
+        PATH_VARS RDKAFKA_MIN_VERSION_HEX CMAKE_INSTALL_PREFIX CMAKE_INSTALL_INCLUDEDIR CMAKE_INSTALL_LIBDIR
+    )
+
+    # Generate version file
+    write_basic_package_version_file(
+        "${VERSION_FILE}"
+        VERSION ${CPPKAFKA_VERSION}
+        COMPATIBILITY AnyNewerVersion
+    )
+
+    install(
+        FILES "${CONFIG_FILE}" "${VERSION_FILE}" "${FIND_RDKAFKA_FILE}"
+        DESTINATION "${CPPKAFKA_CONFIG_DIR}"
+        COMPONENT config
+    )
+endif()
```
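With the exported targets in place, downstream projects can stop hand-rolling include and library paths. A minimal sketch of a consuming CMakeLists.txt; the `CppKafka::cppkafka` target name assumes `PROJECT_NAME` is `CppKafka`, which is set outside this hunk:

```cmake
cmake_minimum_required(VERSION 3.9)
project(my_app CXX)

# Pulls in the installed CppKafkaConfig.cmake generated above
find_package(CppKafka REQUIRED)

add_executable(my_app main.cpp)
# Usage requirements (includes, rdkafka, Boost headers) propagate automatically
target_link_libraries(my_app PRIVATE CppKafka::cppkafka)
```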
src/buffer.cpp:

```diff
@@ -34,6 +34,7 @@

 using std::string;
 using std::equal;
+using std::lexicographical_compare;
 using std::ostream;
 using std::hex;
 using std::dec;
@@ -101,4 +102,22 @@ bool operator!=(const Buffer& lhs, const Buffer& rhs) {
     return !(lhs == rhs);
 }

+bool operator<(const Buffer& lhs, const Buffer& rhs) {
+    return lexicographical_compare(lhs.get_data(), lhs.get_data() + lhs.get_size(),
+                                   rhs.get_data(), rhs.get_data() + rhs.get_size());
+}
+
+bool operator>(const Buffer& lhs, const Buffer& rhs) {
+    return lexicographical_compare(rhs.get_data(), rhs.get_data() + rhs.get_size(),
+                                   lhs.get_data(), lhs.get_data() + lhs.get_size());
+}
+
+bool operator<=(const Buffer& lhs, const Buffer& rhs) {
+    return !(lhs > rhs);
+}
+
+bool operator>=(const Buffer& lhs, const Buffer& rhs) {
+    return !(lhs < rhs);
+}
+
 } // cppkafka
```
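The relational operators compare byte-wise, so two `Buffer` views order the same way the equivalent `std::string`s would, without materializing a copy. A small sketch comparing message keys:

```cpp
#include <cppkafka/buffer.h>
#include <cppkafka/message.h>

// Sketch: order consumed messages by key, lexicographically over raw bytes.
bool key_less(const cppkafka::Message& lhs, const cppkafka::Message& rhs) {
    return lhs.get_key() < rhs.get_key();
}
```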
src/configuration.cpp:

```diff
@@ -74,7 +74,7 @@ void error_callback_proxy(rd_kafka_t*, int err, const char *reason, void *opaque
 void throttle_callback_proxy(rd_kafka_t*, const char* broker_name,
                              int32_t broker_id, int throttle_time_ms, void *opaque) {
     KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
     CallbackInvoker<Configuration::ThrottleCallback>
         ("throttle", handle->get_configuration().get_throttle_callback(), handle)
         (*handle, broker_name, broker_id, milliseconds(throttle_time_ms));
 }
@@ -102,6 +102,13 @@ int socket_callback_proxy(int domain, int type, int protocol, void* opaque) {
         (domain, type, protocol);
 }

+void background_event_callback_proxy(rd_kafka_t*, rd_kafka_event_t* event_ptr, void *opaque) {
+    KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
+    CallbackInvoker<Configuration::BackgroundEventCallback>
+        ("background_event", handle->get_configuration().get_background_event_callback(), handle)
+        (*handle, Event{event_ptr});
+}
+
 // Configuration

 Configuration::Configuration()
@@ -177,6 +184,19 @@ Configuration& Configuration::set_socket_callback(SocketCallback callback) {
     return *this;
 }

+#if RD_KAFKA_VERSION >= RD_KAFKA_ADMIN_API_SUPPORT_VERSION
+Configuration& Configuration::set_background_event_callback(BackgroundEventCallback callback) {
+    background_event_callback_ = move(callback);
+    rd_kafka_conf_set_background_event_cb(handle_.get(), &background_event_callback_proxy);
+    return *this;
+}
+
+Configuration& Configuration::set_events(int events) {
+    rd_kafka_conf_set_events(handle_.get(), events);
+    return *this;
+}
+#endif
+
 Configuration&
 Configuration::set_default_topic_configuration(TopicConfiguration config) {
     default_topic_config_ = std::move(config);
@@ -239,6 +259,11 @@ const Configuration::SocketCallback& Configuration::get_socket_callback() const
     return socket_callback_;
 }

+const Configuration::BackgroundEventCallback&
+Configuration::get_background_event_callback() const {
+    return background_event_callback_;
+}
+
 const optional<TopicConfiguration>& Configuration::get_default_topic_configuration() const {
     return default_topic_config_;
 }
```
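Together with the new `Event` wrapper, this lets selected events be handled on librdkafka's background thread rather than a poll loop. A minimal sketch, assuming the `(KafkaHandleBase&, Event)` callback signature implied by the proxy above; `RD_KAFKA_EVENT_DR` and `RD_KAFKA_EVENT_LOG` are standard librdkafka event flags:

```cpp
#include <iostream>
#include <cppkafka/configuration.h>
#include <cppkafka/event.h>

// Sketch: route delivery reports and logs through the background thread.
cppkafka::Configuration make_config() {
    cppkafka::Configuration config = {
        { "metadata.broker.list", "localhost:9092" }
    };
    config.set_events(RD_KAFKA_EVENT_DR | RD_KAFKA_EVENT_LOG);
    config.set_background_event_callback(
        [](cppkafka::KafkaHandleBase&, cppkafka::Event event) {
            std::cout << "event: " << event.get_name() << '\n';
        });
    return config;
}
```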
src/consumer.cpp:

```diff
@@ -29,6 +29,7 @@
 #include <sstream>
 #include <algorithm>
 #include <cctype>
+#include "macros.h"
 #include "consumer.h"
 #include "exceptions.h"
 #include "logging.h"
@@ -44,20 +45,10 @@ using std::ostringstream;
 using std::chrono::milliseconds;
 using std::toupper;
 using std::equal;
+using std::allocator;

 namespace cppkafka {

-// See: https://github.com/edenhill/librdkafka/issues/1792
-const int rd_kafka_queue_refcount_bug_version = 0x000b0500;
-Queue get_queue(rd_kafka_queue_t* handle) {
-    if (rd_kafka_version() <= rd_kafka_queue_refcount_bug_version) {
-        return Queue::make_non_owning(handle);
-    }
-    else {
-        return Queue(handle);
-    }
-}
-
 void Consumer::rebalance_proxy(rd_kafka_t*, rd_kafka_resp_err_t error,
                                rd_kafka_topic_partition_list_t *partitions, void *opaque) {
     TopicPartitionList list = convert(partitions);
@@ -133,15 +124,9 @@ void Consumer::unsubscribe() {

 void Consumer::assign(const TopicPartitionList& topic_partitions) {
     rd_kafka_resp_err_t error;
-    if (topic_partitions.empty()) {
-        error = rd_kafka_assign(get_handle(), nullptr);
-        check_error(error);
-    }
-    else {
-        TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
-        error = rd_kafka_assign(get_handle(), topic_list_handle.get());
-        check_error(error, topic_list_handle.get());
-    }
+    TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
+    error = rd_kafka_assign(get_handle(), topic_list_handle.get());
+    check_error(error, topic_list_handle.get());
 }

 void Consumer::unassign() {
@@ -194,9 +179,15 @@ KafkaHandleBase::OffsetTuple Consumer::get_offsets(const TopicPartition& topic_p

 TopicPartitionList
 Consumer::get_offsets_committed(const TopicPartitionList& topic_partitions) const {
+    return get_offsets_committed(topic_partitions, get_timeout());
+}
+
+TopicPartitionList
+Consumer::get_offsets_committed(const TopicPartitionList& topic_partitions,
+                                milliseconds timeout) const {
     TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
     rd_kafka_resp_err_t error = rd_kafka_committed(get_handle(), topic_list_handle.get(),
-                                                   static_cast<int>(get_timeout().count()));
+                                                   static_cast<int>(timeout.count()));
     check_error(error, topic_list_handle.get());
     return convert(topic_list_handle);
 }
@@ -209,6 +200,23 @@ Consumer::get_offsets_position(const TopicPartitionList& topic_partitions) const
     return convert(topic_list_handle);
 }

+#if (RD_KAFKA_VERSION >= RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION)
+void Consumer::store_consumed_offsets() const {
+    store_offsets(get_offsets_position(get_assignment()));
+}
+
+void Consumer::store_offsets(const TopicPartitionList& topic_partitions) const {
+    TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
+    rd_kafka_resp_err_t error = rd_kafka_offsets_store(get_handle(), topic_list_handle.get());
+    check_error(error, topic_list_handle.get());
+}
+#endif
+
+void Consumer::store_offset(const Message& msg) const {
+    rd_kafka_resp_err_t error = rd_kafka_offset_store(msg.get_handle()->rkt, msg.get_partition(), msg.get_offset());
+    check_error(error);
+}
+
 vector<string> Consumer::get_subscription() const {
     rd_kafka_resp_err_t error;
     rd_kafka_topic_partition_list_t* list = nullptr;
@@ -255,38 +263,28 @@ Message Consumer::poll(milliseconds timeout) {
     return rd_kafka_consumer_poll(get_handle(), static_cast<int>(timeout.count()));
 }

-MessageList Consumer::poll_batch(size_t max_batch_size) {
-    return poll_batch(max_batch_size, get_timeout());
+std::vector<Message> Consumer::poll_batch(size_t max_batch_size) {
+    return poll_batch(max_batch_size, get_timeout(), allocator<Message>());
 }

-MessageList Consumer::poll_batch(size_t max_batch_size, milliseconds timeout) {
-    vector<rd_kafka_message_t*> raw_messages(max_batch_size);
-    // Note that this will leak the queue when using rdkafka < 0.11.5 (see get_queue comment)
-    Queue queue(get_queue(rd_kafka_queue_get_consumer(get_handle())));
-    ssize_t result = rd_kafka_consume_batch_queue(queue.get_handle(), timeout.count(), raw_messages.data(),
-                                                  raw_messages.size());
-    if (result == -1) {
-        check_error(rd_kafka_last_error());
-        // on the off-chance that check_error() does not throw an error
-        return MessageList();
-    }
-    return MessageList(raw_messages.begin(), raw_messages.begin() + result);
+std::vector<Message> Consumer::poll_batch(size_t max_batch_size, milliseconds timeout) {
+    return poll_batch(max_batch_size, timeout, allocator<Message>());
 }

 Queue Consumer::get_main_queue() const {
-    Queue queue(get_queue(rd_kafka_queue_get_main(get_handle())));
+    Queue queue = Queue::make_queue(rd_kafka_queue_get_main(get_handle()));
     queue.disable_queue_forwarding();
     return queue;
 }

 Queue Consumer::get_consumer_queue() const {
-    return get_queue(rd_kafka_queue_get_consumer(get_handle()));
+    return Queue::make_queue(rd_kafka_queue_get_consumer(get_handle()));
 }

 Queue Consumer::get_partition_queue(const TopicPartition& partition) const {
-    Queue queue(get_queue(rd_kafka_queue_get_partition(get_handle(),
-                                                       partition.get_topic().c_str(),
-                                                       partition.get_partition())));
+    Queue queue = Queue::make_queue(rd_kafka_queue_get_partition(get_handle(),
+                                                                 partition.get_topic().c_str(),
+                                                                 partition.get_partition()));
     queue.disable_queue_forwarding();
     return queue;
 }
```
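Manual offset storage pairs with the standard librdkafka setting `enable.auto.offset.store=false`: the application decides exactly which offsets become eligible for the next commit. A minimal sketch:

```cpp
#include <cppkafka/consumer.h>

// Sketch: mark a message as processed so only its offset gets committed later.
void process_and_store(cppkafka::Consumer& consumer) {
    cppkafka::Message message = consumer.poll();
    if (message && !message.get_error()) {
        // ... handle the message ...
        consumer.store_offset(message);  // requires enable.auto.offset.store=false
    }
}
```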
src/event.cpp (new file, 93 lines)
@@ -0,0 +1,93 @@
[BSD license header omitted; identical to the project's other sources (Copyright (c) 2018, Matias Fontanini).]
+#include "event.h"
+
+using std::allocator;
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+namespace cppkafka {
+
+Event::Event(rd_kafka_event_t* handle)
+: handle_(handle, &rd_kafka_event_destroy) {
+
+}
+
+string Event::get_name() const {
+    return rd_kafka_event_name(handle_.get());
+}
+
+rd_kafka_event_type_t Event::get_type() const {
+    return rd_kafka_event_type(handle_.get());
+}
+
+Message Event::get_next_message() const {
+    // Note: the constness in rd_kafka_event_message_next's return value is not needed and it
+    // breaks Message's interface. This is dirty but it looks like it should have no side effects.
+    const auto message =
+        const_cast<rd_kafka_message_t*>(rd_kafka_event_message_next(handle_.get()));
+    return Message::make_non_owning(message);
+}
+
+vector<Message> Event::get_messages() {
+    return get_messages(allocator<Message>());
+}
+
+size_t Event::get_message_count() const {
+    return rd_kafka_event_message_count(handle_.get());
+}
+
+Error Event::get_error() const {
+    return rd_kafka_event_error(handle_.get());
+}
+
+void* Event::get_opaque() const {
+    return rd_kafka_event_opaque(handle_.get());
+}
+
+TopicPartition Event::get_topic_partition() const {
+    using TopparHandle = unique_ptr<rd_kafka_topic_partition_t,
+                                    decltype(&rd_kafka_topic_partition_destroy)>;
+    TopparHandle toppar_handle{rd_kafka_event_topic_partition(handle_.get()),
+                               &rd_kafka_topic_partition_destroy};
+    return TopicPartition(toppar_handle->topic, toppar_handle->partition, toppar_handle->offset);
+}
+
+TopicPartitionList Event::get_topic_partition_list() const {
+    auto toppars_handle = rd_kafka_event_topic_partition_list(handle_.get());
+    return convert(toppars_handle);
+}
+
+Event::operator bool() const {
+    return !!handle_;
+}
+
+} // cppkafka
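A short, hedged sketch of how this wrapper is meant to be driven; the consumer itself is assumed to exist, and only the Queue/Event calls come from this change:

    // Pull one event off the consumer's main queue, waiting up to 500ms.
    Queue queue = consumer.get_main_queue();
    Event event = queue.next_event(std::chrono::milliseconds(500));
    if (event) { // operator bool(): false when no event arrived in time
        if (event.get_type() == RD_KAFKA_EVENT_FETCH) {
            Message msg = event.get_next_message();
        }
    }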
src/exceptions.cpp
@@ -119,4 +119,11 @@ Error QueueException::get_error() const {
     return error_;
 }
 
+// ActionTerminatedException
+
+ActionTerminatedException::ActionTerminatedException(const string& error)
+: Exception(error) {
+
+}
+
 } // cppkafka
src/kafka_handle_base.cpp
@@ -48,7 +48,7 @@ namespace cppkafka {
 const milliseconds KafkaHandleBase::DEFAULT_TIMEOUT{1000};
 
 KafkaHandleBase::KafkaHandleBase(Configuration config)
-: timeout_ms_(DEFAULT_TIMEOUT), config_(move(config)), handle_(nullptr, nullptr) {
+: timeout_ms_(DEFAULT_TIMEOUT), config_(move(config)), handle_(nullptr, HandleDeleter(this)), destroy_flags_(0) {
     auto& maybe_config = config_.get_default_topic_configuration();
     if (maybe_config) {
         maybe_config->set_as_opaque();
@@ -83,6 +83,10 @@ void KafkaHandleBase::set_timeout(milliseconds timeout) {
     timeout_ms_ = timeout;
 }
 
+void KafkaHandleBase::set_log_level(LogLevel level) {
+    rd_kafka_set_log_level(handle_.get(), static_cast<int>(level));
+}
+
 void KafkaHandleBase::add_brokers(const string& brokers) {
     rd_kafka_brokers_add(handle_.get(), brokers.data());
 }
@@ -104,24 +108,40 @@ Topic KafkaHandleBase::get_topic(const string& name, TopicConfiguration config)
 
 KafkaHandleBase::OffsetTuple
 KafkaHandleBase::query_offsets(const TopicPartition& topic_partition) const {
+    return query_offsets(topic_partition, timeout_ms_);
+}
+
+KafkaHandleBase::OffsetTuple
+KafkaHandleBase::query_offsets(const TopicPartition& topic_partition,
+                               milliseconds timeout) const {
     int64_t low;
     int64_t high;
     const string& topic = topic_partition.get_topic();
     const int partition = topic_partition.get_partition();
-    const int timeout = static_cast<int>(timeout_ms_.count());
+    const int timeout_ms = static_cast<int>(timeout.count());
     rd_kafka_resp_err_t result = rd_kafka_query_watermark_offsets(handle_.get(), topic.data(),
                                                                   partition, &low, &high,
-                                                                  timeout);
+                                                                  timeout_ms);
     check_error(result);
     return make_tuple(low, high);
 }
 
 Metadata KafkaHandleBase::get_metadata(bool all_topics) const {
-    return get_metadata(all_topics, nullptr);
+    return get_metadata(all_topics, nullptr, timeout_ms_);
+}
+
+Metadata KafkaHandleBase::get_metadata(bool all_topics,
+                                       milliseconds timeout) const {
+    return get_metadata(all_topics, nullptr, timeout);
 }
 
 TopicMetadata KafkaHandleBase::get_metadata(const Topic& topic) const {
-    Metadata md = get_metadata(false, topic.get_handle());
+    return get_metadata(topic, timeout_ms_);
+}
+
+TopicMetadata KafkaHandleBase::get_metadata(const Topic& topic,
+                                            milliseconds timeout) const {
+    Metadata md = get_metadata(false, topic.get_handle(), timeout);
     auto topics = md.get_topics();
     if (topics.empty()) {
         throw ElementNotFound("topic metadata", topic.get_name());
@@ -130,7 +150,12 @@ TopicMetadata KafkaHandleBase::get_metadata(const Topic& topic) const {
 }
 
 GroupInformation KafkaHandleBase::get_consumer_group(const string& name) {
-    auto result = fetch_consumer_groups(name.c_str());
+    return get_consumer_group(name, timeout_ms_);
+}
+
+GroupInformation KafkaHandleBase::get_consumer_group(const string& name,
+                                                     milliseconds timeout) {
+    auto result = fetch_consumer_groups(name.c_str(), timeout);
     if (result.empty()) {
         throw ElementNotFound("consumer group information", name);
     }
@@ -138,11 +163,21 @@ GroupInformation KafkaHandleBase::get_consumer_group(const string& name) {
 }
 
 vector<GroupInformation> KafkaHandleBase::get_consumer_groups() {
-    return fetch_consumer_groups(nullptr);
+    return get_consumer_groups(timeout_ms_);
+}
+
+vector<GroupInformation> KafkaHandleBase::get_consumer_groups(milliseconds timeout) {
+    return fetch_consumer_groups(nullptr, timeout);
 }
 
 TopicPartitionList
 KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queries) const {
+    return get_offsets_for_times(queries, timeout_ms_);
+}
+
+TopicPartitionList
+KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queries,
+                                       milliseconds timeout) const {
     TopicPartitionList topic_partitions;
     for (const auto& query : queries) {
         const TopicPartition& topic_partition = query.first;
@@ -150,9 +185,9 @@ KafkaHandleBase::get_offsets_for_times(const TopicPartitionsTimestampsMap& queri
                                query.second.count());
     }
     TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
-    const int timeout = static_cast<int>(timeout_ms_.count());
+    const int timeout_ms = static_cast<int>(timeout.count());
     rd_kafka_resp_err_t result = rd_kafka_offsets_for_times(handle_.get(), topic_list_handle.get(),
-                                                            timeout);
+                                                            timeout_ms);
     check_error(result, topic_list_handle.get());
     return convert(topic_list_handle);
 }
@@ -178,7 +213,7 @@ void KafkaHandleBase::yield() const {
 }
 
 void KafkaHandleBase::set_handle(rd_kafka_t* handle) {
-    handle_ = HandlePtr(handle, &rd_kafka_destroy);
+    handle_ = HandlePtr(handle, HandleDeleter(this));
 }
 
 Topic KafkaHandleBase::get_topic(const string& name, rd_kafka_topic_conf_t* conf) {
@@ -189,19 +224,22 @@ Topic KafkaHandleBase::get_topic(const string& name, rd_kafka_topic_conf_t* conf
     return Topic(topic);
 }
 
-Metadata KafkaHandleBase::get_metadata(bool all_topics, rd_kafka_topic_t* topic_ptr) const {
+Metadata KafkaHandleBase::get_metadata(bool all_topics,
+                                       rd_kafka_topic_t* topic_ptr,
+                                       milliseconds timeout) const {
     const rd_kafka_metadata_t* metadata;
-    const int timeout = static_cast<int>(timeout_ms_.count());
+    const int timeout_ms = static_cast<int>(timeout.count());
     rd_kafka_resp_err_t error = rd_kafka_metadata(get_handle(), !!all_topics,
-                                                  topic_ptr, &metadata, timeout);
+                                                  topic_ptr, &metadata, timeout_ms);
     check_error(error);
     return Metadata(metadata);
 }
 
-vector<GroupInformation> KafkaHandleBase::fetch_consumer_groups(const char* name) {
+vector<GroupInformation> KafkaHandleBase::fetch_consumer_groups(const char* name,
+                                                                milliseconds timeout) {
     const rd_kafka_group_list* list = nullptr;
-    const int timeout = static_cast<int>(timeout_ms_.count());
-    auto result = rd_kafka_list_groups(get_handle(), name, &list, timeout);
+    const int timeout_ms = static_cast<int>(timeout.count());
+    auto result = rd_kafka_list_groups(get_handle(), name, &list, timeout_ms);
     check_error(result);
 
     // Wrap this in a unique_ptr so it gets auto deleted
@@ -237,7 +275,7 @@ void KafkaHandleBase::check_error(rd_kafka_resp_err_t error,
     //check if any partition has errors
     for (int i = 0; i < list_ptr->cnt; ++i) {
         if (list_ptr->elems[i].err != RD_KAFKA_RESP_ERR_NO_ERROR) {
-            throw HandleException(error);
+            throw HandleException(list_ptr->elems[i].err);
         }
     }
 }
@@ -247,4 +285,25 @@ rd_kafka_conf_t* KafkaHandleBase::get_configuration_handle() {
     return config_.get_handle();
 }
 
+#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
+
+void KafkaHandleBase::set_destroy_flags(int destroy_flags) {
+    destroy_flags_ = destroy_flags;
+};
+
+int KafkaHandleBase::get_destroy_flags() const {
+    return destroy_flags_;
+};
+
+#endif
+
+void KafkaHandleBase::HandleDeleter::operator()(rd_kafka_t* handle) {
+#if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
+    rd_kafka_destroy_flags(handle, handle_base_ptr_->get_destroy_flags());
+#else
+    rd_kafka_destroy(handle);
+#endif
+}
+
 } // cppkafka
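Two of the additions above in use, as a hedged sketch; RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE is an rdkafka flag, and whether it takes effect depends on the linked rdkafka version. The handle 'consumer' is assumed to exist (any Consumer/Producer works, both derive from KafkaHandleBase):

    // Per-call timeout: override the handle-wide default for one query.
    Metadata md = consumer.get_metadata(true, std::chrono::milliseconds(2000));

    #if RD_KAFKA_VERSION >= RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION
    // Destroy flags: forwarded to rd_kafka_destroy_flags() by HandleDeleter.
    consumer.set_destroy_flags(RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
    #endif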
src/message.cpp
@@ -63,6 +63,16 @@ Message::Message(HandlePtr handle)
     payload_(handle_ ? Buffer((const Buffer::DataType*)handle_->payload, handle_->len) : Buffer()),
     key_(handle_ ? Buffer((const Buffer::DataType*)handle_->key, handle_->key_len) : Buffer()),
     user_data_(handle_ ? handle_->_private : nullptr) {
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+    // get the header list if any
+    if (handle_) {
+        rd_kafka_headers_t* headers_handle;
+        Error error = rd_kafka_message_headers(handle_.get(), &headers_handle);
+        if (!error) {
+            header_list_ = HeaderListType::make_non_owning(headers_handle);
+        }
+    }
+#endif
 }
 
 Message& Message::load_internal() {
@@ -74,19 +84,14 @@ Message& Message::load_internal() {
     return *this;
 }
 
-// MessageTimestamp
-
-MessageTimestamp::MessageTimestamp(milliseconds timestamp, TimestampType type)
-: timestamp_(timestamp), type_(type) {
-
-}
-
-milliseconds MessageTimestamp::get_timestamp() const {
-    return timestamp_;
-}
-
-MessageTimestamp::TimestampType MessageTimestamp::get_type() const {
-    return type_;
-}
+boost::optional<MessageTimestamp> Message::get_timestamp() const {
+    rd_kafka_timestamp_type_t type = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
+    int64_t timestamp = rd_kafka_message_timestamp(handle_.get(), &type);
+    if (timestamp == -1 || type == RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
+        return {};
+    }
+    return MessageTimestamp(std::chrono::milliseconds(timestamp),
+                            static_cast<MessageTimestamp::TimestampType>(type));
+}
 
 } // cppkafka
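A hedged sketch of the reworked accessor from the caller's side; the optional is empty when the message carries no timestamp:

    boost::optional<MessageTimestamp> ts = msg.get_timestamp();
    if (ts) {
        std::chrono::milliseconds when = ts->get_timestamp();
        // CREATE_TIME is producer-set, LOG_APPEND_TIME is broker-set.
        bool broker_set = (ts->get_type() == MessageTimestamp::LOG_APPEND_TIME);
    }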
src/message_timestamp.cpp (new file, 51 lines)
@@ -0,0 +1,51 @@
[BSD license header omitted; identical to the project's other sources (Copyright (c) 2017, Matias Fontanini).]
+#include "message_timestamp.h"
+
+using std::chrono::milliseconds;
+
+namespace cppkafka {
+
+MessageTimestamp::MessageTimestamp(milliseconds timestamp, TimestampType type)
+: timestamp_(timestamp),
+  type_(type) {
+
+}
+
+milliseconds MessageTimestamp::get_timestamp() const {
+    return timestamp_;
+}
+
+MessageTimestamp::TimestampType MessageTimestamp::get_type() const {
+    return type_;
+}
+
+} // cppkafka
src/producer.cpp (136 changed lines)
@@ -52,7 +52,6 @@ Producer::Producer(Configuration config)
     if (!ptr) {
         throw Exception("Failed to create producer handle: " + string(error_buffer));
     }
-    rd_kafka_set_log_level(ptr, 7);
     set_handle(ptr);
 }
 
@@ -64,39 +63,44 @@ Producer::PayloadPolicy Producer::get_payload_policy() const {
     return message_payload_policy_;
 }
 
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+
 void Producer::produce(const MessageBuilder& builder) {
-    const Buffer& payload = builder.payload();
-    const Buffer& key = builder.key();
-    const int policy = static_cast<int>(message_payload_policy_);
-    auto result = rd_kafka_producev(get_handle(),
-                                    RD_KAFKA_V_TOPIC(builder.topic().data()),
-                                    RD_KAFKA_V_PARTITION(builder.partition()),
-                                    RD_KAFKA_V_MSGFLAGS(policy),
-                                    RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()),
-                                    RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
-                                    RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
-                                    RD_KAFKA_V_OPAQUE(builder.user_data()),
-                                    RD_KAFKA_V_END);
-    check_error(result);
+    do_produce(builder, MessageBuilder::HeaderListType(builder.header_list())); //copy headers
+}
+
+void Producer::produce(MessageBuilder&& builder) {
+    do_produce(builder, std::move(builder.header_list())); //move headers
 }
 
 void Producer::produce(const Message& message) {
-    const Buffer& payload = message.get_payload();
-    const Buffer& key = message.get_key();
-    const int policy = static_cast<int>(message_payload_policy_);
-    int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0;
-    auto result = rd_kafka_producev(get_handle(),
-                                    RD_KAFKA_V_TOPIC(message.get_topic().data()),
-                                    RD_KAFKA_V_PARTITION(message.get_partition()),
-                                    RD_KAFKA_V_MSGFLAGS(policy),
-                                    RD_KAFKA_V_TIMESTAMP(duration),
-                                    RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
-                                    RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
-                                    RD_KAFKA_V_OPAQUE(message.get_user_data()),
-                                    RD_KAFKA_V_END);
-    check_error(result);
+    do_produce(message, HeaderList<Message::HeaderType>(message.get_header_list())); //copy headers
+}
+
+void Producer::produce(Message&& message) {
+    do_produce(message, message.detach_header_list<Message::HeaderType>()); //move headers
 }
 
+#else
+
+void Producer::produce(const MessageBuilder& builder) {
+    do_produce(builder);
+}
+
+void Producer::produce(MessageBuilder&& builder) {
+    do_produce(builder);
+}
+
+void Producer::produce(const Message& message) {
+    do_produce(message);
+}
+
+void Producer::produce(Message&& message) {
+    do_produce(message);
+}
+
+#endif
+
 int Producer::poll() {
     return poll(get_timeout());
 }
@@ -114,4 +118,80 @@ void Producer::flush(milliseconds timeout) {
     check_error(result);
 }
 
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+
+void Producer::do_produce(const MessageBuilder& builder,
+                          MessageBuilder::HeaderListType&& headers) {
+    const Buffer& payload = builder.payload();
+    const Buffer& key = builder.key();
+    const int policy = static_cast<int>(message_payload_policy_);
+    auto result = rd_kafka_producev(get_handle(),
+                                    RD_KAFKA_V_TOPIC(builder.topic().data()),
+                                    RD_KAFKA_V_PARTITION(builder.partition()),
+                                    RD_KAFKA_V_MSGFLAGS(policy),
+                                    RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()),
+                                    RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
+                                    RD_KAFKA_V_HEADERS(headers.release_handle()), //pass ownership to rdkafka
+                                    RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
+                                    RD_KAFKA_V_OPAQUE(builder.user_data()),
+                                    RD_KAFKA_V_END);
+    check_error(result);
+}
+
+void Producer::do_produce(const Message& message,
+                          MessageBuilder::HeaderListType&& headers) {
+    const Buffer& payload = message.get_payload();
+    const Buffer& key = message.get_key();
+    const int policy = static_cast<int>(message_payload_policy_);
+    int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0;
+    auto result = rd_kafka_producev(get_handle(),
+                                    RD_KAFKA_V_TOPIC(message.get_topic().data()),
+                                    RD_KAFKA_V_PARTITION(message.get_partition()),
+                                    RD_KAFKA_V_MSGFLAGS(policy),
+                                    RD_KAFKA_V_TIMESTAMP(duration),
+                                    RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
+                                    RD_KAFKA_V_HEADERS(headers.release_handle()), //pass ownership to rdkafka
+                                    RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
+                                    RD_KAFKA_V_OPAQUE(message.get_user_data()),
+                                    RD_KAFKA_V_END);
+    check_error(result);
+}
+
+#else
+
+void Producer::do_produce(const MessageBuilder& builder) {
+    const Buffer& payload = builder.payload();
+    const Buffer& key = builder.key();
+    const int policy = static_cast<int>(message_payload_policy_);
+    auto result = rd_kafka_producev(get_handle(),
+                                    RD_KAFKA_V_TOPIC(builder.topic().data()),
+                                    RD_KAFKA_V_PARTITION(builder.partition()),
+                                    RD_KAFKA_V_MSGFLAGS(policy),
+                                    RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()),
+                                    RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
+                                    RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
+                                    RD_KAFKA_V_OPAQUE(builder.user_data()),
+                                    RD_KAFKA_V_END);
+    check_error(result);
+}
+
+void Producer::do_produce(const Message& message) {
+    const Buffer& payload = message.get_payload();
+    const Buffer& key = message.get_key();
+    const int policy = static_cast<int>(message_payload_policy_);
+    int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0;
+    auto result = rd_kafka_producev(get_handle(),
+                                    RD_KAFKA_V_TOPIC(message.get_topic().data()),
+                                    RD_KAFKA_V_PARTITION(message.get_partition()),
+                                    RD_KAFKA_V_MSGFLAGS(policy),
+                                    RD_KAFKA_V_TIMESTAMP(duration),
+                                    RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()),
+                                    RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()),
+                                    RD_KAFKA_V_OPAQUE(message.get_user_data()),
+                                    RD_KAFKA_V_END);
+    check_error(result);
+}
+
+#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
+
 } // cppkafka
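A hedged sketch of the new overloads from the caller's side; the topic and header values are placeholders, and the producer is assumed to exist:

    #if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
    const std::string payload = "hello";
    const std::string trace = "abc123";
    MessageBuilder builder("some_topic"); // placeholder topic
    builder.payload(payload)
           .header(MessageBuilder::HeaderType{"trace-id", trace});
    producer.produce(builder);            // lvalue: the header list is copied
    producer.produce(std::move(builder)); // rvalue: the header list is moved into rdkafka
    #endif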
src/queue.cpp
@@ -32,6 +32,7 @@
 using std::vector;
 using std::exception;
 using std::chrono::milliseconds;
+using std::allocator;
 
 namespace cppkafka {
 
@@ -45,6 +46,15 @@ Queue Queue::make_non_owning(rd_kafka_queue_t* handle) {
     return Queue(handle, NonOwningTag{});
 }
 
+Queue Queue::make_queue(rd_kafka_queue_t* handle) {
+    if (rd_kafka_version() <= RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION) {
+        return Queue::make_non_owning(handle);
+    }
+    else {
+        return Queue(handle);
+    }
+}
+
 Queue::Queue()
 : handle_(nullptr, nullptr),
   timeout_ms_(DEFAULT_TIMEOUT) {
@@ -94,25 +104,20 @@ Message Queue::consume(milliseconds timeout) const {
     return Message(rd_kafka_consume_queue(handle_.get(), static_cast<int>(timeout.count())));
 }
 
-MessageList Queue::consume_batch(size_t max_batch_size) const {
-    return consume_batch(max_batch_size, timeout_ms_);
+vector<Message> Queue::consume_batch(size_t max_batch_size) const {
+    return consume_batch(max_batch_size, timeout_ms_, allocator<Message>());
 }
 
-MessageList Queue::consume_batch(size_t max_batch_size, milliseconds timeout) const {
-    vector<rd_kafka_message_t*> raw_messages(max_batch_size);
-    ssize_t result = rd_kafka_consume_batch_queue(handle_.get(),
-                                                  static_cast<int>(timeout.count()),
-                                                  raw_messages.data(),
-                                                  raw_messages.size());
-    if (result == -1) {
-        rd_kafka_resp_err_t error = rd_kafka_last_error();
-        if (error != RD_KAFKA_RESP_ERR_NO_ERROR) {
-            throw QueueException(error);
-        }
-        return MessageList();
-    }
-    // Build message list
-    return MessageList(raw_messages.begin(), raw_messages.begin() + result);
+vector<Message> Queue::consume_batch(size_t max_batch_size, milliseconds timeout) const {
+    return consume_batch(max_batch_size, timeout, allocator<Message>());
+}
+
+Event Queue::next_event() const {
+    return next_event(timeout_ms_);
+}
+
+Event Queue::next_event(milliseconds timeout) const {
+    return Event(rd_kafka_queue_poll(handle_.get(), timeout.count()));
 }
 
 } //cppkafka
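The motivation for make_queue(), sketched: rdkafka releases up to RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION over-release the queue's refcount, so ownership is only taken when that is safe. A hedged sketch, assuming 'handle' is a fresh rd_kafka_queue_t*:

    Queue queue = Queue::make_queue(handle); // owning or non-owning, per rdkafka version
    Event event = queue.next_event();        // polls with the queue's default timeout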
src/topic_partition.cpp
@@ -76,6 +76,10 @@ int64_t TopicPartition::get_offset() const {
     return offset_;
 }
 
+void TopicPartition::set_partition(int partition) {
+    partition_ = partition;
+}
+
 void TopicPartition::set_offset(int64_t offset) {
     offset_ = offset;
 }
src/topic_partition_list.cpp
@@ -38,6 +38,7 @@ using std::vector;
 using std::set;
 using std::ostream;
 using std::string;
+using std::equal;
 
 namespace cppkafka {
 
@@ -45,10 +46,11 @@ TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions) {
     TopicPartitionsListPtr handle(rd_kafka_topic_partition_list_new(topic_partitions.size()),
                                   &rd_kafka_topic_partition_list_destroy);
     for (const auto& item : topic_partitions) {
-        rd_kafka_topic_partition_t* new_item = nullptr;
-        new_item = rd_kafka_topic_partition_list_add(handle.get(),
-                                                     item.get_topic().data(),
-                                                     item.get_partition());
+        rd_kafka_topic_partition_t* new_item = rd_kafka_topic_partition_list_add(
+            handle.get(),
+            item.get_topic().data(),
+            item.get_partition()
+        );
         new_item->offset = item.get_offset();
     }
     return handle;
src/utils/backoff_committer.cpp
@@ -43,6 +43,12 @@ void BackoffCommitter::set_error_callback(ErrorCallback callback) {
     callback_ = move(callback);
 }
 
+void BackoffCommitter::commit() {
+    perform([&] {
+        return do_commit();
+    });
+}
+
 void BackoffCommitter::commit(const Message& msg) {
     perform([&] {
         return do_commit(msg);
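A hedged sketch of the new no-argument overload, which commits the consumer's current offsets with the same backoff/retry loop as the per-message form; the callback semantics are assumed from the existing interface (return true to keep retrying):

    BackoffCommitter committer(consumer);
    committer.set_error_callback([](Error error) {
        // Keep retrying only on timeouts; abort on anything else.
        return error.get_error() == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT;
    });
    committer.commit(); // commit current assignment offsets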
src/utils/poll_strategy_base.cpp
@@ -89,13 +89,29 @@ void PollStrategyBase::reset_state() {
 
 }
 
-void PollStrategyBase::on_assignment(TopicPartitionList& partitions) {
+void PollStrategyBase::assign(TopicPartitionList& partitions) {
     // populate partition queues
     for (const auto& partition : partitions) {
         // get the queue associated with this partition
         partition_queues_.emplace(partition, QueueData{consumer_.get_partition_queue(partition), boost::any()});
     }
     reset_state();
+}
+
+void PollStrategyBase::revoke(const TopicPartitionList& partitions) {
+    for (const auto &partition : partitions) {
+        partition_queues_.erase(partition);
+    }
+    reset_state();
+}
+
+void PollStrategyBase::revoke() {
+    partition_queues_.clear();
+    reset_state();
+}
+
+void PollStrategyBase::on_assignment(TopicPartitionList& partitions) {
+    assign(partitions);
     // call original consumer callback if any
     if (assignment_callback_) {
         assignment_callback_(partitions);
@@ -103,15 +119,7 @@ void PollStrategyBase::on_assignment(TopicPartitionList& partitions) {
 }
 
 void PollStrategyBase::on_revocation(const TopicPartitionList& partitions) {
-    for (const auto& partition : partitions) {
-        // get the queue associated with this partition
-        auto toppar_it = partition_queues_.find(partition);
-        if (toppar_it != partition_queues_.end()) {
-            // remove this queue from the list
-            partition_queues_.erase(toppar_it);
-        }
-    }
-    reset_state();
+    revoke(partitions);
     // call original consumer callback if any
     if (revocation_callback_) {
         revocation_callback_(partitions);
src/utils/roundrobin_poll_strategy.cpp
@@ -32,6 +32,7 @@
 using std::string;
 using std::chrono::milliseconds;
 using std::make_move_iterator;
+using std::allocator;
 
 namespace cppkafka {
 
@@ -67,46 +68,15 @@ Message RoundRobinPollStrategy::poll(milliseconds timeout) {
     return get_consumer_queue().queue.consume(timeout);
 }
 
-MessageList RoundRobinPollStrategy::poll_batch(size_t max_batch_size) {
-    return poll_batch(max_batch_size, get_consumer().get_timeout());
+std::vector<Message> RoundRobinPollStrategy::poll_batch(size_t max_batch_size) {
+    return poll_batch(max_batch_size, get_consumer().get_timeout(), allocator<Message>());
 }
 
-MessageList RoundRobinPollStrategy::poll_batch(size_t max_batch_size, milliseconds timeout) {
-    MessageList messages;
-    ssize_t count = max_batch_size;
-    // batch from the group event queue first (non-blocking)
-    consume_batch(get_consumer_queue().queue, messages, count, milliseconds(0));
-    size_t num_queues = get_partition_queues().size();
-    while ((count > 0) && (num_queues--)) {
-        // batch from the next partition (non-blocking)
-        consume_batch(get_next_queue().queue, messages, count, milliseconds(0));
-    }
-    // we still have space left in the buffer
-    if (count > 0) {
-        // wait on the event queue until timeout
-        consume_batch(get_consumer_queue().queue, messages, count, timeout);
-    }
-    return messages;
-}
-
-void RoundRobinPollStrategy::consume_batch(Queue& queue,
-                                           MessageList& messages,
-                                           ssize_t& count,
-                                           milliseconds timeout) {
-    MessageList queue_messages = queue.consume_batch(count, timeout);
-    if (queue_messages.empty()) {
-        return;
-    }
-    // concatenate both lists
-    messages.insert(messages.end(),
-                    make_move_iterator(queue_messages.begin()),
-                    make_move_iterator(queue_messages.end()));
-    // reduce total batch count
-    count -= queue_messages.size();
-}
+std::vector<Message> RoundRobinPollStrategy::poll_batch(size_t max_batch_size,
+                                                        milliseconds timeout) {
+    return poll_batch(max_batch_size, timeout, allocator<Message>());
+}
 
 void RoundRobinPollStrategy::restore_forwarding() {
     // forward all partition queues
     for (const auto& toppar : get_partition_queues()) {
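A hedged sketch of the strategy from the caller's side after this simplification (the heavy lifting now happens in the allocator-based overload; the consumer is assumed to exist):

    RoundRobinPollStrategy strategy(consumer);
    // Serves partitions fairly instead of draining whichever is loudest.
    std::vector<Message> batch = strategy.poll_batch(10);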
tests/CMakeLists.txt
@@ -1,16 +1,36 @@
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)
 include_directories(SYSTEM ${CATCH_INCLUDE})
-include_directories(SYSTEM ${RDKAFKA_INCLUDE_DIR})
 
-set(KAFKA_TEST_INSTANCE "kafka-vm:9092"
-    CACHE STRING "The kafka instance to which to connect to run tests")
+if (NOT KAFKA_TEST_INSTANCE)
+    set(KAFKA_TEST_INSTANCE kafka-vm:9092
+        CACHE STRING "The kafka instance to which to connect to run tests")
+endif()
+if (NOT KAFKA_NUM_PARTITIONS)
+    set(KAFKA_NUM_PARTITIONS 3 CACHE STRING "Kafka Number of partitions")
+endif()
+if (NOT KAFKA_TOPICS)
+    set(KAFKA_TOPICS "cppkafka_test1;cppkafka_test2" CACHE STRING "Kafka topics")
+endif()
+
+# Convert list of topics into a C++ initializer list
+FOREACH(TOPIC ${KAFKA_TOPICS})
+    if (NOT TOPIC_LIST)
+        set(TOPIC_LIST "\"${TOPIC}\"")
+    else()
+        set(TOPIC_LIST "${TOPIC_LIST},\"${TOPIC}\"")
+    endif()
+ENDFOREACH()
 
 add_custom_target(tests)
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-add_definitions("-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\"")
+add_definitions(
+    "-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\""
+    -DKAFKA_NUM_PARTITIONS=${KAFKA_NUM_PARTITIONS}
+    -DKAFKA_TOPIC_NAMES=${TOPIC_LIST}
+)
 
-add_executable(
-    cppkafka_tests
+add_executable(cppkafka_tests
     buffer_test.cpp
     compacted_topic_processor_test.cpp
     configuration_test.cpp
@@ -19,10 +39,14 @@ add_executable(
     producer_test.cpp
     consumer_test.cpp
     roundrobin_poll_test.cpp
+    headers_test.cpp
+    test_utils.cpp
 
     # Main file
     test_main.cpp
 )
-target_link_libraries(cppkafka_tests cppkafka ${RDKAFKA_LIBRARY} pthread rt ssl crypto dl z)
+# In CMake >= 3.15 Boost::boost == Boost::headers
+target_link_libraries(cppkafka_tests cppkafka RdKafka::rdkafka Boost::boost)
 add_dependencies(tests cppkafka_tests)
 add_test(cppkafka cppkafka_tests)
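Presumably the generated defines are consumed from the test sources roughly like this; a sketch, assuming KAFKA_TOPIC_NAMES expands to the quoted, comma-separated list built above:

    #include <string>
    #include <vector>
    // Becomes e.g. { "cppkafka_test1", "cppkafka_test2" } with the defaults.
    const std::vector<std::string> KAFKA_TOPICS = { KAFKA_TOPIC_NAMES };
    static_assert(KAFKA_NUM_PARTITIONS > 0, "expected at least one partition");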
tests/buffer_test.cpp
@@ -1,11 +1,13 @@
 #include <string>
 #include <vector>
+#include <array>
 #include <sstream>
 #include <catch.hpp>
 #include "cppkafka/buffer.h"
 
 using std::string;
 using std::vector;
+using std::array;
 using std::ostringstream;
 
 using namespace cppkafka;
@@ -36,10 +38,32 @@ TEST_CASE("conversions", "[buffer]") {
 }
 
 TEST_CASE("construction", "[buffer]") {
+    // From string
     const string str_data = "Hello world!";
-    const vector<uint8_t> data(str_data.begin(), str_data.end());
-    const Buffer buffer(data);
+    // From vector
+    const vector<uint8_t> vector_data(str_data.begin(), str_data.end());
+    // From array
+    const array<char,12> array_data{{'H','e','l','l','o',' ','w','o','r','l','d','!'}};
+    // From raw array
+    const char raw_array[12]{'H','e','l','l','o',' ','w','o','r','l','d','!'};
+
+    // Build buffers
+    const Buffer buffer(vector_data); //vector
+    const Buffer buffer2(vector_data.begin(), vector_data.end()); //iterators
+    const Buffer buffer3(str_data.data(), str_data.data() + str_data.size()); //char iterators
+    const Buffer buffer4(array_data); //arrays
+    const Buffer buffer5(raw_array); //raw arrays
+    const Buffer buffer6(str_data); //string
+    const Buffer buffer7(str_data.data(), str_data.size()); //type + size
+
+    // Test
     CHECK(str_data == buffer);
+    CHECK(buffer == buffer2);
+    CHECK(buffer == buffer3);
+    CHECK(buffer == buffer4);
+    CHECK(buffer == buffer5);
+    CHECK(buffer == buffer6);
+    CHECK(buffer == buffer7);
 }
tests/consumer_test.cpp
@@ -35,7 +35,7 @@ static Configuration make_producer_config() {
     return config;
 }
 
-static Configuration make_consumer_config(const string& group_id = "consumer_test") {
+static Configuration make_consumer_config(const string& group_id = make_consumer_group_id()) {
     Configuration config;
     config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
     config.set("enable.auto.commit", false);
@@ -85,11 +85,12 @@ TEST_CASE("message consumption", "[consumer]") {
 TEST_CASE("consumer rebalance", "[consumer]") {
     TopicPartitionList assignment1;
     TopicPartitionList assignment2;
+    const string group_id = make_consumer_group_id();
     bool revocation_called = false;
     int partition = 0;
 
     // Create a consumer and subscribe to the topic
-    Consumer consumer1(make_consumer_config());
+    Consumer consumer1(make_consumer_config(group_id));
     consumer1.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
         assignment1 = topic_partitions;
     });
@@ -100,7 +101,7 @@ TEST_CASE("consumer rebalance", "[consumer]") {
     ConsumerRunner runner1(consumer1, 1, KAFKA_NUM_PARTITIONS);
 
     // Create a second consumer and subscribe to the topic
-    Consumer consumer2(make_consumer_config());
+    Consumer consumer2(make_consumer_config(group_id));
     consumer2.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
         assignment2 = topic_partitions;
     });
@@ -195,7 +196,7 @@ TEST_CASE("consumer throttle", "[consumer]") {
             if (callback_executed_count == 3) {
                 return Message();
             }
-            return move(msg);
+            return msg;
         },
         [&](ConsumerDispatcher::Timeout) {
             if (callback_executed_count == 3) {
@@ -240,3 +241,20 @@ TEST_CASE("consume batch", "[consumer]") {
     CHECK(all_messages[0].get_payload() == payload);
     CHECK(all_messages[1].get_payload() == payload);
 }
+
+// This test may fail due to what seems to be an rdkafka bug. Skip it for now until we're
+// certain of what to do
+TEST_CASE("Event consumption", "[!hide][consumer]") {
+    // Create a consumer and subscribe to the topic
+    Consumer consumer(make_consumer_config());
+    consumer.subscribe({ KAFKA_TOPICS[0] });
+
+    vector<rd_kafka_event_type_t> types = {
+        RD_KAFKA_EVENT_NONE
+    };
+    Queue queue = consumer.get_main_queue();
+    for (const auto type : types) {
+        const Event event = queue.next_event();
+        CHECK(event.get_type() == type);
+    }
+}
tests/headers_test.cpp (new file, 226 lines)
@@ -0,0 +1,226 @@
+#include <vector>
+#include <thread>
+#include <set>
+#include <mutex>
+#include <chrono>
+#include <iterator>
+#include <condition_variable>
+#include <catch.hpp>
+#include "cppkafka/consumer.h"
+#include "cppkafka/producer.h"
+#include "cppkafka/header_list.h"
+#include "test_utils.h"
+
+using std::vector;
+using std::move;
+using std::string;
+using std::thread;
+using std::set;
+using std::mutex;
+using std::tie;
+using std::condition_variable;
+using std::lock_guard;
+using std::unique_lock;
+using std::make_move_iterator;
+using std::chrono::seconds;
+using std::chrono::milliseconds;
+using std::chrono::system_clock;
+
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+
+using namespace cppkafka;
+using StringHeader = Header<std::string>;
+using BufferHeader = Header<Buffer>;
+
+TEST_CASE("creation", "[headers]") {
+    SECTION("empty") {
+        HeaderList<StringHeader> list;
+        REQUIRE(!!list == false);
+    }
+
+    SECTION("default") {
+        HeaderList<StringHeader> list(2);
+        REQUIRE(!!list == true);
+        REQUIRE(list.size() == 0);
+        REQUIRE(list.empty() == true);
+        REQUIRE(list.get_handle() != nullptr);
+    }
+
+    SECTION("from handle") {
+        HeaderList<StringHeader> list(rd_kafka_headers_new(1));
+        REQUIRE(!!list == true);
+        REQUIRE(list.size() == 0);
+        REQUIRE(list.empty() == true);
+        REQUIRE(list.get_handle() != nullptr);
+    }
+}
+
+TEST_CASE("release", "[headers]") {
+    HeaderList<StringHeader> list(2);
+    auto handle = list.release_handle();
+    REQUIRE(handle != nullptr);
+    REQUIRE(list.release_handle() == nullptr); //release again
+    REQUIRE(!!list == false);
+    rd_kafka_headers_destroy(handle);
+}
+
+TEST_CASE("modify", "[headers]") {
+    SECTION("add") {
+        HeaderList<StringHeader> list(10);
+        //empty header name
+        list.add({{}, "payload1"});
+        //empty payload
+        list.add({"header2", {}});
+        list.add({"header3", "payload3"});
+        //both null
+        list.add({{}, {}});
+        //both empty (0-length strings)
+        list.add({"", ""});
+
+        //validate
+        REQUIRE(list.size() == 5);
+        REQUIRE_FALSE(list.empty());
+
+        //access a header
+        REQUIRE(list.at(1).get_name() == "header2");
+        REQUIRE(list.at(1).get_value().empty());
+        REQUIRE(list.at(2).get_value() == "payload3");
+    }
+
+    SECTION("remove") {
+        HeaderList<StringHeader> list(10);
+        //empty header name
+        list.add({{}, "payload1"});
+        //empty payload
+        list.add({"header2", {}});
+        list.add({"header3", "payload3"});
+        //both null
+        list.add({{}, {}});
+        //both empty (0 length strings)
+        list.add({"", ""});
+
+        //Remove a bogus name
+        Error err = list.remove("bogus");
+        REQUIRE(err.get_error() == RD_KAFKA_RESP_ERR__NOENT);
+        //Remove header with name
+        list.remove("header2");
+        REQUIRE(list.size() == 4);
+        list.remove("header3");
+        REQUIRE(list.size() == 3);
+        //Remove headers without name
+        list.remove({});
+        REQUIRE(list.size() == 0);
+    }
+}
+
+TEST_CASE("copy and move", "[headers]") {
+    SECTION("copy owning") {
+        //Create an owning header list and copy it
+        HeaderList<StringHeader> list(3), list2(3);
+        list.add({"header1", "payload1"});
+        list.add({"header2", "payload2"});
+        list.add({"header3", "payload3"});
+        REQUIRE(list2.size() == 0);
+        list2 = list;
+        REQUIRE(list2.size() == 3);
+        REQUIRE(list2.size() == list.size());
+        //make sure the handles are different
+        CHECK(list.get_handle() != list2.get_handle());
+        CHECK(list.at(0) == list2.at(0));
+        CHECK(list.at(1) == list2.at(1));
+        CHECK(list.at(2) == list2.at(2));
+        CHECK(list == list2);
+    }
+
+    SECTION("copy owning with buffers") {
+        //Create an owning header list and copy it
+        HeaderList<BufferHeader> list(3), list2(3);
+        string payload1 = "payload1", payload2 = "payload2", payload3 = "payload3";
+        list.add({"header1", payload1});
+        list.add({"header2", payload2});
+        list.add({"header3", payload3});
+        REQUIRE(list2.size() == 0);
+        list2 = list;
+        REQUIRE(list2.size() == 3);
+        REQUIRE(list2.size() == list.size());
+        //make sure the handles are different
+        CHECK(list.get_handle() != list2.get_handle());
+        CHECK(list.at(0) == list2.at(0));
+        CHECK(list.at(1) == list2.at(1));
+        CHECK(list.at(2) == list2.at(2));
+        CHECK(list == list2);
+    }
+
+    SECTION("copy non-owning") {
+        //Create an owning header list and copy it
+        HeaderList<StringHeader> list(3), list2(3), list3(HeaderList<StringHeader>::make_non_owning(list.get_handle()));
+        list.add({"header1", "payload1"});
+        list.add({"header2", "payload2"});
+        list.add({"header3", "payload3"});
+        list2 = list3; //copy non-owning list
+        REQUIRE(list.size() == 3);
+        REQUIRE(list3.size() == list.size());
+        REQUIRE(list2.size() == list.size());
+        //make sure the handles are the same
+        CHECK(list2.get_handle() == list3.get_handle());
+        CHECK(list2.at(0) == list3.at(0));
+        CHECK(list2.at(1) == list3.at(1));
+        CHECK(list2.at(2) == list3.at(2));
+        CHECK(list2 == list3);
+    }
+
+    SECTION("move") {
+        HeaderList<StringHeader> list(3), list2;
+        list.add({"header1", "payload1"});
+        list.add({"header2", "payload2"});
+        list.add({"header3", "payload3"});
+        auto handle = list.get_handle();
+        list2 = std::move(list);
+        CHECK_FALSE(!!list);
+        CHECK(!!list2);
+        CHECK(list2.size() == 3);
+        CHECK(handle == list2.get_handle());
+    }
+}
+
+TEST_CASE("access", "[headers]") {
+    HeaderList<StringHeader> list(3);
+    list.add({"header1", "payload1"});
+    list.add({"header2", "payload2"});
+    list.add({"header3", "payload3"});
+    CHECK(list.at(0).get_value() == "payload1");
+    CHECK(list.at(1).get_value() == "payload2");
+    CHECK(list.at(2).get_value() == "payload3");
+    CHECK_THROWS_AS(list.at(3), Exception);
+    CHECK(list.front() == list.at(0));
+    CHECK(list.back() == list.at(2));
+}
+
+TEST_CASE("iterate", "[headers]") {
+    HeaderList<StringHeader> list(3);
+    REQUIRE(list.begin() == list.end());
+    list.add({"header1", "payload1"});
+    REQUIRE(list.begin() != list.end());
+    CHECK(++list.begin() == list.end());
+    list.add({"header2", "payload2"});
+    list.add({"header3", "payload3"});
+    int i = 0;
+    for (auto it = list.begin(); it != list.end(); ++it, ++i) {
+        CHECK(it->get_name().length() == 7);
+        if (i == 0) {
+            CHECK(it->get_name() == "header1");
+        }
+        else if (i == 1) {
+            CHECK(it->get_name() == "header2");
+        }
+        else if (i == 2) {
+            CHECK(it->get_name() == "header3");
+        }
+    }
+    //rewind end() iterator
+    CHECK((--list.end())->get_name() == "header3");
+}
+
+#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
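Tying the HeaderList API above to the consumer side, a hedged sketch of inspecting headers on a received message (consumer assumed to exist):

    Message msg = consumer.poll();
    if (msg && msg.get_header_list()) {
        for (const auto& header : msg.get_header_list()) {
            // header.get_name() / header.get_value()
        }
    }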
@@ -24,6 +24,8 @@ using std::condition_variable;
|
|||||||
using std::chrono::system_clock;
|
using std::chrono::system_clock;
|
||||||
using std::chrono::seconds;
|
using std::chrono::seconds;
|
||||||
using std::chrono::milliseconds;
|
using std::chrono::milliseconds;
|
||||||
|
using std::chrono::time_point;
|
||||||
|
using std::chrono::duration_cast;
|
||||||
using std::ref;
|
using std::ref;
|
||||||
|
|
||||||
using namespace cppkafka;
|
using namespace cppkafka;
|
||||||
@@ -42,7 +44,7 @@ static Configuration make_consumer_config() {
|
|||||||
Configuration config = {
|
Configuration config = {
|
||||||
{ "metadata.broker.list", KAFKA_TEST_INSTANCE },
|
{ "metadata.broker.list", KAFKA_TEST_INSTANCE },
|
||||||
{ "enable.auto.commit", false },
|
{ "enable.auto.commit", false },
|
||||||
{ "group.id", "producer_test" },
|
{ "group.id", make_consumer_group_id() },
|
||||||
{ "api.version.request", true }
|
{ "api.version.request", true }
|
||||||
};
|
};
|
||||||
return config;
|
return config;
|
||||||
@@ -164,7 +166,7 @@ TEST_CASE("simple production", "[producer]") {
|
|||||||
SECTION("message with key") {
|
SECTION("message with key") {
|
||||||
const string payload = "Hello world! 2";
|
const string payload = "Hello world! 2";
|
||||||
const string key = "such key";
|
const string key = "such key";
|
||||||
const milliseconds timestamp{15};
|
auto timestamp = system_clock::now();
|
||||||
Producer producer(config);
|
Producer producer(config);
|
||||||
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
|
producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
|
||||||
.key(key)
|
.key(key)
|
||||||
@@ -181,9 +183,46 @@ TEST_CASE("simple production", "[producer]") {
         CHECK(message.get_partition() == partition);
         CHECK(!!message.get_error() == false);
         REQUIRE(!!message.get_timestamp() == true);
-        CHECK(message.get_timestamp()->get_timestamp() == timestamp);
+        CHECK(message.get_timestamp()->get_timestamp() == duration_cast<milliseconds>(timestamp.time_since_epoch()));
     }
 
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+    SECTION("message with key and move-able headers") {
+        using Hdr = MessageBuilder::HeaderType;
+        const string payload = "Hello world! 2";
+        const string key = "such key";
+        const string header1, header2 = "", header3 = "header3";
+
+        const milliseconds timestamp{15};
+        Producer producer(config);
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
+                                                        .key(key)
+                                                        .payload(payload)
+                                                        .timestamp(timestamp)
+                                                        .header(Hdr{})
+                                                        .header(Hdr{"", header2})
+                                                        .header(Hdr{"header3", header3}));
+        runner.try_join();
+
+        const auto& messages = runner.get_messages();
+        REQUIRE(messages.size() == 1);
+        const auto& message = messages[0];
+        CHECK(message.get_payload() == payload);
+        CHECK(message.get_key() == key);
+        CHECK(message.get_topic() == KAFKA_TOPICS[0]);
+        CHECK(message.get_partition() == partition);
+        CHECK(!!message.get_error() == false);
+        REQUIRE(!!message.get_timestamp() == true);
+        CHECK(message.get_timestamp()->get_timestamp() == timestamp);
+        //validate headers
+        REQUIRE(!!message.get_header_list());
+        REQUIRE(message.get_header_list().size() == 3);
+        CHECK(message.get_header_list().front() == Hdr{});
+        CHECK(message.get_header_list().at(1) == Hdr{"", header2});
+        CHECK(message.get_header_list().back() == Hdr{"header3", header3});
+    }
+#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
+
     SECTION("message without message builder") {
         const string payload = "Goodbye cruel world!";
         const string key = "replay key";
@@ -315,6 +354,52 @@ TEST_CASE("multiple messages", "[producer]") {
     }
 }
 
+#if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION)
+TEST_CASE("multiple messages with copy-able headers", "[producer][headers]") {
+    using Hdr = MessageBuilder::HeaderType;
+    size_t message_count = 2;
+    string payload = "Hello world with headers";
+    const string header1, header2 = "", header3 = "header3";
+
+    // Create a consumer and subscribe to this topic
+    Consumer consumer(make_consumer_config());
+    consumer.subscribe({ KAFKA_TOPICS[0] });
+    ConsumerRunner runner(consumer, message_count, KAFKA_NUM_PARTITIONS);
+
+    // Now create a producer and produce a message
+    Producer producer(make_producer_config());
+    MessageBuilder builder(KAFKA_TOPICS[0]);
+    builder.payload(payload)
+           .header(Hdr{})
+           .header(Hdr{"", header2})
+           .header(Hdr{"header3", header3});
+    producer.produce(builder);
+    producer.produce(builder);
+
+    //Check we still have the messages after production
+    CHECK(!!builder.header_list());
+    CHECK(builder.header_list().size() == 3);
+
+    runner.try_join();
+
+    const auto& messages = runner.get_messages();
+    REQUIRE(messages.size() == message_count);
+    const auto& message = messages[0];
+    CHECK(message.get_payload() == payload);
+    CHECK(!!message.get_error() == false);
+    //validate headers
+    REQUIRE(!!message.get_header_list());
+    REQUIRE(message.get_header_list().size() == 3);
+    CHECK(message.get_header_list().front() == Hdr{});
+    CHECK(message.get_header_list().at(1) == Hdr{"", header2});
+    CHECK(message.get_header_list().back() == Hdr{"header3", header3});
+
+    //validate second message
+    CHECK(messages[0].get_header_list() == messages[1].get_header_list());
+    CHECK(messages[0].get_header_list().get_handle() != messages[1].get_header_list().get_handle());
+}
+#endif //RD_KAFKA_HEADERS_SUPPORT_VERSION
+
 TEST_CASE("multiple sync messages", "[producer][buffered_producer][sync]") {
     size_t message_count = 10;
     set<string> payloads;
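The copy-able headers test above hinges on one detail: producing the same MessageBuilder twice only works if each message receives its own copy of the header list, which is what the final get_handle() inequality asserts. An editor-added sketch of that usage pattern — the produce_twice helper is hypothetical, and in real code the header calls sit behind the same RD_KAFKA_HEADERS_SUPPORT_VERSION guard:

    #include "cppkafka/cppkafka.h"

    // Illustrative sketch: reuse one builder for two messages. The header
    // contents compare equal, but each produced message ends up owning a
    // distinct underlying rdkafka headers handle.
    void produce_twice(cppkafka::Producer& producer, const std::string& topic,
                       const std::string& payload, const std::string& value) {
        cppkafka::MessageBuilder builder(topic);
        builder.payload(payload)
               .header(cppkafka::MessageBuilder::HeaderType{"h", value});
        producer.produce(builder);  // headers copied into message #1
        producer.produce(builder);  // ...and copied again into message #2
    }
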
@@ -7,13 +7,14 @@
 #include <condition_variable>
 #include <catch.hpp>
 #include <memory>
-#include <iostream>
+#include <stdexcept>
 #include "cppkafka/cppkafka.h"
 #include "test_utils.h"
 
 using std::vector;
 using std::move;
 using std::string;
+using std::exception;
 using std::thread;
 using std::set;
 using std::mutex;
@@ -29,25 +30,29 @@ using std::chrono::system_clock;
 
 using namespace cppkafka;
 
+#define ENABLE_STRICT_RR_ORDER 0
+
 //==================================================================================
 // Helper functions
 //==================================================================================
 static Configuration make_producer_config() {
-    Configuration config;
-    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    Configuration config = {
+        { "metadata.broker.list", KAFKA_TEST_INSTANCE },
+        { "max.in.flight", 1 }
+    };
     return config;
 }
 
-static Configuration make_consumer_config(const string& group_id = "rr_consumer_test") {
-    Configuration config;
-    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
-    config.set("enable.auto.commit", true);
-    config.set("enable.auto.offset.store", true );
-    config.set("auto.commit.interval.ms", 100);
-    config.set("group.id", group_id);
+static Configuration make_consumer_config(const string& group_id = make_consumer_group_id()) {
+    Configuration config = {
+        { "metadata.broker.list", KAFKA_TEST_INSTANCE },
+        { "enable.auto.commit", false },
+        { "group.id", group_id },
+    };
     return config;
 }
 
+#if ENABLE_STRICT_RR_ORDER
 static vector<int> make_roundrobin_partition_vector(int total_messages) {
     vector<int> partition_order;
     for (int i = 0, partition = 0; i < total_messages+1; ++i) {
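Two details in this hunk deserve a note. Setting max.in.flight to 1 caps the producer at a single outstanding request per broker connection, so a retried message cannot leapfrog a later one and per-partition order stays stable, which the round-robin assertions below rely on; the new ENABLE_STRICT_RR_ORDER switch keeps the strict ordering checks compiled out by default. An editor-added sketch of an order-preserving producer config under those assumptions (the broker address is a placeholder):

    #include "cppkafka/cppkafka.h"

    // Illustrative sketch: one in-flight request per connection means a
    // failed-and-retried produce cannot overtake a later one, preserving
    // per-partition ordering.
    cppkafka::Configuration make_ordered_producer_config() {
        return {
            { "metadata.broker.list", "localhost:9092" }, // placeholder broker
            { "max.in.flight", 1 }
        };
    }
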
@@ -58,49 +63,12 @@ static vector<int> make_roundrobin_partition_vector(int total_messages) {
     }
     return partition_order;
 }
+#endif
 
 //========================================================================
 // TESTS
 //========================================================================
 
-TEST_CASE("serial consumer test", "[roundrobin consumer]") {
-    int messages_per_partition = 3;
-    int total_messages = KAFKA_NUM_PARTITIONS * messages_per_partition;
-
-    // Create a consumer and subscribe to the topic
-    Consumer consumer(make_consumer_config());
-    TopicPartitionList partitions;
-    for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace_back(KAFKA_TOPICS[0], i++));
-    consumer.assign(partitions);
-
-    // Start the runner with the original consumer
-    ConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);
-
-    // Produce messages so we stop the consumer
-    Producer producer(make_producer_config());
-    string payload = "Serial";
-
-    // push 3 messages in each partition
-    for (int i = 0; i < total_messages; ++i) {
-        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
-    }
-    producer.flush();
-    runner.try_join();
-
-    // Check that we have all messages
-    REQUIRE(runner.get_messages().size() == total_messages);
-
-    // messages should have sequential identical partition ids in groups of <messages_per_partition>
-    int expected_partition;
-    for (int i = 0; i < total_messages; ++i) {
-        if ((i % messages_per_partition) == 0) {
-            expected_partition = runner.get_messages()[i].get_partition();
-        }
-        REQUIRE(runner.get_messages()[i].get_partition() == expected_partition);
-        REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
-    }
-}
-
 TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
     TopicPartitionList assignment;
     int messages_per_partition = 3;
@@ -114,19 +82,23 @@ TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
     PollConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);
 
     // Produce messages so we stop the consumer
-    Producer producer(make_producer_config());
+    BufferedProducer<string> producer(make_producer_config());
     string payload = "RoundRobin";
 
     // push 3 messages in each partition
     for (int i = 0; i < total_messages; ++i) {
-        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
+        producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0])
+                              .partition(i % KAFKA_NUM_PARTITIONS)
+                              .payload(payload));
     }
     producer.flush();
 
     runner.try_join();
 
     // Check that we have all messages
     REQUIRE(runner.get_messages().size() == total_messages);
 
+#if ENABLE_STRICT_RR_ORDER
     // Check that we have one message from each partition in desired order
     vector<int> partition_order = make_roundrobin_partition_vector(total_messages+KAFKA_NUM_PARTITIONS);
     int partition_idx;
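Replacing Producer::produce with BufferedProducer<string>::sync_produce makes each call block until the delivery report arrives, so every message is actually on the broker before the loop advances, rather than only after the final flush(). A short editor-added usage sketch (the topic name is a placeholder):

    #include "cppkafka/utils/buffered_producer.h"

    // Illustrative sketch: sync_produce waits for the broker's acknowledgement
    // of each message, trading throughput for deterministic delivery in tests.
    void produce_blocking(cppkafka::BufferedProducer<std::string>& producer,
                          const std::string& payload) {
        producer.sync_produce(cppkafka::MessageBuilder("test_topic") // placeholder
                              .payload(payload));
    }
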
@@ -135,12 +107,11 @@ TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
             // find first polled partition index
             partition_idx = runner.get_messages()[i].get_partition();
         }
-        REQUIRE(runner.get_messages()[i].get_partition() == partition_order[i+partition_idx]);
+        CHECK(runner.get_messages()[i].get_partition() == partition_order[i+partition_idx]);
         REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
     }
 
     //============ resume original poll strategy =============//
 
     //validate that once the round robin strategy is deleted, normal poll works as before
     consumer.delete_polling_strategy();
 
@@ -149,7 +120,7 @@ TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
     payload = "SerialPolling";
     // push 3 messages in each partition
     for (int i = 0; i < total_messages; ++i) {
-        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
+        producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload));
     }
     producer.flush();
     serial_runner.try_join();
@@ -160,5 +131,11 @@ TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
     for (int i = 0; i < total_messages; ++i) {
         REQUIRE((string)serial_runner.get_messages()[i].get_payload() == payload);
     }
+#else
+    // Simple payload check
+    for (int i = 0; i < total_messages; ++i) {
+        REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
+    }
+#endif
 }
 
@@ -15,8 +15,7 @@ using Catch::TestCaseStats;
 using Catch::Totals;
 using Catch::Session;
 
-std::vector<std::string> KAFKA_TOPICS = {"cppkafka_test1", "cppkafka_test2"};
-int KAFKA_NUM_PARTITIONS = 3;
+std::vector<std::string> KAFKA_TOPICS = {KAFKA_TOPIC_NAMES};
 
 namespace cppkafka {
 
tests/test_utils.cpp (new file, 94 lines)
@@ -0,0 +1,94 @@
+#include <cstdint>
+#include <iomanip>
+#include <limits>
+#include <sstream>
+#include <random>
+#include "test_utils.h"
+
+using std::chrono::duration_cast;
+using std::chrono::milliseconds;
+using std::chrono::seconds;
+using std::chrono::system_clock;
+using std::hex;
+using std::move;
+using std::numeric_limits;
+using std::ostringstream;
+using std::random_device;
+using std::string;
+using std::uniform_int_distribution;
+using std::unique_ptr;
+using std::vector;
+
+//==================================================================================
+// PollStrategyAdapter
+//==================================================================================
+
+PollStrategyAdapter::PollStrategyAdapter(Configuration config)
+: Consumer(config) {
+}
+
+void PollStrategyAdapter::add_polling_strategy(unique_ptr<PollInterface> poll_strategy) {
+    strategy_ = move(poll_strategy);
+}
+
+void PollStrategyAdapter::delete_polling_strategy() {
+    strategy_.reset();
+}
+
+Message PollStrategyAdapter::poll() {
+    if (strategy_) {
+        return strategy_->poll();
+    }
+    return Consumer::poll();
+}
+
+Message PollStrategyAdapter::poll(milliseconds timeout) {
+    if (strategy_) {
+        return strategy_->poll(timeout);
+    }
+    return Consumer::poll(timeout);
+}
+
+vector<Message> PollStrategyAdapter::poll_batch(size_t max_batch_size) {
+    if (strategy_) {
+        return strategy_->poll_batch(max_batch_size);
+    }
+    return Consumer::poll_batch(max_batch_size);
+}
+
+vector<Message> PollStrategyAdapter::poll_batch(size_t max_batch_size, milliseconds timeout) {
+    if (strategy_) {
+        return strategy_->poll_batch(max_batch_size, timeout);
+    }
+    return Consumer::poll_batch(max_batch_size, timeout);
+}
+
+void PollStrategyAdapter::set_timeout(milliseconds timeout) {
+    if (strategy_) {
+        strategy_->set_timeout(timeout);
+    }
+    else {
+        Consumer::set_timeout(timeout);
+    }
+}
+
+milliseconds PollStrategyAdapter::get_timeout() {
+    if (strategy_) {
+        return strategy_->get_timeout();
+    }
+    return Consumer::get_timeout();
+}
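PollStrategyAdapter is a small strategy-pattern shim: every call forwards to the installed PollInterface when one is present and falls back to the base Consumer otherwise. An editor-added usage sketch, assuming cppkafka's bundled RoundRobinPollStrategy and that its constructor takes the consumer (both are assumptions; the helper is hypothetical):

    #include <memory>
    #include "cppkafka/utils/roundrobin_poll_strategy.h"

    // Illustrative sketch: install a strategy, poll through it, then fall
    // back to plain Consumer::poll() by deleting it.
    void poll_round_robin(PollStrategyAdapter& consumer) {
        consumer.add_polling_strategy(
            std::unique_ptr<PollInterface>(new RoundRobinPollStrategy(consumer)));
        Message msg = consumer.poll();  // served by the round-robin strategy
        consumer.delete_polling_strategy();
        msg = consumer.poll();          // plain Consumer::poll() again
    }
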
+
+// Misc
+
+string make_consumer_group_id() {
+    ostringstream output;
+    output << hex;
+
+    random_device rd;
+    uniform_int_distribution<uint64_t> distribution(0, numeric_limits<uint64_t>::max());
+    const auto now = duration_cast<seconds>(system_clock::now().time_since_epoch());
+    const auto random_number = distribution(rd);
+    output << now.count() << random_number;
+    return output.str();
+}
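make_consumer_group_id concatenates the current epoch seconds with a 64-bit random draw, both rendered in hex, so each test run joins a brand-new consumer group and cannot inherit offsets committed by a previous run. An editor-added sketch of it in use, mirroring the producer-test config above (the helper name is illustrative):

    // Illustrative sketch: a fresh group id per run means the consumer starts
    // with no committed offsets left over from earlier test executions.
    Configuration make_fresh_group_config() {
        return {
            { "metadata.broker.list", KAFKA_TEST_INSTANCE },
            { "group.id", make_consumer_group_id() }
        };
    }
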
@@ -1,6 +1,7 @@
 #ifndef CPPKAFKA_TEST_UTILS_H
 #define CPPKAFKA_TEST_UTILS_H
 
+#include <string>
 #include <thread>
 #include <vector>
 #include "cppkafka/consumer.h"
@@ -8,7 +9,6 @@
 #include "cppkafka/utils/consumer_dispatcher.h"
 
 extern const std::vector<std::string> KAFKA_TOPICS;
-extern const int KAFKA_NUM_PARTITIONS;
 
 using namespace cppkafka;
 
@@ -48,18 +48,23 @@ public:
     void delete_polling_strategy();
     Message poll();
     Message poll(std::chrono::milliseconds timeout);
-    MessageList poll_batch(size_t max_batch_size);
-    MessageList poll_batch(size_t max_batch_size,
-                           std::chrono::milliseconds timeout);
+    std::vector<Message> poll_batch(size_t max_batch_size);
+    std::vector<Message> poll_batch(size_t max_batch_size,
+                                    std::chrono::milliseconds timeout);
     void set_timeout(std::chrono::milliseconds timeout);
     std::chrono::milliseconds get_timeout();
 private:
     std::unique_ptr<PollInterface> strategy_;
 };
 
+// Misc
+
+std::string make_consumer_group_id();
+
 using PollConsumerRunner = BasicConsumerRunner<PollStrategyAdapter>;
 using ConsumerRunner = BasicConsumerRunner<Consumer>;
 
+
 #include "test_utils_impl.h"
 
 #endif // CPPKAFKA_TEST_UTILS_H
@@ -1,7 +1,6 @@
 #include <mutex>
 #include <chrono>
 #include <condition_variable>
-#include "test_utils.h"
 #include "cppkafka/utils/consumer_dispatcher.h"
 
 using std::vector;
@@ -19,7 +18,6 @@ using cppkafka::Consumer;
 using cppkafka::BasicConsumerDispatcher;
 
 using cppkafka::Message;
-using cppkafka::MessageList;
 using cppkafka::TopicPartition;
 
 //==================================================================================
@@ -46,7 +44,8 @@ BasicConsumerRunner<ConsumerType>::BasicConsumerRunner(ConsumerType& consumer,
             }
         },
         // EOF callback
-        [&](typename BasicConsumerDispatcher<ConsumerType>::EndOfFile, const TopicPartition& topic_partition) {
+        [&](typename BasicConsumerDispatcher<ConsumerType>::EndOfFile,
+            const TopicPartition& topic_partition) {
             if (number_eofs != partitions) {
                 number_eofs++;
                 if (number_eofs == partitions) {
@@ -89,7 +88,7 @@ BasicConsumerRunner<ConsumerType>::~BasicConsumerRunner() {
 }
 
 template <typename ConsumerType>
-const MessageList& BasicConsumerRunner<ConsumerType>::get_messages() const {
+const std::vector<Message>& BasicConsumerRunner<ConsumerType>::get_messages() const {
     return messages_;
 }
 
@@ -100,73 +99,4 @@ void BasicConsumerRunner<ConsumerType>::try_join() {
     }
 }
 
-//==================================================================================
-// PollStrategyAdapter
-//==================================================================================
-inline
-PollStrategyAdapter::PollStrategyAdapter(Configuration config)
-: Consumer(config) {
-}
-
-inline
-void PollStrategyAdapter::add_polling_strategy(std::unique_ptr<PollInterface> poll_strategy) {
-    strategy_ = std::move(poll_strategy);
-}
-
-inline
-void PollStrategyAdapter::delete_polling_strategy() {
-    strategy_.reset();
-}
-
-inline
-Message PollStrategyAdapter::poll() {
-    if (strategy_) {
-        return strategy_->poll();
-    }
-    return Consumer::poll();
-}
-
-inline
-Message PollStrategyAdapter::poll(milliseconds timeout) {
-    if (strategy_) {
-        return strategy_->poll(timeout);
-    }
-    return Consumer::poll(timeout);
-}
-
-inline
-MessageList PollStrategyAdapter::poll_batch(size_t max_batch_size) {
-    if (strategy_) {
-        return strategy_->poll_batch(max_batch_size);
-    }
-    return Consumer::poll_batch(max_batch_size);
-}
-
-inline
-MessageList PollStrategyAdapter::poll_batch(size_t max_batch_size,
-                                            milliseconds timeout) {
-    if (strategy_) {
-        return strategy_->poll_batch(max_batch_size, timeout);
-    }
-    return Consumer::poll_batch(max_batch_size, timeout);
-}
-
-inline
-void PollStrategyAdapter::set_timeout(milliseconds timeout) {
-    if (strategy_) {
-        strategy_->set_timeout(timeout);
-    }
-    else {
-        Consumer::set_timeout(timeout);
-    }
-}
-
-inline
-milliseconds PollStrategyAdapter::get_timeout() {
-    if (strategy_) {
-        return strategy_->get_timeout();
-    }
-    return Consumer::get_timeout();
-}
-