mirror of https://github.com/Telecominfraproject/wlan-cloud-lib-cppkafka.git
synced 2025-11-01 02:57:53 +00:00

Added commit for current assignment and message state checks (#53)

committed by Matias Fontanini
parent c95d790547
commit 83a963c1db
@@ -64,14 +64,14 @@ class TopicConfiguration;
  * Consumer consumer(config);
  *
  * // Set the assignment callback
- * consumer.set_assignment_callback([&](vector<TopicPartition>& topic_partitions) {
+ * consumer.set_assignment_callback([&](TopicPartitionList& topic_partitions) {
  *     // Here you could fetch offsets and do something, altering the offsets on the
  *     // topic_partitions vector if needed
  *     cout << "Got assigned " << topic_partitions.size() << " partitions!" << endl;
  * });
  *
  * // Set the revocation callback
- * consumer.set_revocation_callback([&](const vector<TopicPartition>& topic_partitions) {
+ * consumer.set_revocation_callback([&](const TopicPartitionList& topic_partitions) {
  *     cout << topic_partitions.size() << " partitions revoked!" << endl;
  * });
  *
@@ -126,7 +126,7 @@ public:
      * \brief Sets the topic/partition assignment callback
      *
      * The Consumer class will use rd_kafka_conf_set_rebalance_cb and will handle the
-     * rebalance, converting from rdkafka topic partition list handles into vector<TopicPartition>
+     * rebalance, converting from rdkafka topic partition list handles into TopicPartitionList
      * and executing the assignment/revocation/rebalance_error callbacks.
      *
      * \note You *do not need* to call Consumer::assign with the provided topic partitions. This
@@ -140,7 +140,7 @@ public:
      * \brief Sets the topic/partition revocation callback
      *
      * The Consumer class will use rd_kafka_conf_set_rebalance_cb and will handle the
-     * rebalance, converting from rdkafka topic partition list handles into vector<TopicPartition>
+     * rebalance, converting from rdkafka topic partition list handles into TopicPartitionList
      * and executing the assignment/revocation/rebalance_error callbacks.
      *
      * \note You *do not need* to call Consumer::assign with an empty topic partition list or
@@ -155,7 +155,7 @@ public:
      * \brief Sets the rebalance error callback
      *
      * The Consumer class will use rd_kafka_conf_set_rebalance_cb and will handle the
-     * rebalance, converting from rdkafka topic partition list handles into vector<TopicPartition>
+     * rebalance, converting from rdkafka topic partition list handles into TopicPartitionList
      * and executing the assignment/revocation/rebalance_error callbacks.
      *
      * \param callback The rebalance error callback
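
The three doc comments above describe the same mechanism: the Consumer registers one handler via rd_kafka_conf_set_rebalance_cb, converts the raw rdkafka partition list into a TopicPartitionList, and then dispatches to the assignment, revocation, or rebalance-error callback. Below is a minimal sketch of wiring up the two list-based callbacks against this API; the broker address, group id, and topic name are placeholders, not values from the patch.

#include <iostream>
#include <cppkafka/configuration.h>
#include <cppkafka/consumer.h>

using namespace cppkafka;

int main() {
    Configuration config;
    config.set("metadata.broker.list", "127.0.0.1:9092"); // placeholder broker
    config.set("group.id", "example-group");              // placeholder group id

    Consumer consumer(config);

    // Runs after the rebalance handler converts the rdkafka handle into a
    // TopicPartitionList; offsets may be altered here before assignment.
    consumer.set_assignment_callback([](TopicPartitionList& topic_partitions) {
        std::cout << "Got assigned " << topic_partitions.size() << " partitions\n";
    });

    // Runs when the current assignment is revoked during a rebalance.
    consumer.set_revocation_callback([](const TopicPartitionList& topic_partitions) {
        std::cout << topic_partitions.size() << " partitions revoked\n";
    });

    consumer.subscribe({ "example_topic" }); // placeholder topic
}
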
@@ -192,6 +192,24 @@ public:
      * parameter
      */
     void unassign();
+
+    /**
+     * \brief Commits the current partition assignment
+     *
+     * This translates into a call to rd_kafka_commit with a null partition list.
+     *
+     * \remark This function is equivalent to calling commit(get_assignment())
+     */
+    void commit();
+
+    /**
+     * \brief Commits the current partition assignment asynchronously
+     *
+     * This translates into a call to rd_kafka_commit with a null partition list.
+     *
+     * \remark This function is equivalent to calling async_commit(get_assignment())
+     */
+    void async_commit();

     /**
      * \brief Commits the given message synchronously
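
The new no-argument overloads commit whatever the consumer currently has assigned, without building a partition list first. A short usage sketch, assuming a Consumer that is already configured and subscribed (as in the earlier example) and a Message that converts to bool when it holds a valid handle:

#include <cppkafka/consumer.h>
#include <cppkafka/message.h>

using namespace cppkafka;

void poll_and_commit(Consumer& consumer) {
    Message msg = consumer.poll();
    if (msg) {
        // Commit just this message's offset, synchronously.
        consumer.commit(msg);
    }

    // Commit the whole current assignment: per the doc comment above this is
    // equivalent to consumer.commit(consumer.get_assignment()), implemented by
    // passing a null partition list to rd_kafka_commit.
    consumer.commit();

    // Same, but returns without waiting for the commit to complete.
    consumer.async_commit();
}
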
@@ -349,7 +367,7 @@ private:

     void close();
     void commit(const Message& msg, bool async);
-    void commit(const TopicPartitionList& topic_partitions, bool async);
+    void commit(const TopicPartitionList* topic_partitions, bool async);
     void handle_rebalance(rd_kafka_resp_err_t err, TopicPartitionList& topic_partitions);

     AssignmentCallback assignment_callback_;
@@ -33,6 +33,7 @@
 #include <memory>
 #include <cstdint>
 #include <chrono>
+#include <cassert>
 #include <boost/optional.hpp>
 #include <librdkafka/rdkafka.h>
 #include "buffer.h"
@@ -83,6 +84,7 @@ public:
      * Gets the error attribute
      */
     Error get_error() const {
+        assert(handle_);
         return handle_->err;
     }

@@ -97,6 +99,7 @@ public:
      * Gets the topic that this message belongs to
      */
     std::string get_topic() const {
+        assert(handle_);
         return rd_kafka_topic_name(handle_->rkt);
     }

@@ -104,6 +107,7 @@ public:
      * Gets the partition that this message belongs to
      */
     int get_partition() const {
+        assert(handle_);
         return handle_->partition;
     }

@@ -125,6 +129,7 @@ public:
      * Gets the message offset
      */
     int64_t get_offset() const {
+        assert(handle_);
         return handle_->offset;
     }

@@ -135,6 +140,7 @@ public:
      * attribute
      */
     void* get_user_data() const {
+        assert(handle_);
         return handle_->_private;
     }

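
Each accessor above now asserts that the underlying rd_kafka_message_t handle is non-null, so in a debug build calling a getter on an empty (default-constructed) Message fails fast instead of dereferencing a null pointer. A small sketch of the intended calling pattern; it assumes Message's boolean conversion reports whether a valid handle is held, which the null-initialized default constructor later in this diff suggests:

#include <iostream>
#include <cppkafka/message.h>

using namespace cppkafka;

void print_message_position(const Message& msg) {
    if (!msg) {
        // Empty message: the getters below would trip assert(handle_) in a
        // debug build, so bail out before touching them.
        std::cout << "empty message\n";
        return;
    }
    std::cout << msg.get_topic() << "[" << msg.get_partition() << "] @ "
              << msg.get_offset() << "\n";
}
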
@@ -49,9 +49,9 @@ using TopicPartitionsListPtr = std::unique_ptr<rd_kafka_topic_partition_list_t,
 using TopicPartitionList = std::vector<TopicPartition>;

 // Conversions between rdkafka handles and TopicPartitionList
-CPPKAFKA_API TopicPartitionsListPtr convert(const std::vector<TopicPartition>& topic_partitions);
-CPPKAFKA_API std::vector<TopicPartition> convert(const TopicPartitionsListPtr& topic_partitions);
-CPPKAFKA_API std::vector<TopicPartition> convert(rd_kafka_topic_partition_list_t* topic_partitions);
+CPPKAFKA_API TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions);
+CPPKAFKA_API TopicPartitionList convert(const TopicPartitionsListPtr& topic_partitions);
+CPPKAFKA_API TopicPartitionList convert(rd_kafka_topic_partition_list_t* topic_partitions);
 CPPKAFKA_API TopicPartitionsListPtr make_handle(rd_kafka_topic_partition_list_t* handle);

 CPPKAFKA_API std::ostream& operator<<(std::ostream& output, const TopicPartitionList& rhs);
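
Because TopicPartitionList is an alias for std::vector<TopicPartition>, these declarations only change the spelling, but they make the conversions between cppkafka lists and raw librdkafka lists read consistently. A round-trip sketch; the topic name is a placeholder:

#include <cassert>
#include <cppkafka/topic_partition.h>
#include <cppkafka/topic_partition_list.h>

using namespace cppkafka;

int main() {
    // Build a list on the cppkafka side; topic and partitions are illustrative.
    TopicPartitionList partitions = {
        { "example_topic", 0 },
        { "example_topic", 1 }
    };

    // Convert to an owning rdkafka handle; the unique_ptr deleter frees it.
    TopicPartitionsListPtr handle = convert(partitions);

    // Convert back; the result describes the same topic/partition pairs.
    TopicPartitionList round_trip = convert(handle);
    assert(round_trip.size() == partitions.size());
}
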
@@ -116,6 +116,14 @@ void Consumer::unassign() {
     check_error(error);
 }

+void Consumer::commit() {
+    commit(nullptr, false);
+}
+
+void Consumer::async_commit() {
+    commit(nullptr, true);
+}
+
 void Consumer::commit(const Message& msg) {
     commit(msg, false);
 }
@@ -125,11 +133,11 @@ void Consumer::async_commit(const Message& msg) {
 }

 void Consumer::commit(const TopicPartitionList& topic_partitions) {
-    commit(topic_partitions, false);
+    commit(&topic_partitions, false);
 }

 void Consumer::async_commit(const TopicPartitionList& topic_partitions) {
-    commit(topic_partitions, true);
+    commit(&topic_partitions, true);
 }

 KafkaHandleBase::OffsetTuple Consumer::get_offsets(const TopicPartition& topic_partition) const {
@@ -238,15 +246,15 @@ void Consumer::close() {

 void Consumer::commit(const Message& msg, bool async) {
     rd_kafka_resp_err_t error;
-    error = rd_kafka_commit_message(get_handle(), msg.get_handle(),
-                                    async ? 1 : 0);
+    error = rd_kafka_commit_message(get_handle(), msg.get_handle(), async ? 1 : 0);
     check_error(error);
 }

-void Consumer::commit(const TopicPartitionList& topic_partitions, bool async) {
-    TopicPartitionsListPtr topic_list_handle = convert(topic_partitions);
+void Consumer::commit(const TopicPartitionList* topic_partitions, bool async) {
     rd_kafka_resp_err_t error;
-    error = rd_kafka_commit(get_handle(), topic_list_handle.get(), async ? 1 : 0);
+    error = rd_kafka_commit(get_handle(),
+                            !topic_partitions ? nullptr : convert(*topic_partitions).get(),
+                            async ? 1 : 0);
     check_error(error);
 }

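
Switching the private helper to take the list by pointer lets one function serve both public paths: the list-taking overloads pass the address of their argument, while the new commit()/async_commit() pass nullptr, which is forwarded to rd_kafka_commit as a null list so librdkafka commits the current assignment's offsets. The same nullable-parameter dispatch, sketched with stand-in types rather than the real TopicPartitionList and rd_kafka_commit:

#include <iostream>
#include <string>
#include <vector>

// Stand-in for TopicPartitionList, for illustration only.
using PartitionList = std::vector<std::string>;

// nullptr means "use whatever is currently assigned".
void do_commit(const PartitionList* partitions, bool async) {
    if (!partitions) {
        std::cout << "commit current assignment (async=" << async << ")\n";
    } else {
        std::cout << "commit " << partitions->size()
                  << " explicit partitions (async=" << async << ")\n";
    }
}

int main() {
    PartitionList explicit_partitions = { "topic[0]", "topic[1]" };
    do_commit(&explicit_partitions, false); // commit(list) path
    do_commit(nullptr, true);               // async_commit() path
}
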
@@ -41,7 +41,7 @@ Message Message::make_non_owning(rd_kafka_message_t* handle) {
     return Message(handle, NonOwningTag());
 }

-Message::Message()
+Message::Message()
 : handle_(nullptr, nullptr) {

 }
@@ -37,7 +37,7 @@ using std::ostream;

 namespace cppkafka {

-TopicPartitionsListPtr convert(const vector<TopicPartition>& topic_partitions) {
+TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions) {
     TopicPartitionsListPtr handle(rd_kafka_topic_partition_list_new(topic_partitions.size()),
                                   &rd_kafka_topic_partition_list_destroy);
     for (const auto& item : topic_partitions) {
@@ -50,12 +50,12 @@ TopicPartitionsListPtr convert(const vector<TopicPartition>& topic_partitions) {
     return handle;
 }

-vector<TopicPartition> convert(const TopicPartitionsListPtr& topic_partitions) {
+TopicPartitionList convert(const TopicPartitionsListPtr& topic_partitions) {
     return convert(topic_partitions.get());
 }

-vector<TopicPartition> convert(rd_kafka_topic_partition_list_t* topic_partitions) {
-    vector<TopicPartition> output;
+TopicPartitionList convert(rd_kafka_topic_partition_list_t* topic_partitions) {
+    TopicPartitionList output;
     for (int i = 0; i < topic_partitions->cnt; ++i) {
         const auto& elem = topic_partitions->elems[i];
         output.emplace_back(elem.topic, elem.partition, elem.offset);
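
convert() copies each element of a raw rd_kafka_topic_partition_list_t into the vector-based TopicPartitionList, while make_handle only wraps an existing raw list so the TopicPartitionsListPtr deleter frees it. A sketch combining the two, with a placeholder topic name:

#include <iostream>
#include <librdkafka/rdkafka.h>
#include <cppkafka/topic_partition_list.h>

using namespace cppkafka;

int main() {
    // Build a raw librdkafka list the way a C caller would.
    rd_kafka_topic_partition_list_t* raw = rd_kafka_topic_partition_list_new(1);
    rd_kafka_topic_partition_list_add(raw, "example_topic", 0);

    // Take ownership: the TopicPartitionsListPtr deleter destroys `raw`.
    TopicPartitionsListPtr owned = make_handle(raw);

    // Copy into the std::vector-based representation used by cppkafka.
    TopicPartitionList partitions = convert(owned);
    std::cout << partitions << "\n"; // operator<< declared earlier in this diff
}
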
@@ -51,12 +51,12 @@ public:
 const string ConsumerTest::KAFKA_TOPIC = "cppkafka_test1";

 TEST_F(ConsumerTest, AssignmentCallback) {
-    vector<TopicPartition> assignment;
+    TopicPartitionList assignment;
     int partition = 0;

     // Create a consumer and subscribe to the topic
     Consumer consumer(make_consumer_config());
-    consumer.set_assignment_callback([&](const vector<TopicPartition>& topic_partitions) {
+    consumer.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
         assignment = topic_partitions;
     });
     consumer.subscribe({ KAFKA_TOPIC });
@@ -90,17 +90,17 @@ TEST_F(ConsumerTest, AssignmentCallback) {
 }

 TEST_F(ConsumerTest, Rebalance) {
-    vector<TopicPartition> assignment1;
-    vector<TopicPartition> assignment2;
+    TopicPartitionList assignment1;
+    TopicPartitionList assignment2;
     bool revocation_called = false;
     int partition = 0;

     // Create a consumer and subscribe to the topic
     Consumer consumer1(make_consumer_config());
-    consumer1.set_assignment_callback([&](const vector<TopicPartition>& topic_partitions) {
+    consumer1.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
         assignment1 = topic_partitions;
     });
-    consumer1.set_revocation_callback([&](const vector<TopicPartition>&) {
+    consumer1.set_revocation_callback([&](const TopicPartitionList&) {
         revocation_called = true;
     });
     consumer1.subscribe({ KAFKA_TOPIC });
@@ -108,7 +108,7 @@ TEST_F(ConsumerTest, Rebalance) {

     // Create a second consumer and subscribe to the topic
     Consumer consumer2(make_consumer_config());
-    consumer2.set_assignment_callback([&](const vector<TopicPartition>& topic_partitions) {
+    consumer2.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
         assignment2 = topic_partitions;
     });
     consumer2.subscribe({ KAFKA_TOPIC });
@@ -126,12 +126,12 @@ TEST_F(KafkaHandleBaseTest, ConsumerGroups) {

     MemberAssignmentInformation assignment = member.get_member_assignment();
     EXPECT_EQ(0, assignment.get_version());
-    vector<TopicPartition> expected_topic_partitions = {
+    TopicPartitionList expected_topic_partitions = {
         { KAFKA_TOPIC, 0 },
         { KAFKA_TOPIC, 1 },
         { KAFKA_TOPIC, 2 }
     };
-    vector<TopicPartition> topic_partitions = assignment.get_topic_partitions();
+    TopicPartitionList topic_partitions = assignment.get_topic_partitions();
     sort(topic_partitions.begin(), topic_partitions.end());
     EXPECT_EQ(expected_topic_partitions, topic_partitions);
     /*for (const auto c : ) {