mirror of https://github.com/Telecominfraproject/wlan-cloud-lib-cppkafka.git
Added a test case for the polling strategy and refactored the strategy class.

tests/CMakeLists.txt
@@ -7,14 +7,10 @@ set(KAFKA_TEST_INSTANCE "kafka-vm:9092"
 add_custom_target(tests)

 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-add_library(cppkafka-test EXCLUDE_FROM_ALL test_utils.cpp)
-target_link_libraries(cppkafka-test cppkafka ${RDKAFKA_LIBRARY} pthread)

 add_definitions("-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\"")

 add_executable(
     cppkafka_tests
     EXCLUDE_FROM_ALL
     buffer_test.cpp
     compacted_topic_processor_test.cpp
     configuration_test.cpp
@@ -22,10 +18,11 @@ add_executable(
     kafka_handle_base_test.cpp
     producer_test.cpp
     consumer_test.cpp
+    roundrobin_poll_test.cpp

     # Main file
     test_main.cpp
 )
-target_link_libraries(cppkafka_tests cppkafka-test)
+target_link_libraries(cppkafka_tests cppkafka ${RDKAFKA_LIBRARY} pthread)
 add_dependencies(tests cppkafka_tests)
 add_test(cppkafka cppkafka_tests)

tests/compacted_topic_processor_test.cpp
@@ -8,6 +8,7 @@
 #include "cppkafka/producer.h"
 #include "cppkafka/consumer.h"
 #include "cppkafka/utils/compacted_topic_processor.h"
+#include "test_utils.h"

 using std::string;
 using std::to_string;
@@ -29,8 +30,6 @@ using std::chrono::milliseconds;

 using namespace cppkafka;

-static const string KAFKA_TOPIC = "cppkafka_test1";
-
 static Configuration make_producer_config() {
     Configuration config;
     config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
@@ -65,7 +64,7 @@ TEST_CASE("consumption", "[consumer][compacted]") {
     compacted_consumer.set_event_handler([&](const Event& event) {
         events.push_back(event);
     });
-    consumer.subscribe({ KAFKA_TOPIC });
+    consumer.subscribe({ KAFKA_TOPICS[0] });
     consumer.poll();
     consumer.poll();
     consumer.poll();
@@ -82,13 +81,13 @@ TEST_CASE("consumption", "[consumer][compacted]") {
     };
     for (const auto& element_pair : elements) {
         const ElementType& element = element_pair.second;
-        MessageBuilder builder(KAFKA_TOPIC);
+        MessageBuilder builder(KAFKA_TOPICS[0]);
         builder.partition(element.partition).key(element_pair.first).payload(element.value);
         producer.produce(builder);
     }
     // Now erase the first element
     string deleted_key = "42";
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(0).key(deleted_key));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(0).key(deleted_key));

     for (size_t i = 0; i < 10; ++i) {
         compacted_consumer.process_event();

tests/consumer_test.cpp
@@ -29,8 +29,6 @@ using std::chrono::system_clock;

 using namespace cppkafka;

-const string KAFKA_TOPIC = "cppkafka_test1";
-
 static Configuration make_producer_config() {
     Configuration config;
     config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
@@ -54,31 +52,32 @@ TEST_CASE("message consumption", "[consumer]") {
     consumer.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
         assignment = topic_partitions;
     });
-    consumer.subscribe({ KAFKA_TOPIC });
-    ConsumerRunner runner(consumer, 1, 3);
+    consumer.subscribe({ KAFKA_TOPICS[0] });
+    ConsumerRunner runner(consumer, 1, KAFKA_NUM_PARTITIONS);

     // Produce a message just so we stop the consumer
     Producer producer(make_producer_config());
     string payload = "Hello world!";
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     runner.try_join();

-    // All 3 partitions should be ours
-    REQUIRE(assignment.size() == 3);
-    set<int> partitions = { 0, 1, 2 };
+    // All partitions should be ours
+    REQUIRE(assignment.size() == KAFKA_NUM_PARTITIONS);
+    set<int> partitions;
+    for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace(i++));
     for (const auto& topic_partition : assignment) {
-        CHECK(topic_partition.get_topic() == KAFKA_TOPIC);
+        CHECK(topic_partition.get_topic() == KAFKA_TOPICS[0]);
         CHECK(partitions.erase(topic_partition.get_partition()) == true);
     }
     REQUIRE(runner.get_messages().size() == 1);
-    CHECK(consumer.get_subscription() == vector<string>{ KAFKA_TOPIC });
+    CHECK(consumer.get_subscription() == vector<string>{ KAFKA_TOPICS[0] });

     assignment = consumer.get_assignment();
-    CHECK(assignment.size() == 3);
+    CHECK(assignment.size() == KAFKA_NUM_PARTITIONS);

     int64_t low;
     int64_t high;
-    tie(low, high) = consumer.get_offsets({ KAFKA_TOPIC, partition });
+    tie(low, high) = consumer.get_offsets({ KAFKA_TOPICS[0], partition });
     CHECK(high > low);
     CHECK(runner.get_messages().back().get_offset() + 1 == high);
 }
@@ -97,15 +96,15 @@ TEST_CASE("consumer rebalance", "[consumer]") {
     consumer1.set_revocation_callback([&](const TopicPartitionList&) {
         revocation_called = true;
     });
-    consumer1.subscribe({ KAFKA_TOPIC });
-    ConsumerRunner runner1(consumer1, 1, 3);
+    consumer1.subscribe({ KAFKA_TOPICS[0] });
+    ConsumerRunner runner1(consumer1, 1, KAFKA_NUM_PARTITIONS);

     // Create a second consumer and subscribe to the topic
     Consumer consumer2(make_consumer_config());
     consumer2.set_assignment_callback([&](const TopicPartitionList& topic_partitions) {
         assignment2 = topic_partitions;
     });
-    consumer2.subscribe({ KAFKA_TOPIC });
+    consumer2.subscribe({ KAFKA_TOPICS[0] });
     ConsumerRunner runner2(consumer2, 1, 1);

     CHECK(revocation_called == true);
@@ -113,19 +112,20 @@ TEST_CASE("consumer rebalance", "[consumer]") {
     // Produce a message just so we stop the consumer
     Producer producer(make_producer_config());
     string payload = "Hello world!";
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     runner1.try_join();
     runner2.try_join();

-    // All 3 partitions should be assigned
-    CHECK(assignment1.size() + assignment2.size() == 3);
-    set<int> partitions = { 0, 1, 2 };
+    // All partitions should be assigned
+    CHECK(assignment1.size() + assignment2.size() == KAFKA_NUM_PARTITIONS);
+    set<int> partitions;
+    for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace(i++));
     for (const auto& topic_partition : assignment1) {
-        CHECK(topic_partition.get_topic() == KAFKA_TOPIC);
+        CHECK(topic_partition.get_topic() == KAFKA_TOPICS[0]);
         CHECK(partitions.erase(topic_partition.get_partition()) == true);
     }
     for (const auto& topic_partition : assignment2) {
-        CHECK(topic_partition.get_topic() == KAFKA_TOPIC);
+        CHECK(topic_partition.get_topic() == KAFKA_TOPICS[0]);
         CHECK(partitions.erase(topic_partition.get_partition()) == true);
     }
     CHECK(runner1.get_messages().size() + runner2.get_messages().size() == 1);
@@ -143,18 +143,18 @@ TEST_CASE("consumer offset commit", "[consumer]") {
         offset_commit_called = true;
         CHECK(!!error == false);
         REQUIRE(topic_partitions.size() == 1);
-        CHECK(topic_partitions[0].get_topic() == KAFKA_TOPIC);
+        CHECK(topic_partitions[0].get_topic() == KAFKA_TOPICS[0]);
         CHECK(topic_partitions[0].get_partition() == 0);
         CHECK(topic_partitions[0].get_offset() == message_offset + 1);
     });
     Consumer consumer(config);
-    consumer.assign({ { KAFKA_TOPIC, 0 } });
+    consumer.assign({ { KAFKA_TOPICS[0], 0 } });
     ConsumerRunner runner(consumer, 1, 1);

     // Produce a message just so we stop the consumer
     Producer producer(make_producer_config());
     string payload = "Hello world!";
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     runner.try_join();

     REQUIRE(runner.get_messages().size() == 1);
@@ -173,7 +173,7 @@ TEST_CASE("consumer throttle", "[consumer]") {
     // Create a consumer and subscribe to the topic
     Configuration config = make_consumer_config("offset_commit");
     Consumer consumer(config);
-    consumer.assign({ { KAFKA_TOPIC, 0 } });
+    consumer.assign({ { KAFKA_TOPICS[0], 0 } });

     {
         ConsumerRunner runner(consumer, 0, 1);
@@ -183,7 +183,7 @@ TEST_CASE("consumer throttle", "[consumer]") {
     // Produce a message just so we stop the consumer
     BufferedProducer<string> producer(make_producer_config());
     string payload = "Hello world!";
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     producer.flush();

     size_t callback_executed_count = 0;
@@ -213,7 +213,7 @@ TEST_CASE("consume batch", "[consumer]") {
     // Create a consumer and subscribe to the topic
     Configuration config = make_consumer_config("test");
     Consumer consumer(config);
-    consumer.assign({ { KAFKA_TOPIC, 0 } });
+    consumer.assign({ { KAFKA_TOPICS[0], 0 } });

     {
         ConsumerRunner runner(consumer, 0, 1);
@@ -224,8 +224,8 @@ TEST_CASE("consume batch", "[consumer]") {
     BufferedProducer<string> producer(make_producer_config());
     string payload = "Hello world!";
     // Produce it twice
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     producer.flush();

     MessageList all_messages;
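
Note on the set-building loop introduced above: for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace(i++)); does all of its work in the loop header, with the emplace call in the increment slot and an empty body. A minimal equivalent in conventional form (illustrative only, not part of the commit):

    // Equivalent to the one-liner used throughout these tests:
    //     for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace(i++));
    std::set<int> partitions;
    for (int i = 0; i < KAFKA_NUM_PARTITIONS; ++i) {
        partitions.emplace(i);  // fills the set with {0, 1, ..., KAFKA_NUM_PARTITIONS - 1}
    }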

tests/kafka_handle_base_test.cpp
@@ -14,8 +14,6 @@ using std::string;

 using namespace cppkafka;

-static const string KAFKA_TOPIC = "cppkafka_test1";
-
 Configuration make_config() {
     Configuration config;
     config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
@@ -45,6 +43,9 @@ uint16_t get_kafka_port() {
 }

 TEST_CASE("metadata", "[handle_base]") {
+    if (KAFKA_TOPICS.size() < 2) {
+        return; // skip test
+    }
     Producer producer({});
     producer.add_brokers(KAFKA_TEST_INSTANCE);
     Metadata metadata = producer.get_metadata();
@@ -59,7 +60,7 @@ TEST_CASE("metadata", "[handle_base]") {
     }

     SECTION("topics") {
-        unordered_set<string> topic_names = { "cppkafka_test1", "cppkafka_test2" };
+        unordered_set<string> topic_names = { KAFKA_TOPICS[0], KAFKA_TOPICS[1] };
         size_t found_topics = 0;

         const vector<TopicMetadata>& topics = metadata.get_topics();
@@ -68,8 +69,9 @@ TEST_CASE("metadata", "[handle_base]") {
         for (const auto& topic : topics) {
             if (topic_names.count(topic.get_name()) == 1) {
                 const vector<PartitionMetadata>& partitions = topic.get_partitions();
-                REQUIRE(partitions.size() == 3);
-                set<int32_t> expected_ids = { 0, 1, 2 };
+                REQUIRE(partitions.size() == KAFKA_NUM_PARTITIONS);
+                set<int32_t> expected_ids;
+                for (int i = 0; i < KAFKA_NUM_PARTITIONS; expected_ids.emplace(i++));
                 for (const PartitionMetadata& partition : partitions) {
                     REQUIRE(expected_ids.erase(partition.get_id()) == 1);
                     for (int32_t replica : partition.get_replicas()) {
@@ -90,8 +92,8 @@ TEST_CASE("metadata", "[handle_base]") {
         CHECK(metadata.get_topics_prefixed("cppkafka_").size() == topic_names.size());

         // Now get the whole metadata only for this topic
-        Topic topic = producer.get_topic(KAFKA_TOPIC);
-        CHECK(producer.get_metadata(topic).get_name() == KAFKA_TOPIC);
+        Topic topic = producer.get_topic(KAFKA_TOPICS[0]);
+        CHECK(producer.get_metadata(topic).get_name() == KAFKA_TOPICS[0]);
     }
 }

@@ -106,7 +108,7 @@ TEST_CASE("consumer groups", "[handle_base]") {

     // Build consumer
     Consumer consumer(config);
-    consumer.subscribe({ KAFKA_TOPIC });
+    consumer.subscribe({ KAFKA_TOPICS[0] });
     ConsumerRunner runner(consumer, 0, 3);
     runner.try_join();

@@ -120,11 +122,8 @@ TEST_CASE("consumer groups", "[handle_base]") {

     MemberAssignmentInformation assignment = member.get_member_assignment();
     CHECK(assignment.get_version() == 0);
-    TopicPartitionList expected_topic_partitions = {
-        { KAFKA_TOPIC, 0 },
-        { KAFKA_TOPIC, 1 },
-        { KAFKA_TOPIC, 2 }
-    };
+    TopicPartitionList expected_topic_partitions;
+    for (int i = 0; i < KAFKA_NUM_PARTITIONS; expected_topic_partitions.emplace_back(KAFKA_TOPICS[0], i++));
     TopicPartitionList topic_partitions = assignment.get_topic_partitions();
     sort(topic_partitions.begin(), topic_partitions.end());
     CHECK(topic_partitions == expected_topic_partitions);

tests/producer_test.cpp
@@ -28,8 +28,6 @@ using std::ref;

 using namespace cppkafka;

-static const string KAFKA_TOPIC = "cppkafka_test1";
-
 static Configuration make_producer_config() {
     Configuration config = {
         { "metadata.broker.list", KAFKA_TEST_INSTANCE },
@@ -93,7 +91,7 @@ TEST_CASE("simple production", "[producer]") {

     // Create a consumer and assign this topic/partition
     Consumer consumer(make_consumer_config());
-    consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) });
+    consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
     ConsumerRunner runner(consumer, 1, 1);

     Configuration config = make_producer_config();
@@ -101,7 +99,7 @@ TEST_CASE("simple production", "[producer]") {
     // Now create a producer and produce a message
     const string payload = "Hello world! 1";
     Producer producer(config);
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     runner.try_join();

     const auto& messages = runner.get_messages();
@@ -109,13 +107,13 @@ TEST_CASE("simple production", "[producer]") {
     const auto& message = messages[0];
     CHECK(message.get_payload() == payload);
     CHECK(!!message.get_key() == false);
-    CHECK(message.get_topic() == KAFKA_TOPIC);
+    CHECK(message.get_topic() == KAFKA_TOPICS[0]);
     CHECK(message.get_partition() == partition);
     CHECK(!!message.get_error() == false);

     int64_t low;
     int64_t high;
-    tie(low, high) = producer.query_offsets({ KAFKA_TOPIC, partition });
+    tie(low, high) = producer.query_offsets({ KAFKA_TOPICS[0], partition });
     CHECK(high > low);
 }

@@ -124,7 +122,7 @@ TEST_CASE("simple production", "[producer]") {
     const string key = "such key";
     const milliseconds timestamp{15};
     Producer producer(config);
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition)
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
                          .key(key)
                          .payload(payload)
                          .timestamp(timestamp));
@@ -135,7 +133,7 @@ TEST_CASE("simple production", "[producer]") {
     const auto& message = messages[0];
     CHECK(message.get_payload() == payload);
     CHECK(message.get_key() == key);
-    CHECK(message.get_topic() == KAFKA_TOPIC);
+    CHECK(message.get_topic() == KAFKA_TOPICS[0]);
     CHECK(message.get_partition() == partition);
     CHECK(!!message.get_error() == false);
     REQUIRE(!!message.get_timestamp() == true);
@@ -188,14 +186,14 @@ TEST_CASE("simple production", "[producer]") {
     topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key,
                                               int32_t partition_count) {
         CHECK(msg_key == key);
-        CHECK(partition_count == 3);
-        CHECK(topic.get_name() == KAFKA_TOPIC);
+        CHECK(partition_count == KAFKA_NUM_PARTITIONS);
+        CHECK(topic.get_name() == KAFKA_TOPICS[0]);
         return 0;
     });
     config.set_default_topic_configuration(topic_config);

     Producer producer(config);
-    producer.produce(MessageBuilder(KAFKA_TOPIC).key(key).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).key(key).payload(payload));
     while (producer.get_out_queue_length() > 0) {
         producer.poll();
     }
@@ -206,7 +204,7 @@ TEST_CASE("simple production", "[producer]") {
     const auto& message = messages[0];
     CHECK(message.get_payload() == payload);
     CHECK(message.get_key() == key);
-    CHECK(message.get_topic() == KAFKA_TOPIC);
+    CHECK(message.get_topic() == KAFKA_TOPICS[0]);
     CHECK(message.get_partition() == partition);
     CHECK(!!message.get_error() == false);
     CHECK(delivery_report_called == true);
@@ -222,15 +220,15 @@ TEST_CASE("simple production", "[producer]") {
     topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key,
                                               int32_t partition_count) {
         CHECK(msg_key == key);
-        CHECK(partition_count == 3);
-        CHECK(topic.get_name() == KAFKA_TOPIC);
+        CHECK(partition_count == KAFKA_NUM_PARTITIONS);
+        CHECK(topic.get_name() == KAFKA_TOPICS[0]);
         callback_called = true;
         return 0;
     });
     config.set_default_topic_configuration(topic_config);
     Producer producer(config);

-    producer.produce(MessageBuilder(KAFKA_TOPIC).key(key).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).key(key).payload(payload));
     producer.poll();
     runner.try_join();

@@ -244,13 +242,12 @@ TEST_CASE("simple production", "[producer]") {

 TEST_CASE("multiple messages", "[producer]") {
     size_t message_count = 10;
-    int partitions = 3;
     set<string> payloads;

     // Create a consumer and subscribe to this topic
     Consumer consumer(make_consumer_config());
-    consumer.subscribe({ KAFKA_TOPIC });
-    ConsumerRunner runner(consumer, message_count, partitions);
+    consumer.subscribe({ KAFKA_TOPICS[0] });
+    ConsumerRunner runner(consumer, message_count, KAFKA_NUM_PARTITIONS);

     // Now create a producer and produce a message
     Producer producer(make_producer_config());
@@ -258,19 +255,19 @@ TEST_CASE("multiple messages", "[producer]") {
     for (size_t i = 0; i < message_count; ++i) {
         const string payload = payload_base + to_string(i);
         payloads.insert(payload);
-        producer.produce(MessageBuilder(KAFKA_TOPIC).payload(payload));
+        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).payload(payload));
     }
     runner.try_join();

     const auto& messages = runner.get_messages();
     REQUIRE(messages.size() == message_count);
     for (const auto& message : messages) {
-        CHECK(message.get_topic() == KAFKA_TOPIC);
+        CHECK(message.get_topic() == KAFKA_TOPICS[0]);
         CHECK(payloads.erase(message.get_payload()) == 1);
         CHECK(!!message.get_error() == false);
         CHECK(!!message.get_key() == false);
         CHECK(message.get_partition() >= 0);
-        CHECK(message.get_partition() < 3);
+        CHECK(message.get_partition() < KAFKA_NUM_PARTITIONS);
     }
 }

@@ -279,22 +276,22 @@ TEST_CASE("buffered producer", "[producer][buffered_producer]") {

     // Create a consumer and assign this topic/partition
     Consumer consumer(make_consumer_config());
-    consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) });
+    consumer.assign({ TopicPartition(KAFKA_TOPICS[0], partition) });
     ConsumerRunner runner(consumer, 3, 1);

     // Now create a buffered producer and produce two messages
     BufferedProducer<string> producer(make_producer_config());
     const string payload = "Hello world! 2";
     const string key = "such key";
-    producer.add_message(MessageBuilder(KAFKA_TOPIC).partition(partition)
+    producer.add_message(MessageBuilder(KAFKA_TOPICS[0]).partition(partition)
                              .key(key)
                              .payload(payload));
-    producer.add_message(producer.make_builder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.add_message(producer.make_builder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     producer.flush();
-    producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     producer.wait_for_acks();
     // Add another one but then clear it
-    producer.add_message(producer.make_builder(KAFKA_TOPIC).partition(partition).payload(payload));
+    producer.add_message(producer.make_builder(KAFKA_TOPICS[0]).partition(partition).payload(payload));
     producer.clear();
     runner.try_join();

@@ -302,7 +299,7 @@ TEST_CASE("buffered producer", "[producer][buffered_producer]") {
     REQUIRE(messages.size() == 3);
     const auto& message = messages[0];
     CHECK(message.get_key() == key);
-    CHECK(message.get_topic() == KAFKA_TOPIC);
+    CHECK(message.get_topic() == KAFKA_TOPICS[0]);
     CHECK(message.get_partition() == partition);
     CHECK(!!message.get_error() == false);

tests/roundrobin_poll_test.cpp (new file, 162 lines)
@@ -0,0 +1,162 @@
#include <vector>
#include <thread>
#include <set>
#include <mutex>
#include <chrono>
#include <iterator>
#include <condition_variable>
#include <catch.hpp>
#include <memory>
#include <iostream>
#include "cppkafka/cppkafka.h"
#include "test_utils.h"

using std::vector;
using std::move;
using std::string;
using std::thread;
using std::set;
using std::mutex;
using std::tie;
using std::condition_variable;
using std::lock_guard;
using std::unique_lock;
using std::unique_ptr;
using std::make_move_iterator;
using std::chrono::seconds;
using std::chrono::milliseconds;
using std::chrono::system_clock;

using namespace cppkafka;

//==================================================================================
// Helper functions
//==================================================================================
static Configuration make_producer_config() {
    Configuration config;
    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
    return config;
}

static Configuration make_consumer_config(const string& group_id = "rr_consumer_test") {
    Configuration config;
    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
    config.set("enable.auto.commit", true);
    config.set("enable.auto.offset.store", true);
    config.set("auto.commit.interval.ms", 100);
    config.set("group.id", group_id);
    return config;
}

static vector<int> make_roundrobin_partition_vector(int total_messages) {
    vector<int> partition_order;
    for (int i = 0, partition = 0; i < total_messages + 1; ++i) {
        if ((i % KAFKA_NUM_PARTITIONS) == 0) {
            partition = 0;
        }
        partition_order.push_back(partition++);
    }
    return partition_order;
}

//========================================================================
// TESTS
//========================================================================

TEST_CASE("serial consumer test", "[roundrobin consumer]") {
    int messages_per_partition = 3;
    int total_messages = KAFKA_NUM_PARTITIONS * messages_per_partition;

    // Create a consumer and subscribe to the topic
    Consumer consumer(make_consumer_config());
    TopicPartitionList partitions;
    for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace_back(KAFKA_TOPICS[0], i++));
    consumer.assign(partitions);

    // Start the runner with the original consumer
    ConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);

    // Produce messages so we stop the consumer
    Producer producer(make_producer_config());
    string payload = "Serial";

    // push 3 messages in each partition
    for (int i = 0; i < total_messages; ++i) {
        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i % KAFKA_NUM_PARTITIONS).payload(payload));
    }
    producer.flush();
    runner.try_join();

    // Check that we have all messages
    REQUIRE(runner.get_messages().size() == total_messages);

    // messages should have sequential identical partition ids in groups of <messages_per_partition>
    int expected_partition;
    for (int i = 0; i < total_messages; ++i) {
        if ((i % messages_per_partition) == 0) {
            expected_partition = runner.get_messages()[i].get_partition();
        }
        REQUIRE(runner.get_messages()[i].get_partition() == expected_partition);
        REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
    }
}

TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") {
    TopicPartitionList assignment;
    int messages_per_partition = 3;
    int total_messages = KAFKA_NUM_PARTITIONS * messages_per_partition;

    // Create a consumer and subscribe to the topic
    PollStrategyAdapter consumer(make_consumer_config());
    TopicPartitionList partitions;
    for (int i = 0; i < KAFKA_NUM_PARTITIONS; partitions.emplace_back(KAFKA_TOPICS[0], i++));
    consumer.assign(partitions);
    consumer.add_polling_strategy(unique_ptr<PollInterface>(new RoundRobinPollStrategy(consumer)));

    PollConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);

    // Produce messages so we stop the consumer
    Producer producer(make_producer_config());
    string payload = "RoundRobin";

    // push 3 messages in each partition
    for (int i = 0; i < total_messages; ++i) {
        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i % KAFKA_NUM_PARTITIONS).payload(payload));
    }
    producer.flush();
    runner.try_join();

    // Check that we have all messages
    REQUIRE(runner.get_messages().size() == total_messages);

    // Check that we have one message from each partition in desired order
    vector<int> partition_order = make_roundrobin_partition_vector(total_messages);

    for (int i = 0; i < total_messages; ++i) {
        REQUIRE(runner.get_messages()[i].get_partition() == partition_order[i + 1]);
        REQUIRE((string)runner.get_messages()[i].get_payload() == payload);
    }

    //============ resume original poll strategy =============//

    // validate that once the round robin strategy is deleted, normal poll works as before
    consumer.delete_polling_strategy();

    ConsumerRunner serial_runner(consumer, total_messages, KAFKA_NUM_PARTITIONS);

    payload = "SerialPolling";
    // push 3 messages in each partition
    for (int i = 0; i < total_messages; ++i) {
        producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i % KAFKA_NUM_PARTITIONS).payload(payload));
    }
    producer.flush();
    serial_runner.try_join();

    // Check that we have all messages
    REQUIRE(serial_runner.get_messages().size() == total_messages);

    for (int i = 0; i < total_messages; ++i) {
        REQUIRE((string)serial_runner.get_messages()[i].get_payload() == payload);
    }
}
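
A note on make_roundrobin_partition_vector above: it emits total_messages + 1 entries cycling through the partition ids, and the test indexes the result with [i + 1], so the expected consumption order starts at partition 1. A worked example for the values used here (3 partitions, 9 messages), written as a hypothetical standalone check rather than part of the test suite:

    // Hypothetical standalone check mirroring make_roundrobin_partition_vector().
    #include <cassert>
    #include <vector>

    int main() {
        const int num_partitions = 3, total_messages = 9;
        std::vector<int> order;
        for (int i = 0, partition = 0; i < total_messages + 1; ++i) {
            if (i % num_partitions == 0) partition = 0;
            order.push_back(partition++);
        }
        // Ten entries: 0 1 2 0 1 2 0 1 2 0. The test reads order[i + 1],
        // so the expected consumption order is 1 2 0 1 2 0 1 2 0.
        assert(order.size() == 10 && order[0] == 0 && order[1] == 1 && order[9] == 0);
    }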

tests/test_main.cpp
@@ -15,6 +15,9 @@ using Catch::TestCaseStats;
 using Catch::Totals;
 using Catch::Session;

+std::vector<std::string> KAFKA_TOPICS = {"cppkafka_test1", "cppkafka_test2"};
+int KAFKA_NUM_PARTITIONS = 3;
+
 namespace cppkafka {

 class InstantTestReporter : public ConsoleReporter {

tests/test_utils.cpp (deleted, 91 lines)
@@ -1,91 +0,0 @@
#include <mutex>
#include <chrono>
#include <condition_variable>
#include "test_utils.h"
#include "cppkafka/utils/consumer_dispatcher.h"

using std::vector;
using std::move;
using std::thread;
using std::mutex;
using std::lock_guard;
using std::unique_lock;
using std::condition_variable;
using std::chrono::system_clock;
using std::chrono::milliseconds;
using std::chrono::seconds;

using cppkafka::Consumer;
using cppkafka::ConsumerDispatcher;
using cppkafka::Message;
using cppkafka::MessageList;
using cppkafka::TopicPartition;

ConsumerRunner::ConsumerRunner(Consumer& consumer, size_t expected, size_t partitions)
: consumer_(consumer) {
    bool booted = false;
    mutex mtx;
    condition_variable cond;
    thread_ = thread([&, expected, partitions]() {
        consumer_.set_timeout(milliseconds(500));
        size_t number_eofs = 0;
        auto start = system_clock::now();
        ConsumerDispatcher dispatcher(consumer_);
        dispatcher.run(
            // Message callback
            [&](Message msg) {
                if (number_eofs == partitions) {
                    messages_.push_back(move(msg));
                }
            },
            // EOF callback
            [&](ConsumerDispatcher::EndOfFile, const TopicPartition& topic_partition) {
                if (number_eofs != partitions) {
                    number_eofs++;
                    if (number_eofs == partitions) {
                        lock_guard<mutex> _(mtx);
                        booted = true;
                        cond.notify_one();
                    }
                }
            },
            // Every time there's any event callback
            [&](ConsumerDispatcher::Event) {
                if (expected > 0 && messages_.size() == expected) {
                    dispatcher.stop();
                }
                if (expected == 0 && number_eofs >= partitions) {
                    dispatcher.stop();
                }
                if (system_clock::now() - start >= seconds(20)) {
                    dispatcher.stop();
                }
            }
        );
        if (number_eofs < partitions) {
            lock_guard<mutex> _(mtx);
            booted = true;
            cond.notify_one();
        }
    });

    unique_lock<mutex> lock(mtx);
    while (!booted) {
        cond.wait(lock);
    }
}

ConsumerRunner::~ConsumerRunner() {
    try_join();
}

const MessageList& ConsumerRunner::get_messages() const {
    return messages_;
}

void ConsumerRunner::try_join() {
    if (thread_.joinable()) {
        thread_.join();
    }
}

tests/test_utils.h
@@ -4,21 +4,63 @@
 #include <thread>
 #include <vector>
 #include "cppkafka/consumer.h"
+#include "cppkafka/utils/roundrobin_poll_strategy.h"
+#include "cppkafka/utils/consumer_dispatcher.h"

-class ConsumerRunner {
+extern const std::vector<std::string> KAFKA_TOPICS;
+extern const int KAFKA_NUM_PARTITIONS;
+
+using namespace cppkafka;
+
+//==================================================================================
+// BasicConsumerRunner
+//==================================================================================
+template <typename ConsumerType>
+class BasicConsumerRunner {
 public:
-    ConsumerRunner(cppkafka::Consumer& consumer, size_t expected, size_t partitions);
-    ConsumerRunner(const ConsumerRunner&) = delete;
-    ConsumerRunner& operator=(const ConsumerRunner&) = delete;
-    ~ConsumerRunner();
+    BasicConsumerRunner(ConsumerType& consumer,
+                        size_t expected,
+                        size_t partitions);
+    BasicConsumerRunner(const BasicConsumerRunner&) = delete;
+    BasicConsumerRunner& operator=(const BasicConsumerRunner&) = delete;
+    ~BasicConsumerRunner();

     const std::vector<cppkafka::Message>& get_messages() const;

     void try_join();
 private:
-    cppkafka::Consumer& consumer_;
+    ConsumerType& consumer_;
     std::thread thread_;
     std::vector<cppkafka::Message> messages_;
 };

+//==================================================================================
+// PollStrategyAdapter
+//==================================================================================
+/**
+ * \brief Specific implementation which can be used with other
+ *        util classes such as BasicConsumerDispatcher.
+ */
+class PollStrategyAdapter : public Consumer
+{
+public:
+    PollStrategyAdapter(Configuration config);
+    void add_polling_strategy(std::unique_ptr<PollInterface> poll_strategy);
+    void delete_polling_strategy();
+    Message poll();
+    Message poll(std::chrono::milliseconds timeout);
+    MessageList poll_batch(size_t max_batch_size);
+    MessageList poll_batch(size_t max_batch_size,
+                           std::chrono::milliseconds timeout);
+    void set_timeout(std::chrono::milliseconds timeout);
+    std::chrono::milliseconds get_timeout();
+private:
+    std::unique_ptr<PollInterface> strategy_;
+};
+
+using PollConsumerRunner = BasicConsumerRunner<PollStrategyAdapter>;
+using ConsumerRunner = BasicConsumerRunner<Consumer>;
+
+#include "test_utils_impl.h"

 #endif // CPPKAFKA_TEST_UTILS_H
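
PollStrategyAdapter (declared above) forwards to whatever PollInterface implementation it currently holds, so strategies other than RoundRobinPollStrategy can be plugged in the same way. A minimal sketch of a custom strategy, assuming PollInterface declares exactly the operations the adapter forwards; the authoritative set of virtuals lives in cppkafka's poll_interface.h and may include more:

    // Hypothetical pass-through strategy: delegates every call straight to the
    // wrapped consumer. This only illustrates the shape of a PollInterface
    // implementation and is not part of the commit.
    class PassThroughPollStrategy : public PollInterface {
    public:
        explicit PassThroughPollStrategy(Consumer& consumer) : consumer_(consumer) {}
        void set_timeout(std::chrono::milliseconds timeout) { consumer_.set_timeout(timeout); }
        std::chrono::milliseconds get_timeout() { return consumer_.get_timeout(); }
        Message poll() { return consumer_.poll(); }
        Message poll(std::chrono::milliseconds timeout) { return consumer_.poll(timeout); }
        MessageList poll_batch(size_t max_batch_size) { return consumer_.poll_batch(max_batch_size); }
        MessageList poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout) {
            return consumer_.poll_batch(max_batch_size, timeout);
        }
    private:
        Consumer& consumer_;
    };

    // Usage would mirror the roundrobin test:
    // adapter.add_polling_strategy(std::unique_ptr<PollInterface>(new PassThroughPollStrategy(adapter)));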

tests/test_utils_impl.h (new file, 172 lines)
@@ -0,0 +1,172 @@
#include <mutex>
#include <chrono>
#include <condition_variable>
#include "test_utils.h"
#include "cppkafka/utils/consumer_dispatcher.h"

using std::vector;
using std::move;
using std::thread;
using std::mutex;
using std::lock_guard;
using std::unique_lock;
using std::condition_variable;
using std::chrono::system_clock;
using std::chrono::milliseconds;
using std::chrono::seconds;

using cppkafka::Consumer;
using cppkafka::BasicConsumerDispatcher;

using cppkafka::Message;
using cppkafka::MessageList;
using cppkafka::TopicPartition;

//==================================================================================
// BasicConsumerRunner
//==================================================================================
template <typename ConsumerType>
BasicConsumerRunner<ConsumerType>::BasicConsumerRunner(ConsumerType& consumer,
                                                       size_t expected,
                                                       size_t partitions)
: consumer_(consumer) {
    bool booted = false;
    mutex mtx;
    condition_variable cond;
    thread_ = thread([&, expected, partitions]() {
        consumer_.set_timeout(milliseconds(500));
        size_t number_eofs = 0;
        auto start = system_clock::now();
        BasicConsumerDispatcher<ConsumerType> dispatcher(consumer_);
        dispatcher.run(
            // Message callback
            [&](Message msg) {
                if (number_eofs == partitions) {
                    messages_.push_back(move(msg));
                }
            },
            // EOF callback
            [&](typename BasicConsumerDispatcher<ConsumerType>::EndOfFile, const TopicPartition& topic_partition) {
                if (number_eofs != partitions) {
                    number_eofs++;
                    if (number_eofs == partitions) {
                        lock_guard<mutex> _(mtx);
                        booted = true;
                        cond.notify_one();
                    }
                }
            },
            // Every time there's any event callback
            [&](typename BasicConsumerDispatcher<ConsumerType>::Event) {
                if (expected > 0 && messages_.size() == expected) {
                    dispatcher.stop();
                }
                if (expected == 0 && number_eofs >= partitions) {
                    dispatcher.stop();
                }
                if (system_clock::now() - start >= seconds(20)) {
                    dispatcher.stop();
                }
            }
        );
        // dispatcher has stopped
        if (number_eofs < partitions) {
            lock_guard<mutex> _(mtx);
            booted = true;
            cond.notify_one();
        }
    });

    unique_lock<mutex> lock(mtx);
    while (!booted) {
        cond.wait(lock);
    }
}

template <typename ConsumerType>
BasicConsumerRunner<ConsumerType>::~BasicConsumerRunner() {
    try_join();
}

template <typename ConsumerType>
const MessageList& BasicConsumerRunner<ConsumerType>::get_messages() const {
    return messages_;
}

template <typename ConsumerType>
void BasicConsumerRunner<ConsumerType>::try_join() {
    if (thread_.joinable()) {
        thread_.join();
    }
}

//==================================================================================
// PollStrategyAdapter
//==================================================================================
inline
PollStrategyAdapter::PollStrategyAdapter(Configuration config)
: Consumer(config) {
}

inline
void PollStrategyAdapter::add_polling_strategy(std::unique_ptr<PollInterface> poll_strategy) {
    strategy_ = std::move(poll_strategy);
}

inline
void PollStrategyAdapter::delete_polling_strategy() {
    strategy_.reset();
}

inline
Message PollStrategyAdapter::poll() {
    if (strategy_) {
        return strategy_->poll();
    }
    return Consumer::poll();
}

inline
Message PollStrategyAdapter::poll(milliseconds timeout) {
    if (strategy_) {
        return strategy_->poll(timeout);
    }
    return Consumer::poll(timeout);
}

inline
MessageList PollStrategyAdapter::poll_batch(size_t max_batch_size) {
    if (strategy_) {
        return strategy_->poll_batch(max_batch_size);
    }
    return Consumer::poll_batch(max_batch_size);
}

inline
MessageList PollStrategyAdapter::poll_batch(size_t max_batch_size,
                                            milliseconds timeout) {
    if (strategy_) {
        return strategy_->poll_batch(max_batch_size, timeout);
    }
    return Consumer::poll_batch(max_batch_size, timeout);
}

inline
void PollStrategyAdapter::set_timeout(milliseconds timeout) {
    if (strategy_) {
        strategy_->set_timeout(timeout);
    }
    else {
        Consumer::set_timeout(timeout);
    }
}

inline
milliseconds PollStrategyAdapter::get_timeout() {
    if (strategy_) {
        return strategy_->get_timeout();
    }
    return Consumer::get_timeout();
}