From cb2c8877d865dbc311e110fad52b5bcbfdb2c703 Mon Sep 17 00:00:00 2001
From: Matias Fontanini
Date: Tue, 24 Apr 2018 03:20:48 +0100
Subject: [PATCH] Move tests to use catch instead of googletest (#56)

* Port buffer test to use Catch2
* Move compacted topic processor test to Catch2
* Move configuration tests to Catch2
* Rename configuration test cases
* Move topic partition list test to Catch2
* Move handle base tests to Catch2
* Move producer tests to Catch2
* Move consumer tests to Catch2
* Use CHECK on tests when appropriate
* Remove googletest
* Show tests' progress as they run
* Update message when Catch2 is not checked out
* Remove references to googletest
* Run cppkafka_tests manually on travis
* Print amount of time taken by each test case
---
 .gitmodules                              |   6 +-
 .travis.yml                              |   2 +-
 CMakeLists.txt                           |  24 +-
 tests/CMakeLists.txt                     |  40 +--
 tests/buffer_test.cpp                    |  94 +++----
 tests/compacted_topic_processor_test.cpp |  51 ++--
 tests/configuration_test.cpp             | 156 +++++------
 tests/consumer_test.cpp                  |  96 ++++---
 tests/kafka_handle_base_test.cpp         | 158 +++++------
 tests/producer_test.cpp                  | 330 +++++++++++------------
 tests/test_main.cpp                      |  74 +++++
 tests/topic_partition_list_test.cpp      |  25 +-
 third_party/Catch2                       |   1 +
 third_party/googletest                   |   1 -
 14 files changed, 528 insertions(+), 530 deletions(-)
 create mode 100644 tests/test_main.cpp
 create mode 160000 third_party/Catch2
 delete mode 160000 third_party/googletest

diff --git a/.gitmodules b/.gitmodules
index 5a4e85a..61932e2 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,3 @@
-[submodule "third_party/googletest"]
-	path = third_party/googletest
-	url = https://github.com/google/googletest.git
+[submodule "third_party/Catch2"]
+	path = third_party/Catch2
+	url = https://github.com/catchorg/Catch2.git
diff --git a/.travis.yml b/.travis.yml
index 5fd1597..1196aa0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -40,4 +40,4 @@ script:
   - cmake .. -DRDKAFKA_ROOT_DIR=../librdkafka/install/ -DKAFKA_TEST_INSTANCE=localhost:9092
   - make examples
   - make tests
-  - ctest -V
+  - ./tests/cppkafka_tests
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d79fe77..30690fe 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -62,30 +62,14 @@ if(DOXYGEN_FOUND)
 endif(DOXYGEN_FOUND)
 
 if(NOT CPPKAFKA_DISABLE_TESTS)
-    set(GOOGLETEST_ROOT ${CMAKE_SOURCE_DIR}/third_party/googletest)
-    if(EXISTS "${GOOGLETEST_ROOT}/CMakeLists.txt")
-        set(GOOGLETEST_INCLUDE ${GOOGLETEST_ROOT}/googletest/include)
-        set(GOOGLETEST_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/googletest)
-        set(GOOGLETEST_LIBRARY ${GOOGLETEST_BINARY_DIR}/googletest)
-
-        include(ExternalProject)
-
-        ExternalProject_Add(
-            googletest
-            DOWNLOAD_COMMAND ""
-            SOURCE_DIR ${GOOGLETEST_ROOT}
-            BINARY_DIR ${GOOGLETEST_BINARY_DIR}
-            CMAKE_CACHE_ARGS "-DBUILD_GTEST:bool=ON" "-DBUILD_GMOCK:bool=OFF"
-                             "-Dgtest_force_shared_crt:bool=ON"
-            INSTALL_COMMAND ""
-        )
+    set(CATCH_ROOT ${CMAKE_SOURCE_DIR}/third_party/Catch2)
+    if(EXISTS ${CATCH_ROOT}/CMakeLists.txt)
+        set(CATCH_INCLUDE ${CATCH_ROOT}/single_include)
 
         enable_testing()
         add_subdirectory(tests)
-
-        # Make sure we build googletest before anything else
-        add_dependencies(cppkafka googletest)
     else()
-        message(STATUS "Disabling tests because submodule googletest isn't pulled out")
+        message(STATUS "Disabling tests because submodule Catch2 isn't checked out")
    endif()
 endif()
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 24e9143..317f7cd 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,31 +1,31 @@
-include_directories(${GOOGLETEST_INCLUDE})
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)
+include_directories(SYSTEM ${CATCH_INCLUDE})
 include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${RDKAFKA_INCLUDE_DIR})
 
-link_directories(${GOOGLETEST_LIBRARY})
-link_libraries(cppkafka ${RDKAFKA_LIBRARY} gtest gtest_main pthread)
-
 set(KAFKA_TEST_INSTANCE "kafka-vm:9092"
     CACHE STRING "The kafka instance to which to connect to run tests")
 
 add_custom_target(tests)
 
-macro(create_test test_name)
-    add_executable(${test_name}_test EXCLUDE_FROM_ALL "${test_name}_test.cpp")
-    add_test(${test_name} ${test_name}_test)
-    add_dependencies(tests ${test_name}_test)
-    add_dependencies(${test_name}_test cppkafka)
-    target_link_libraries(${test_name}_test cppkafka-test)
-endmacro()
-
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 
 add_library(cppkafka-test EXCLUDE_FROM_ALL test_utils.cpp)
-add_dependencies(cppkafka-test cppkafka)
+target_link_libraries(cppkafka-test cppkafka ${RDKAFKA_LIBRARY} pthread)
 
 add_definitions("-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\"")
 
-create_test(consumer)
-create_test(producer)
-create_test(kafka_handle_base)
-create_test(topic_partition_list)
-create_test(configuration)
-create_test(buffer)
-create_test(compacted_topic_processor)
+
+add_executable(
+    cppkafka_tests
+    EXCLUDE_FROM_ALL
+    buffer_test.cpp
+    compacted_topic_processor_test.cpp
+    configuration_test.cpp
+    topic_partition_list_test.cpp
+    kafka_handle_base_test.cpp
+    producer_test.cpp
+    consumer_test.cpp
+
+    # Main file
+    test_main.cpp
+)
+target_link_libraries(cppkafka_tests cppkafka-test)
+add_dependencies(tests cppkafka_tests)
+add_test(cppkafka cppkafka_tests)
diff --git a/tests/buffer_test.cpp b/tests/buffer_test.cpp
index a7fa69d..a709faf 100644
--- a/tests/buffer_test.cpp
+++ b/tests/buffer_test.cpp
@@ -1,7 +1,7 @@
 #include <string>
 #include <vector>
 #include <sstream>
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/buffer.h"
 
 using std::string;
@@ -10,69 +10,59 @@ using std::ostringstream;
 
 using namespace cppkafka;
 
-class BufferTest : public testing::Test {
-public:
-
-};
+TEST_CASE("conversions", "[buffer]") {
+    const string data = "Hello world!";
+    const Buffer buffer(data);
+    const Buffer empty_buffer;
 
-TEST_F(BufferTest, OperatorBool) {
-    string data = "Hello world!";
-    Buffer buffer1(data);
-    Buffer buffer2;
-    EXPECT_TRUE(buffer1);
-    EXPECT_FALSE(buffer2);
+    SECTION("bool conversion") {
+        CHECK(!!buffer == true);
+        CHECK(!!empty_buffer == false);
+    }
+
+    SECTION("string conversion") {
+        CHECK(static_cast<string>(buffer) == data);
+        CHECK(static_cast<string>(empty_buffer).empty());
+    }
+
+    SECTION("vector conversion") {
+        const vector<char> buffer_as_vector = buffer;
+        CHECK(string(buffer_as_vector.begin(), buffer_as_vector.end()) == data);
+    }
 }
 
-TEST_F(BufferTest, StringConversion) {
-    string data = "Hello world!";
-    Buffer buffer(data);
-    string buffer_as_string = buffer;
-    EXPECT_EQ(data, buffer_as_string);
-}
-
-TEST_F(BufferTest, StringConversionOnEmptyBuffer) {
-    Buffer buffer;
-    EXPECT_EQ("", static_cast<string>(buffer));
-}
-
-TEST_F(BufferTest, VectorConversion) {
-    string data = "Hello world!";
-    Buffer buffer(data);
-    vector<char> buffer_as_vector = buffer;
-    EXPECT_EQ(data, string(buffer_as_vector.begin(), buffer_as_vector.end()));
-}
-
-TEST_F(BufferTest, VectorConstruction) {
+TEST_CASE("construction", "[buffer]") {
     const string str_data = "Hello world!";
     const vector<char> data(str_data.begin(), str_data.end());
-    Buffer buffer(data);
-    EXPECT_EQ(str_data, buffer);
+    const Buffer buffer(data);
+    CHECK(str_data == buffer);
 }
 
-TEST_F(BufferTest, Equality) {
-    string data = "Hello world!";
-    Buffer buffer1(data);
-    Buffer buffer2(data);
-    EXPECT_EQ(buffer1, buffer2);
+TEST_CASE("comparison", "[buffer]") {
+    const string data = "Hello world!";
+    const Buffer buffer1(data);
+    const Buffer buffer2(data);
+    const Buffer empty_buffer;
+
+    SECTION("equality") {
+        CHECK(buffer1 == buffer2);
+        CHECK(buffer2 == buffer1);
+    }
+
+    SECTION("inequality") {
+        CHECK(buffer1 != empty_buffer);
+        CHECK(empty_buffer != buffer1);
+    }
 }
 
-TEST_F(BufferTest, InEquality) {
-    string data1 = "Hello world!";
-    string data2 = "Hello worldz";
-    Buffer buffer1(data1);
-    Buffer buffer2(data2);
-
-    EXPECT_NE(buffer1, buffer2);
-}
-
-TEST_F(BufferTest, OutputOperator) {
-    string data = "Hello \x7fwor\x03ld!";
-    string pretty_string = "Hello \\x7fwor\\x03ld!";
-    Buffer buffer(data);
+TEST_CASE("stream extraction", "[buffer]") {
+    const string data = "Hello \x7fwor\x03ld!";
+    const string pretty_string = "Hello \\x7fwor\\x03ld!";
+    const Buffer buffer(data);
 
     ostringstream output;
     output << buffer;
-    EXPECT_EQ(pretty_string, output.str());
+    CHECK(output.str() == pretty_string);
 }
diff --git a/tests/compacted_topic_processor_test.cpp b/tests/compacted_topic_processor_test.cpp
index 1a8220a..f72f580 100644
--- a/tests/compacted_topic_processor_test.cpp
+++ b/tests/compacted_topic_processor_test.cpp
@@ -4,7 +4,7 @@
 #include
 #include
 #include
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/producer.h"
 #include "cppkafka/consumer.h"
 #include "cppkafka/utils/compacted_topic_processor.h"
@@ -29,28 +29,23 @@ using std::chrono::milliseconds;
 
 using namespace cppkafka;
 
-class CompactedTopicProcessorTest : public testing::Test {
-public:
-    static const string KAFKA_TOPIC;
+static const string KAFKA_TOPIC = "cppkafka_test1";
 
-    Configuration make_producer_config() {
-        Configuration config;
-        config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
-        return config;
-    }
+static Configuration make_producer_config() {
+    Configuration config;
+    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    return config;
+}
 
-    Configuration make_consumer_config() {
-        Configuration config;
-        config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
-        config.set("enable.auto.commit", false);
-        config.set("group.id", "compacted_topic_test");
-        return config;
-    }
-};
+static Configuration make_consumer_config() {
+    Configuration config;
+    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    config.set("enable.auto.commit", false);
+    config.set("group.id", "compacted_topic_test");
+    return config;
+}
 
-const string CompactedTopicProcessorTest::KAFKA_TOPIC = "cppkafka_test1";
-
-TEST_F(CompactedTopicProcessorTest, Consume) {
+TEST_CASE("consumption", "[consumer][compacted]") {
     Consumer consumer(make_consumer_config());
     // We'll use ints as the key, strings as the value
     using CompactedConsumer = CompactedTopicProcessor<int, string>;
@@ -101,27 +96,27 @@
     size_t set_count = 0;
     size_t delete_count = 0;
-    ASSERT_FALSE(events.empty());
+    CHECK(events.empty() == false);
     for (const Event& event : events) {
         switch (event.get_type()) {
             case Event::SET_ELEMENT:
             {
                 auto iter = elements.find(to_string(event.get_key()));
-                ASSERT_NE(iter, elements.end());
-                EXPECT_EQ(iter->second.value, event.get_value());
-                EXPECT_EQ(iter->second.partition, event.get_partition());
+                REQUIRE(iter != elements.end());
+                CHECK(iter->second.value == event.get_value());
+                CHECK(iter->second.partition == event.get_partition());
                 set_count++;
             }
             break;
             case Event::DELETE_ELEMENT:
-                EXPECT_EQ(0, event.get_partition());
-                EXPECT_EQ(42, event.get_key());
+                CHECK(event.get_partition() == 0);
+                CHECK(event.get_key() == 42);
                 delete_count++;
                 break;
             default:
                 break;
         }
     }
-    EXPECT_EQ(2, set_count);
-    EXPECT_EQ(1, delete_count);
+    CHECK(set_count == 2);
+    CHECK(delete_count == 1);
 }
diff --git a/tests/configuration_test.cpp b/tests/configuration_test.cpp
index 19ff60c..1e23057 100644
--- a/tests/configuration_test.cpp
+++ b/tests/configuration_test.cpp
@@ -1,4 +1,4 @@
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/configuration.h"
 #include "cppkafka/exceptions.h"
 
@@ -6,86 +6,88 @@ using namespace cppkafka;
 
 using std::string;
 
-class ConfigurationTest : public testing::Test {
-public:
-
-};
-
-TEST_F(ConfigurationTest, GetSetConfig) {
+TEST_CASE("normal config", "[config]") {
     Configuration config;
-    config.set("group.id", "foo").set("metadata.broker.list", "asd:9092");
-    EXPECT_EQ("foo", config.get("group.id"));
-    EXPECT_EQ("asd:9092", config.get("metadata.broker.list"));
-    EXPECT_EQ("foo", config.get("group.id"));
-    EXPECT_THROW(config.get("asd"), ConfigOptionNotFound);
+    SECTION("get existing") {
+        config.set("group.id", "foo").set("metadata.broker.list", "asd:9092");
+        CHECK(config.get("group.id") == "foo");
+        CHECK(config.get("metadata.broker.list") == "asd:9092");
+        CHECK(config.get("group.id") == "foo");
+    }
+
+    SECTION("get non existent") {
+        REQUIRE_THROWS_AS(config.get("asd"), ConfigOptionNotFound);
+    }
+
+    SECTION("set overloads") {
+        config.set("enable.auto.commit", true);
+        config.set("auto.commit.interval.ms", 100);
+
+        CHECK(config.get("enable.auto.commit") == "true");
+        CHECK(config.get("auto.commit.interval.ms") == "100");
+        CHECK(config.get<int>("auto.commit.interval.ms") == 100);
+    }
+
+    SECTION("set multiple") {
+        config = {
+            { "group.id", "foo" },
+            { "metadata.broker.list", string("asd:9092") },
+            { "message.max.bytes", 2000 },
+            { "topic.metadata.refresh.sparse", true }
+        };
+
+        CHECK(config.get("group.id") == "foo");
CHECK(config.get("metadata.broker.list") == "asd:9092"); + CHECK(config.get("message.max.bytes") == 2000); + CHECK(config.get("topic.metadata.refresh.sparse") == true); + } + + SECTION("default topic config") { + config.set_default_topic_configuration({{ "request.required.acks", 2 }}); + + const auto& topic_config = config.get_default_topic_configuration(); + CHECK(!!topic_config == true); + CHECK(topic_config->get("request.required.acks") == 2); + } + + SECTION("get all") { + config.set("enable.auto.commit", false); + auto option_map = config.get_all(); + CHECK(option_map.at("enable.auto.commit") == "false"); + } } -TEST_F(ConfigurationTest, GetSetTopicConfig) { +TEST_CASE("topic config", "[config]") { TopicConfiguration config; - config.set("auto.commit.enable", true).set("offset.store.method", "broker"); - EXPECT_EQ("true", config.get("auto.commit.enable")); - EXPECT_EQ("broker", config.get("offset.store.method")); - EXPECT_EQ(true, config.get("auto.commit.enable")); - EXPECT_THROW(config.get("asd"), ConfigOptionNotFound); -} - -TEST_F(ConfigurationTest, ConfigSetMultiple) { - Configuration config = { - { "group.id", "foo" }, - { "metadata.broker.list", string("asd:9092") }, - { "message.max.bytes", 2000 }, - { "topic.metadata.refresh.sparse", true } - }; - EXPECT_EQ("foo", config.get("group.id")); - EXPECT_EQ("asd:9092", config.get("metadata.broker.list")); - EXPECT_EQ(2000, config.get("message.max.bytes")); - EXPECT_EQ(true, config.get("topic.metadata.refresh.sparse")); -} - -TEST_F(ConfigurationTest, TopicConfigSetMultiple) { - TopicConfiguration config = { - { "compression.codec", "none" }, - { "offset.store.method", string("file") }, - { "request.required.acks", 2 }, - { "produce.offset.report", true } - }; - EXPECT_EQ("none", config.get("compression.codec")); - EXPECT_EQ("file", config.get("offset.store.method")); - EXPECT_EQ(2, config.get("request.required.acks")); - EXPECT_EQ(true, config.get("produce.offset.report")); -} - -TEST_F(ConfigurationTest, SetDefaultTopicConfiguration) { - Configuration config; - config.set_default_topic_configuration({{ "request.required.acks", 2 }}); - - const auto& topic_config = config.get_default_topic_configuration(); - EXPECT_TRUE(topic_config); - EXPECT_EQ(2, topic_config->get("request.required.acks")); -} - -TEST_F(ConfigurationTest, SetOverloads) { - Configuration config; - config.set("enable.auto.commit", true); - config.set("auto.commit.interval.ms", 100); - - EXPECT_EQ("true", config.get("enable.auto.commit")); - EXPECT_EQ("100", config.get("auto.commit.interval.ms")); - EXPECT_EQ(100, config.get("auto.commit.interval.ms")); -} - -TEST_F(ConfigurationTest, GetAll) { - Configuration config; - config.set("enable.auto.commit", false); - auto option_map = config.get_all(); - EXPECT_EQ("false", option_map.at("enable.auto.commit")); -} - -TEST_F(ConfigurationTest, TopicGetAll) { - TopicConfiguration config; - config.set("auto.commit.enable", false); - auto option_map = config.get_all(); - EXPECT_EQ("false", option_map.at("auto.commit.enable")); + SECTION("get existing") { + config.set("auto.commit.enable", true).set("offset.store.method", "broker"); + CHECK(config.get("auto.commit.enable") == "true"); + CHECK(config.get("offset.store.method") == "broker"); + CHECK(config.get("auto.commit.enable") == true); + } + + SECTION("get non existent") { + REQUIRE_THROWS_AS(config.get("asd"), ConfigOptionNotFound); + } + + SECTION("set multiple") { + config = { + { "compression.codec", "none" }, + { "offset.store.method", string("file") }, + { 
"request.required.acks", 2 }, + { "produce.offset.report", true } + }; + CHECK(config.get("compression.codec") == "none"); + CHECK(config.get("offset.store.method") == "file"); + CHECK(config.get("request.required.acks") == 2); + CHECK(config.get("produce.offset.report") == true); + } + + SECTION("get all") { + config.set("auto.commit.enable", false); + auto option_map = config.get_all(); + CHECK(option_map.at("auto.commit.enable") == "false"); + } } diff --git a/tests/consumer_test.cpp b/tests/consumer_test.cpp index a41dd8c..87592ae 100644 --- a/tests/consumer_test.cpp +++ b/tests/consumer_test.cpp @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include "cppkafka/consumer.h" #include "cppkafka/producer.h" #include "cppkafka/utils/consumer_dispatcher.h" @@ -29,28 +29,23 @@ using std::chrono::system_clock; using namespace cppkafka; -class ConsumerTest : public testing::Test { -public: - static const string KAFKA_TOPIC; +const string KAFKA_TOPIC = "cppkafka_test1"; - Configuration make_producer_config() { - Configuration config; - config.set("metadata.broker.list", KAFKA_TEST_INSTANCE); - return config; - } +static Configuration make_producer_config() { + Configuration config; + config.set("metadata.broker.list", KAFKA_TEST_INSTANCE); + return config; +} - Configuration make_consumer_config(const string& group_id = "consumer_test") { - Configuration config; - config.set("metadata.broker.list", KAFKA_TEST_INSTANCE); - config.set("enable.auto.commit", false); - config.set("group.id", group_id); - return config; - } -}; +static Configuration make_consumer_config(const string& group_id = "consumer_test") { + Configuration config; + config.set("metadata.broker.list", KAFKA_TEST_INSTANCE); + config.set("enable.auto.commit", false); + config.set("group.id", group_id); + return config; +} -const string ConsumerTest::KAFKA_TOPIC = "cppkafka_test1"; - -TEST_F(ConsumerTest, AssignmentCallback) { +TEST_CASE("message consumption", "[consumer]") { TopicPartitionList assignment; int partition = 0; @@ -69,27 +64,26 @@ TEST_F(ConsumerTest, AssignmentCallback) { runner.try_join(); // All 3 partitions should be ours - EXPECT_EQ(3, assignment.size()); + REQUIRE(assignment.size() == 3); set partitions = { 0, 1, 2 }; for (const auto& topic_partition : assignment) { - EXPECT_EQ(KAFKA_TOPIC, topic_partition.get_topic()); - EXPECT_TRUE(partitions.erase(topic_partition.get_partition())); + CHECK(topic_partition.get_topic() == KAFKA_TOPIC); + CHECK(partitions.erase(topic_partition.get_partition()) == true); } - EXPECT_EQ(1, runner.get_messages().size()); - - EXPECT_EQ(vector{ KAFKA_TOPIC }, consumer.get_subscription()); + REQUIRE(runner.get_messages().size() == 1); + CHECK(consumer.get_subscription() == vector{ KAFKA_TOPIC }); assignment = consumer.get_assignment(); - EXPECT_EQ(3, assignment.size()); + CHECK(assignment.size() == 3); int64_t low; int64_t high; tie(low, high) = consumer.get_offsets({ KAFKA_TOPIC, partition }); - EXPECT_GT(high, low); - EXPECT_EQ(high, runner.get_messages().back().get_offset() + 1); + CHECK(high > low); + CHECK(runner.get_messages().back().get_offset() + 1 == high); } -TEST_F(ConsumerTest, Rebalance) { +TEST_CASE("consumer rebalance", "[consumer]") { TopicPartitionList assignment1; TopicPartitionList assignment2; bool revocation_called = false; @@ -114,7 +108,7 @@ TEST_F(ConsumerTest, Rebalance) { consumer2.subscribe({ KAFKA_TOPIC }); ConsumerRunner runner2(consumer2, 1, 1); - EXPECT_TRUE(revocation_called); + CHECK(revocation_called == true); // Produce a message just 
     Producer producer(make_producer_config());
@@ -124,20 +118,20 @@
     runner2.try_join();
 
     // All 3 partitions should be assigned
-    EXPECT_EQ(3, assignment1.size() + assignment2.size());
+    CHECK(assignment1.size() + assignment2.size() == 3);
     set<int> partitions = { 0, 1, 2 };
     for (const auto& topic_partition : assignment1) {
-        EXPECT_EQ(KAFKA_TOPIC, topic_partition.get_topic());
-        EXPECT_TRUE(partitions.erase(topic_partition.get_partition()));
+        CHECK(topic_partition.get_topic() == KAFKA_TOPIC);
+        CHECK(partitions.erase(topic_partition.get_partition()) == true);
     }
     for (const auto& topic_partition : assignment2) {
-        EXPECT_EQ(KAFKA_TOPIC, topic_partition.get_topic());
-        EXPECT_TRUE(partitions.erase(topic_partition.get_partition()));
+        CHECK(topic_partition.get_topic() == KAFKA_TOPIC);
+        CHECK(partitions.erase(topic_partition.get_partition()) == true);
     }
-    EXPECT_EQ(1, runner1.get_messages().size() + runner2.get_messages().size());
+    CHECK(runner1.get_messages().size() + runner2.get_messages().size() == 1);
 }
 
-TEST_F(ConsumerTest, OffsetCommit) {
+TEST_CASE("consumer offset commit", "[consumer]") {
     int partition = 0;
     int64_t message_offset = 0;
     bool offset_commit_called = false;
@@ -147,11 +141,11 @@
     config.set_offset_commit_callback([&](Consumer&, Error error,
                                           const TopicPartitionList& topic_partitions) {
         offset_commit_called = true;
-        EXPECT_FALSE(error);
-        ASSERT_EQ(1, topic_partitions.size());
-        EXPECT_EQ(KAFKA_TOPIC, topic_partitions[0].get_topic());
-        EXPECT_EQ(0, topic_partitions[0].get_partition());
-        EXPECT_EQ(message_offset + 1, topic_partitions[0].get_offset());
+        CHECK(!!error == false);
+        REQUIRE(topic_partitions.size() == 1);
+        CHECK(topic_partitions[0].get_topic() == KAFKA_TOPIC);
+        CHECK(topic_partitions[0].get_partition() == 0);
+        CHECK(topic_partitions[0].get_offset() == message_offset + 1);
     });
     Consumer consumer(config);
     consumer.assign({ { KAFKA_TOPIC, 0 } });
@@ -163,17 +157,17 @@
     producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload));
     runner.try_join();
 
-    ASSERT_EQ(1, runner.get_messages().size());
+    REQUIRE(runner.get_messages().size() == 1);
     const Message& msg = runner.get_messages()[0];
     message_offset = msg.get_offset();
     consumer.commit(msg);
     for (size_t i = 0; i < 3 && !offset_commit_called; ++i) {
         consumer.poll();
     }
-    EXPECT_TRUE(offset_commit_called);
+    CHECK(offset_commit_called == true);
 }
 
-TEST_F(ConsumerTest, Throttle) {
+TEST_CASE("consumer throttle", "[consumer]") {
     int partition = 0;
 
     // Create a consumer and subscribe to the topic
@@ -210,10 +204,10 @@
         }
     );
 
-    EXPECT_EQ(3, callback_executed_count);
+    CHECK(callback_executed_count == 3);
 }
 
-TEST_F(ConsumerTest, ConsumeBatch) {
+TEST_CASE("consume batch", "[consumer]") {
     int partition = 0;
 
     // Create a consumer and subscribe to the topic
@@ -242,7 +236,7 @@
                             make_move_iterator(messages.end()));
         ++i;
     }
-    ASSERT_EQ(2, all_messages.size());
-    EXPECT_EQ(payload, all_messages[0].get_payload());
-    EXPECT_EQ(payload, all_messages[1].get_payload());
+    REQUIRE(all_messages.size() == 2);
+    CHECK(all_messages[0].get_payload() == payload);
+    CHECK(all_messages[1].get_payload() == payload);
 }
diff --git a/tests/kafka_handle_base_test.cpp b/tests/kafka_handle_base_test.cpp
index acba3da..b33603e 100644
--- a/tests/kafka_handle_base_test.cpp
+++ b/tests/kafka_handle_base_test.cpp
@@ -1,6 +1,6 @@
 #include
 #include
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/consumer.h"
 #include "cppkafka/producer.h"
 #include "cppkafka/metadata.h"
@@ -14,94 +14,88 @@ using std::string;
 
 using namespace cppkafka;
 
-class KafkaHandleBaseTest : public testing::Test {
-public:
-    static const string KAFKA_TOPIC;
+static const string KAFKA_TOPIC = "cppkafka_test1";
 
-    Configuration make_config() {
-        Configuration config;
-        config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
-        return config;
+Configuration make_config() {
+    Configuration config;
+    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    return config;
+}
+
+string get_kafka_host() {
+    string uri = KAFKA_TEST_INSTANCE;
+    size_t index = uri.find(':');
+    if (index == string::npos) {
+        return uri;
     }
-
-    string get_kafka_host() {
-        string uri = KAFKA_TEST_INSTANCE;
-        size_t index = uri.find(':');
-        if (index == string::npos) {
-            return uri;
-        }
-        else {
-            return uri.substr(0, index);
-        }
+    else {
+        return uri.substr(0, index);
    }
+}
 
-    uint16_t get_kafka_port() {
-        string uri = KAFKA_TEST_INSTANCE;
-        size_t index = uri.find(':');
-        if (index == string::npos) {
-            return 9092;
-        }
-        else {
-            return stoul(uri.substr(index + 1));
-        }
+uint16_t get_kafka_port() {
+    string uri = KAFKA_TEST_INSTANCE;
+    size_t index = uri.find(':');
+    if (index == string::npos) {
+        return 9092;
     }
-};
+    else {
+        return stoul(uri.substr(index + 1));
+    }
+}
 
-const string KafkaHandleBaseTest::KAFKA_TOPIC = "cppkafka_test1";
-
-TEST_F(KafkaHandleBaseTest, BrokersMetadata) {
+TEST_CASE("metadata", "[handle_base]") {
     Producer producer({});
     producer.add_brokers(KAFKA_TEST_INSTANCE);
     Metadata metadata = producer.get_metadata();
 
-    vector<BrokerMetadata> brokers = metadata.get_brokers();
-    ASSERT_EQ(1, brokers.size());
-    const auto& broker = brokers[0];
-    // TODO: resolve this
-    //EXPECT_EQ(get_kafka_host(), broker.get_host());
-    EXPECT_EQ(get_kafka_port(), broker.get_port());
-}
-
-TEST_F(KafkaHandleBaseTest, TopicsMetadata) {
-    unordered_set<string> topic_names = { "cppkafka_test1", "cppkafka_test2" };
-    size_t found_topics = 0;
-
-    Producer producer(make_config());
-    Metadata metadata = producer.get_metadata();
-
-    const vector<TopicMetadata>& topics = metadata.get_topics();
-    ASSERT_GE(topics.size(), 2);
-
-    for (const auto& topic : topics) {
-        if (topic_names.count(topic.get_name()) == 1) {
-            const vector<PartitionMetadata>& partitions = topic.get_partitions();
-            EXPECT_EQ(3, partitions.size());
-            set<int> expected_ids = { 0, 1, 2 };
-            for (const PartitionMetadata& partition : partitions) {
-                EXPECT_EQ(1, expected_ids.erase(partition.get_id()));
-                for (int32_t replica : partition.get_replicas()) {
-                    EXPECT_EQ(0, replica);
-                }
-                for (int32_t isr : partition.get_in_sync_replica_brokers()) {
-                    EXPECT_EQ(0, isr);
-                }
-            }
-            found_topics++;
-        }
+    SECTION("brokers") {
+        vector<BrokerMetadata> brokers = metadata.get_brokers();
+        REQUIRE(brokers.size() == 1);
+        const auto& broker = brokers[0];
+        // TODO: resolve this
+        //REQUIRE(broker.get_host() == get_kafka_host());
+        CHECK(broker.get_port() == get_kafka_port());
     }
-    EXPECT_EQ(topic_names.size(), found_topics);
 
-    // Find by names
-    EXPECT_EQ(topic_names.size(), metadata.get_topics(topic_names).size());
-    // Find by prefix
-    EXPECT_EQ(topic_names.size(), metadata.get_topics_prefixed("cppkafka_").size());
+    SECTION("topics") {
+        unordered_set<string> topic_names = { "cppkafka_test1", "cppkafka_test2" };
+        size_t found_topics = 0;
 
-    // Now get the whole metadata only for this topic
-    Topic topic = producer.get_topic(KAFKA_TOPIC);
-    EXPECT_EQ(KAFKA_TOPIC, producer.get_metadata(topic).get_name());
+        const vector<TopicMetadata>& topics = metadata.get_topics();
+        CHECK(topics.size() >= 2);
+
+        for (const auto& topic : topics) {
+            if (topic_names.count(topic.get_name()) == 1) {
+                const vector<PartitionMetadata>& partitions = topic.get_partitions();
+                REQUIRE(partitions.size() == 3);
+                set<int> expected_ids = { 0, 1, 2 };
+                for (const PartitionMetadata& partition : partitions) {
+                    REQUIRE(expected_ids.erase(partition.get_id()) == 1);
+                    for (int32_t replica : partition.get_replicas()) {
+                        REQUIRE(replica == 0);
+                    }
+                    for (int32_t isr : partition.get_in_sync_replica_brokers()) {
+                        REQUIRE(isr == 0);
+                    }
+                }
+                found_topics++;
+            }
+        }
+        CHECK(found_topics == topic_names.size());
+
+        // Find by names
+        CHECK(metadata.get_topics(topic_names).size() == topic_names.size());
+        // Find by prefix
+        CHECK(metadata.get_topics_prefixed("cppkafka_").size() == topic_names.size());
+
+        // Now get the whole metadata only for this topic
+        Topic topic = producer.get_topic(KAFKA_TOPIC);
+        CHECK(producer.get_metadata(topic).get_name() == KAFKA_TOPIC);
+    }
 }
 
-TEST_F(KafkaHandleBaseTest, ConsumerGroups) {
+TEST_CASE("consumer groups", "[handle_base]") {
     string consumer_group = "kafka_handle_test";
     string client_id = "my_client_id";
 
@@ -117,15 +111,15 @@
     runner.try_join();
 
     GroupInformation information = consumer.get_consumer_group(consumer_group);
-    EXPECT_EQ(consumer_group, information.get_name());
-    EXPECT_EQ("consumer", information.get_protocol_type());
-    ASSERT_EQ(1, information.get_members().size());
+    CHECK(information.get_name() == consumer_group);
+    CHECK(information.get_protocol_type() == "consumer");
+    CHECK(information.get_members().size() == 1);
 
     auto member = information.get_members()[0];
-    EXPECT_EQ(client_id, member.get_client_id());
+    CHECK(member.get_client_id() == client_id);
 
     MemberAssignmentInformation assignment = member.get_member_assignment();
-    EXPECT_EQ(0, assignment.get_version());
+    CHECK(assignment.get_version() == 0);
 
     TopicPartitionList expected_topic_partitions = {
         { KAFKA_TOPIC, 0 },
         { KAFKA_TOPIC, 1 },
@@ -133,9 +127,5 @@
     };
     TopicPartitionList topic_partitions = assignment.get_topic_partitions();
     sort(topic_partitions.begin(), topic_partitions.end());
-    EXPECT_EQ(expected_topic_partitions, topic_partitions);
-    /*for (const auto c : ) {
-        printf("%0d,", (int)c & 0xff);
-    }
-    std::cout << std::endl;*/
+    CHECK(topic_partitions == expected_topic_partitions);
 }
diff --git a/tests/producer_test.cpp b/tests/producer_test.cpp
index 7b7238d..4454ac6 100644
--- a/tests/producer_test.cpp
+++ b/tests/producer_test.cpp
@@ -3,7 +3,7 @@
 #include
 #include
 #include
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/producer.h"
 #include "cppkafka/consumer.h"
 #include "cppkafka/utils/buffered_producer.h"
@@ -26,34 +26,29 @@ using std::chrono::milliseconds;
 
 using namespace cppkafka;
 
-class ProducerTest : public testing::Test {
-public:
-    static const string KAFKA_TOPIC;
+static const string KAFKA_TOPIC = "cppkafka_test1";
 
-    Configuration make_producer_config() {
-        Configuration config = {
-            { "metadata.broker.list", KAFKA_TEST_INSTANCE },
-            { "queue.buffering.max.ms", 0 },
-            { "api.version.request", true },
-            { "queue.buffering.max.ms", 50 }
-        };
-        return config;
-    }
+static Configuration make_producer_config() {
+    Configuration config = {
+        { "metadata.broker.list", KAFKA_TEST_INSTANCE },
+        { "queue.buffering.max.ms", 0 },
+        { "api.version.request", true },
+        { "queue.buffering.max.ms", 50 }
+    };
+    return config;
+}
 
-    Configuration make_consumer_config() {
-        Configuration config = {
{ "metadata.broker.list", KAFKA_TEST_INSTANCE }, - { "enable.auto.commit", false }, - { "group.id", "producer_test" }, - { "api.version.request", true } - }; - return config; - } -}; +static Configuration make_consumer_config() { + Configuration config = { + { "metadata.broker.list", KAFKA_TEST_INSTANCE }, + { "enable.auto.commit", false }, + { "group.id", "producer_test" }, + { "api.version.request", true } + }; + return config; +} -const string ProducerTest::KAFKA_TOPIC = "cppkafka_test1"; - -TEST_F(ProducerTest, OneMessageOnFixedPartition) { +TEST_CASE("simple production", "[producer]") { int partition = 0; // Create a consumer and assign this topic/partition @@ -61,59 +56,121 @@ TEST_F(ProducerTest, OneMessageOnFixedPartition) { consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) }); ConsumerRunner runner(consumer, 1, 1); - // Now create a producer and produce a message - Producer producer(make_producer_config()); - string payload = "Hello world! 1"; - producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload)); - runner.try_join(); + Configuration config = make_producer_config(); + SECTION("message with no key") { + // Now create a producer and produce a message + const string payload = "Hello world! 1"; + Producer producer(config); + producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition).payload(payload)); + runner.try_join(); - const auto& messages = runner.get_messages(); - ASSERT_EQ(1, messages.size()); - const auto& message = messages[0]; - EXPECT_EQ(Buffer(payload), message.get_payload()); - EXPECT_FALSE(message.get_key()); - EXPECT_EQ(KAFKA_TOPIC, message.get_topic()); - EXPECT_EQ(partition, message.get_partition()); - EXPECT_FALSE(message.get_error()); + const auto& messages = runner.get_messages(); + REQUIRE(messages.size() == 1); + const auto& message = messages[0]; + CHECK(message.get_payload() == payload); + CHECK(!!message.get_key() == false); + CHECK(message.get_topic() == KAFKA_TOPIC); + CHECK(message.get_partition() == partition); + CHECK(!!message.get_error() == false); - int64_t low; - int64_t high; - tie(low, high) = producer.query_offsets({ KAFKA_TOPIC, partition }); - EXPECT_GT(high, low); + int64_t low; + int64_t high; + tie(low, high) = producer.query_offsets({ KAFKA_TOPIC, partition }); + CHECK(high > low); + } + + SECTION("message with key") { + const string payload = "Hello world! 2"; + const string key = "such key"; + const milliseconds timestamp{15}; + Producer producer(config); + producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition) + .key(key) + .payload(payload) + .timestamp(timestamp)); + runner.try_join(); + + const auto& messages = runner.get_messages(); + REQUIRE(messages.size() == 1); + const auto& message = messages[0]; + CHECK(message.get_payload() == payload); + CHECK(message.get_key() == key); + CHECK(message.get_topic() == KAFKA_TOPIC); + CHECK(message.get_partition() == partition); + CHECK(!!message.get_error() == false); + REQUIRE(!!message.get_timestamp() == true); + CHECK(message.get_timestamp()->get_timestamp() == timestamp); + } + + SECTION("callbacks") { + // Now create a producer and produce a message + const string payload = "Hello world! 
3"; + const string key = "hehe"; + bool delivery_report_called = false; + config.set_delivery_report_callback([&](Producer&, const Message& msg) { + CHECK(msg.get_payload() == payload); + delivery_report_called = true; + }); + + TopicConfiguration topic_config; + topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key, + int32_t partition_count) { + CHECK(msg_key == key); + CHECK(partition_count == 3); + CHECK(topic.get_name() == KAFKA_TOPIC); + return 0; + }); + config.set_default_topic_configuration(topic_config); + + Producer producer(config); + producer.produce(MessageBuilder(KAFKA_TOPIC).key(key).payload(payload)); + while (producer.get_out_queue_length() > 0) { + producer.poll(); + } + runner.try_join(); + + const auto& messages = runner.get_messages(); + REQUIRE(messages.size() == 1); + const auto& message = messages[0]; + CHECK(message.get_payload() == payload); + CHECK(message.get_key() == key); + CHECK(message.get_topic() == KAFKA_TOPIC); + CHECK(message.get_partition() == partition); + CHECK(!!message.get_error() == false); + CHECK(delivery_report_called == true); + } + + SECTION("partitioner callback") { + // Now create a producer and produce a message + const string payload = "Hello world! 4"; + const string key = "hehe"; + bool callback_called = false; + + TopicConfiguration topic_config; + topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key, + int32_t partition_count) { + CHECK(msg_key == key); + CHECK(partition_count == 3); + CHECK(topic.get_name() == KAFKA_TOPIC); + callback_called = true; + return 0; + }); + config.set_default_topic_configuration(topic_config); + Producer producer(config); + + producer.produce(MessageBuilder(KAFKA_TOPIC).key(key).payload(payload)); + producer.poll(); + runner.try_join(); + + const auto& messages = runner.get_messages(); + REQUIRE(messages.size() == 1); + const auto& message = messages[0]; + CHECK(message.get_partition() == partition); + CHECK(callback_called == true); + } } -TEST_F(ProducerTest, OneMessageUsingKey) { - int partition = 0; - - // Create a consumer and assign this topic/partition - Consumer consumer(make_consumer_config()); - consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) }); - ConsumerRunner runner(consumer, 1, 1); - - // Now create a producer and produce a message - Producer producer(make_producer_config()); - string payload = "Hello world! 
2"; - string key = "such key"; - const milliseconds timestamp{15}; - producer.produce(MessageBuilder(KAFKA_TOPIC).partition(partition) - .key(key) - .payload(payload) - .timestamp(timestamp)); - runner.try_join(); - - const auto& messages = runner.get_messages(); - ASSERT_EQ(1, messages.size()); - const auto& message = messages[0]; - EXPECT_EQ(Buffer(payload), message.get_payload()); - EXPECT_EQ(Buffer(key), message.get_key()); - EXPECT_EQ(KAFKA_TOPIC, message.get_topic()); - EXPECT_EQ(partition, message.get_partition()); - EXPECT_FALSE(message.get_error()); - EXPECT_TRUE(message.get_timestamp()); - EXPECT_EQ(timestamp, message.get_timestamp()->get_timestamp()); -} - -TEST_F(ProducerTest, MultipleMessagesUnassignedPartitions) { +TEST_CASE("multiple messages", "[producer]") { size_t message_count = 10; int partitions = 3; set payloads; @@ -125,110 +182,27 @@ TEST_F(ProducerTest, MultipleMessagesUnassignedPartitions) { // Now create a producer and produce a message Producer producer(make_producer_config()); - string payload_base = "Hello world "; + const string payload_base = "Hello world "; for (size_t i = 0; i < message_count; ++i) { - string payload = payload_base + to_string(i); + const string payload = payload_base + to_string(i); payloads.insert(payload); producer.produce(MessageBuilder(KAFKA_TOPIC).payload(payload)); } runner.try_join(); const auto& messages = runner.get_messages(); - ASSERT_EQ(message_count, messages.size()); + REQUIRE(messages.size() == message_count); for (const auto& message : messages) { - EXPECT_EQ(KAFKA_TOPIC, message.get_topic()); - EXPECT_EQ(1, payloads.erase(message.get_payload())); - EXPECT_FALSE(message.get_error()); - EXPECT_FALSE(message.get_key()); - EXPECT_GE(message.get_partition(), 0); - EXPECT_LT(message.get_partition(), 3); + CHECK(message.get_topic() == KAFKA_TOPIC); + CHECK(payloads.erase(message.get_payload()) == 1); + CHECK(!!message.get_error() == false); + CHECK(!!message.get_key() == false); + CHECK(message.get_partition() >= 0); + CHECK(message.get_partition() < 3); } } -TEST_F(ProducerTest, Callbacks) { - int partition = 0; - - // Create a consumer and assign this topic/partition - Consumer consumer(make_consumer_config()); - consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) }); - ConsumerRunner runner(consumer, 1, 1); - - // Now create a producer and produce a message - string payload = "Hello world! 
3"; - string key = "hehe"; - bool delivery_report_called = false; - Configuration config = make_producer_config(); - config.set_delivery_report_callback([&](Producer&, const Message& msg) { - EXPECT_EQ(Buffer(payload), msg.get_payload()); - delivery_report_called = true; - }); - - TopicConfiguration topic_config; - topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key, - int32_t partition_count) { - EXPECT_EQ(Buffer(key), msg_key); - EXPECT_EQ(3, partition_count); - EXPECT_EQ(KAFKA_TOPIC, topic.get_name()); - return 0; - }); - config.set_default_topic_configuration(topic_config); - - Producer producer(move(config)); - producer.produce(MessageBuilder(KAFKA_TOPIC).key(key).payload(payload)); - while (producer.get_out_queue_length() > 0) { - producer.poll(); - } - runner.try_join(); - - const auto& messages = runner.get_messages(); - ASSERT_EQ(1, messages.size()); - const auto& message = messages[0]; - EXPECT_EQ(Buffer(payload), message.get_payload()); - EXPECT_EQ(Buffer(key), message.get_key()); - EXPECT_EQ(KAFKA_TOPIC, message.get_topic()); - EXPECT_EQ(partition, message.get_partition()); - EXPECT_FALSE(message.get_error()); - EXPECT_TRUE(delivery_report_called); -} - -TEST_F(ProducerTest, PartitionerCallbackOnDefaultTopicConfig) { - int partition = 0; - - // Create a consumer and assign this topic/partition - Consumer consumer(make_consumer_config()); - consumer.assign({ TopicPartition(KAFKA_TOPIC, partition) }); - ConsumerRunner runner(consumer, 1, 1); - - // Now create a producer and produce a message - string payload = "Hello world! 4"; - string key = "hehe"; - bool callback_called = false; - - Configuration config = make_producer_config(); - TopicConfiguration topic_config; - topic_config.set_partitioner_callback([&](const Topic& topic, const Buffer& msg_key, - int32_t partition_count) { - EXPECT_EQ(Buffer(key), msg_key); - EXPECT_EQ(3, partition_count); - EXPECT_EQ(KAFKA_TOPIC, topic.get_name()); - callback_called = true; - return 0; - }); - config.set_default_topic_configuration(topic_config); - - Producer producer(move(config)); - producer.produce(MessageBuilder(KAFKA_TOPIC).key(key).payload(payload)); - producer.poll(); - runner.try_join(); - - const auto& messages = runner.get_messages(); - ASSERT_EQ(1, messages.size()); - const auto& message = messages[0]; - EXPECT_EQ(partition, message.get_partition()); - EXPECT_TRUE(callback_called); -} - -TEST_F(ProducerTest, BufferedProducer) { +TEST_CASE("buffered producer", "[producer]") { int partition = 0; // Create a consumer and assign this topic/partition @@ -238,8 +212,8 @@ TEST_F(ProducerTest, BufferedProducer) { // Now create a buffered producer and produce two messages BufferedProducer producer(make_producer_config()); - string payload = "Hello world! 2"; - string key = "such key"; + const string payload = "Hello world! 
2"; + const string key = "such key"; producer.add_message(MessageBuilder(KAFKA_TOPIC).partition(partition) .key(key) .payload(payload)); @@ -253,16 +227,16 @@ TEST_F(ProducerTest, BufferedProducer) { runner.try_join(); const auto& messages = runner.get_messages(); - ASSERT_EQ(3, messages.size()); + REQUIRE(messages.size() == 3); const auto& message = messages[0]; - EXPECT_EQ(Buffer(key), message.get_key()); - EXPECT_EQ(KAFKA_TOPIC, message.get_topic()); - EXPECT_EQ(partition, message.get_partition()); - EXPECT_FALSE(message.get_error()); + CHECK(message.get_key() == key); + CHECK(message.get_topic() == KAFKA_TOPIC); + CHECK(message.get_partition() == partition); + CHECK(!!message.get_error() == false); - EXPECT_FALSE(messages[1].get_key()); - EXPECT_FALSE(messages[2].get_key()); + CHECK(!!messages[1].get_key() == false); + CHECK(!!messages[2].get_key() == false); for (const auto& message : messages) { - EXPECT_EQ(Buffer(payload), message.get_payload()); + CHECK(message.get_payload() == payload); } } diff --git a/tests/test_main.cpp b/tests/test_main.cpp new file mode 100644 index 0000000..6eec4ca --- /dev/null +++ b/tests/test_main.cpp @@ -0,0 +1,74 @@ +#include +#define CATCH_CONFIG_RUNNER +#include + +using std::string; +using std::chrono::steady_clock; +using std::chrono::milliseconds; +using std::chrono::duration_cast; + +using Catch::ConsoleReporter; +using Catch::ReporterConfig; +using Catch::ReporterPreferences; +using Catch::TestCaseInfo; +using Catch::TestCaseStats; +using Catch::Totals; +using Catch::Session; + +namespace cppkafka { + +class InstantTestReporter : public ConsoleReporter { +public: + using ClockType = steady_clock; + + InstantTestReporter(const ReporterConfig& config) + : ConsoleReporter(config) { + } + + static string getDescription() { + return "Reports the tests' progress as they run"; + } + + ReporterPreferences getPreferences() const override { + ReporterPreferences output; + output.shouldRedirectStdOut = false; + return output; + } + + void testCaseStarting(const TestCaseInfo& info) override { + ConsoleReporter::testCaseStarting(info); + stream << "Running test \"" << info.name << "\" @ " << info.lineInfo << "\n"; + test_start_ts_ = ClockType::now(); + } + + void testCaseEnded(const TestCaseStats& stats) override { + const Totals& totals = stats.totals; + const size_t totalTestCases = totals.assertions.passed + totals.assertions.failed; + const auto elapsed = ClockType::now() - test_start_ts_; + stream << "Done. 
" << totals.assertions.passed << "/" << totalTestCases + << " assertions succeeded in " << duration_cast(elapsed).count() + << "ms\n"; + } +private: + ClockType::time_point test_start_ts_; +}; + +CATCH_REGISTER_REPORTER("instant", InstantTestReporter) + +} // cppkafka + +int main(int argc, char* argv[]) { + Session session; + + int returnCode = session.applyCommandLine( argc, argv ); + if (returnCode != 0) { + return returnCode; + } + if (session.configData().reporterNames.empty()) { + // Set our reporter as the default one + session.configData().reporterNames.emplace_back("instant"); + } + + int numFailed = session.run(); + return numFailed; +} diff --git a/tests/topic_partition_list_test.cpp b/tests/topic_partition_list_test.cpp index ce325ff..f2170d7 100644 --- a/tests/topic_partition_list_test.cpp +++ b/tests/topic_partition_list_test.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include "cppkafka/topic_partition_list.h" #include "cppkafka/topic_partition.h" @@ -7,12 +7,7 @@ using std::ostringstream; using namespace cppkafka; -class TopicPartitionListTest : public testing::Test { -public: - -}; - -TEST_F(TopicPartitionListTest, Conversion) { +TEST_CASE("rdkafka conversion", "[topic_partition]") { TopicPartitionList list1; list1.push_back("foo"); list1.push_back({ "bar", 2 }); @@ -20,24 +15,24 @@ TEST_F(TopicPartitionListTest, Conversion) { TopicPartitionList list2 = convert(convert(list1)); - EXPECT_EQ(list1.size(), list2.size()); + CHECK(list1.size() == list2.size()); for (size_t i = 0; i < list1.size(); ++i) { const auto& item1 = list1[i]; const auto& item2 = list2[i]; - EXPECT_EQ(item1.get_topic(), item2.get_topic()); - EXPECT_EQ(item1.get_partition(), item2.get_partition()); - EXPECT_EQ(item1.get_offset(), item2.get_offset()); + CHECK(item1.get_topic() == item2.get_topic()); + CHECK(item1.get_partition() == item2.get_partition()); + CHECK(item1.get_offset() == item2.get_offset()); } } -TEST_F(TopicPartitionListTest, AsString) { +TEST_CASE("topic partition to string", "[topic_partition]") { ostringstream output; TopicPartition topic_partition("foo", 5); output << topic_partition; - EXPECT_EQ("foo[5:#]", output.str()); + CHECK(output.str() == "foo[5:#]"); } -TEST_F(TopicPartitionListTest, ListAsString) { +TEST_CASE("topic partition list to string", "[topic_partition]") { ostringstream output; TopicPartitionList list; list.push_back("foo"); @@ -45,5 +40,5 @@ TEST_F(TopicPartitionListTest, ListAsString) { list.push_back({ "foobar", 3, 4 }); output << list; - EXPECT_EQ("[ foo[-1:#], bar[2:#], foobar[3:4] ]", output.str()); + CHECK(output.str() == "[ foo[-1:#], bar[2:#], foobar[3:4] ]"); } diff --git a/third_party/Catch2 b/third_party/Catch2 new file mode 160000 index 0000000..d2d8455 --- /dev/null +++ b/third_party/Catch2 @@ -0,0 +1 @@ +Subproject commit d2d8455b571b6c66c4b7003500a77f9a93ecdc28 diff --git a/third_party/googletest b/third_party/googletest deleted file mode 160000 index 0a43962..0000000 --- a/third_party/googletest +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 0a439623f75c029912728d80cb7f1b8b48739ca4