Move tests to use Catch2 instead of googletest (#56)

* Port buffer test to use Catch2

* Move compacted topic processor test to Catch2

* Move configuration tests to Catch2

* Rename configuration test cases

* Move topic partition list test to Catch2

* Move handle base tests to Catch2

* Move producer tests to Catch2

* Move consumer tests to Catch2

* Use CHECK on tests when appropriate (see the sketch after this list)

* Remove googletest

* Show tests' progress as they run

* Update message when Catch2 is not checked out

* Remove references to googletest

* Run cppkafka_tests manually on Travis

* Print amount of time taken by each test case
Matias Fontanini authored on 2018-04-24 03:20:48 +01:00, committed by GitHub
parent 30b3652a94
commit cb2c8877d8
14 changed files with 528 additions and 530 deletions


@@ -1,6 +1,6 @@
 #include <set>
 #include <unordered_set>
-#include <gtest/gtest.h>
+#include <catch.hpp>
 #include "cppkafka/consumer.h"
 #include "cppkafka/producer.h"
 #include "cppkafka/metadata.h"
@@ -14,94 +14,88 @@ using std::string;
 using namespace cppkafka;
 
-class KafkaHandleBaseTest : public testing::Test {
-public:
-    static const string KAFKA_TOPIC;
+static const string KAFKA_TOPIC = "cppkafka_test1";
 
-    Configuration make_config() {
-        Configuration config;
-        config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
-        return config;
-    }
+Configuration make_config() {
+    Configuration config;
+    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
+    return config;
+}
 
-    string get_kafka_host() {
-        string uri = KAFKA_TEST_INSTANCE;
-        size_t index = uri.find(':');
-        if (index == string::npos) {
-            return uri;
-        }
-        else {
-            return uri.substr(0, index);
-        }
-    }
+string get_kafka_host() {
+    string uri = KAFKA_TEST_INSTANCE;
+    size_t index = uri.find(':');
+    if (index == string::npos) {
+        return uri;
+    }
+    else {
+        return uri.substr(0, index);
+    }
+}
 
-    uint16_t get_kafka_port() {
-        string uri = KAFKA_TEST_INSTANCE;
-        size_t index = uri.find(':');
-        if (index == string::npos) {
-            return 9092;
-        }
-        else {
-            return stoul(uri.substr(index + 1));
-        }
-    }
-};
-
-const string KafkaHandleBaseTest::KAFKA_TOPIC = "cppkafka_test1";
+uint16_t get_kafka_port() {
+    string uri = KAFKA_TEST_INSTANCE;
+    size_t index = uri.find(':');
+    if (index == string::npos) {
+        return 9092;
+    }
+    else {
+        return stoul(uri.substr(index + 1));
+    }
+}
 
-TEST_F(KafkaHandleBaseTest, BrokersMetadata) {
+TEST_CASE("metadata", "[handle_base]") {
     Producer producer({});
    producer.add_brokers(KAFKA_TEST_INSTANCE);
     Metadata metadata = producer.get_metadata();
 
-    vector<BrokerMetadata> brokers = metadata.get_brokers();
-    ASSERT_EQ(1, brokers.size());
-    const auto& broker = brokers[0];
-    // TODO: resolve this
-    //EXPECT_EQ(get_kafka_host(), broker.get_host());
-    EXPECT_EQ(get_kafka_port(), broker.get_port());
-}
-
-TEST_F(KafkaHandleBaseTest, TopicsMetadata) {
-    unordered_set<string> topic_names = { "cppkafka_test1", "cppkafka_test2" };
-    size_t found_topics = 0;
-
-    Producer producer(make_config());
-    Metadata metadata = producer.get_metadata();
-
-    const vector<TopicMetadata>& topics = metadata.get_topics();
-    ASSERT_GE(topics.size(), 2);
-    for (const auto& topic : topics) {
-        if (topic_names.count(topic.get_name()) == 1) {
-            const vector<PartitionMetadata>& partitions = topic.get_partitions();
-            EXPECT_EQ(3, partitions.size());
-            set<int32_t> expected_ids = { 0, 1, 2 };
-            for (const PartitionMetadata& partition : partitions) {
-                EXPECT_EQ(1, expected_ids.erase(partition.get_id()));
-                for (int32_t replica : partition.get_replicas()) {
-                    EXPECT_EQ(0, replica);
-                }
-                for (int32_t isr : partition.get_in_sync_replica_brokers()) {
-                    EXPECT_EQ(0, isr);
-                }
-            }
-            found_topics++;
-        }
+    SECTION("brokers") {
+        vector<BrokerMetadata> brokers = metadata.get_brokers();
+        REQUIRE(brokers.size() == 1);
+        const auto& broker = brokers[0];
+        // TODO: resolve this
+        //REQUIRE(broker.get_host() == get_kafka_host());
+        CHECK(broker.get_port() == get_kafka_port());
     }
-    EXPECT_EQ(topic_names.size(), found_topics);
 
-    // Find by names
-    EXPECT_EQ(topic_names.size(), metadata.get_topics(topic_names).size());
-    // Find by prefix
-    EXPECT_EQ(topic_names.size(), metadata.get_topics_prefixed("cppkafka_").size());
+    SECTION("topics") {
+        unordered_set<string> topic_names = { "cppkafka_test1", "cppkafka_test2" };
+        size_t found_topics = 0;
 
-    // Now get the whole metadata only for this topic
-    Topic topic = producer.get_topic(KAFKA_TOPIC);
-    EXPECT_EQ(KAFKA_TOPIC, producer.get_metadata(topic).get_name());
+        const vector<TopicMetadata>& topics = metadata.get_topics();
+        CHECK(topics.size() >= 2);
+        for (const auto& topic : topics) {
+            if (topic_names.count(topic.get_name()) == 1) {
+                const vector<PartitionMetadata>& partitions = topic.get_partitions();
+                REQUIRE(partitions.size() == 3);
+                set<int32_t> expected_ids = { 0, 1, 2 };
+                for (const PartitionMetadata& partition : partitions) {
+                    REQUIRE(expected_ids.erase(partition.get_id()) == 1);
+                    for (int32_t replica : partition.get_replicas()) {
+                        REQUIRE(replica == 0);
+                    }
+                    for (int32_t isr : partition.get_in_sync_replica_brokers()) {
+                        REQUIRE(isr == 0);
+                    }
+                }
+                found_topics++;
+            }
+        }
+        CHECK(found_topics == topic_names.size());
+
+        // Find by names
+        CHECK(metadata.get_topics(topic_names).size() == topic_names.size());
+        // Find by prefix
+        CHECK(metadata.get_topics_prefixed("cppkafka_").size() == topic_names.size());
+
+        // Now get the whole metadata only for this topic
+        Topic topic = producer.get_topic(KAFKA_TOPIC);
+        CHECK(producer.get_metadata(topic).get_name() == KAFKA_TOPIC);
+    }
 }
 
-TEST_F(KafkaHandleBaseTest, ConsumerGroups) {
+TEST_CASE("consumer groups", "[handle_base]") {
     string consumer_group = "kafka_handle_test";
     string client_id = "my_client_id";
@@ -117,15 +111,15 @@ TEST_F(KafkaHandleBaseTest, ConsumerGroups) {
     runner.try_join();
 
     GroupInformation information = consumer.get_consumer_group(consumer_group);
-    EXPECT_EQ(consumer_group, information.get_name());
-    EXPECT_EQ("consumer", information.get_protocol_type());
-    ASSERT_EQ(1, information.get_members().size());
+    CHECK(information.get_name() == consumer_group);
+    CHECK(information.get_protocol_type() == "consumer");
+    CHECK(information.get_members().size() == 1);
 
     auto member = information.get_members()[0];
-    EXPECT_EQ(client_id, member.get_client_id());
+    CHECK(member.get_client_id() == client_id);
 
     MemberAssignmentInformation assignment = member.get_member_assignment();
-    EXPECT_EQ(0, assignment.get_version());
+    CHECK(assignment.get_version() == 0);
     TopicPartitionList expected_topic_partitions = {
         { KAFKA_TOPIC, 0 },
         { KAFKA_TOPIC, 1 },
@@ -133,9 +127,5 @@ TEST_F(KafkaHandleBaseTest, ConsumerGroups) {
     };
     TopicPartitionList topic_partitions = assignment.get_topic_partitions();
     sort(topic_partitions.begin(), topic_partitions.end());
-    EXPECT_EQ(expected_topic_partitions, topic_partitions);
-    /*for (const auto c : ) {
-        printf("%0d,", (int)c & 0xff);
-    }
-    std::cout << std::endl;*/
+    CHECK(topic_partitions == expected_topic_partitions);
 }