wlan-cloud-lib-cppkafka/tests/kafka_handle_base_test.cpp
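
// Integration tests for KafkaHandleBase functionality (metadata and consumer
// group queries). They expect a reachable Kafka instance at
// KAFKA_TEST_INSTANCE ("host:port") with a single broker (id 0) and the
// topics cppkafka_test1 and cppkafka_test2, each with three partitions.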

#include <algorithm>
#include <cstdint>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>
#include <catch.hpp>
#include "cppkafka/consumer.h"
#include "cppkafka/producer.h"
#include "cppkafka/metadata.h"
#include "cppkafka/group_information.h"
#include "test_utils.h"
using std::vector;
using std::set;
using std::unordered_set;
using std::string;

using namespace cppkafka;

static const string KAFKA_TOPIC = "cppkafka_test1";
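
// Builds the base configuration used by every test: just the broker list,
// taken from the KAFKA_TEST_INSTANCE test constant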
Configuration make_config() {
    Configuration config;
    config.set("metadata.broker.list", KAFKA_TEST_INSTANCE);
    return config;
}
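
// Extracts the host part of KAFKA_TEST_INSTANCE; returns the whole string
// when no port is present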
string get_kafka_host() {
    string uri = KAFKA_TEST_INSTANCE;
    size_t index = uri.find(':');
    if (index == string::npos) {
        return uri;
    }
    else {
        return uri.substr(0, index);
    }
}
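
// Extracts the port part of KAFKA_TEST_INSTANCE, defaulting to the standard
// Kafka port 9092 when none is given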
uint16_t get_kafka_port() {
    string uri = KAFKA_TEST_INSTANCE;
    size_t index = uri.find(':');
    if (index == string::npos) {
        return 9092;
    }
    else {
        return stoul(uri.substr(index + 1));
    }
}
TEST_CASE("metadata", "[handle_base]") {
Producer producer({});
producer.add_brokers(KAFKA_TEST_INSTANCE);
Metadata metadata = producer.get_metadata();
SECTION("brokers") {
vector<BrokerMetadata> brokers = metadata.get_brokers();
REQUIRE(brokers.size() == 1);
const auto& broker = brokers[0];
// TODO: resolve this
//REQUIRE(broker.get_host() == get_kafka_host());
CHECK(broker.get_port() == get_kafka_port());
}
SECTION("topics") {
unordered_set<string> topic_names = { "cppkafka_test1", "cppkafka_test2" };
size_t found_topics = 0;
const vector<TopicMetadata>& topics = metadata.get_topics();
CHECK(topics.size() >= 2);
for (const auto& topic : topics) {
if (topic_names.count(topic.get_name()) == 1) {
const vector<PartitionMetadata>& partitions = topic.get_partitions();
REQUIRE(partitions.size() == 3);
set<int32_t> expected_ids = { 0, 1, 2 };
for (const PartitionMetadata& partition : partitions) {
REQUIRE(expected_ids.erase(partition.get_id()) == 1);
for (int32_t replica : partition.get_replicas()) {
REQUIRE(replica == 0);
}
for (int32_t isr : partition.get_in_sync_replica_brokers()) {
REQUIRE(isr == 0);
}
}
found_topics++;
}
}
CHECK(found_topics == topic_names.size());
// Find by names
CHECK(metadata.get_topics(topic_names).size() == topic_names.size());
// Find by prefix
CHECK(metadata.get_topics_prefixed("cppkafka_").size() == topic_names.size());
// Now get the whole metadata only for this topic
Topic topic = producer.get_topic(KAFKA_TOPIC);
CHECK(producer.get_metadata(topic).get_name() == KAFKA_TOPIC);
}
}
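
// Joins a consumer group, runs a short ConsumerRunner so the group becomes
// active, then verifies the group information reported by the broker: name,
// protocol type, single member, and that member's partition assignment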
TEST_CASE("consumer groups", "[handle_base]") {
string consumer_group = "kafka_handle_test";
string client_id = "my_client_id";
Configuration config = make_config();
config.set("group.id", consumer_group);
config.set("client.id", client_id);
config.set("enable.auto.commit", false);
// Build consumer
Consumer consumer(config);
consumer.subscribe({ KAFKA_TOPIC });
ConsumerRunner runner(consumer, 0, 3);
runner.try_join();
GroupInformation information = consumer.get_consumer_group(consumer_group);
CHECK(information.get_name() == consumer_group);
CHECK(information.get_protocol_type() == "consumer");
CHECK(information.get_members().size() == 1);
auto member = information.get_members()[0];
CHECK(member.get_client_id() == client_id);
MemberAssignmentInformation assignment = member.get_member_assignment();
CHECK(assignment.get_version() == 0);
TopicPartitionList expected_topic_partitions = {
{ KAFKA_TOPIC, 0 },
{ KAFKA_TOPIC, 1 },
{ KAFKA_TOPIC, 2 }
};
TopicPartitionList topic_partitions = assignment.get_topic_partitions();
sort(topic_partitions.begin(), topic_partitions.end());
CHECK(topic_partitions == expected_topic_partitions);
}