Callback invoker to sink all thrown exceptions (#74)

This commit is contained in:
Alex Damian
2018-06-01 19:35:56 -04:00
committed by Matias Fontanini
parent 15fdab6943
commit 9714bec5bf
9 changed files with 180 additions and 66 deletions

View File

@@ -1,4 +1,4 @@
link_libraries(cppkafka ${RDKAFKA_LIBRARY} ${Boost_LIBRARIES} pthread) link_libraries(cppkafka ${RDKAFKA_LIBRARY} ${Boost_LIBRARIES} pthread rt ssl crypto dl z)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
include_directories(SYSTEM ${RDKAFKA_INCLUDE_DIR}) include_directories(SYSTEM ${RDKAFKA_INCLUDE_DIR})

View File

@@ -38,6 +38,7 @@
#include "queue.h" #include "queue.h"
#include "macros.h" #include "macros.h"
#include "error.h" #include "error.h"
#include "detail/callback_invoker.h"
namespace cppkafka { namespace cppkafka {

View File

@@ -0,0 +1,127 @@
/*
* Copyright (c) 2017, Matias Fontanini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CPPKAFKA_CALLBACK_INVOKER_H
#define CPPKAFKA_CALLBACK_INVOKER_H
#include <sstream>
#include <assert.h>
#include "../logging.h"
#include "../kafka_handle_base.h"
namespace cppkafka {
// Sentinel "error" values returned by CallbackInvoker when the wrapped
// callback throws or is empty: value-initialized by default, with explicit
// overrides for the return types used by librdkafka-style callbacks
// (false for bool, -1 for int, nothing for void).
template <typename T>
T error_value() { return T{}; }
template<> inline
void error_value<void>() {}  // fixed: removed stray ';' (empty declaration)
template<> inline
bool error_value<bool>() { return false; }
template<> inline
int error_value<int>() { return -1; }
/**
 * \brief Wraps an std::function object and runs it while preventing all exceptions from escaping
 * \tparam Func An std::function object
 */
template <typename Func>
class CallbackInvoker
{
public:
    using RetType = typename Func::result_type;
    using LogCallback = std::function<void(KafkaHandleBase& handle,
                                           int level,
                                           const std::string& facility,
                                           const std::string& message)>;
    CallbackInvoker(const char* callback_name,
                    const Func& callback,
                    KafkaHandleBase* handle)
    : callback_name_(callback_name),
      callback_(callback),
      handle_(handle) {
    }

    /// True when the wrapped std::function holds a callable target.
    explicit operator bool() const {
        return static_cast<bool>(callback_);
    }

    /**
     * Invokes the wrapped callback (if any) and swallows every exception it throws.
     * On failure the error is reported via report_failure() and a type-specific
     * error value (see error_value<T>()) is returned instead.
     */
    template <typename ...Args>
    RetType operator()(Args&&... args) const {
        std::string failure_message;
        try {
            if (!callback_) {
                return error_value<RetType>();
            }
            return callback_(std::forward<Args>(args)...);
        }
        catch (const std::exception& ex) {
            if (handle_) {
                std::ostringstream stream;
                stream << "Caught exception in " << callback_name_ << " callback: " << ex.what();
                failure_message = stream.str();
            }
        }
        catch (...) {
            if (handle_) {
                std::ostringstream stream;
                stream << "Caught unknown exception in " << callback_name_ << " callback";
                failure_message = stream.str();
            }
        }
        // Only reached when the callback threw
        report_failure(failure_message);
        return error_value<RetType>();
    }
private:
    // Routes the error message to the user's configured log callback when one
    // exists, falling back to librdkafka's default logger otherwise. Never throws.
    void report_failure(const std::string& message) const {
        static const char* library_name = "cppkafka";
        if (!handle_) {
            return; // no handle => nowhere to log (callers may pass nullptr to avoid recursion)
        }
        const auto& logger = handle_->get_configuration().get_log_callback();
        if (logger) {
            try {
                logger(*handle_,
                       static_cast<int>(LogLevel::LOG_ERR),
                       library_name,
                       message);
            }
            catch (...) {} // sink everything
        }
        else {
            rd_kafka_log_print(handle_->get_handle(),
                               static_cast<int>(LogLevel::LOG_ERR),
                               library_name,
                               message.c_str());
        }
    }

    const char* callback_name_;
    const Func& callback_;
    KafkaHandleBase* handle_;
};
}
#endif

View File

@@ -33,6 +33,7 @@
#include <chrono> #include <chrono>
#include <functional> #include <functional>
#include <thread> #include <thread>
#include <string>
#include "../consumer.h" #include "../consumer.h"
#include "backoff_performer.h" #include "backoff_performer.h"
@@ -118,6 +119,7 @@ public:
*/ */
void commit(const TopicPartitionList& topic_partitions); void commit(const TopicPartitionList& topic_partitions);
private: private:
// Return true to abort and false to continue committing
template <typename T> template <typename T>
bool do_commit(const T& object) { bool do_commit(const T& object) {
try { try {
@@ -131,14 +133,12 @@ private:
if (ex.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET) { if (ex.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET) {
return true; return true;
} }
// If there's a callback and it returns false for this message, abort // If there's a callback and it returns false for this message, abort.
if (callback_ && !callback_(ex.get_error())) { // Otherwise keep committing.
return true; CallbackInvoker<ErrorCallback> callback("backoff committer", callback_, &consumer_);
return callback && !callback(ex.get_error());
} }
} }
// In any other case, we failed. Keep committing
return false;
}
Consumer& consumer_; Consumer& consumer_;
ErrorCallback callback_; ErrorCallback callback_;

View File

@@ -364,9 +364,9 @@ void BufferedProducer<BufferType>::flush() {
produce_message(flush_queue.front()); produce_message(flush_queue.front());
} }
catch (const HandleException& ex) { catch (const HandleException& ex) {
if (flush_failure_callback_ && // If we have a flush failure callback and it returns true, we retry producing this message later
flush_failure_callback_(flush_queue.front(), ex.get_error())) { CallbackInvoker<FlushFailureCallback> callback("flush failure", flush_failure_callback_, &producer_);
// retry again later if (callback && callback(flush_queue.front(), ex.get_error())) {
do_add_message(std::move(flush_queue.front()), MessagePriority::Low, false); do_add_message(std::move(flush_queue.front()), MessagePriority::Low, false);
} }
} }
@@ -519,19 +519,18 @@ void BufferedProducer<BufferType>::on_delivery_report(const Message& message) {
--pending_acks_; --pending_acks_;
assert(pending_acks_ != (size_t)-1); // Prevent underflow assert(pending_acks_ != (size_t)-1); // Prevent underflow
// We should produce this message again if it has an error and we either don't have a if (message.get_error()) {
// produce failure callback or we have one but it returns true // We should produce this message again if we don't have a produce failure callback
bool should_produce = message.get_error() && // or we have one but it returns true
(!produce_failure_callback_ || produce_failure_callback_(message)); CallbackInvoker<ProduceFailureCallback> callback("produce failure", produce_failure_callback_, &producer_);
if (should_produce) { if (!callback || callback(message)) {
// Re-enqueue for later retransmission with higher priority (i.e. front of the queue) // Re-enqueue for later retransmission with higher priority (i.e. front of the queue)
do_add_message(Builder(message), MessagePriority::High, false); do_add_message(Builder(message), MessagePriority::High, false);
} }
}
else { else {
// Successful delivery // Successful delivery
if (produce_success_callback_) { CallbackInvoker<ProduceSuccessCallback>("delivery success", produce_success_callback_, &producer_)(message);
produce_success_callback_(message);
}
// Increment the total successful transmissions // Increment the total successful transmissions
++total_messages_produced_; ++total_messages_produced_;
} }

View File

@@ -52,66 +52,56 @@ namespace cppkafka {
void delivery_report_callback_proxy(rd_kafka_t*, const rd_kafka_message_t* msg, void *opaque) { void delivery_report_callback_proxy(rd_kafka_t*, const rd_kafka_message_t* msg, void *opaque) {
Producer* handle = static_cast<Producer*>(opaque); Producer* handle = static_cast<Producer*>(opaque);
Message message = Message::make_non_owning((rd_kafka_message_t*)msg); Message message = Message::make_non_owning((rd_kafka_message_t*)msg);
const auto& callback = handle->get_configuration().get_delivery_report_callback(); CallbackInvoker<Configuration::DeliveryReportCallback>
if (callback) { ("delivery report", handle->get_configuration().get_delivery_report_callback(), handle)
callback(*handle, message); (*handle, message);
}
} }
void offset_commit_callback_proxy(rd_kafka_t*, rd_kafka_resp_err_t err, void offset_commit_callback_proxy(rd_kafka_t*, rd_kafka_resp_err_t err,
rd_kafka_topic_partition_list_t *offsets, void *opaque) { rd_kafka_topic_partition_list_t *offsets, void *opaque) {
Consumer* handle = static_cast<Consumer*>(opaque); Consumer* handle = static_cast<Consumer*>(opaque);
TopicPartitionList list = offsets ? convert(offsets) : TopicPartitionList{}; TopicPartitionList list = offsets ? convert(offsets) : TopicPartitionList{};
const auto& callback = handle->get_configuration().get_offset_commit_callback(); CallbackInvoker<Configuration::OffsetCommitCallback>
if (callback) { ("offset commit", handle->get_configuration().get_offset_commit_callback(), handle)
callback(*handle, err, list); (*handle, err, list);
}
} }
void error_callback_proxy(rd_kafka_t*, int err, const char *reason, void *opaque) { void error_callback_proxy(rd_kafka_t*, int err, const char *reason, void *opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque); KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
const auto& callback = handle->get_configuration().get_error_callback(); CallbackInvoker<Configuration::ErrorCallback>
if (callback) { ("error", handle->get_configuration().get_error_callback(), handle)
callback(*handle, err, reason); (*handle, err, reason);
}
} }
void throttle_callback_proxy(rd_kafka_t*, const char* broker_name, void throttle_callback_proxy(rd_kafka_t*, const char* broker_name,
int32_t broker_id, int throttle_time_ms, void *opaque) { int32_t broker_id, int throttle_time_ms, void *opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque); KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
const auto& callback = handle->get_configuration().get_throttle_callback(); CallbackInvoker<Configuration::ThrottleCallback>
if (callback) { ("throttle", handle->get_configuration().get_throttle_callback(), handle)
callback(*handle, broker_name, broker_id, milliseconds(throttle_time_ms)); (*handle, broker_name, broker_id, milliseconds(throttle_time_ms));
}
} }
void log_callback_proxy(const rd_kafka_t* h, int level, void log_callback_proxy(const rd_kafka_t* h, int level,
const char* facility, const char* message) { const char* facility, const char* message) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(rd_kafka_opaque(h)); KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(rd_kafka_opaque(h));
const auto& callback = handle->get_configuration().get_log_callback(); CallbackInvoker<Configuration::LogCallback>
if (callback) { ("log", handle->get_configuration().get_log_callback(), nullptr)
callback(*handle, level, facility, message); (*handle, level, facility, message);
}
} }
int stats_callback_proxy(rd_kafka_t*, char *json, size_t json_len, void *opaque) { int stats_callback_proxy(rd_kafka_t*, char *json, size_t json_len, void *opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque); KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
const auto& callback = handle->get_configuration().get_stats_callback(); CallbackInvoker<Configuration::StatsCallback>
if (callback) { ("statistics", handle->get_configuration().get_stats_callback(), handle)
callback(*handle, string(json, json + json_len)); (*handle, string(json, json + json_len));
}
return 0; return 0;
} }
int socket_callback_proxy(int domain, int type, int protocol, void* opaque) { int socket_callback_proxy(int domain, int type, int protocol, void* opaque) {
KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque); KafkaHandleBase* handle = static_cast<KafkaHandleBase*>(opaque);
const auto& callback = handle->get_configuration().get_socket_callback(); return CallbackInvoker<Configuration::SocketCallback>
if (callback) { ("socket", handle->get_configuration().get_socket_callback(), handle)
return callback(domain, type, protocol); (domain, type, protocol);
}
else {
return -1;
}
} }
// Configuration // Configuration

View File

@@ -34,6 +34,7 @@
#include "logging.h" #include "logging.h"
#include "configuration.h" #include "configuration.h"
#include "topic_partition_list.h" #include "topic_partition_list.h"
#include "detail/callback_invoker.h"
using std::vector; using std::vector;
using std::string; using std::string;
@@ -79,12 +80,12 @@ Consumer::~Consumer() {
close(); close();
} }
catch (const Exception& ex) { catch (const Exception& ex) {
constexpr const char* library_name = "cppkafka"; const char* library_name = "cppkafka";
ostringstream error_msg; ostringstream error_msg;
error_msg << "Failed to close consumer [" << get_name() << "]: " << ex.what(); error_msg << "Failed to close consumer [" << get_name() << "]: " << ex.what();
const auto& callback = get_configuration().get_log_callback(); CallbackInvoker<Configuration::LogCallback> logger("log", get_configuration().get_log_callback(), nullptr);
if (callback) { if (logger) {
callback(*this, static_cast<int>(LogLevel::LOG_ERR), library_name, error_msg.str()); logger(*this, static_cast<int>(LogLevel::LOG_ERR), library_name, error_msg.str());
} }
else { else {
rd_kafka_log_print(get_handle(), static_cast<int>(LogLevel::LOG_ERR), library_name, error_msg.str().c_str()); rd_kafka_log_print(get_handle(), static_cast<int>(LogLevel::LOG_ERR), library_name, error_msg.str().c_str());
@@ -292,21 +293,15 @@ void Consumer::commit(const TopicPartitionList* topic_partitions, bool async) {
void Consumer::handle_rebalance(rd_kafka_resp_err_t error, void Consumer::handle_rebalance(rd_kafka_resp_err_t error,
TopicPartitionList& topic_partitions) { TopicPartitionList& topic_partitions) {
if (error == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { if (error == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
if (assignment_callback_) { CallbackInvoker<AssignmentCallback>("assignment", assignment_callback_, this)(topic_partitions);
assignment_callback_(topic_partitions);
}
assign(topic_partitions); assign(topic_partitions);
} }
else if (error == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) { else if (error == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) {
if (revocation_callback_) { CallbackInvoker<RevocationCallback>("revocation", revocation_callback_, this)(topic_partitions);
revocation_callback_(topic_partitions);
}
unassign(); unassign();
} }
else { else {
if (rebalance_error_callback_) { CallbackInvoker<RebalanceErrorCallback>("rebalance error", rebalance_error_callback_, this)(error);
rebalance_error_callback_(error);
}
unassign(); unassign();
} }
} }

View File

@@ -33,6 +33,7 @@
#include "exceptions.h" #include "exceptions.h"
#include "topic.h" #include "topic.h"
#include "buffer.h" #include "buffer.h"
#include "detail/callback_invoker.h"
using std::string; using std::string;
using std::map; using std::map;
@@ -49,7 +50,8 @@ int32_t partitioner_callback_proxy(const rd_kafka_topic_t* handle, const void *k
if (callback) { if (callback) {
Topic topic = Topic::make_non_owning(const_cast<rd_kafka_topic_t*>(handle)); Topic topic = Topic::make_non_owning(const_cast<rd_kafka_topic_t*>(handle));
Buffer key(static_cast<const char*>(key_ptr), key_size); Buffer key(static_cast<const char*>(key_ptr), key_size);
return callback(topic, key, partition_count); return CallbackInvoker<TopicConfiguration::PartitionerCallback>("topic partitioner", callback, nullptr)
(topic, key, partition_count);
} }
else { else {
return rd_kafka_msg_partitioner_consistent_random(handle, key_ptr, key_size, return rd_kafka_msg_partitioner_consistent_random(handle, key_ptr, key_size,

View File

@@ -23,6 +23,6 @@ add_executable(
# Main file # Main file
test_main.cpp test_main.cpp
) )
target_link_libraries(cppkafka_tests cppkafka ${RDKAFKA_LIBRARY} pthread) target_link_libraries(cppkafka_tests cppkafka ${RDKAFKA_LIBRARY} pthread rt ssl crypto dl z)
add_dependencies(tests cppkafka_tests) add_dependencies(tests cppkafka_tests)
add_test(cppkafka cppkafka_tests) add_test(cppkafka cppkafka_tests)