Express async_flush in terms of flush since the logic is identical except for the timeout
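For context, here is a minimal usage sketch of the two entry points this commit unifies, adapted from the cppkafka README (the broker address is a placeholder, not part of this change):

#include <cppkafka/utils/buffered_producer.h>
#include <chrono>
#include <string>

int main() {
    // Placeholder broker address; adjust for your cluster.
    cppkafka::Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" }
    };
    cppkafka::BufferedProducer<std::string> producer(config);
    producer.add_message(producer.make_builder("my_topic").payload("some payload"));

    // After this commit, async_flush() simply forwards to
    // flush(std::chrono::milliseconds::zero(), false):
    producer.async_flush();
    // The timed overload blocks up to the timeout; preserve_order == true
    // delivers each message before producing the next.
    producer.flush(std::chrono::milliseconds(1000), /*preserve_order=*/true);
}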
@@ -703,25 +703,7 @@ void BufferedProducer<BufferType, Allocator>::produce(const Message& message) {
 
 template <typename BufferType, typename Allocator>
 void BufferedProducer<BufferType, Allocator>::async_flush() {
-    CounterGuard<size_t> counter_guard(flushes_in_progress_);
-    auto queue_flusher = [this](QueueType& queue, std::mutex & mutex)->void
-    {
-        QueueType flush_queue; // flush from temporary queue
-        swap_queues(queue, flush_queue, mutex);
-
-        while (!flush_queue.empty()) {
-            async_produce(std::move(flush_queue.front()), false);
-            flush_queue.pop_front();
-        }
-    };
-    //Produce retry queue first since these messages were produced first
-    queue_flusher(retry_messages_, retry_mutex_);
-    //Produce recently enqueued messages
-    queue_flusher(messages_, mutex_);
-    // Flush the producer but don't wait. It is necessary to poll
-    // the producer at least once during this operation because
-    // async_produce() will not.
-    wait_for_acks(std::chrono::milliseconds(0));
+    flush(std::chrono::milliseconds::zero(), false);
 }
 
 template <typename BufferType, typename Allocator>
@@ -732,26 +714,34 @@ void BufferedProducer<BufferType, Allocator>::flush(bool preserve_order) {
 template <typename BufferType, typename Allocator>
 bool BufferedProducer<BufferType, Allocator>::flush(std::chrono::milliseconds timeout,
                                                     bool preserve_order) {
-    if (preserve_order) {
-        CounterGuard<size_t> counter_guard(flushes_in_progress_);
-        auto queue_flusher = [&timeout, this](QueueType& queue, std::mutex & mutex)->void
-        {
-            QueueType flush_queue; // flush from temporary queue
-            swap_queues(queue, flush_queue, mutex);
-            //Produce one message at a time and wait for acks until queue is empty
-            while (!flush_queue.empty()) {
-                sync_produce(flush_queue.front(), timeout);
-                flush_queue.pop_front();
-            }
-        };
-        //Produce retry queue first since these messages were produced first
-        queue_flusher(retry_messages_, retry_mutex_);
-        //Produce recently enqueued messages
-        queue_flusher(messages_, mutex_);
-    }
-    else {
-        //Produce all messages at once then wait for acks.
-        async_flush();
+    CounterGuard<size_t> counter_guard(flushes_in_progress_);
+    auto queue_flusher = [timeout, preserve_order, this]
+                         (QueueType& queue, std::mutex & mutex)->void
+    {
+        QueueType flush_queue; // flush from temporary queue
+        swap_queues(queue, flush_queue, mutex);
+        //Produce one message at a time and wait for acks until queue is empty
+        while (!flush_queue.empty()) {
+            if (preserve_order) {
+                //When preserving order, we must ensure that each message
+                //gets delivered before producing the next one.
+                sync_produce(flush_queue.front(), timeout);
+            }
+            else {
+                //Produce as fast as possible w/o waiting. If one or more
+                //messages fail, they will be re-enqueued for retry
+                //on the next flush cycle, which causes re-ordering.
+                async_produce(flush_queue.front(), false);
+            }
+            flush_queue.pop_front();
+        }
+    };
+    //Produce retry queue first since these messages were produced first.
+    queue_flusher(retry_messages_, retry_mutex_);
+    //Produce recently enqueued messages
+    queue_flusher(messages_, mutex_);
+    if (!preserve_order) {
+        //Wait for acks from the messages produced above via async_produce
         wait_for_acks(timeout);
     }
     return pending_acks_ == 0;
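
The queue_flusher lambda above depends on swap_queues() moving the shared queue into a local one while the mutex is held, so the production loop itself runs without the lock. A generic sketch of that drain pattern; the drain() helper is illustrative and not part of cppkafka:

#include <deque>
#include <mutex>

// Swap the shared queue into a local copy under the lock; the caller then
// drains the local copy without blocking threads that keep enqueuing.
template <typename T>
std::deque<T> drain(std::deque<T>& shared, std::mutex& mtx) {
    std::deque<T> local;
    {
        std::lock_guard<std::mutex> lock(mtx); // lock held only for the swap
        local.swap(shared);
    }
    return local;
}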
@@ -759,20 +749,7 @@ bool BufferedProducer<BufferType, Allocator>::flush(std::chrono::milliseconds ti
 
 template <typename BufferType, typename Allocator>
 void BufferedProducer<BufferType, Allocator>::wait_for_acks() {
-    while (pending_acks_ > 0) {
-        try {
-            producer_.flush();
-        }
-        catch (const HandleException& ex) {
-            // If we just hit the timeout, keep going, otherwise re-throw
-            if (ex.get_error() == RD_KAFKA_RESP_ERR__TIMED_OUT) {
-                continue;
-            }
-            else {
-                throw;
-            }
-        }
-    }
+    wait_for_acks(producer_.get_timeout());
 }
 
 template <typename BufferType, typename Allocator>
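
Since flush() returns pending_acks_ == 0, a caller can tell whether the flush completed within the timeout. A sketch of that check; flush_with_deadline() is illustrative, not part of the library:

#include <cppkafka/utils/buffered_producer.h>
#include <chrono>
#include <string>

// Illustrative helper: flush, then block for stragglers if needed.
bool flush_with_deadline(cppkafka::BufferedProducer<std::string>& producer) {
    bool all_acked = producer.flush(std::chrono::milliseconds(500),
                                    /*preserve_order=*/false);
    if (!all_acked) {
        // Some deliveries are still pending or were re-enqueued for retry.
        // Per the hunk above, the no-argument wait_for_acks() now delegates
        // to the timed overload using the producer's default timeout.
        producer.wait_for_acks();
    }
    return all_acked;
}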