// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ------------------------------------------------------------------
// Modification history:
// feature: listener code decoupling
// feature: support for request discovery
// feature: change E2E packet format
// feature: heartbeat optimize
// feature: refactoring dispatcher discovery
// feature: RemoteEndpointInfo support reliabilityKind
// feature: develop dispatcher
// feature: content filter topic
// feature: Split history as an independent common module
// feature: Unified Transport Abstraction Layer
// feature: discovery support client and server
// feature: interval report metrics
// feature: add statistic lookup
// feature: Crop code for in-process communication
// feature: Development of new requirements for E2E functionality
// feature: E2E Profile04 and Configuration Function Development
// ------------------------------------------------------------------

#include <edds/rtps/writer/StatefulWriter.h>

#include <history/RemoteEndpointInfo.h>
#include <history/WriterListener.h>
#include <edds/rtps/writer/ReaderProxy.h>
#include <history/WriterHistory.h>

#include <rtps/participant/RTPSParticipantImpl.h>

#include <edds/rtps/messages/RTPSMessageCreator.h>
#include <edds/rtps/messages/RTPSMessageGroup.h>

#include <edds/rtps/participant/RTPSParticipant.h>
#include <deps/resources/ResourceEvent.h>
#include <deps/resources/TimedEvent.h>
#include <ertps/utils/LocatorAlias.h>

#include <ertps/utils/TimeConversion.h>

#include <edds/rtps/builtin/BuiltinProtocols.h>
#include <edds/rtps/builtin/liveliness/WLP.h>

#include <history/CacheChangePool.h>
#include <ecdr/exceptions/BadParamException.h>

#include <mutex>
#include <vector>
#include <stdexcept>

#include <history/BasicPayloadPool.hpp>

#include <elog/Log.hpp>

#include <rtps/RTPSDomainImpl.hpp>
#include <rtps/messages/RTPSGapBuilder.hpp>
#include <rtps/network/ExternalLocatorsProcessor.hpp>

#include <edds/rtps/interfaces/ContentFilterUtils.hpp>

#include "../flowcontrol/FlowController.hpp"

#if !defined(_WIN32)

#include <StaticTracepoint.h>
#include <tracepoint/TracePointManager.hpp>
#include <deps/common/SystemInfo.hpp>

#define SEC(name) __attribute__((section(name), used))
unsigned short ertps_ertps_ful_writer_udp_send_semaphore SEC(".probes");

using evbs::ertps::rtps::c_Version_vbs;
using evbs::ertps::rtps::ReaderProxy;
/*
 * Emits a user-level static tracepoint (USDT) for a cache change that a stateful
 * writer is about to send, according to the effective trace-point status.
 *
 * The effective status is the larger of the supplied status and the persistent
 * per-topic status kept by TracePointManager (when the latter is valid); the
 * effective value is recorded on the change itself.
 */
void usdt_in_stateful_writer(CacheChange_t* change, const std::string& topic_name, uint8_t trace_point_status,
                             uint32_t domain_id, const ResourceLimitedVector<ReaderProxy*>& matched_remote_readers) {
    const auto persistent_status =
        vbsutil::tracepoint::TracePointManager::get_persistent_trace_point_status(topic_name);
    if ((vbsutil::tracepoint::INVALID != persistent_status) && (persistent_status > trace_point_status)) {
        trace_point_status = persistent_status;
    }
    change->trace_point_status = trace_point_status;

    // Pack the 4 version bytes into a little-endian 32-bit word (used by both
    // the ON_ENTRANCE and ON_EXPORT paths; the computation is side-effect free).
    const uint32_t vbs_version = static_cast<uint32_t>(c_Version_vbs[0]) |
                                 (static_cast<uint32_t>(c_Version_vbs[1]) << 8) |
                                 (static_cast<uint32_t>(c_Version_vbs[2]) << 16) |
                                 (static_cast<uint32_t>(c_Version_vbs[3]) << 24);

    switch (trace_point_status) {
        case vbsutil::tracepoint::OFF:
            logDebug(RTPS_WRITER, "Trace point status is OFF");
            break;
        case vbsutil::tracepoint::ON_ENTRANCE: {
            // Only fire when a trace consumer is attached (USDT semaphore non-zero).
            if (!ertps_ertps_ful_writer_udp_send_semaphore) {
                break;
            }
            logDebug(RTPS_WRITER, "Trace point status is ON_ENTRANCE");
            if (matched_remote_readers.size() == 0) {
                break;
            }
            const uint64_t seq_num = change->sequenceNumber.to64long();
            const uint64_t timestamp = change->sourceTimestamp.to_ns();
            octet* payload = change->serializedPayload.data;
            const uint32_t payload_len = change->serializedPayload.length;

            // Rebuild the remote IPv4 address from the last 4 bytes of the first
            // unicast locator of the first matched remote reader.
            auto locator_addr = matched_remote_readers[0]->general_locator_selector_entry()->unicast[0].address;
            const uint32_t remote_ip =
                static_cast<uint32_t>(locator_addr[12]) | (static_cast<uint32_t>(locator_addr[13]) << 8) |
                (static_cast<uint32_t>(locator_addr[14]) << 16) | (static_cast<uint32_t>(locator_addr[15]) << 24);
            uint32_t src_ip = 0;
            uint32_t mask_len = 0;
            if (vbsutil::tracepoint::TracePointManager::find_same_subnet_ip(remote_ip, src_ip, mask_len)) {
                // A concrete destination is only meaningful with exactly one remote reader.
                const uint32_t dst_ip = (matched_remote_readers.size() == 1) ? remote_ip : 0;
                vbsutil::tracepoint::UsdtInfo usdt_info = {
                    seq_num, timestamp, vbs_version, domain_id,
                    src_ip,  dst_ip,    mask_len,    vbsutil::tracepoint::ROLE_PUB};
                FOLLY_SDT_WITH_SEMAPHORE4(ertps, ertps_ful_writer_udp_send,
                                          reinterpret_cast<unsigned char*>(&usdt_info), topic_name.c_str(), payload,
                                          payload_len);
            }
        } break;
        case vbsutil::tracepoint::ON_EXPORT:
            logDebug(RTPS_WRITER, "Trace point status is ON_EXPORT");
            // Stamp topic and version on the change so a later stage can emit the trace.
            change->topic_name = topic_name;
            change->vbs_version = vbs_version;
            break;
        case vbsutil::tracepoint::DISABLE:
            logDebug(RTPS_WRITER, "Trace point status is DISABLE");
            break;
        case vbsutil::tracepoint::INVALID:
            logDebug(RTPS_WRITER, "Trace point status is INVALID");
            break;
    }
}
#endif

using evbs::ertps::rtps::DeliveryRetCode;
using evbs::ertps::rtps::RTPSMessageGroup;
using namespace vbs::common;
namespace evbs {
namespace ertps {
namespace rtps {

using ReaderProxyIterator = ResourceLimitedVector<ReaderProxy*>::iterator;
using edds::rtps::ExternalLocatorsProcessor::filter_remote_locators;
/**
 * Loops over all the readers in the vector, applying the given routine.
 * The loop continues until the result of the routine is true for any reader
 * or all readers have been processed.
 * The returned value is true if the routine returned true at any point,
 * or false otherwise.
 */
/// Applies @c fun to every reader proxy in @c reader_vector_1, stopping at the
/// first proxy for which it returns true.
/// @return true when @c fun returned true for some proxy, false otherwise.
bool for_matched_readers(ResourceLimitedVector<ReaderProxy*>& reader_vector_1, std::function<bool(ReaderProxy*)> fun) {
    auto it = reader_vector_1.begin();
    const auto it_end = reader_vector_1.end();
    while (it != it_end) {
        if (fun(*it)) {
            return true;
        }
        ++it;
    }
    return false;
}

/// Two-vector overload: visits @c reader_vector_2 only when no proxy of
/// @c reader_vector_1 made @c fun return true (short-circuit evaluation).
bool for_matched_readers(ResourceLimitedVector<ReaderProxy*>& reader_vector_1,
                         ResourceLimitedVector<ReaderProxy*>& reader_vector_2, std::function<bool(ReaderProxy*)> fun) {
    return for_matched_readers(reader_vector_1, fun) || for_matched_readers(reader_vector_2, std::move(fun));
}

/// Three-vector overload: vectors are visited in order, stopping as soon as
/// @c fun returns true for any proxy (short-circuit evaluation).
bool for_matched_readers(ResourceLimitedVector<ReaderProxy*>& reader_vector_1,
                         ResourceLimitedVector<ReaderProxy*>& reader_vector_2,
                         ResourceLimitedVector<ReaderProxy*>& reader_vector_3, std::function<bool(ReaderProxy*)> fun) {
    return for_matched_readers(reader_vector_1, reader_vector_2, fun) ||
           for_matched_readers(reader_vector_3, std::move(fun));
}

/**
 * Loops over all the readers in the vector, applying the given routine.
 * The loop continues until the result of the routine is true for any reader
 * or all readers have been processed.
 * The returned value is true if the routine returned true at any point,
 * or false otherwise.
 *
 * const version
 */
bool for_matched_readers(const ResourceLimitedVector<ReaderProxy*>& reader_vector_1,
                         std::function<bool(const ReaderProxy*)> fun) {
    for (const ReaderProxy* remote_reader : reader_vector_1) {
        if (fun(remote_reader)) {
            return true;
        }
    }

    return false;
}

/// Const two-vector overload: visits the second vector only when the first
/// produced no match (short-circuit evaluation).
bool for_matched_readers(const ResourceLimitedVector<ReaderProxy*>& reader_vector_1,
                         const ResourceLimitedVector<ReaderProxy*>& reader_vector_2,
                         std::function<bool(const ReaderProxy*)> fun) {
    return for_matched_readers(reader_vector_1, fun) || for_matched_readers(reader_vector_2, std::move(fun));
}

/// Const three-vector overload: vectors are visited in order, stopping as soon
/// as @c fun returns true for any proxy (short-circuit evaluation).
bool for_matched_readers(const ResourceLimitedVector<ReaderProxy*>& reader_vector_1,
                         const ResourceLimitedVector<ReaderProxy*>& reader_vector_2,
                         const ResourceLimitedVector<ReaderProxy*>& reader_vector_3,
                         std::function<bool(const ReaderProxy*)> fun) {
    return for_matched_readers(reader_vector_1, reader_vector_2, fun) ||
           for_matched_readers(reader_vector_3, std::move(fun));
}

using std::chrono::duration_cast;
using std::chrono::hours;
using std::chrono::microseconds;
using std::chrono::milliseconds;
using std::chrono::nanoseconds;
using std::chrono::steady_clock;
using std::chrono::system_clock;
using std::chrono::time_point;

/**
 * @brief Stateful (reliable) RTPS writer constructor using the default pools.
 *
 * @param pimpl           Owning participant implementation.
 * @param guid            GUID assigned to this writer.
 * @param att             Writer attributes (timing, allocations, watermarks, ...).
 * @param flow_controller Flow controller that schedules sample delivery.
 * @param history         Writer history served by this writer.
 * @param listener        Listener notified of writer events (may be nullptr).
 */
StatefulWriter::StatefulWriter(RTPSParticipantImpl* pimpl, const GUID_t& guid, const WriterAttributes& att,
                               edds::rtps::FlowController* flow_controller, WriterHistory* history,
                               WriterListener* listener)
    : RTPSWriter(pimpl, guid, att, flow_controller, history, listener),
      periodic_hb_event_(nullptr),    // created in init()
      nack_response_event_(nullptr),  // created in init()
      ack_event_(nullptr),            // conditionally created in init()
      m_heartbeatCount(0U),
      m_times(att.times),
      matched_remote_readers_(att.matched_readers_allocation),
      matched_readers_pool_(att.matched_readers_allocation),
      next_all_acked_notify_sequence_(0, 1U),
      all_acked_(false),
      may_remove_change_cond_(),
      may_remove_change_(0U),
      disable_heartbeat_piggyback_(att.disable_heartbeat_piggyback),
      disable_positive_acks_(att.disable_positive_acks),
      keep_duration_us_(att.keep_duration.to_ns() * 1e-3),  // nanoseconds -> microseconds
      last_sequence_number_(),
      biggest_removed_sequence_number_(),
      sendBufferSize_(pimpl->get_min_network_send_buffer_size()),
      currentUsageSendBufferSize_(static_cast<int32_t>(pimpl->get_min_network_send_buffer_size())),
#ifdef INTRA_PROCESS_ENABLE
      matched_local_readers_(att.matched_readers_allocation),
#endif
      locator_selector_general_(*this, att.matched_readers_allocation),
      locator_selector_async_(*this, att.matched_readers_allocation),
      high_watermark(att.high_watermark),
      low_watermark(att.low_watermark) {
    init(pimpl, att);
}

/**
 * @brief Stateful writer constructor with a caller-supplied payload pool.
 *
 * Same as the default-pool constructor, but payloads are taken from
 * @c payload_pool instead of a pool created internally.
 */
StatefulWriter::StatefulWriter(RTPSParticipantImpl* pimpl, const GUID_t& guid, const WriterAttributes& att,
                               const std::shared_ptr<IPayloadPool>& payload_pool,
                               edds::rtps::FlowController* flow_controller, WriterHistory* history,
                               WriterListener* listener)
    : RTPSWriter(pimpl, guid, att, payload_pool, flow_controller, history, listener),
      periodic_hb_event_(nullptr),    // created in init()
      nack_response_event_(nullptr),  // created in init()
      ack_event_(nullptr),            // conditionally created in init()
      m_heartbeatCount(0U),
      m_times(att.times),
      matched_remote_readers_(att.matched_readers_allocation),
      matched_readers_pool_(att.matched_readers_allocation),
      next_all_acked_notify_sequence_(0, 1U),
      all_acked_(false),
      may_remove_change_cond_(),
      may_remove_change_(0U),
      disable_heartbeat_piggyback_(att.disable_heartbeat_piggyback),
      disable_positive_acks_(att.disable_positive_acks),
      keep_duration_us_(att.keep_duration.to_ns() * 1e-3),  // nanoseconds -> microseconds
      last_sequence_number_(),
      biggest_removed_sequence_number_(),
      sendBufferSize_(pimpl->get_min_network_send_buffer_size()),
      currentUsageSendBufferSize_(static_cast<int32_t>(pimpl->get_min_network_send_buffer_size())),
#ifdef INTRA_PROCESS_ENABLE
      matched_local_readers_(att.matched_readers_allocation),
#endif
      locator_selector_general_(*this, att.matched_readers_allocation),
      locator_selector_async_(*this, att.matched_readers_allocation),
      high_watermark(att.high_watermark),
      low_watermark(att.low_watermark) {
    init(pimpl, att);
}

/**
 * @brief Stateful writer constructor with caller-supplied payload and change pools.
 *
 * Same as the default-pool constructor, but both payloads and cache changes are
 * taken from the provided pools.
 */
StatefulWriter::StatefulWriter(RTPSParticipantImpl* pimpl, const GUID_t& guid, const WriterAttributes& att,
                               const std::shared_ptr<IPayloadPool>& payload_pool,
                               const std::shared_ptr<IChangePool>& change_pool,
                               edds::rtps::FlowController* flow_controller, WriterHistory* hist, WriterListener* listen)
    : RTPSWriter(pimpl, guid, att, payload_pool, change_pool, flow_controller, hist, listen),
      periodic_hb_event_(nullptr),    // created in init()
      nack_response_event_(nullptr),  // created in init()
      ack_event_(nullptr),            // conditionally created in init()
      m_heartbeatCount(0U),
      m_times(att.times),
      matched_remote_readers_(att.matched_readers_allocation),
      matched_readers_pool_(att.matched_readers_allocation),
      next_all_acked_notify_sequence_(0, 1U),
      all_acked_(false),
      may_remove_change_cond_(),
      may_remove_change_(0U),
      disable_heartbeat_piggyback_(att.disable_heartbeat_piggyback),
      disable_positive_acks_(att.disable_positive_acks),
      keep_duration_us_(att.keep_duration.to_ns() * 1e-3),  // nanoseconds -> microseconds
      last_sequence_number_(),
      biggest_removed_sequence_number_(),
      sendBufferSize_(pimpl->get_min_network_send_buffer_size()),
      currentUsageSendBufferSize_(static_cast<int32_t>(pimpl->get_min_network_send_buffer_size())),
#ifdef INTRA_PROCESS_ENABLE
      matched_local_readers_(att.matched_readers_allocation),
#endif
      locator_selector_general_(*this, att.matched_readers_allocation),
      locator_selector_async_(*this, att.matched_readers_allocation),
      high_watermark(att.high_watermark),
      low_watermark(att.low_watermark) {
    init(pimpl, att);
}

/**
 * @brief Shared second-phase construction: timers, watermarks and the initial
 *        pool of reader proxies.
 *
 * Called by every constructor after the member initializer list has run.
 */
void StatefulWriter::init(RTPSParticipantImpl* pimpl, const WriterAttributes& att) {
    const RTPSParticipantAttributes& part_att = pimpl->getRTPSParticipantAttributes();

    // 100: when more than 100 samples can be reserved and the watermarks were not
    // explicitly configured (-1), derive adaptive heartbeat watermarks from the
    // history size (high = 1/2, low = 1/10 of maximumReservedCaches).
    if (mp_history->m_att.maximumReservedCaches > 100) {
        if (high_watermark == -1) {
            high_watermark = mp_history->m_att.maximumReservedCaches / 2;
        }

        if (low_watermark == -1) {
            low_watermark = mp_history->m_att.maximumReservedCaches / 10;
        }
    }

    // Periodic HEARTBEAT announcing the writer's available sequence numbers.
    periodic_hb_event_ = new TimedEvent(
        pimpl->getEventResource(), [&]() -> bool { return send_periodic_heartbeat(); },
        TimeConv::Time_t2MilliSecondsDouble(m_times.heartbeatPeriod));

    // Delayed response to incoming ACKNACKs (nackResponseDelay).
    nack_response_event_ = new TimedEvent(
        pimpl->getEventResource(),
        [&]() -> bool {
            perform_nack_response();
            return false;
        },
        TimeConv::Time_t2MilliSecondsDouble(m_times.nackResponseDelay));

    // The ack timer is only needed when positive acks are disabled, or for an
    // EDP writer of a discovery-compatible participant.
    if (disable_positive_acks_ || (part_att.builtin.discovery_config.is_compatible && this->m_guid.is_edp_writer())) {
        ack_event_ = new TimedEvent(
            pimpl->getEventResource(), [&]() -> bool { return ack_timer_expired(); },
            att.keep_duration.to_ns() * 1e-6);  // in milliseconds
    }

    // Non-builtin writers with automatic memory mode periodically release
    // payloads above mem_watermark; period is at least 30 s.
    if ((!this->m_guid.is_builtin()) && mp_history->autoMemoryMode) {
        mem_watermark = mp_history->m_att.maximumReservedCaches > 0 ? mp_history->m_att.maximumReservedCaches : 5000;
        auto period = TimeConv::Time_t2MilliSecondsDouble(m_times.heartbeatPeriod) > 30000
                          ? TimeConv::Time_t2MilliSecondsDouble(m_times.heartbeatPeriod) * 5
                          : 30000;
        free_event_ = new TimedEvent(
            pimpl->getEventResource(), [&]() -> bool { return payload_pool_->free_payload(mem_watermark); }, period);
        free_event_->restart_timer();
    } else {
        free_event_ = nullptr;
    }

    // Pre-allocate the initial pool of reader proxies.
    for (size_t n = 0U; n < att.matched_readers_allocation.initial; ++n) {
        (void)matched_readers_pool_.push_back(new ReaderProxy(m_times, part_att.allocation.locators, this));
    }

    // 0 disables the samples-per-heartbeat mechanism.
    if (att.heartbeat_per_max_samples == 0) {
        samples_per_hb = 0U;
    } else {
        samples_per_hb = static_cast<uint32_t>(mp_history->m_att.maximumReservedCaches / att.heartbeat_per_max_samples);
    }
}

/**
 * @brief Destructor. Tears down timed events, flushes pending async work and
 *        returns every reader proxy to the pool before deleting it.
 *
 * The teardown order is deliberate: event callbacks reference cache changes
 * and reader proxies, so each resource is released only after everything that
 * can still touch it has been stopped.
 */
StatefulWriter::~StatefulWriter() {
    logDebug(RTPS_WRITER, "StatefulWriter in topic " << getTopicName() << " destructor");

    // Disable timed events, because their callbacks use cache changes
    if (ack_event_ != nullptr) {
        delete (ack_event_);
        ack_event_ = nullptr;
    }

    if (nack_response_event_ != nullptr) {
        delete (nack_response_event_);
        nack_response_event_ = nullptr;
    }

    // This must be the next action, as it frees CacheChange_t from the async thread.
    deinit();

    // Stop all active proxies and pass them to the pool
    {
        std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
        while (!matched_remote_readers_.empty()) {
            ReaderProxy* remote_reader = matched_remote_readers_.back();
            matched_remote_readers_.pop_back();
            remote_reader->stop();
            (void)matched_readers_pool_.push_back(remote_reader);
        }
#ifdef INTRA_PROCESS_ENABLE
        while (!matched_local_readers_.empty()) {
            ReaderProxy* remote_reader = matched_local_readers_.back();
            matched_local_readers_.pop_back();
            remote_reader->stop();
            (void)matched_readers_pool_.push_back(remote_reader);
        }
#endif
    }

    // PeriodicHeartbeatEvent must be released after releasing all proxies
    // because proxy's NackSuppressionEvent could restart this event.
    if (periodic_hb_event_ != nullptr) {
        delete (periodic_hb_event_);
        periodic_hb_event_ = nullptr;
    }

    if (free_event_ != nullptr) {
        delete (free_event_);
        free_event_ = nullptr;
    }
    // Delete all proxies in the pool
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    for (ReaderProxy* remote_reader : matched_readers_pool_) {
        delete (remote_reader);
    }
    matched_readers_pool_.clear();
}

/**
 * @brief Called when a new change has been added to the writer history and is
 *        still unsent.
 *
 * Registers the change on every matched remote reader proxy, hands it to the
 * flow controller when at least one reader should receive it, and (re)arms the
 * ack/heartbeat timers as required.
 *
 * @param change            Cache change just added to the history.
 * @param max_blocking_time Deadline for any blocking operation performed here.
 */
void StatefulWriter::unsent_change_added_to_history(
    CacheChange_t* change, const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time,
    bool /*in_history*/) {
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    bool add_sample = false;
    bool is_request_enable = false;
    // A non-unknown readerGUID prefix means the change targets specific readers only.
    bool specified_reader = (change->readerGUID.guidPrefix != c_GuidPrefix_Unknown);
    auto payload_length = change->serializedPayload.length;

    if (liveliness_lease_duration_ < c_TimeInfinite) {
        (void)mp_RTPSParticipant->wlp()->assert_liveliness(getGuid(), liveliness_kind_, liveliness_lease_duration_);
    }
    // Now for the rest of readers.
    // NOTE: matched_local_readers_ only exists when INTRA_PROCESS_ENABLE is
    // defined, so it must not be referenced in the non-intra-process build.
#ifdef INTRA_PROCESS_ENABLE
    if (!matched_remote_readers_.empty() || !matched_local_readers_.empty()) {
#else
    if (!matched_remote_readers_.empty()) {
#endif
        (void)for_matched_readers(matched_remote_readers_, [this, &change, &max_blocking_time, &add_sample,
                                                            &specified_reader,
                                                            &is_request_enable](ReaderProxy* reader) {
            // Specify readers by guidPrefix to send.
            if (specified_reader && (reader->guid().guidPrefix != change->readerGUID.guidPrefix)) {
                return false;
            }
#ifdef BATCH_SEND_ENABLE
            if (change->batch_send) {
                // Register every batched change on the proxy, never as relevant here.
                for (auto it = change->batch_changes.begin(); it != change->batch_changes.end(); it++) {
                    ChangeForReader_t changeForReader(*it);
                    reader->add_change(changeForReader, false, false, max_blocking_time);
                }
            } else
#endif
            {
                ChangeForReader_t changeForReader(change);
                bool is_relevant = reader->rtps_is_relevant(change);
                // ChangeForReader_t construct sets status to UNSENT.
                if (reader->is_request_enable()) {
                    is_request_enable = true;
                    // When request mode is enabled, mark the change UNACKNOWLEDGED
                    // and wait for the reader to request it explicitly.
                    changeForReader.setStatus(UNACKNOWLEDGED);
                    logDebug(RTPS_WRITER,
                             "request is enable, only to add change to reader guid: " << reader->guid());
                }

                // Request-enabled reliable remote readers do not trigger an
                // immediate delivery through the flow controller.
                if (!reader->is_request_enable() || !reader->is_reliable() || reader->is_local_reader()) {
                    add_sample |= is_relevant;
                }

                reader->add_change(changeForReader, is_relevant, false, max_blocking_time);
            }
            return false;
        });

        if (disable_positive_acks_ || is_request_enable) {
            auto source_timestamp = system_clock::time_point() + nanoseconds(change->sourceTimestamp.to_ns());
            auto now = system_clock::now();
            auto interval =
                disable_positive_acks_ ? (source_timestamp - now + keep_duration_us_) : (source_timestamp - now);
            assert(interval.count() >= 0);

            // init() only creates ack_event_ when positive acks are disabled or
            // for a compatible EDP writer; guard against a request-enabled reader
            // reaching this point without the timer.
            if (ack_event_ != nullptr) {
                (void)ack_event_->update_interval_millisec((double_t)duration_cast<milliseconds>(interval).count());
                ack_event_->restart_timer(max_blocking_time);
            }
        }
#if !defined(_WIN32)
        usdt_in_stateful_writer(change, getTopicName(), getTracePointStatus(), getRTPSParticipant()->get_domain_id(),
                                matched_remote_readers_);
#endif
        if (add_sample) {
            // After adding the CacheChange_t to flowcontroller, its pointer cannot be used because it may be removed
            // internally before exiting the call. For example if the writer matched with a best-effort reader.
            flow_controller_->add_new_sample(this, change, max_blocking_time);
        } else {
            periodic_hb_event_->restart_timer(max_blocking_time);
        }
    } else {
        logDebug(RTPS_WRITER, "No reader proxy to add change.");
        check_acked_status();
    }

#if defined(EDDS_METRICS)
    on_send_frequency(ReliabilityKind_t::RELIABLE, getGuid());
    on_send_throughput(ReliabilityKind_t::RELIABLE, getGuid(), payload_length);
#else
    (void)payload_length;
#endif
}

#ifdef INTRA_PROCESS_ENABLE
/**
 * @brief Delivers a cache change directly to a reader living in the same process.
 *
 * When the change carries a related sample identity, it is copied into the
 * sample identity before delivery.
 *
 * @return The result of the reader's processDataMsg(), or false when the proxy
 *         has no local reader.
 */
bool StatefulWriter::intraprocess_delivery(CacheChange_t* change, ReaderProxy* reader_proxy) {
    RTPSReader* local_reader = reader_proxy->local_reader();
    if (nullptr == local_reader) {
        return false;
    }
    if (change->write_params.related_sample_identity() != SampleIdentity::unknown()) {
        (void)change->write_params.sample_identity(change->write_params.related_sample_identity());
    }
    return local_reader->processDataMsg(change);
}

/**
 * @brief Forwards a GAP message directly to the in-process reader associated
 *        with the given proxy.
 *
 * @return The result of the reader's processGapMsg(), or false when the proxy
 *         has no local reader.
 */
bool StatefulWriter::intraprocess_gap(ReaderProxy* reader_proxy, const SequenceNumber_t& first_seq,
                                      const SequenceNumber_t& last_seq) {
    RTPSReader* local_reader = reader_proxy->local_reader();
    return (nullptr != local_reader) && local_reader->processGapMsg(m_guid, first_seq, SequenceNumberSet_t(last_seq));
}

/**
 * @brief Sends a HEARTBEAT directly to an in-process reader.
 *
 * @param reader_proxy Proxy identifying the destination reader.
 * @param liveliness   Whether this is a liveliness heartbeat.
 * @return The result of the reader's processHeartbeatMsg(), or false when no
 *         local reader is found or there is nothing to announce.
 */
bool StatefulWriter::intraprocess_heartbeat(ReaderProxy* reader_proxy, bool liveliness) {
    std::lock_guard<RecursiveTimedMutex> guardW(mp_mutex);
    RTPSReader* local_reader = RTPSDomainImpl::find_local_reader(reader_proxy->guid());
    if (nullptr == local_reader) {
        return false;
    }

    SequenceNumber_t first_seq = get_seq_num_min();
    SequenceNumber_t last_seq = get_seq_num_max();

    // With an empty history, a liveliness heartbeat advertises an empty range
    // anchored at the next sequence number to be used.
    if (liveliness && ((c_SequenceNumber_Unknown == first_seq) || (c_SequenceNumber_Unknown == last_seq))) {
        first_seq = next_sequence_number();
        last_seq = first_seq - 1;
    }

    if ((c_SequenceNumber_Unknown == first_seq) || (c_SequenceNumber_Unknown == last_seq)) {
        return false;
    }
    if (!liveliness && !reader_proxy->has_changes()) {
        return false;
    }

    incrementHBCount();
    return local_reader->processHeartbeatMsg(m_guid, m_heartbeatCount, first_seq, last_seq, true, liveliness);
}
#endif

/**
 * @brief Called by the history when @c change is being removed from it.
 *
 * Detaches the change from the flow controller, records the biggest removed
 * sequence number (used to build GAP messages), invalidates the change in all
 * reader proxies and notifies any thread waiting for removable changes.
 *
 * @param change Change being removed from the history.
 * @return Always true.
 */
bool StatefulWriter::change_removed_by_history(CacheChange_t* const change) {
    SequenceNumber_t sequence_number = change->sequenceNumber;

    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    logDebug(RTPS_WRITER, "Change " << sequence_number << " to be removed.");

    // The flow controller must not keep a pointer to a change about to vanish.
    flow_controller_->remove_change(change);

    // Take note of biggest removed sequence number to improve sending of gaps
    if (sequence_number > biggest_removed_sequence_number_) {
        biggest_removed_sequence_number_ = sequence_number;
    }
#ifdef INTRA_PROCESS_ENABLE
    // Invalidate CacheChange pointer in ReaderProxies.
    (void)for_matched_readers(matched_local_readers_, matched_remote_readers_, [sequence_number](ReaderProxy* reader) {
#else
    (void)for_matched_readers(matched_remote_readers_, [sequence_number](ReaderProxy* reader) {
#endif
        reader->change_has_been_removed(sequence_number);
        return false;
    });

    // Signal waiters on may_remove_change_cond_ that a change was removed.
    may_remove_change_ = 2U;
    may_remove_change_cond_.notify_one();

    return true;
}

#ifdef INTRA_PROCESS_ENABLE
/**
 * @brief Delivers a cache change to every matched in-process reader that has
 *        not received it yet.
 *
 * For each local proxy with the change still unsent: a personal GAP is sent
 * first when there is a hole before this change, then the change itself is
 * delivered; best-effort readers are acked immediately, while reliable readers
 * get a heartbeat and the change status updated according to the delivery result.
 *
 * @param change Change to deliver.
 */
void StatefulWriter::deliver_sample_to_intraprocesses(CacheChange_t* change) {
    for (ReaderProxy* remoteReader : matched_local_readers_) {
        SequenceNumber_t gap_seq;
        FragmentNumber_t dummy = 0U;  // fragments are irrelevant for intra-process delivery
        bool dumb = false;
        if (remoteReader->change_is_unsent(change->sequenceNumber, dummy, gap_seq, get_seq_num_min(), dumb)) {
            // If there is a hole (removed from history or not relevants) between previous sample
            // and this one, send it a personal GAP.
            if (SequenceNumber_t::unknown() != gap_seq) {
                (void)intraprocess_gap(remoteReader, gap_seq, change->sequenceNumber);
                remoteReader->acked_changes_set(change->sequenceNumber);
            }
            bool delivered = intraprocess_delivery(change, remoteReader);
            if (!remoteReader->is_reliable()) {
                // Best-effort readers never acknowledge: mark the change acked right away.
                remoteReader->acked_changes_set(change->sequenceNumber + 1U);
            } else {
                (void)intraprocess_heartbeat(remoteReader, false);
                remoteReader->from_unsent_to_status(change->sequenceNumber, delivered ? ACKNOWLEDGED : UNACKNOWLEDGED,
                                                    false, delivered);
            }
        }
    }
}
#endif
DeliveryRetCode StatefulWriter::deliver_sample_to_network(
    CacheChange_t* change, RTPSMessageGroup& group,
    LocatorSelectorSender& locator_selector,  // Object locked by FlowControllerImpl
    const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time) {
    DeliveryRetCode ret_code = DeliveryRetCode::DELIVERED;
    uint32_t n_fragments = change->getFragmentCount();
    FragmentNumber_t min_unsent_fragment = 0U;
    bool need_reactivate_periodic_heartbeat = false;

    while ((DeliveryRetCode::DELIVERED == ret_code) && (min_unsent_fragment != n_fragments + 1U)) {
        SequenceNumber_t gap_seq_for_all = SequenceNumber_t::unknown();
        locator_selector.locator_selector.reset(false);
        auto first_relevant_reader = matched_remote_readers_.begin();
        bool should_be_sent = false;
        min_unsent_fragment = n_fragments + 1U;

        for (auto remote_reader = first_relevant_reader; remote_reader != matched_remote_readers_.end();
             ++remote_reader) {
            SequenceNumber_t gap_seq;
            FragmentNumber_t next_unsent_frag = 0U;
            if ((*remote_reader)
                    ->change_is_unsent(change->sequenceNumber, next_unsent_frag, gap_seq, get_seq_num_min(),
                                       need_reactivate_periodic_heartbeat) &&
                ((0U == n_fragments) || (min_unsent_fragment >= next_unsent_frag))) {
                octet align = static_cast<octet>((4U - change->serializedPayload.length % 4U) & 3U);
                if (change->serializedPayload.length > 4) {
                    change->serializedPayload.data[3] = align;
                }

                if (min_unsent_fragment > next_unsent_frag) {
                    locator_selector.locator_selector.reset(false);
                    first_relevant_reader = remote_reader;
                    min_unsent_fragment = next_unsent_frag;
                }

                (*remote_reader)->active(true);
                locator_selector.locator_selector.enable((*remote_reader)->guid());
                should_be_sent = true;

                // If there is a hole (removed from history or not relevants) between previous
                // sample and this one, send it a personal GAP.
                if (SequenceNumber_t::unknown() != gap_seq) {
                    if (SequenceNumber_t::unknown() == gap_seq_for_all) {  // Calculate if the hole is for all readers
                        std::lock_guard<RecursiveTimedMutex> history_lock(mp_history->getMutex());
                        History::const_iterator chit = mp_history->find_change_nts(change);
                        if (chit == mp_history->changesBegin()) {
                            gap_seq_for_all = gap_seq;
                        } else {
                            SequenceNumber_t prev = (*std::prev(chit))->sequenceNumber + 1U;

                            if (prev == gap_seq) {
                                gap_seq_for_all = gap_seq;
                            }
                        }
                    }

                    if (gap_seq_for_all != gap_seq) {  // If it is an individual GAP, sent it to repective reader.
                        group.sender(this, (*remote_reader)->message_sender());
                        (void)group.add_gap(gap_seq, SequenceNumberSet_t(change->sequenceNumber),
                                            (*remote_reader)->guid());
                        try {
                            send_heartbeat_nts_(1u, group, disable_positive_acks_, false, *remote_reader);
                        } catch (vbsutil::ecdr::exception::BadParamException& e) {  //LCOV_EXCL_START
                            elogError(RTPS_WRITER, RetCode_t::RETCODE_BAD_PARAMETER,
                                      "Error send_heartbeat_nts_ catch error" << e.what() << ".");
                            return DeliveryRetCode::NOT_DELIVERED;
                        }  //LCOV_EXCL_STOP

                        group.sender(this, &locator_selector);  // This makes the flush_and_reset().
                    }
                }
            } else {
                (*remote_reader)->active(false);
            }
        }

        bool should_send_global_gap = SequenceNumber_t::unknown() != gap_seq_for_all;

        if (should_send_global_gap) {  // Send GAP for all readers
            (void)group.add_gap(gap_seq_for_all, SequenceNumberSet_t(change->sequenceNumber));
        }

        if (locator_selector.locator_selector.state_has_changed() &&
            ((should_be_sent && (!m_separateSendingEnabled)) || should_send_global_gap)) {
            group.flush_and_reset();
            mp_RTPSParticipant->select_locators(locator_selector.locator_selector);
            compute_selected_guids(locator_selector);
        }

        try {
            if (should_be_sent) {
                uint32_t last_processed = group.get_current_bytes_processed();
                if (!m_separateSendingEnabled) {
                    size_t num_locators = locator_selector.locator_selector.selected_size();
                    if (num_locators > 0U) {
                        if (0U < n_fragments) {
                            if (min_unsent_fragment != n_fragments + 1U) {
                                if (group.add_data_frag(*change, min_unsent_fragment, enable_e2e_protection_,
                                                        e2e_profile04_)) {
                                    for (auto remote_reader = first_relevant_reader;
                                         remote_reader != matched_remote_readers_.end(); ++remote_reader) {
                                        if ((*remote_reader)->active()) {
                                            bool allFragmentsSent = false;
                                            (void)(*remote_reader)
                                                ->mark_fragment_as_sent_for_change(
                                                    change->sequenceNumber, min_unsent_fragment, allFragmentsSent);

                                            if (allFragmentsSent) {
                                                if (!(*remote_reader)->is_reliable()) {
                                                    (*remote_reader)->acked_changes_set(change->sequenceNumber + 1U);
                                                } else {
                                                    (*remote_reader)
                                                        ->from_unsent_to_status(change->sequenceNumber, UNDERWAY, true);
                                                }
                                            }
                                        }
                                    }
                                } else {
                                    ret_code = DeliveryRetCode::NOT_DELIVERED;
                                }
                            }
                            if (min_unsent_fragment == n_fragments) {
                                send_heartbeat_piggyback_nts_(group, locator_selector, last_processed);
                            }
                        } else {
                            if (group.add_data(*change, enable_e2e_protection_, e2e_profile04_)) {
                                for (auto remote_reader = first_relevant_reader;
                                     remote_reader != matched_remote_readers_.end(); ++remote_reader) {
                                    if ((*remote_reader)->active()) {
                                        if (!(*remote_reader)->is_reliable()) {
                                            if (change->batch_send) {
                                                (*remote_reader)
                                                    ->acked_changes_set(change->sequenceNumber +
                                                                        change->batchSampleCount);
                                            } else {
                                                (*remote_reader)->acked_changes_set(change->sequenceNumber + 1U);
                                            }
                                        } else {
#ifdef BATCH_SEND_ENABLE
                                            if (change->batch_send) {
                                                (*remote_reader)
                                                    ->from_unsent_batch_to_underway(change->sequenceNumber, true);
                                            } else {
#else
                                            {
#endif
                                                (*remote_reader)
                                                    ->from_unsent_to_status(change->sequenceNumber, UNDERWAY, true);
                                            }
                                        }
                                    }
                                }
                            } else {
                                ret_code = DeliveryRetCode::NOT_DELIVERED;
                            }
                            send_heartbeat_piggyback_nts_(group, locator_selector, last_processed);
                        }
                    } else {
                        ret_code = DeliveryRetCode::NOT_DELIVERED;
                    }
                } else {
                    for (auto remote_reader = first_relevant_reader; remote_reader != matched_remote_readers_.end();
                         ++remote_reader) {
                        if ((*remote_reader)->active()) {
                            group.sender(this, (*remote_reader)->message_sender());

                            if (0U < n_fragments) {
                                if (min_unsent_fragment != n_fragments + 1U) {
                                    if (group.add_data_frag(*change, min_unsent_fragment, enable_e2e_protection_,
                                                            e2e_profile04_)) {
                                        bool allFragmentsSent = false;
                                        (void)(*remote_reader)
                                            ->mark_fragment_as_sent_for_change(change->sequenceNumber,
                                                                               min_unsent_fragment, allFragmentsSent);

                                        if (allFragmentsSent) {
                                            if (!(*remote_reader)->is_reliable()) {
                                                (*remote_reader)->acked_changes_set(change->sequenceNumber + 1U);
                                            } else {
                                                (*remote_reader)
                                                    ->from_unsent_to_status(change->sequenceNumber, UNDERWAY, true);
                                            }
                                        }
                                    } else {
                                        ret_code = DeliveryRetCode::NOT_DELIVERED;
                                    }
                                }
                            } else {
                                if (group.add_data(*change, enable_e2e_protection_, e2e_profile04_)) {
                                    if (!(*remote_reader)->is_reliable()) {
#ifdef BATCH_SEND_ENABLE
                                        if (change->batch_send) {
                                            (*remote_reader)
                                                ->acked_changes_set(change->sequenceNumber + change->batchSampleCount);
                                        } else {
#else
                                        {
#endif
                                            (*remote_reader)->acked_changes_set(change->sequenceNumber + 1);
                                        }
                                    } else {
#ifdef BATCH_SEND_ENABLE
                                        if (change->batch_send) {
                                            (*remote_reader)
                                                ->from_unsent_batch_to_underway(change->sequenceNumber, true);
                                        } else {
#else
                                        {
#endif
                                            (*remote_reader)
                                                ->from_unsent_to_status(change->sequenceNumber, UNDERWAY, true);
                                        }
                                    }
                                } else {
                                    elogErrorKeyT(RTPS_WRITER, RetCode_t::RETCODE_NOT_DELIVERED,
                                                  getTopicName(),  //LCOV_EXCL_START
                                                  "Topic " << getTopicName() << " sending change "
                                                           << change->sequenceNumber << " fail.");
                                    ret_code = DeliveryRetCode::NOT_DELIVERED;
                                }  //LCOV_EXCL_STOP
                            }

                            send_heartbeat_nts_(1u, group, false, false, *remote_reader);
                        }
                    }
                }
            }
        } catch (const RTPSMessageGroup::timeout&) {
            elogErrorKeyT(RTPS_WRITER, RetCode_t::RETCODE_TIMEOUT, getTopicName(),  //LCOV_EXCL_START
                          "Topic " << getTopicName() << " max blocking time reached.");
            ret_code = DeliveryRetCode::NOT_DELIVERED;
        } catch (const RTPSMessageGroup::limit_exceeded&) {  //LCOV_EXCL_STOP
            ret_code = DeliveryRetCode::EXCEEDED_LIMIT;
        }

        if (disable_positive_acks_ && (last_sequence_number_ == SequenceNumber_t())) {
            last_sequence_number_ = change->sequenceNumber;
        }

        // Restore in case a exception was launched by RTPSMessageGroup.
        group.sender(this, &locator_selector);
    }

    if (need_reactivate_periodic_heartbeat) {
        periodic_hb_event_->restart_timer(max_blocking_time);
    }

    return ret_code;
}

/*
 * MATCHED_READER-RELATED METHODS
 */
void StatefulWriter::update_reader_info(LocatorSelectorSender& locator_selector, bool create_sender_resources) {
    // Refresh the cached locator information and the set of selected reader GUIDs.
    update_cached_info_nts(locator_selector);
    compute_selected_guids(locator_selector);

    if (create_sender_resources) {
        RTPSParticipantImpl* participant = getRTPSParticipant();
        locator_selector.locator_selector.for_each(
            [participant](const Locator_t& locator) { participant->createSenderResources(locator); });
    }

    // Refresh the cached flags telling whether remote (and local) readers are matched.
    there_are_remote_readers_ = !matched_remote_readers_.empty();
#ifdef INTRA_PROCESS_ENABLE
    there_are_local_readers_ = !matched_local_readers_.empty();
#endif
}

void StatefulWriter::select_all_readers_nts(RTPSMessageGroup& group, LocatorSelectorSender& locator_selector) {
    // Re-enable every entry so that the next send operation reaches all matched readers.
    locator_selector.locator_selector.reset(true);
    if (!locator_selector.locator_selector.state_has_changed()) {
        return;
    }
    // Selection changed: flush pending submessages and recompute the destinations.
    group.flush_and_reset();
    mp_RTPSParticipant->select_locators(locator_selector.locator_selector);
    compute_selected_guids(locator_selector);
}

size_t StatefulWriter::getMatchedReadersSize() const {
    // mp_mutex is not declared mutable, so a const_cast is required to lock it
    // from this const member function.
    const std::lock_guard<RecursiveTimedMutex> guard(const_cast<RecursiveTimedMutex&>(mp_mutex));
    size_t count = matched_remote_readers_.size();
#ifdef INTRA_PROCESS_ENABLE
    count += matched_local_readers_.size();
#endif
    return count;
}

/**
 * @brief Adds a matched reader to this writer, or updates an already-matched one.
 *
 * If a reader with the same GUID is already matched, the listener (if any) is
 * notified with CHANGED_QOS_READER and its locators may be re-filtered; the
 * function then returns false. Otherwise a ReaderProxy is obtained (from the
 * inactive pool, or newly allocated when the limit allows), initialized from
 * @c rdata and registered in both locator selectors. For reliable readers the
 * current history is scheduled for delivery (old changes for late-joiners, or
 * a GAP covering the whole history otherwise) and an initial heartbeat is
 * sent; for best-effort readers every existing change is considered acked.
 *
 * @param rdata Discovery data of the remote reader. Must carry a valid GUID.
 * @return true when a new reader proxy was added; false when the reader was
 *         already matched (update path) or could not be added.
 */
bool StatefulWriter::matched_reader_add(ReaderProxyData& rdata) {

    if (rdata.guid() == c_Guid_Unknown) {
        elogError(RTPS_WRITER, RetCode_t::RETCODE_BAD_PARAMETER,  //LCOV_EXCL_START
                  "Reliable Writer in topic " << getTopicName() << " need GUID_t of matched readers");
        return false;
    }  //LCOV_EXCL_STOP

    std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);
    std::unique_lock<LocatorSelectorSender> guard_locator_selector_general(locator_selector_general_);
    std::unique_lock<LocatorSelectorSender> guard_locator_selector_async(locator_selector_async_);

    // Check if it is already matched.
    // FIX: 'found_reader' is declared before the preprocessor branch so that it
    // is visible to (and captured by) the lambda in both builds. Previously it
    // was only declared and captured in the non-INTRA_PROCESS_ENABLE branch,
    // so the lambda body did not compile when INTRA_PROCESS_ENABLE was defined.
    ReaderProxy* found_reader = nullptr;
#ifdef INTRA_PROCESS_ENABLE
    if (for_matched_readers(matched_local_readers_, matched_remote_readers_,
                            [this, &rdata, &found_reader](ReaderProxy* reader) {
#else
    if (for_matched_readers(matched_remote_readers_, [this, &rdata, &found_reader](ReaderProxy* reader) {
#endif
            if (reader->guid() == rdata.guid()) {
                logDebug(RTPS_WRITER, "Attempting to add existing reader, updating information.");
                found_reader = reader;
                return true;
            }
            return false;
        })) {
        // Reader already matched: treat this call as a QoS/locator update.
        if (nullptr != mp_listener) {
            // call the listener without locks taken
            guard_locator_selector_async.unlock();
            guard_locator_selector_general.unlock();
            guard.unlock();

            vbs::RemoteEndpointInfo rinfo;
            rinfo.guid(rdata.guid());
            rinfo.topicName(rdata.topicName());
            rinfo.typeName(rdata.typeName());
            rinfo.topicKind(rdata.topicKind());
            rinfo.content_filter_property(rdata.content_filter());
            rinfo.type_plain(rdata.type_plain());
            rinfo.host_id(rdata.get_host_id());
            rinfo.process_id(rdata.get_pid());

            // Map the reader's durability QoS to the RTPS durability kind.
            if (rdata.m_qos.m_durability.kind == DurabilityQosPolicyKind::PERSISTENT_DURABILITY_QOS) {
                rinfo.durabilityKind(DurabilityKind_t::PERSISTENT);
            } else if (rdata.m_qos.m_durability.kind == DurabilityQosPolicyKind::TRANSIENT_DURABILITY_QOS) {
                rinfo.durabilityKind(DurabilityKind_t::TRANSIENT);
            } else if (rdata.m_qos.m_durability.kind == DurabilityQosPolicyKind::TRANSIENT_LOCAL_DURABILITY_QOS) {
                rinfo.durabilityKind(DurabilityKind_t::TRANSIENT_LOCAL);
            } else {
                rinfo.durabilityKind(DurabilityKind_t::VOLATILE);
            }

            if (rdata.m_qos.m_reliability.kind == ReliabilityQosPolicyKind::BEST_EFFORT_RELIABILITY_QOS) {
                rinfo.reliabilityKind(ReliabilityKind_t::BEST_EFFORT);
            } else {
                rinfo.reliabilityKind(ReliabilityKind_t::RELIABLE);
            }

            uint16_t local_host_id = m_guid.guidPrefix.is_guid_static() ? vbs::SystemInfo::instance().host_id()
                                                                        : m_guid.guidPrefix.get_host_id();
            for (auto loc : rdata.remote_locators().unicast) {
                // For communication within the same host, only report the DSF (SHM) locator.
                if (loc.kind == LOCATOR_KIND_DSF &&
                    m_guid.is_on_same_host_as(local_host_id, rdata.guid(), rdata.get_host_id())) {
                    rinfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_DSF);
                    rinfo.locator(loc);
                    break;
                } else if (loc.kind == LOCATOR_KIND_UDPv4) {
                    rinfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP);
                } else if (loc.kind == LOCATOR_KIND_UDS) {
                    rinfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS);
                }
            }

            mp_listener->on_reader_discovery(READER_DISCOVERY_STATUS::CHANGED_QOS_READER, rdata.guid(), rinfo);

            guard.lock();
            guard_locator_selector_general.lock();
            guard_locator_selector_async.lock();
            // get_matched_status() guarantees that entities already communicating normally do not
            // have their locator information updated. Example: a local writer and local reader
            // communicate over SHM; when a remote reader appears, the local writer creates a UDP
            // transport, and when a remote writer appears, the local reader updates its EDP to
            // carry UDP — the local writer and local reader would then communicate over UDP.
            // A DSF locator is processed directly because, in simple mode, the initial EDP
            // carries a UDP locator by default; if intra-domain communication is detected, the
            // second EDP only carries a DSF locator, so a filter/update pass is needed.
            if (!rinfo.get_matched_status() &&
                ((!rdata.is_filtered(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP) &&
                  rinfo.locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP)) ||
                 (!rdata.is_filtered(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS) &&
                  rinfo.locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS)) ||
                 rinfo.locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_DSF))) {
                FilterReaderLocators(rdata, found_reader);
            }

            if (!this->m_guid.is_builtin()) {
                elogInfo(RTPS_WRITER, "Remote reader locator when CHANGED_QOS_READER: " << rdata.remote_locators()
                                                                                        << " guid: " << rdata.guid());
            }
        }
        return false;
    }

    // Get a reader proxy from the inactive pool (or create a new one if necessary and allowed)
    ReaderProxy* rp = nullptr;
    if (matched_readers_pool_.empty()) {
        size_t max_readers = matched_readers_pool_.max_size();
        if (getMatchedReadersSize() + matched_readers_pool_.size() < max_readers) {
            const RTPSParticipantAttributes& part_att = mp_RTPSParticipant->getRTPSParticipantAttributes();
            rp = new ReaderProxy(m_times, part_att.allocation.locators, this);
        } else {
            logWarning(RTPS_WRITER,
                       "Maximum number of reader proxies (" << max_readers << ") reached for writer " << m_guid);
            return false;
        }
    } else {
        rp = matched_readers_pool_.back();
        matched_readers_pool_.pop_back();
    }

    // Add info of new datareader.
    rp->start(rdata);
    filter_remote_locators(*rp->general_locator_selector_entry(), m_att.external_unicast_locators,
                           m_att.ignore_non_matching_locators);
    filter_remote_locators(*rp->async_locator_selector_entry(), m_att.external_unicast_locators,
                           m_att.ignore_non_matching_locators);
    (void)locator_selector_general_.locator_selector.add_entry(rp->general_locator_selector_entry());
    (void)locator_selector_async_.locator_selector.add_entry(rp->async_locator_selector_entry());
#ifdef INTRA_PROCESS_ENABLE
    if (rp->is_local_reader()) {
        (void)matched_local_readers_.push_back(rp);
        logDebug(RTPS_WRITER,
                 "Adding reader " << rdata.guid() << " to " << this->m_guid.entityId << " as local reader");
    } else {
#else
    {
#endif
        (void)matched_remote_readers_.push_back(rp);
        logDebug(RTPS_WRITER,
                 "Adding reader " << rdata.guid() << " to " << this->m_guid.entityId << " as remote reader");
    }

    update_reader_info(locator_selector_general_, true);
    update_reader_info(locator_selector_async_, true);

    logDebug(RTPS_WRITER, "Reader Proxy " << rp->guid() << " added to " << this->m_guid.entityId << " with "
                                          << rdata.remote_locators().unicast.size() << "(u)-"
                                          << rdata.remote_locators().multicast.size() << "(m) locators");

    bool is_reliable = rp->is_reliable();
    if (is_reliable) {
        SequenceNumber_t min_seq = get_seq_num_min();
        SequenceNumber_t last_seq = get_seq_num_max();
        RTPSMessageGroup group(mp_RTPSParticipant, this, rp->message_sender());

        // History not empty
        if (min_seq != SequenceNumber_t::unknown()) {
            (void)last_seq;  // only referenced by the asserts below; silences NDEBUG warnings
            assert(last_seq != SequenceNumber_t::unknown());
            assert(min_seq <= last_seq);

            try {
                // Late-joiner
                if ((TRANSIENT_LOCAL <= rp->durability_kind()) && (TRANSIENT_LOCAL <= m_att.durabilityKind)) {
                    std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
                    logDebug(RTPS_WRITER, "Send old changes to new reader " << rp->guid());
                    if (!rdata.type_plain() && nullptr != mp_listener) {
                        mp_listener->on_non_plain_reader_discovery();
                    }
                    for (History::iterator cit = mp_history->changesBegin(); cit != mp_history->changesEnd(); ++cit) {
                        // Holes are managed when deliver_sample(), sending GAP messages.
                        if (rp->rtps_is_relevant(*cit, rdata.content_filter())) {
                            ChangeForReader_t changeForReader(*cit);

                            // If it is local, maintain in UNSENT status and add to flow controller.
                            if (rp->is_local_reader()) {
                                (void)flow_controller_->add_old_sample(this, *cit);
                            } else {
                                // In other case, set as UNACKNOWLEDGED and expects the reader request
                                // them.
                                changeForReader.setStatus(UNACKNOWLEDGED);
                            }

                            rp->add_change(changeForReader, true, false);
                        }
                    }
                } else {
#ifdef INTRA_PROCESS_ENABLE
                    if (rp->is_local_reader()) {
                        (void)intraprocess_gap(rp, min_seq, mp_history->next_sequence_number());
                    } else {
#else
                    {
#endif
                        // Send a GAP of the whole history.
                        (void)group.add_gap(min_seq, SequenceNumberSet_t(mp_history->next_sequence_number()),
                                            rp->guid());
                    }
                }

                // Always activate heartbeat period. We need a confirmation of the reader.
                // The state has to be updated.
                periodic_hb_event_->restart_timer(std::chrono::steady_clock::now() + std::chrono::hours(24));
            } catch (const RTPSMessageGroup::timeout&) {  //LCOV_EXCL_START
                elogError(RTPS_WRITER, RetCode_t::RETCODE_TIMEOUT,
                          "Topic " << getTopicName() << " max blocking time reached");
            }  //LCOV_EXCL_STOP
        }
#ifdef INTRA_PROCESS_ENABLE
        if (rp->is_local_reader()) {
            (void)intraprocess_heartbeat(rp);
        } else {
#else
        {
#endif
            send_heartbeat_nts_(1u, group, disable_positive_acks_, false, rp);
            group.flush_and_reset();
        }
    } else {
        // Acknowledged all for best-effort reader.
        rp->acked_changes_set(mp_history->next_sequence_number());
    }

    if (nullptr != mp_listener) {
        // Build the discovery info while still holding the locks; the listener
        // itself is invoked with all locks released (see below).
        vbs::RemoteEndpointInfo rinfo;
        rinfo.guid(rdata.guid());
        rinfo.topicName(rdata.topicName());
        rinfo.typeName(rdata.typeName());
        rinfo.topicKind(rdata.topicKind());
        rinfo.content_filter_property(rdata.content_filter());
        rinfo.type_plain(rdata.type_plain());
        rinfo.host_id(rdata.get_host_id());
        rinfo.process_id(rdata.get_pid());

        if (rdata.m_qos.m_durability.kind == DurabilityQosPolicyKind::PERSISTENT_DURABILITY_QOS) {
            rinfo.durabilityKind(DurabilityKind_t::PERSISTENT);
        } else if (rdata.m_qos.m_durability.kind == DurabilityQosPolicyKind::TRANSIENT_DURABILITY_QOS) {
            rinfo.durabilityKind(DurabilityKind_t::TRANSIENT);
        } else if (rdata.m_qos.m_durability.kind == DurabilityQosPolicyKind::TRANSIENT_LOCAL_DURABILITY_QOS) {
            rinfo.durabilityKind(DurabilityKind_t::TRANSIENT_LOCAL);
        } else {
            rinfo.durabilityKind(DurabilityKind_t::VOLATILE);
        }

        if (rdata.m_qos.m_reliability.kind == ReliabilityQosPolicyKind::BEST_EFFORT_RELIABILITY_QOS) {
            rinfo.reliabilityKind(ReliabilityKind_t::BEST_EFFORT);
        } else {
            rinfo.reliabilityKind(ReliabilityKind_t::RELIABLE);
        }
        for (auto loc : rdata.remote_locators().unicast) {
            if (loc.kind == LOCATOR_KIND_DSF) {
                rinfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_DSF);
                rinfo.locator(loc);
            } else if (loc.kind == LOCATOR_KIND_UDPv4) {
                rinfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP);
            } else if (loc.kind == LOCATOR_KIND_UDS) {
                rinfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS);
            }
        }

        // call the listener without locks taken
        guard_locator_selector_async.unlock();
        guard_locator_selector_general.unlock();
        guard.unlock();

        mp_listener->on_reader_discovery(READER_DISCOVERY_STATUS::DISCOVERED_READER, rdata.guid(), rinfo);

        guard.lock();
        guard_locator_selector_general.lock();
        guard_locator_selector_async.lock();

        // Additionally convert and filter UDP or UDS locators.
        if ((!rdata.is_filtered(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP) &&
             rinfo.locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP)) ||
            (!rdata.is_filtered(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS) &&
             rinfo.locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS))) {
            FilterReaderLocators(rdata, rp);
        }
        if (!this->m_guid.is_builtin()) {
            elogInfo(RTPS_WRITER, "Remote reader locator when DISCOVERED_READER: " << rdata.remote_locators()
                                                                                   << " guid: " << rdata.guid());
        }
    }
    return true;
}

/**
 * @brief Unmatches the reader identified by @c reader_guid from this writer.
 *
 * The matching ReaderProxy is removed from the local/remote reader lists,
 * its locator-selector entries are dropped, the cached reader information is
 * refreshed, and the proxy is stopped and returned to the inactive pool.
 * If a proxy was removed, the acked status is re-evaluated and the listener
 * (if any) is notified with REMOVED_READER.
 *
 * @param reader_guid GUID of the reader to unmatch.
 * @return true if a matching reader proxy was found and removed, false otherwise.
 */
bool StatefulWriter::matched_reader_remove(const GUID_t& reader_guid) {
    ReaderProxy* rproxy = nullptr;
    // Lock order: writer mutex first, then both locator selectors.
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    std::unique_lock<LocatorSelectorSender> guard_locator_selector_general(locator_selector_general_);
    std::unique_lock<LocatorSelectorSender> guard_locator_selector_async(locator_selector_async_);
#ifdef INTRA_PROCESS_ENABLE
    // Look among local (intra-process) readers first.
    for (ReaderProxyIterator it = matched_local_readers_.begin(); it != matched_local_readers_.end(); ++it) {
        if ((*it)->guid() == reader_guid) {
            logDebug(RTPS_WRITER, "Reader Proxy removed: " << reader_guid);
            rproxy = std::move(*it);
            it = matched_local_readers_.erase(it);
            break;
        }
    }
#endif
    // Fall back to the remote reader list if not found (or not built with intra-process).
    if (rproxy == nullptr) {
        for (ReaderProxyIterator it = matched_remote_readers_.begin(); it != matched_remote_readers_.end(); ++it) {
            if ((*it)->guid() == reader_guid) {
                logDebug(RTPS_WRITER, "Reader Proxy removed: " << reader_guid);
                rproxy = std::move(*it);
                it = matched_remote_readers_.erase(it);
                break;
            }
        }
    }

    // Always drop the selector entries and refresh cached info, even if no proxy
    // was found (remove_entry is a no-op for unknown GUIDs).
    (void)locator_selector_general_.locator_selector.remove_entry(reader_guid);
    (void)locator_selector_async_.locator_selector.remove_entry(reader_guid);
    update_reader_info(locator_selector_general_, false);
    update_reader_info(locator_selector_async_, false);

    // No readers left: periodic heartbeats are no longer needed.
    if (getMatchedReadersSize() == 0U) {
        periodic_hb_event_->cancel_timer();
    }

    if (rproxy != nullptr) {
        vbs::RemoteEndpointInfo rinfo;
        rinfo.type_plain(rproxy->type_plain());
        rinfo.domainId(mp_RTPSParticipant->get_domain_id());
        rinfo.guid(reader_guid);
        // Stop the proxy and return it to the pool for reuse.
        rproxy->stop();
        (void)matched_readers_pool_.push_back(rproxy);

        // Removing a reader may complete acknowledgement of pending changes.
        check_acked_status();
        if (nullptr != mp_listener) {
            // call the listener without locks taken
            guard_locator_selector_async.unlock();
            guard_locator_selector_general.unlock();
            lock.unlock();

            mp_listener->on_reader_discovery(READER_DISCOVERY_STATUS::REMOVED_READER, reader_guid, rinfo);
        }
        return true;
    }

    logDebug(RTPS_HISTORY, "Reader Proxy doesn't exist in this writer");
    return false;
}

bool StatefulWriter::matched_reader_is_matched(const GUID_t& reader_guid) {
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
#ifdef INTRA_PROCESS_ENABLE
    return for_matched_readers(matched_local_readers_, matched_remote_readers_,
#else
    return for_matched_readers(matched_remote_readers_,
#endif
                               [&reader_guid](ReaderProxy* reader) { return (reader->guid() == reader_guid); });
}

bool StatefulWriter::matched_reader_lookup(GUID_t& readerGuid, ReaderProxy** RP) {
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    // On a GUID match, expose the proxy through the output parameter and stop iterating.
    auto store_if_match = [&readerGuid, RP](ReaderProxy* proxy) {
        if (proxy->guid() != readerGuid) {
            return false;
        }
        *RP = proxy;
        return true;
    };
#ifdef INTRA_PROCESS_ENABLE
    return for_matched_readers(matched_local_readers_, matched_remote_readers_, store_if_match);
#else
    return for_matched_readers(matched_remote_readers_, store_if_match);
#endif
}

bool StatefulWriter::has_been_fully_delivered(const SequenceNumber_t& seq_num) const {
    std::lock_guard<RecursiveTimedMutex> guard(const_cast<RecursiveTimedMutex&>(mp_mutex));
    bool found = false;
#if 0
    // Sequence number has not been generated by this WriterHistory.
    if (seq_num >= mp_history->next_sequence_number()) {
        return false;
    }
#endif
    for (auto reader : matched_remote_readers_) {
        bool ret_code = reader->has_been_delivered(seq_num, found);
        if (found && (!ret_code)) {
            // The change has not been fully delivered if it is pending delivery on at least one ReaderProxy.
            return false;
        }
    }
    return true;
}

bool StatefulWriter::is_acked_by_all(const CacheChange_t* change) const {
    std::lock_guard<RecursiveTimedMutex> guard(const_cast<RecursiveTimedMutex&>(mp_mutex));

    // Only changes produced by this writer can be queried.
    const bool change_is_ours = (change->writerGUID == this->getGuid());
    if (!change_is_ours) {
        logWarning(RTPS_WRITER,  //LCOV_EXCL_START
                   "The given change " << change->writerGUID << "is not from this Writer " << this->getGuid());
        return false;
    }  //LCOV_EXCL_STOP

    return is_acked_by_all(change->sequenceNumber);
}

bool StatefulWriter::is_acked_by_all(const SequenceNumber_t seq) const {
    //assert(mp_history->next_sequence_number() > seq);
#ifdef INTRA_PROCESS_ENABLE
    return (seq < next_all_acked_notify_sequence_) ||
           (!for_matched_readers(matched_local_readers_, matched_remote_readers_,
                                 [seq](const ReaderProxy* reader) { return !(reader->change_is_acked(seq)); }));
#else
    return (seq < next_all_acked_notify_sequence_) ||
           (!for_matched_readers(matched_remote_readers_,
                                 [seq](const ReaderProxy* reader) { return !(reader->change_is_acked(seq)); }));
#endif
}

bool StatefulWriter::all_readers_updated() {
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
#ifdef INTRA_PROCESS_ENABLE
    return !for_matched_readers(matched_local_readers_, matched_remote_readers_,
                                [](const ReaderProxy* reader) { return (reader->has_changes()); });
#else
    return !for_matched_readers(matched_remote_readers_,
                                [](const ReaderProxy* reader) { return (reader->has_changes()); });
#endif
}

bool StatefulWriter::wait_for_all_acked(const Duration_t& max_wait) {
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    std::unique_lock<std::mutex> all_acked_lock(all_acked_mutex_);
#ifdef INTRA_PROCESS_ENABLE
    all_acked_ = !for_matched_readers(matched_local_readers_, matched_remote_readers_,
                                      [](const ReaderProxy* reader) { return reader->has_changes(); });
#else
    all_acked_ =
        !for_matched_readers(matched_remote_readers_, [](const ReaderProxy* reader) { return reader->has_changes(); });
#endif
    lock.unlock();

    if (!all_acked_) {
        std::chrono::microseconds max_w(TimeConv::Duration_t2MicroSecondsInt64(max_wait));
        (void)all_acked_cond_.wait_for(all_acked_lock, max_w, [&]() { return all_acked_; });
    }

    return all_acked_;
}

void StatefulWriter::rebuild_status_after_load() {
    SequenceNumber_t min_seq = get_seq_num_min();
    if (min_seq != SequenceNumber_t::unknown()) {
        biggest_removed_sequence_number_ = min_seq - 1U;
        may_remove_change_ = 1U;
    }

    SequenceNumber_t next_seq = mp_history->next_sequence_number();
    next_all_acked_notify_sequence_ = next_seq;
    min_readers_low_mark_ = next_seq - 1U;
    all_acked_ = true;
}

/**
 * @brief Recompute the global acknowledgement status across all matched readers.
 *
 * Determines the minimum low-mark (highest sequence number acknowledged by every
 * reader), notifies the listener for newly fully-acknowledged changes, updates
 * may_remove_change_ / min_readers_low_mark_, and wakes up threads blocked in
 * wait_for_all_acked() / try_remove_change() when something changed.
 */
void StatefulWriter::check_acked_status() {
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);

    bool all_acked = true;
    bool has_min_low_mark = false;
    // #8945 If no readers matched, notify all old changes.
    SequenceNumber_t min_low_mark = mp_history->next_sequence_number() - 1U;
#ifdef INTRA_PROCESS_ENABLE
    (void)for_matched_readers(matched_local_readers_, matched_remote_readers_,
#else
    (void)for_matched_readers(matched_remote_readers_,
#endif
                              // Visit every reader: track the smallest low-mark and
                              // whether any reader still has unacknowledged changes.
                              [&all_acked, &has_min_low_mark, &min_low_mark](ReaderProxy* reader) {
                                  SequenceNumber_t reader_low_mark = reader->changes_low_mark();
                                  if ((reader_low_mark < min_low_mark) || (!has_min_low_mark)) {
                                      has_min_low_mark = true;
                                      min_low_mark = reader_low_mark;
                                  }

                                  if (reader->has_changes()) {
                                      all_acked = false;
                                  }

                                  // Returning false keeps the iteration going over all readers.
                                  return false;
                              });

    bool something_changed = all_acked;
    SequenceNumber_t min_seq = get_seq_num_min();
    if (min_seq != SequenceNumber_t::unknown()) {
        // In the case where we haven't received an acknack from a recently matched reader,
        // min_low_mark will be zero, and no change will be notified as received by all
        if (next_all_acked_notify_sequence_ <= min_low_mark) {
            if ((mp_listener != nullptr) && (min_low_mark >= get_seq_num_min())) {
                // We will inform backwards about the changes received by all readers, starting
                // on min_low_mark down until next_all_acked_notify_sequence_. This way we can
                // safely proceed with the traversal, in case a change is removed from the history
                // inside the callback
                std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
                History::iterator history_end = mp_history->changesEnd();
                History::iterator cit = std::lower_bound(mp_history->changesBegin(), history_end, min_low_mark,
                                                         [](const CacheChange_t* change, const SequenceNumber_t& seq) {
                                                             return change->sequenceNumber < seq;
                                                         });
                if ((cit != history_end) && (*cit)->sequenceNumber == min_low_mark) {
                    ++cit;
                }

                SequenceNumber_t seq {};
                SequenceNumber_t end_seq =
                    (min_seq > next_all_acked_notify_sequence_) ? min_seq : next_all_acked_notify_sequence_;
                // It may happen that even the smallest change currently in the history has a
                // sequence number greater than min_low_mark. In that case cit equals
                // changesBegin(), so we must neither decrement it nor notify anything as acked.
                if (cit != mp_history->changesBegin()) {
                    // The iterator starts pointing to the change inmediately after min_low_mark
                    --cit;
                    do {
                        // Avoid notifying changes before next_all_acked_notify_sequence_
                        CacheChange_t* change = *cit;
                        seq = change->sequenceNumber;
                        if (seq < next_all_acked_notify_sequence_) {
                            break;
                        }

                        // Change iterator before it possibly becomes invalidated
                        if (cit != mp_history->changesBegin()) {
                            --cit;
                        }

                        // Notify reception of change (may remove that change on VOLATILE writers)
                        mp_listener->onWriterChangeReceivedByAll(this, change);

                        // Stop if we got to either next_all_acked_notify_sequence_ or the first change
                    } while (seq > end_seq);
                }
            }

            next_all_acked_notify_sequence_ = min_low_mark + 1U;
        }
#ifdef BATCH_SEND_ENABLE
        if (min_low_mark >= (mp_history->batch_qos_.enable ? get_last_seq_from_min_batch() : get_seq_num_min())) {
#else
        if (min_low_mark >= get_seq_num_min()) {
#endif
            may_remove_change_ = 1U;
        }

        min_readers_low_mark_ = min_low_mark;
        something_changed = true;
    }
    if (all_acked) {
        // Everything acknowledged: fast-forward the notify sequence and wake waiters.
        std::unique_lock<std::mutex> all_acked_lock(all_acked_mutex_);
        SequenceNumber_t next_seq = mp_history->next_sequence_number();
        next_all_acked_notify_sequence_ = next_seq;
        min_readers_low_mark_ = next_seq - 1U;
        all_acked_ = true;
        all_acked_cond_.notify_all();
    }

    if (something_changed) {
        may_remove_change_cond_.notify_one();
    }
}

/**
 * @brief Try to remove the oldest change from the history once it has been
 *        acknowledged, waiting up to @c max_blocking_time_point if necessary.
 * @return true if a change was removed (or had already been removed while waiting).
 *
 * may_remove_change_ protocol (set by check_acked_status / removal paths):
 *   0 = nothing removable yet, 1 = some changes acked, 2 = a change was removed.
 */
bool StatefulWriter::try_remove_change(const std::chrono::steady_clock::time_point& max_blocking_time_point,
                                       std::unique_lock<RecursiveTimedMutex>& /*lock*/) {
    SequenceNumber_t min_low_mark;
    std::unique_lock<RecursiveTimedMutex> writer_lock(mp_mutex);

    min_low_mark = next_all_acked_notify_sequence_ - 1U;

    // Number of changes at the front of the history already acked by all readers.
    SequenceNumber_t calc =
        (min_low_mark < get_seq_num_min()) ? SequenceNumber_t() : (min_low_mark - get_seq_num_min()) + 1;
    uint32_t may_remove_change = 1U;

    if (calc <= SequenceNumber_t()) {
        // Nothing removable right now: wait until check_acked_status() signals progress.
        may_remove_change_ = 0U;
        (void)may_remove_change_cond_.wait_until(writer_lock, max_blocking_time_point,
                                                 [&]() { return may_remove_change_ > 0U; });
        may_remove_change = may_remove_change_;
    }

    // Some changes acked
    if (may_remove_change == 1U) {
#ifdef BATCH_SEND_ENABLE
        if (mp_history->batch_qos_.enable) {
            return mp_history->remove_min_batch_change(min_readers_low_mark_);
        } else {
#else
        {
#endif
            return mp_history->remove_min_change();
        }
    } else if (may_remove_change == 2U) {
        // Waiting a change was removed.
        return true;
    }

    return false;
}

/**
 * @brief Block until the change with sequence number @c seq is acknowledged by
 *        every matched reader, or @c max_blocking_time_point is reached.
 * @return true when the acknowledgement condition was met before the deadline.
 */
bool StatefulWriter::wait_for_acknowledgement(const SequenceNumber_t& seq,
                                              const std::chrono::steady_clock::time_point& max_blocking_time_point,
                                              std::unique_lock<RecursiveTimedMutex>& /*lock*/) {
    std::unique_lock<RecursiveTimedMutex> writer_lock(mp_mutex);
    auto acked_predicate = [this, &seq]() { return is_acked_by_all(seq); };
    return may_remove_change_cond_.wait_until(writer_lock, max_blocking_time_point, acked_predicate);
}

/*
 * PARAMETER_RELATED METHODS
 */
void StatefulWriter::updateAttributes(const WriterAttributes& att) {
    this->updateTimes(att.times);
}

/**
 * @brief Apply a new set of protocol timing parameters.
 *
 * Only intervals that actually changed are propagated: the periodic heartbeat
 * event, the nack-response event, and the nack-suppression interval of every
 * matched reader proxy (including the pooled, currently unmatched ones).
 */
void StatefulWriter::updateTimes(const WriterTimes& times) {
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    if (m_times.heartbeatPeriod != times.heartbeatPeriod) {
        (void)periodic_hb_event_->update_interval(times.heartbeatPeriod);
    }
    if (m_times.nackResponseDelay != times.nackResponseDelay) {
        if (nack_response_event_ != nullptr) {
            (void)nack_response_event_->update_interval(times.nackResponseDelay);
        }
    }
    if (m_times.nackSupressionDuration != times.nackSupressionDuration) {
#ifdef INTRA_PROCESS_ENABLE
        (void)for_matched_readers(matched_local_readers_, matched_remote_readers_, [&times](ReaderProxy* reader) {
#else
        (void)for_matched_readers(matched_remote_readers_, [&times](ReaderProxy* reader) {
#endif
            reader->update_nack_supression_interval(times.nackSupressionDuration);
            return false;
        });

        // Pooled proxies are reused on future matches; keep them in sync too.
        for (ReaderProxy* it : matched_readers_pool_) {
            it->update_nack_supression_interval(times.nackSupressionDuration);
        }
    }
    m_times = times;
}

/// @brief Sequence number that will be assigned to the next change added to the history.
SequenceNumber_t StatefulWriter::next_sequence_number() const {
    const SequenceNumber_t upcoming = mp_history->next_sequence_number();
    return upcoming;
}

/**
 * @brief Respond to an explicit discovery/acknack request by sending a heartbeat.
 *        Builtin endpoints do not participate in this mechanism.
 */
void StatefulWriter::acknack_request() {
    if (!getGuid().is_builtin()) {
        send_periodic_heartbeat();
    }
}

/**
 * @brief Send the periodic (or liveliness) heartbeat to matched readers.
 * @param final Final flag for the HEARTBEAT submessage (liveliness grouped path).
 * @param liveliness When true, the heartbeat asserts liveliness regardless of
 *        pending acknowledgements.
 * @return true if at least one reader still has unacknowledged changes
 *         (or a liveliness heartbeat was sent).
 *
 * Also adapts the heartbeat period between normal and fast rates based on the
 * high/low watermarks, and updates the adaptive mem_watermark estimate.
 */
bool StatefulWriter::send_periodic_heartbeat(bool final, bool liveliness) {
    std::lock_guard<RecursiveTimedMutex> guardW(mp_mutex);
    std::lock_guard<LocatorSelectorSender> guard_locator_selector_general(locator_selector_general_);

    bool unacked_changes = false;
    if (!liveliness) {
        try {
            // Regular heartbeat: one per remote reader that still has pending changes.
            for (ReaderProxy* reader : matched_remote_readers_) {
                if (send_heartbeat_to_nts(*reader)) {
                    unacked_changes = true;
                }
            }
#ifdef INTRA_PROCESS_ENABLE
            for (ReaderProxy* reader : matched_local_readers_) {
                if (intraprocess_heartbeat(reader, false)) {
                    unacked_changes = true;
                }
            }
#endif
        } catch (const RTPSMessageGroup::timeout&) {  //LCOV_EXCL_START
            elogError(RTPS_WRITER, RetCode_t::RETCODE_TIMEOUT,
                      "Topic " << getTopicName() << " max blocking time reached");
        }  //LCOV_EXCL_STOP
    } else {
        if (m_separateSendingEnabled) {
            // Send individual liveliness heartbeat to each reader
#ifdef INTRA_PROCESS_ENABLE
            (void)for_matched_readers(matched_local_readers_, matched_remote_readers_,
#else
            (void)for_matched_readers(matched_remote_readers_,
#endif
                                      [this, &liveliness, &unacked_changes](ReaderProxy* reader) {
                                          (void)send_heartbeat_to_nts(*reader, liveliness);
                                          unacked_changes = true;
                                          return false;
                                      });
        } else {
            // This is a liveliness heartbeat, we don't care about checking sequence numbers
            try {
#ifdef INTRA_PROCESS_ENABLE
                for (ReaderProxy* reader : matched_local_readers_) {
                    (void)intraprocess_heartbeat(reader, true);
                    unacked_changes = true;
                }
#endif
                if (there_are_remote_readers_) {
                    // Single grouped heartbeat addressed to all remote readers.
                    unacked_changes = true;
                    RTPSMessageGroup group(mp_RTPSParticipant, this, &locator_selector_general_);
                    send_heartbeat_nts_(locator_selector_general_.all_remote_readers.size(), group, final, liveliness);
                }
            } catch (const RTPSMessageGroup::timeout&) {  //LCOV_EXCL_START
                elogError(RTPS_WRITER, RetCode_t::RETCODE_TIMEOUT,
                          "Topic " << getTopicName() << " max blocking time reached");
            }  //LCOV_EXCL_STOP
        }
    }

    // Switch between normal and fast heartbeat periods based on how far the
    // readers lag behind (low/high watermark hysteresis).
    if ((high_watermark != -1) && (low_watermark != -1) && (m_times.heartbeatPeriod != m_times.fastHeartbeatPeriod)) {
        if (fast_heartbeat_ && (get_seq_num_max() < (min_readers_low_mark_ + low_watermark))) {
            (void)periodic_hb_event_->update_interval(m_times.heartbeatPeriod);
            fast_heartbeat_ = false;
        } else if (!fast_heartbeat_ && (get_seq_num_max() >= (min_readers_low_mark_ + high_watermark))) {
            (void)periodic_hb_event_->update_interval(m_times.fastHeartbeatPeriod);
            fast_heartbeat_ = true;
        }
    }

    // Exponentially smoothed estimate of outstanding samples (floor of 1).
    if (mem_watermark != 0) {
        if (get_seq_num_max() != c_SequenceNumber_Unknown) {
            mem_watermark += static_cast<uint32_t>((get_seq_num_max() - min_readers_low_mark_).to64long());
        }
        mem_watermark = mem_watermark / 2;
        mem_watermark = mem_watermark > 1 ? mem_watermark : 1;
    }

    return unacked_changes;
}

/**
 * @brief Send a heartbeat to a single reader proxy if needed.
 * @param remoteReaderProxy Target reader proxy.
 * @param liveliness When true, send regardless of acknowledgement state.
 * @param force When true, send even if there is nothing unacknowledged.
 * @return true if a heartbeat was (attempted to be) sent, i.e. the reader may
 *         still have unacknowledged changes.
 *
 * Only reliable readers receive heartbeats. Before a non-liveliness heartbeat,
 * GAPs are added for any holes in the history so the reader does not wait for
 * sequence numbers that will never arrive.
 */
bool StatefulWriter::send_heartbeat_to_nts(ReaderProxy& remoteReaderProxy, bool liveliness, bool force /* = false */) {
    SequenceNumber_t first_seq_to_check_acknowledge = get_seq_num_min();
    if (SequenceNumber_t::unknown() == first_seq_to_check_acknowledge) {
        // Empty history: check acknowledgement against the last assigned sequence.
        first_seq_to_check_acknowledge = mp_history->next_sequence_number() - 1U;
    }
    bool unacked_changes = false;
    if (remoteReaderProxy.is_reliable() &&
        (force || liveliness || remoteReaderProxy.has_unacknowledged(first_seq_to_check_acknowledge))) {
#ifdef INTRA_PROCESS_ENABLE
        if (remoteReaderProxy.is_local_reader()) {
            (void)intraprocess_heartbeat(&remoteReaderProxy, liveliness);
        } else {
#else
        {
#endif
            try {
                RTPSMessageGroup group(mp_RTPSParticipant, this, remoteReaderProxy.message_sender());
                SequenceNumber_t firstSeq = get_seq_num_min();
                SequenceNumber_t lastSeq = get_seq_num_max();

                if (firstSeq != c_SequenceNumber_Unknown && lastSeq != c_SequenceNumber_Unknown) {
                    assert(firstSeq <= lastSeq);
                    if (!liveliness) {
                        add_gaps_for_holes_in_history_(group);
                    }
                }

                send_heartbeat_nts_(1u, group, disable_positive_acks_, liveliness, &remoteReaderProxy);
                unacked_changes = true;
            } catch (const RTPSMessageGroup::timeout&) {  //LCOV_EXCL_START
                elogError(RTPS_WRITER, RetCode_t::RETCODE_TIMEOUT,
                          "Topic " << getTopicName() << " max blocking time reached");
            }  //LCOV_EXCL_STOP
        }
    }
    return unacked_changes;
}

/**
 * @brief Append a HEARTBEAT submessage to @c message_group.
 * @param number_of_readers Number of destination readers (0 means nothing to do).
 * @param message_group Group the heartbeat is appended to.
 * @param final Final flag of the HEARTBEAT submessage.
 * @param liveliness Liveliness flag of the HEARTBEAT submessage.
 * @param reader Optional single destination; when given (and not builtin), the
 *        advertised first sequence skips everything that reader already acked.
 */
void StatefulWriter::send_heartbeat_nts_(size_t number_of_readers, RTPSMessageGroup& message_group, bool final,
                                         bool liveliness, ReaderProxy* reader) {
    if (!number_of_readers) {
        return;
    }

    SequenceNumber_t firstSeq = get_seq_num_min();

    // For a single non-builtin destination, do not re-announce changes the
    // reader has already acknowledged (its low mark).
    if (reader != nullptr && !m_guid.is_builtin() && firstSeq != c_SequenceNumber_Unknown &&
        (firstSeq < reader->changes_low_mark() + 1)) {
        firstSeq = reader->changes_low_mark() + 1;
    }

    SequenceNumber_t lastSeq = get_seq_num_max();

    if ((firstSeq == c_SequenceNumber_Unknown) || (lastSeq == c_SequenceNumber_Unknown)) {
        assert((firstSeq == c_SequenceNumber_Unknown) && (lastSeq == c_SequenceNumber_Unknown));

        // Empty history: an "empty" heartbeat (last < first) is only meaningful
        // for a single destination or a liveliness assertion.
        if ((number_of_readers == 1) || liveliness) {
            firstSeq = next_sequence_number();
            lastSeq = firstSeq - 1;
        } else {
            return;
        }
    } else {
        assert(firstSeq <= lastSeq);

        // Check if it has to be sent a GAP with the gaps in the history
    }

    incrementHBCount();

    (void)message_group.add_heartbeat(firstSeq, lastSeq, m_heartbeatCount, final, liveliness);

    // Update calculate of heartbeat piggyback.
    currentUsageSendBufferSize_ = static_cast<int32_t>(sendBufferSize_);
}

/**
 * @brief Possibly piggyback a HEARTBEAT on an outgoing data message group.
 *
 * A heartbeat is piggybacked when: the history is full or there are changes not
 * yet fully acknowledged; the writer is a builtin SEDP writer; the send-buffer
 * budget has been consumed; or a configured number of samples per heartbeat
 * (samples_per_hb) has been reached.
 * @param last_bytes_processed [in/out] Bytes already accounted for in the group,
 *        used to decrement the remaining send-buffer budget.
 */
void StatefulWriter::send_heartbeat_piggyback_nts_(RTPSMessageGroup& message_group,
                                                   LocatorSelectorSender& locator_selector,
                                                   uint32_t& last_bytes_processed) {
    if (!disable_heartbeat_piggyback_) {
        if (mp_history->isFull() || (next_all_acked_notify_sequence_ < get_seq_num_min())) {
            select_all_readers_nts(message_group, locator_selector);
            size_t number_of_readers = locator_selector.all_remote_readers.size();
            send_heartbeat_nts_(number_of_readers, message_group, disable_positive_acks_);
        } else if (this->m_guid.entityId == ENTITYID_SEDP_BUILTIN_PUBLICATIONS_WRITER ||
                   this->m_guid.entityId == ENTITYID_SEDP_BUILTIN_SUBSCRIPTIONS_WRITER) {
            send_heartbeat_nts_(1U, message_group, disable_positive_acks_);
        } else {

            // Charge the bytes sent since the last call against the piggyback budget.
            uint32_t current_bytes = message_group.get_current_bytes_processed();
            currentUsageSendBufferSize_ -= static_cast<int32_t>(current_bytes - last_bytes_processed);
            last_bytes_processed = current_bytes;
            if (currentUsageSendBufferSize_ < 0) {
                select_all_readers_nts(message_group, locator_selector);
                size_t number_of_readers = locator_selector.all_remote_readers.size();
                send_heartbeat_nts_(number_of_readers, message_group, disable_positive_acks_);
            } else if (samples_per_hb > 0U) {
                // Heartbeat every samples_per_hb samples.
                piggyback_sample_count++;
                if (piggyback_sample_count >= samples_per_hb) {
                    piggyback_sample_count = 0U;
                    select_all_readers_nts(message_group, locator_selector);
                    size_t number_of_readers = locator_selector.all_remote_readers.size();
                    send_heartbeat_nts_(number_of_readers, message_group, disable_positive_acks_);
                }
            }
        }
    }
}

/**
 * @brief Re-schedule delivery of changes that readers NACKed.
 *
 * For every matched remote reader, changes transitioning from REQUESTED to
 * UNSENT are handed back to the flow controller as "old samples" for resend.
 */
void StatefulWriter::perform_nack_response() {
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);

    for (ReaderProxy* reader : matched_remote_readers_) {
        // Fix: the per-reader resend count returned here was previously
        // accumulated into a local that was never read; drop the dead code.
        (void)reader->perform_acknack_response([&](ChangeForReader_t& change) {
            // This lambda is called if the ChangeForReader_t pass from REQUESTED to UNSENT.
            assert(nullptr != change.getChange());
            (void)flow_controller_->add_old_sample(this, change.getChange());
        });
    }

    lock.unlock();
}

/**
 * @brief Nack-suppression timer expired for @c reader_guid: mark its recently
 *        sent changes as UNDERWAY→UNACKNOWLEDGED and restart the periodic
 *        heartbeat so the reader gets a chance to (re)acknowledge.
 */
void StatefulWriter::perform_nack_supression(const GUID_t& reader_guid) {
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
#ifdef INTRA_PROCESS_ENABLE
    (void)for_matched_readers(matched_local_readers_, matched_remote_readers_,
#else
    (void)for_matched_readers(matched_remote_readers_,
#endif
                              [this, &reader_guid](ReaderProxy* reader) {
                                  if (reader->guid() == reader_guid) {
                                      (void)reader->perform_nack_supression();
                                      periodic_hb_event_->restart_timer();
                                      // Returning true stops the iteration: GUIDs are unique.
                                      return true;
                                  }
                                  return false;
                              });
}

/**
 * @brief Process an incoming ACKNACK submessage.
 * @param writer_guid GUID the acknack is addressed to.
 * @param reader_guid GUID of the originating reader.
 * @param ack_count Acknack counter, used to discard stale/duplicate acknacks.
 * @param sn_set Set of requested (NACKed) sequence numbers; its base marks
 *        everything below it as acknowledged.
 * @param final_flag Final flag of the ACKNACK submessage.
 * @param result [out] true when this writer is the addressee.
 * @return Same value as @c result.
 */
bool StatefulWriter::process_acknack(const GUID_t& writer_guid, const GUID_t& reader_guid, uint32_t ack_count,
                                     const SequenceNumberSet_t& sn_set, const bool final_flag, bool& result) {
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    result = (m_guid == writer_guid);

    if (result) {
        // Sanity check: the reader cannot reference sequences we never announced.
        SequenceNumber_t received_sequence_number = sn_set.empty() ? sn_set.base() : sn_set.max();
        if (received_sequence_number <= next_sequence_number()) {
#ifdef INTRA_PROCESS_ENABLE
            (void)for_matched_readers(matched_local_readers_, matched_remote_readers_, [&](ReaderProxy* remote_reader) {
#else
            (void)for_matched_readers(matched_remote_readers_, [&](ReaderProxy* remote_reader) {
#endif
                if (remote_reader->guid() == reader_guid) {
                    if (remote_reader->check_and_set_acknack_count(ack_count,
                                                                   sn_set.base() == SequenceNumber_t(0, 0U))) {
                        // Sequence numbers before Base are set as Acknowledged.
                        remote_reader->acked_changes_set(sn_set.base());
                        if (sn_set.base() > SequenceNumber_t(0, 0U)) {
                            // Prepare GAP for requested  samples that are not in history or are
                            // irrelevants.
                            RTPSMessageGroup group(mp_RTPSParticipant, this, remote_reader->message_sender());
                            RTPSGapBuilder gap_builder(group);

                            if (remote_reader->requested_changes_set(sn_set, gap_builder, get_seq_num_min())) {
                                // Some changes were actually requested: schedule the nack response.
                                nack_response_event_->restart_timer();
                            } else if (!final_flag) {
                                periodic_hb_event_->restart_timer();
                            }
                            (void)gap_builder.flush();
                        } else if (sn_set.empty() && (!final_flag)) {
                            // This is the preemptive acknack.
                            if (remote_reader->process_initial_acknack([&](ChangeForReader_t& change_reader) {
                                    assert(nullptr != change_reader.getChange());
                                    (void)flow_controller_->add_old_sample(this, change_reader.getChange());
                                })) {
                                if (remote_reader->is_remote_and_reliable()) {
                                    // Send heartbeat if requested
                                    (void)send_heartbeat_to_nts(*remote_reader, false, true);
                                    periodic_hb_event_->restart_timer();
                                }
                            }
#ifdef INTRA_PROCESS_ENABLE
                            if (remote_reader->is_local_reader()) {
                                (void)intraprocess_heartbeat(remote_reader);
                            }
#endif
                        }

                        // Check if all CacheChange are acknowledge, because a user could be
                        // waiting for this, or some CacheChanges could be removed if we are
                        // VOLATILE
                        check_acked_status();
                    }
                    // Matching reader found: stop the iteration.
                    return true;
                }
                return false;
            });
        } else {
            print_inconsistent_acknack(writer_guid, reader_guid, sn_set.base(), received_sequence_number,
                                       next_sequence_number());
        }
    }

    return result;
}

/**
 * @brief Process an incoming NACKFRAG submessage: record the missing fragments
 *        for the given change on the matching reader proxy and schedule the
 *        nack response if anything must be resent.
 * @param result [out] true when this writer is the addressee.
 * @return Same value as @c result.
 */
bool StatefulWriter::process_nack_frag(const GUID_t& writer_guid, const GUID_t& reader_guid, uint32_t ack_count,
                                       const SequenceNumber_t& seq_num, const FragmentNumberSet_t fragments_state,
                                       bool& result) {
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    result = false;
    if (m_guid == writer_guid) {
        result = true;
        (void)for_matched_readers(
#ifdef INTRA_PROCESS_ENABLE
            matched_local_readers_, matched_remote_readers_,
#else
            matched_remote_readers_,
#endif
            [this, &reader_guid, &ack_count, &seq_num, &fragments_state](ReaderProxy* reader) {
                if (reader->guid() == reader_guid) {
                    if (reader->process_nack_frag(reader_guid, ack_count, seq_num, fragments_state)) {
                        nack_response_event_->restart_timer();
                    }
                    // Matching reader found: stop the iteration.
                    return true;
                }
                return false;
            });
    }

    return result;
}

/**
 * @brief Disable-positive-acks timer callback: auto-acknowledge changes whose
 *        keep duration has elapsed.
 * @return true to keep the timer armed (interval updated for the next change),
 *         false when there is no further change in the history to wait on.
 */
bool StatefulWriter::ack_timer_expired() {
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);

    // The timer has expired so the earliest non-acked change must be marked as acknowledged
    // This will be done in the first while iteration, as we start with a negative interval

    auto interval = -keep_duration_us_;

    // On the other hand, we've seen in the tests that if samples are sent very quickly with little
    // time between consecutive samples, the timer interval could end up being negative
    // In this case, we keep marking changes as acknowledged until the timer is able to keep up,
    // hence the while loop

    while (interval.count() < 0) {
#ifdef INTRA_PROCESS_ENABLE
        (void)for_matched_readers(matched_local_readers_, matched_remote_readers_, [this](ReaderProxy* reader) {
#else
        (void)for_matched_readers(matched_remote_readers_, [this](ReaderProxy* reader) {
#endif
            // Only readers configured without positive acks are auto-acknowledged.
            if (reader->disable_positive_acks()) {
                reader->acked_changes_set(last_sequence_number_ + 1U);
            }
            return false;
        });
        last_sequence_number_++;

        // Get the next cache change from the history
        CacheChange_t* change;

        if (!mp_history->get_change(last_sequence_number_, getGuid(), &change)) {
            return false;
        }

        // Remaining time until that change's keep duration expires.
        auto source_timestamp = system_clock::time_point() + nanoseconds(change->sourceTimestamp.to_ns());
        auto now = system_clock::now();
        interval = source_timestamp - now + keep_duration_us_;
    }
    assert(interval.count() >= 0);

    (void)ack_event_->update_interval_millisec((double_t)duration_cast<milliseconds>(interval).count());
    return true;
}

/**
 * @brief Warn about an ACKNACK whose requested range exceeds what this writer
 *        has ever announced (protocol inconsistency from the remote reader).
 */
void StatefulWriter::print_inconsistent_acknack(const GUID_t& writer_guid, const GUID_t& reader_guid,
                                                const SequenceNumber_t& min_requested_sequence_number,
                                                const SequenceNumber_t& max_requested_sequence_number,
                                                const SequenceNumber_t& next_sequence_number) {
    logWarning(RTPS_WRITER, "Inconsistent acknack received. Local Writer "
                                << writer_guid << " next SequenceNumber " << next_sequence_number << ". Remote Reader "
                                << reader_guid << " requested range is  [" << min_requested_sequence_number << ", "
                                << max_requested_sequence_number << "].");
    // Silence unused-parameter warnings when the warning log level is compiled out.
    (void)writer_guid;
    (void)reader_guid;
    (void)min_requested_sequence_number;
    (void)max_requested_sequence_number;
    (void)next_sequence_number;
}

/**
 * @brief Deliver one cache change to all matched readers (flow-controller entry).
 *
 * Local (intra-process) readers get the sample directly; remote readers get it
 * through the network path. Afterwards the acked status is refreshed since
 * intra-process delivery may complete acknowledgement immediately.
 * @return Result of the network delivery, or DELIVERED when only local readers exist.
 */
DeliveryRetCode StatefulWriter::deliver_sample_nts(
    CacheChange_t* cache_change, RTPSMessageGroup& group,
    LocatorSelectorSender& locator_selector,  // Object locked by FlowControllerImpl
    const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time, bool /*in_history*/) {
    DeliveryRetCode ret_code = DeliveryRetCode::DELIVERED;
#ifdef INTRA_PROCESS_ENABLE
    if (there_are_local_readers_) {
#ifdef BATCH_SEND_ENABLE
        // A batched change carries several wrapped changes; deliver each one.
        if (cache_change->batch_send) {
            for (auto it = cache_change->batch_changes.begin(); it != cache_change->batch_changes.end(); it++) {
                deliver_sample_to_intraprocesses(*it);
            }
        } else {
#else
        {
#endif
            deliver_sample_to_intraprocesses(cache_change);
        }
    }
#endif
    if (there_are_remote_readers_) {
        ret_code = deliver_sample_to_network(cache_change, group, locator_selector, max_blocking_time);
    }
    check_acked_status();
    return ret_code;
}

/**
 * @brief Append GAP entries to @c group for every sequence number missing
 *        between consecutive changes in the history.
 */
void StatefulWriter::add_gaps_for_holes_in_history_(RTPSMessageGroup& group) {
    const SequenceNumber_t min_in_history = get_seq_num_min();
    const SequenceNumber_t max_in_history = get_seq_num_max();

    if (SequenceNumber_t::unknown() == min_in_history) {
        return;
    }
    // A dense history holds exactly (max - min + 1) changes; fewer means holes.
    if ((max_in_history.to64long() - min_in_history.to64long() + 1U) == mp_history->getHistorySize()) {
        return;
    }

    RTPSGapBuilder gaps(group);

    std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
    History::const_iterator it = mp_history->changesBegin();
    if (it == mp_history->changesEnd()) {
        elogError(RTPS_WRITER, RetCode_t::RETCODE_NO_DATA, "Topic " << getTopicName() << " mp_history is empty.");
        return;
    }
    SequenceNumber_t expected = (*it)->sequenceNumber + 1U;
    for (++it; it != mp_history->changesEnd(); ++it) {
        // Every sequence number skipped before this change is a hole.
        for (; expected != (*it)->sequenceNumber; ++expected) {
            (void)gaps.add(expected);
        }
        ++expected;
    }
    history_lock.unlock();
    (void)gaps.flush();
}

/// @brief Snapshot the GUIDs of all currently matched remote readers.
evbs::edds::dds::builtin::StatisticMatchGuids StatefulWriter::get_remote_guids() {
    evbs::edds::dds::builtin::StatisticMatchGuids guids;

    std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);
    for (ReaderProxy* reader : matched_remote_readers_) {
        guids.push_back(reader->guid());
    }
    return guids;
}

/// @brief Build a statistics entry for every matched remote reader proxy.
edds::dds::builtin::StatisticProxyInfos StatefulWriter::get_proxy_infos() {
    edds::dds::builtin::StatisticProxyInfos infos;

    std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);
    for (ReaderProxy* reader : matched_remote_readers_) {
        edds::dds::builtin::ProxyInfo info {};
        info.type = evbs::edds::dds::builtin::STATICTIC_ENTITY_READER;
        info.is_alive = reader->active();
        info.start_time = reader->get_start_time();
        // Heartbeat count is not tracked per remote reader on the writer side.
        info.last_heartbeat_count = 0;
        info.last_acknack_count = reader->get_last_acknack_count();
        info.last_nackfrag_count = reader->get_last_nackfrag_count();
        info.changes_low_mark = reader->changes_low_mark().to64long();
        info.reader_proxy_async_locator_entry = *reader->async_locator_selector_entry();
        info.reader_proxy_general_locator_entry = *reader->general_locator_selector_entry();
        infos.push_back(std::move(info));
    }
    return infos;
}

/**
 * @brief Filter and transform the unicast locators announced by a remote reader,
 *        then update the reader proxy and the locator selectors.
 *
 * DSF locators pass through untouched. UDS/UDPv4 locators are admitted when the
 * writer is builtin or the corresponding user transport is registered, and they
 * can be transformed into a valid local-view locator.
 */
void StatefulWriter::FilterReaderLocators(ReaderProxyData& rdata, ReaderProxy* rp) {
    LocatorList tmp_unicast;
    for (auto loc : rdata.remote_locators().unicast) {
        Locator_t temp_locator;
        // BUGFIX: locator_type was previously left uninitialized for locator kinds
        // other than UDS/UDPv4/DSF (e.g. UDPv6), and then read by
        // registed_user_transport() on non-builtin writers — undefined behavior.
        // It now has a defined default and unrecognized kinds are only admitted
        // through the builtin-endpoint path.
        vbs::RemoteEndpointInfo::LocatorType locator_type = vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP;
        bool known_kind = true;
        if (loc.kind == LOCATOR_KIND_UDS) {
            locator_type = vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS;
        } else if (loc.kind == LOCATOR_KIND_UDPv4) {
            locator_type = vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP;
        } else if (loc.kind == LOCATOR_KIND_DSF) {
            // DSF locators need no transformation.
            tmp_unicast.push_back(loc);
            continue;
        } else {
            known_kind = false;
        }
        if ((this->m_guid.is_builtin() || (known_kind && mp_RTPSParticipant->registed_user_transport(locator_type))) &&
            mp_RTPSParticipant->network_factory()->transform_remote_locator(mp_RTPSParticipant->get_transport_name(),
                                                                            loc, temp_locator) &&
            mp_RTPSParticipant->is_valid_remote_locator(loc)) {
            tmp_unicast.push_back(temp_locator);
        }
    }
    rdata.set_announced_unicast_locators(tmp_unicast);
    if (rp->update(rdata)) {
        // Re-apply external locator filtering and refresh both selectors.
        filter_remote_locators(*rp->general_locator_selector_entry(), m_att.external_unicast_locators,
                               m_att.ignore_non_matching_locators);
        filter_remote_locators(*rp->async_locator_selector_entry(), m_att.external_unicast_locators,
                               m_att.ignore_non_matching_locators);
        update_reader_info(locator_selector_general_, true);
        update_reader_info(locator_selector_async_, true);
    }
}

}  // namespace rtps
}  // namespace ertps
}  // namespace evbs
