// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ------------------------------------------------------------------
// Modification history:
// feature: listener code decoupling
// feature: change E2E packet format
// feature: add SetTrustRemoteEndpoint API to dispatcherReader and add locator to dispatcherWriter
// feature: support auth token
// feature: RemoteEndpointInfo support reliabilityKind
// feature: develop dispatcher
// feature: resolve android compile problem
// feature: Split history as an independent common module
// feature: discovery support client and server
// feature: interval report metrics
// feature: another method of counting packet lost
// feature: destination order support by reception timestamp
// feature: Crop code for in-process communication
// feature: E2E New Requirement Development
// feature: log namespace change
// feature: Development of new requirements for E2E functionality
// ------------------------------------------------------------------

#include <edds/rtps/reader/StatefulReader.h>
#include <history/RemoteEndpointInfo.h>
#include <history/ReaderListener.h>
#include <history/ReaderHistory.h>
#include <elog/log/Log.h>
#include <edds/rtps/messages/RTPSMessageCreator.h>
#include <rtps/participant/RTPSParticipantImpl.h>
#include <rtps/reader/WriterProxy.h>
#include <ertps/utils/TimeConversion.h>

#include <edds/rtps/builtin/BuiltinProtocols.h>
#include <edds/rtps/builtin/liveliness/WLP.h>
#include <edds/rtps/writer/LivelinessManager.h>

#include <mutex>
#include <thread>

#include <cassert>
#include <map>

#include <deps/core/status/E2EExceptionStatus.hpp>

#include <history/HistoryAttributesExtension.hpp>

#include "rtps/RTPSDomainImpl.hpp"
#include "reader_utils.hpp"

#define IDSTRING "(ID:" << std::this_thread::get_id() << ") " <<

using namespace evbs::ertps::rtps;
using namespace vbs::common;

#if !defined(_WIN32)

#include <StaticTracepoint.h>
#include <tracepoint/TracePointManager.hpp>
#include <deps/common/SystemInfo.hpp>

#define SEC(name) __attribute__((section(name), used))
unsigned short ertps_ertps_ful_reader_udp_recv_semaphore SEC(".probes");

void usdt_in_stateful_reader(const CacheChange_t* a_change, const std::string& topic_name, uint8_t trace_point_status,
                             uint32_t domain_id) {
    auto persistent_trace_point_status =
        vbsutil::tracepoint::TracePointManager::get_persistent_trace_point_status(topic_name);
    if (persistent_trace_point_status != vbsutil::tracepoint::INVALID &&
        persistent_trace_point_status > trace_point_status) {
        trace_point_status = persistent_trace_point_status;
    }
    if ((trace_point_status == vbsutil::tracepoint::ON_ENTRANCE ||
         trace_point_status == vbsutil::tracepoint::ON_EXPORT) &&
        ertps_ertps_ful_reader_udp_recv_semaphore) {
        logDebug(RTPS_READER, "Trace point status is OPEN");
        uint64_t seq_num = a_change->sequenceNumber.to64long();
        octet* serialized_data = a_change->serializedPayload.data;
        uint32_t data_size = a_change->serializedPayload.length;
        uint32_t vbs_version =
            (static_cast<uint32_t>(c_Version_vbs[0]) | (static_cast<uint32_t>(c_Version_vbs[1]) << 8) |
             (static_cast<uint32_t>(c_Version_vbs[2]) << 16) | (static_cast<uint32_t>(c_Version_vbs[3]) << 24));
        uint32_t src_ip = a_change->src_ip;
        uint32_t dst_ip = a_change->dst_ip;
        uint32_t mask_len = a_change->mask_len;
        uint64_t timestamp = static_cast<uint64_t>(a_change->reader_info.receptionTimestamp.to_ns());
        vbsutil::tracepoint::UsdtInfo usdt_info = {seq_num, timestamp, vbs_version, domain_id,
                                                   src_ip,  dst_ip,    mask_len,    vbsutil::tracepoint::ROLE_SUB};

        unsigned char* usdt_msg_addr = reinterpret_cast<unsigned char*>(&usdt_info);
        FOLLY_SDT_WITH_SEMAPHORE4(ertps, ertps_ful_reader_udp_recv, usdt_msg_addr, topic_name.c_str(), serialized_data,
                                  data_size);
    } else {
        logDebug(RTPS_READER, "Trace point status is NOT_OPEN");
    }
}
#endif

StatefulReader::~StatefulReader() {
    logDebug(RTPS_READER, "StatefulReader " << getGuid() << " destructor.");

    // Flip is_alive_ under the mutex. Once it is false, no other thread will
    // touch matched_writers_ or matched_writers_pool_, so both containers can
    // be torn down below without holding the lock.
    {
        std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
        is_alive_ = false;
    }

    // Destroy every proxy, active and pooled alike.
    auto destroy_proxies = [](auto& proxies) {
        for (WriterProxy* proxy : proxies) {
            delete proxy;
        }
    };
    destroy_proxies(matched_writers_);
    destroy_proxies(matched_writers_pool_);
}

// Basic constructor: history and listener supplied; payload/change pools are
// created internally by the RTPSReader base class.
StatefulReader::StatefulReader(RTPSParticipantImpl* pimpl, const GUID_t& guid, const ReaderAttributes& att,
                               ReaderHistory* hist, ReaderListener* listen)
    : RTPSReader(pimpl, guid, att, hist, listen),
      acknack_count_(0U),
      nackfrag_count_(0U),
      times_(att.times),
      matched_writers_(att.matched_writers_allocation),
      matched_writers_pool_(att.matched_writers_allocation),
      proxy_changes_config_(resource_limits_from_history(hist->m_att, 0U)),
      disable_positive_acks_(att.disable_positive_acks),
      is_alive_(true) {
    // AppID = 0;
    init(pimpl, att);
}

// Constructor variant that shares an externally provided change pool.
StatefulReader::StatefulReader(RTPSParticipantImpl* pimpl, const GUID_t& guid, const ReaderAttributes& att,
                               const std::shared_ptr<IChangePool>& change_pool, ReaderHistory* hist,
                               ReaderListener* listen)
    : RTPSReader(pimpl, guid, att, change_pool, hist, listen),
      acknack_count_(0U),
      nackfrag_count_(0U),
      times_(att.times),
      matched_writers_(att.matched_writers_allocation),
      matched_writers_pool_(att.matched_writers_allocation),
      proxy_changes_config_(resource_limits_from_history(hist->m_att, 0U)),
      disable_positive_acks_(att.disable_positive_acks),
      is_alive_(true) {
    // AppID = 0;
    init(pimpl, att);
}

// Constructor variant that shares an externally provided payload pool.
StatefulReader::StatefulReader(RTPSParticipantImpl* pimpl, const GUID_t& guid, const ReaderAttributes& att,
                               const std::shared_ptr<IPayloadPool>& payload_pool, ReaderHistory* hist,
                               ReaderListener* listen)
    : RTPSReader(pimpl, guid, att, payload_pool, hist, listen),
      acknack_count_(0U),
      nackfrag_count_(0U),
      times_(att.times),
      matched_writers_(att.matched_writers_allocation),
      matched_writers_pool_(att.matched_writers_allocation),
      proxy_changes_config_(resource_limits_from_history(hist->m_att, 0U)),
      disable_positive_acks_(att.disable_positive_acks),
      is_alive_(true) {
    // AppID = 0;
    init(pimpl, att);
}

// Constructor variant that shares both externally provided pools
// (payload and change).
StatefulReader::StatefulReader(RTPSParticipantImpl* pimpl, const GUID_t& guid, const ReaderAttributes& att,
                               const std::shared_ptr<IPayloadPool>& payload_pool,
                               const std::shared_ptr<IChangePool>& change_pool, ReaderHistory* hist,
                               ReaderListener* listen)
    : RTPSReader(pimpl, guid, att, payload_pool, change_pool, hist, listen),
      acknack_count_(0U),
      nackfrag_count_(0U),
      times_(att.times),
      matched_writers_(att.matched_writers_allocation),
      matched_writers_pool_(att.matched_writers_allocation),
      proxy_changes_config_(resource_limits_from_history(hist->m_att, 0U)),
      disable_positive_acks_(att.disable_positive_acks),
      is_alive_(true) {
    // AppID = 0;
    init(pimpl, att);
}

// Shared constructor tail: pre-allocate the initial writer-proxy pool and
// hook the history's change-removal callback.
void StatefulReader::init(RTPSParticipantImpl* pimpl, const ReaderAttributes& att) {
    const RTPSParticipantAttributes& participant_att = pimpl->getRTPSParticipantAttributes();
    const size_t initial_proxies = att.matched_writers_allocation.initial;

    // Pre-allocate proxies so the matching fast path does not allocate.
    for (size_t idx = 0U; idx < initial_proxies; ++idx) {
        try {
            (void)matched_writers_pool_.push_back(
                new WriterProxy(this, participant_att.allocation.locators, proxy_changes_config_));
        } catch (const foonathan::memory::out_of_memory& e) {  //LCOV_EXCL_START
            (void)e;

            logWarning(RTPS_READER, std::string("catch out_of_memory exception:") << e.what());
        }  //LCOV_EXCL_STOP
    }

    // Let the history notify this reader whenever it removes a change.
    mp_history->change_removed_by_history =
        std::bind(&StatefulReader::change_removed_by_history, this, std::placeholders::_1);
}

/**
 * @brief Add (or update) a matched remote writer.
 *
 * If a proxy for @c wdata.guid() already exists, its information is updated
 * and the listener (if any) is notified with CHANGED_QOS_WRITER; false is
 * returned. Otherwise a proxy is taken from the inactive pool (or allocated,
 * if the limit allows), started, registered with the liveliness manager when
 * a finite lease is configured, and the listener is notified with
 * DISCOVERED_WRITER; true is returned.
 *
 * @param wdata Discovery data of the remote writer; its GUID must be known.
 * @return true when the writer was newly matched, false on update or failure.
 */
bool StatefulReader::matched_writer_add(WriterProxyData& wdata) {
    assert(wdata.guid() != c_Guid_Unknown);
    ReaderListener* listener = nullptr;
    WriterProxy* wp = nullptr;

    {
        std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);

        if (!is_alive_) {
            return false;
        }

        listener = mp_listener;
#ifdef INTRA_PROCESS_ENABLE
        bool is_same_process = RTPSDomainImpl::should_intraprocess_between(m_guid, wdata.guid());
#endif
        // Update path: the writer is already matched.
        for (WriterProxy* it : matched_writers_) {
            if (it->guid() == wdata.guid()) {
                logDebug(RTPS_READER, "Attempting to add existing writer " << wdata.guid() << ", updating information");
                // If Ownership strength changes then update all history instances.
                if ((EXCLUSIVE_OWNERSHIP_QOS == m_att.ownershipKind) &&
                    (it->ownership_strength() != wdata.m_qos.m_ownershipStrength.value)) {
                    mp_history->writer_update_its_ownership_strength(it->guid(), wdata.m_qos.m_ownershipStrength.value);
                }

                // NOTE(review): it->update(wdata) and the sender-resource
                // creation below only run when a listener is attached —
                // confirm this asymmetry is intended.
                if (nullptr != listener) {
                    // call the listener without the lock taken
                    guard.unlock();

                    vbs::RemoteEndpointInfo winfo;
                    winfo.guid(wdata.guid());
                    winfo.topicName(wdata.topicName());
                    winfo.typeName(wdata.typeName());
                    winfo.topicKind(wdata.topicKind());
                    winfo.reliabilityKind(ReliabilityKind_t::RELIABLE);
                    winfo.host_id(wdata.get_host_id());
                    winfo.process_id(wdata.get_pid());

                    // The lock is not held here.
                    uint16_t local_host_id = m_guid.guidPrefix.is_guid_static() ? vbs::SystemInfo::instance().host_id()
                                                                                : m_guid.guidPrefix.get_host_id();
                    // Classify the writer's unicast locators; a same-host DSF
                    // locator takes precedence and ends the scan.
                    for (auto loc : wdata.remote_locators().unicast) {
                        if (loc.kind == LOCATOR_KIND_DSF &&
                            m_guid.is_on_same_host_as(local_host_id, wdata.guid(), wdata.get_host_id())) {
                            winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_DSF);
                            winfo.locator(loc);
                            break;
                        } else if (loc.kind == LOCATOR_KIND_UDPv4) {
                            winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP);
                        } else if (loc.kind == LOCATOR_KIND_UDS) {
                            winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS);
                        }
                    }

                    listener->on_writer_discovery(WRITER_DISCOVERY_STATUS::CHANGED_QOS_WRITER, wdata.guid(), &winfo);
                    // Consistent with the writer filtering rules, prevent the creation of multiple senderResource reply ack
                    if (!winfo.get_matched_status() &&
                        ((!wdata.is_filtered(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP) &&
                          winfo.locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP)) ||
                         (!wdata.is_filtered(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS) &&
                          winfo.locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS)))) {
                        FilterWriterLocators(wdata, it);
                    }
                    it->update(wdata);
#ifdef INTRA_PROCESS_ENABLE
                    if (!is_same_process) {
#else
                    {
#endif
                        for (const Locator_t& locator : it->remote_locators_shrinked()) {
                            getRTPSParticipant()->createSenderResources(locator);
                        }
                    }

                    if (!this->m_guid.is_builtin()) {
                        elogInfo(RTPS_READER, "Remote writer locator when CHANGED_QOS_WRITER: "
                                                  << wdata.remote_locators() << " guid: " << wdata.guid());
                    }
                }
                (void)wp;
                // Already matched: report "not newly added".
                return false;
            }
        }

        // Get a writer proxy from the inactive pool (or create a new one if necessary and allowed)
        if (matched_writers_pool_.empty()) {
            size_t max_readers = matched_writers_pool_.max_size();
            if (getMatchedWritersSize() + matched_writers_pool_.size() < max_readers) {
                const RTPSParticipantAttributes& part_att = mp_RTPSParticipant->getRTPSParticipantAttributes();
                try {
                    wp = new WriterProxy(this, part_att.allocation.locators, proxy_changes_config_);
                } catch (const foonathan::memory::out_of_memory& e) {  //LCOV_EXCL_START
                    (void)e;

                    logWarning(RTPS_READER, std::string("catch out_of_memory exception:") << e.what());
                }  //LCOV_EXCL_STOP
            } else {
                logWarning(RTPS_READER,
                           "Maximum number of reader proxies (" << max_readers << ") reached for writer " << m_guid);
                return false;
            }
        } else {
            wp = matched_writers_pool_.back();
            matched_writers_pool_.pop_back();
        }

        SequenceNumber_t initial_sequence;
        try {
            add_persistence_guid(wdata.guid(), wdata.persistence_guid());
        } catch (const foonathan::memory::out_of_memory& e) {  //LCOV_EXCL_START
            (void)e;

            logWarning(RTPS_READER, std::string("catch out_of_memory exception:") << e.what());
        } catch (const foonathan::memory::bad_alignment& e) {
            (void)e;

            logWarning(RTPS_READER, std::string("catch bad_alignment exception:") << e.what());
        } catch (const std::exception& e) {
            (void)e;

            logWarning(RTPS_READER, std::string("catch other exception:") << e.what());
        }  //LCOV_EXCL_STOP
        // Resume from the last sequence number already notified for this GUID
        // (relevant when the writer had been matched previously).
        initial_sequence = get_last_notified(wdata.guid());

        wp->start(wdata, initial_sequence);
#ifdef INTRA_PROCESS_ENABLE
        if (!is_same_process) {
#else
        {
#endif
            for (const Locator_t& locator : wp->remote_locators_shrinked()) {
                getRTPSParticipant()->createSenderResources(locator);
            }
        }

        (void)matched_writers_.push_back(wp);
        logDebug(RTPS_READER, "Writer Proxy " << wp->guid() << " added to " << m_guid.entityId);
    }
    // Register the writer for liveliness tracking when a finite lease is set.
    if (liveliness_lease_duration_ < c_TimeInfinite) {
        auto wlp = this->mp_RTPSParticipant->wlp();
        if (wlp != nullptr) {
            (void)wlp->sub_liveliness_manager_->add_writer(wdata.guid(), liveliness_kind_, liveliness_lease_duration_);
        } else {
            elogError(RTPS_LIVELINESS, RetCode_t::RETCODE_NOT_ENABLED,
                      "Finite liveliness lease duration but WLP not enabled, cannot add writer");
        }
    }

    if (nullptr != listener) {
        vbs::RemoteEndpointInfo winfo;
        winfo.guid(wdata.guid());
        winfo.topicName(wdata.topicName());
        winfo.typeName(wdata.typeName());
        winfo.topicKind(wdata.topicKind());
        winfo.host_id(wdata.get_host_id());
        winfo.process_id(wdata.get_pid());

        if (wdata.m_qos.m_reliability.kind == ReliabilityQosPolicyKind::BEST_EFFORT_RELIABILITY_QOS) {
            winfo.reliabilityKind(ReliabilityKind_t::BEST_EFFORT);
        } else {
            winfo.reliabilityKind(ReliabilityKind_t::RELIABLE);
        }
        // Classify the writer's unicast locators. Unlike the update path,
        // there is no break here: a later DSF locator overwrites an earlier
        // one.
        for (auto loc : wdata.remote_locators().unicast) {
            if (loc.kind == LOCATOR_KIND_DSF) {
                winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_DSF);
                winfo.locator(loc);
            } else if (loc.kind == LOCATOR_KIND_UDPv4) {
                winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP);
            } else if (loc.kind == LOCATOR_KIND_UDS) {
                winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS);
            }
        }

        // The lock is not held here.
        listener->on_writer_discovery(WRITER_DISCOVERY_STATUS::DISCOVERED_WRITER, wdata.guid(), &winfo);

        // Additionally convert and filter the UDP or UDS locators.
        if ((!wdata.is_filtered(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP) &&
             winfo.locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP)) ||
            (!wdata.is_filtered(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS) &&
             winfo.locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS))) {
            FilterWriterLocators(wdata, wp);
            wp->update(wdata);
            for (const Locator_t& locator : wp->remote_locators_shrinked()) {
                getRTPSParticipant()->createSenderResources(locator);
            }
        }

        if (!this->m_guid.is_builtin()) {
            elogInfo(RTPS_READER, "Remote writer locator when DISCOVERED_WRITER: " << wdata.remote_locators()
                                                                                   << " guid: " << wdata.guid());
        }
    }

    return true;
}

/**
 * @brief Unmatch a remote writer from this reader.
 *
 * Deregisters the writer from the liveliness manager (when a finite lease is
 * configured), removes the corresponding proxy from matched_writers_, stops
 * it, returns it to the inactive pool, and notifies the listener with
 * REMOVED_WRITER.
 *
 * @param writer_guid       GUID of the writer to remove.
 * @param removed_by_lease  true when the removal is caused by a liveliness
 *                          lease expiration (forwarded to
 *                          remove_persistence_guid).
 * @return true when a proxy for the writer existed and was removed.
 */
bool StatefulReader::matched_writer_remove(const GUID_t& writer_guid, bool removed_by_lease) {
    // Liveliness deregistration is done before taking mp_mutex to avoid a
    // deadlock with the LivelinessManager.
    if (is_alive_ && liveliness_lease_duration_ < c_TimeInfinite) {
        auto wlp = this->mp_RTPSParticipant->wlp();
        if (wlp != nullptr) {
            (void)wlp->sub_liveliness_manager_->remove_writer(writer_guid, liveliness_kind_,
                                                              liveliness_lease_duration_);
        } else {
            elogError(RTPS_LIVELINESS, RetCode_t::RETCODE_NOT_ENABLED,
                      "Finite liveliness lease duration but WLP not enabled, cannot remove writer");
        }
    }

    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    WriterProxy* wproxy = nullptr;
    if (is_alive_) {
        // Remove cachechanges belonging to the unmatched writer
        mp_history->writer_unmatched(writer_guid, get_last_notified(writer_guid));

        for (ResourceLimitedVector<WriterProxy*>::iterator it = matched_writers_.begin(); it != matched_writers_.end();
             ++it) {
            if ((*it)->guid() == writer_guid) {
                logDebug(RTPS_READER, "Writer proxy " << writer_guid << " removed from " << m_guid.entityId);
                wproxy = *it;
                (void)matched_writers_.erase(it);

                break;
            }
        }

#if HAVE_SECURITY
        if (mp_RTPSParticipant->security_manager().authTokenAlwaysOn || mp_RTPSParticipant->is_secure()) {
            mp_RTPSParticipant->security_manager().removeRemoteAuthToken(writer_guid);
        }
#endif

        if (wproxy != nullptr) {
            remove_persistence_guid(wproxy->guid(), wproxy->persistence_guid(), removed_by_lease);

            // stop() is called without the lock (it may cancel timers that
            // themselves take locks); re-lock to return the proxy to the pool.
            lock.unlock();
            wproxy->stop();
            lock.lock();
            (void)matched_writers_pool_.push_back(wproxy);
            if (nullptr != mp_listener) {
                // call the listener without the lock taken
                ReaderListener* listener = mp_listener;
                lock.unlock();
                // The lock is not held here.
                listener->on_writer_discovery(WRITER_DISCOVERY_STATUS::REMOVED_WRITER, writer_guid, nullptr);
            }
        } else {
            logDebug(RTPS_READER,
                     "Writer Proxy " << writer_guid << " doesn't exist in reader " << this->getGuid().entityId);
        }
    }

    return (wproxy != nullptr);
}

bool StatefulReader::matched_writer_is_matched(const GUID_t& writer_guid) {
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    if (is_alive_) {
        for (WriterProxy* it : matched_writers_) {
            if (it->guid() == writer_guid && it->is_alive()) {
                return true;
            }
        }
    }

    return false;
}

bool StatefulReader::findWriterProxy(const GUID_t& writerGUID, WriterProxy** WP) const {
    assert(WP);

    for (WriterProxy* it : matched_writers_) {
        if (it->guid() == writerGUID && it->is_alive()) {
            *WP = it;
            return true;
        }
    }
    return false;
}

// Assert the liveliness of a remote writer on the WLP's subscriber-side
// liveliness manager. No-op for an infinite lease duration.
inline void StatefulReader::assert_writer_liveliness(const GUID_t& writer) const {
    if (!(liveliness_lease_duration_ < c_TimeInfinite)) {
        return;  // Infinite lease: nothing to assert.
    }

    auto* const wlp = this->mp_RTPSParticipant->wlp();
    if (wlp == nullptr) {
        elogError(RTPS_LIVELINESS, RetCode_t::RETCODE_NOT_ENABLED,
                  "Finite liveliness lease duration but WLP not enabled");
        return;
    }
    (void)wlp->sub_liveliness_manager_->assert_liveliness(writer, liveliness_kind_, liveliness_lease_duration_);
}

/**
 * @brief Process a batched DATA message: split it into individual cache
 *        changes and add each to the history.
 *
 * Only compiled in when BATCH_SEND_ENABLE is defined; otherwise logs an
 * error and returns true without touching the change.
 *
 * @param change Incoming batch change; its payload contains
 *               change->batchSampleCount serialized samples starting at
 *               change->sequenceNumber.
 * @return false when the message is rejected (reader not alive, writer not
 *         accepted, or resource exhaustion); true otherwise.
 */
bool StatefulReader::processDataBatchMsg(CacheChange_t* change) {  //LCOV_EXCL_START
#ifdef BATCH_SEND_ENABLE
    WriterProxy* pWP = nullptr;

    assert(change);

    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    if (!is_alive_) {
        return false;
    }

    if (!acceptMsgFrom(change->writerGUID, &pWP)) {
        return false;
    }

    // Check if CacheChange was received or is framework data
    if ((!pWP) || (!pWP->change_was_received(change->sequenceNumber))) {
        // Always assert liveliness on scope exit
        auto assert_liveliness_lambda = [&lock, this, change](void*) {
            lock.unlock();  // Avoid deadlock with LivelinessManager.
            assert_writer_liveliness(change->writerGUID);
        };
        std::unique_ptr<void, decltype(assert_liveliness_lambda)> p {this, assert_liveliness_lambda};

        size_t unknown_missing_changes_up_to = pWP ? pWP->unknown_missing_changes_up_to(change->sequenceNumber) : 0U;
        CacheChange_t* change_to_add = nullptr;
        uint32_t sample_pos = 0U, data_pos = 0U;
        bool shouldNotify = false;
        uint32_t payload_length = 0U;
        SequenceNumber_t first_seq = change->sequenceNumber;
        SequenceNumber_t lastest_seq = first_seq;
        // Unpack each sample of the batch into its own cache change; batch
        // samples get consecutive sequence numbers starting at first_seq.
        for (uint32_t i = 0U; i < change->batchSampleCount; i++) {
            // Ask the pool for a cache change
            if (!change_pool_->reserve_cache(change_to_add)) {
                logWarning(RTPS_MSG_IN, IDSTRING
                                            "Reached the maximum number of samples allowed by "
                                            "this reader's QoS. Rejecting change for reader: "
                                            << m_guid);
                return false;
            }
            (void)parseChangeFromBatch(change_to_add, change, sample_pos, data_pos);
            change_to_add->sequenceNumber = change->sequenceNumber + i;
            uint32_t payload_size;
            payload_size = change_to_add->serializedPayload.length;
            // Byte 3 of the encapsulation header carries a completion flag;
            // stash it aside and zero it in the payload.
            if (payload_size >= 4) {
                change_to_add->serializedPayload.encapsulationCompletion =
                    static_cast<uint16_t>(change_to_add->serializedPayload.data[3]);
                change_to_add->serializedPayload.data[3] = 0;
            }
            bool will_never_be_accepted = false;
            if (!mp_history->can_change_be_added_nts(change->writerGUID, change_to_add->serializedPayload.length,
                                                     unknown_missing_changes_up_to, will_never_be_accepted)) {
                // This sample cannot enter the history; mark it irrelevant
                // (when it never will be accepted) and move on to the next.
                if (will_never_be_accepted && pWP) {
                    (void)pWP->irrelevant_change_set(change_to_add->sequenceNumber);
                    shouldNotify = true;
                }
                lastest_seq = change_to_add->sequenceNumber;
                (void)change_pool_->release_cache(change_to_add);
                continue;
            }

            SerializedPayload_t payload = change_to_add->serializedPayload;
            if (!payload_pool_->get_payload(payload, *change_to_add)) {
                logWarning(
                    RTPS_MSG_IN,
                    IDSTRING "Problem copying CacheChange, received data is: "
                        << change->serializedPayload.length << " bytes and max size in reader " << m_guid << " is "
                        << (fixed_payload_size_ > 0 ? fixed_payload_size_ : std::numeric_limits<uint32_t>::max()));
                (void)change_pool_->release_cache(change_to_add);
                // payload is a shallow copy; null its data pointer so its
                // destructor does not free memory it does not own.
                payload.data = nullptr;
                break;
            }
            payload.data = nullptr;

            // Perform reception of cache change
            if (!change_batch_received(change_to_add, pWP, unknown_missing_changes_up_to, shouldNotify)) {
                logDebug(RTPS_MSG_IN, IDSTRING change_to_add->writerGUID << " Change " << change_to_add->sequenceNumber
                                                                         << " not added to history");
                if (change_to_add->payload_owner()) {
                    (void)change_to_add->payload_owner()->release_payload(*change_to_add);
                    (void)change_pool_->release_cache(change_to_add);
                }
                break;
            }
            lastest_seq = change_to_add->sequenceNumber;
            payload_length += change_to_add->serializedPayload.length;
        }

        if (shouldNotify) {
#if defined(EDDS_METRICS)
            on_receive_throughput(ReliabilityKind_t::RELIABLE, change->writerGUID, getGuid(), payload_length);
#endif
            // Without a proxy (framework data) notify the listener directly;
            // otherwise go through the proxy-based notification path.
            if (!pWP) {
                auto listener = getListener();
                if (listener != nullptr) {
                    bool notify_single = false;
                    listener->on_data_available(change->writerGUID, first_seq, lastest_seq, notify_single);
                }
            } else {
                NotifyChanges(pWP);
            }
        }
    } else {
        logDebug(RTPS_MSG_IN, IDSTRING "Change  is received not added to history");
    }
#else
    (void)change;
    elogError(RTPS_HISTORY, RetCode_t::RETCODE_NOT_ENABLED, "Compile macro BATCH_SEND_ENABLE off.");
#endif
    return true;
}  //LCOV_EXCL_STOP

/**
 * @brief Process an incoming (non-fragmented) DATA message.
 *
 * Accepts the change when its writer is matched (or it is framework data
 * without a proxy), copies it into a cache change owned by this reader's
 * pools, and hands it to the history via change_received(). Writer
 * liveliness is asserted on every accepted message, even on early returns.
 *
 * @param change Incoming change; not owned by this reader — its payload is
 *               copied, the original is left for the caller to release.
 * @return true when the change was accepted (added to history, filtered out
 *         as irrelevant, or deliberately skipped as a duplicate); false on
 *         rejection or resource exhaustion.
 */
bool StatefulReader::processDataMsg(CacheChange_t* change) {
    WriterProxy* pWP = nullptr;

    assert(change);

    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    if (!is_alive_) {
        return false;
    }

    // Byte 3 of the encapsulation header carries a completion flag; stash it
    // aside and zero it in the payload.
    uint32_t payload_size;
    payload_size = change->serializedPayload.length;
    if (payload_size >= 4) {
        change->serializedPayload.encapsulationCompletion = static_cast<uint16_t>(change->serializedPayload.data[3]);
        change->serializedPayload.data[3] = 0;
    }

    if (acceptMsgFrom(change->writerGUID, &pWP)) {
        auto listener = getListener();
        // Check if CacheChange was received or is framework data
        if ((!pWP) || (!pWP->change_was_received(change->sequenceNumber)) ||
            (listener && listener->isReconstructMsg(this, change))) {
            // Always assert liveliness on scope exit
            auto assert_liveliness_lambda = [&lock, this, change](void*) {
                lock.unlock();  // Avoid deadlock with LivelinessManager.
                assert_writer_liveliness(change->writerGUID);
            };
            std::unique_ptr<void, decltype(assert_liveliness_lambda)> p {this, assert_liveliness_lambda};

            size_t unknown_missing_changes_up_to =
                pWP ? pWP->unknown_missing_changes_up_to(change->sequenceNumber) : 0U;
            bool will_never_be_accepted = false;
            if (!mp_history->can_change_be_added_nts(change->writerGUID, change->serializedPayload.length,
                                                     unknown_missing_changes_up_to, will_never_be_accepted)) {
                if (will_never_be_accepted && pWP) {
                    (void)pWP->irrelevant_change_set(change->sequenceNumber);
                    NotifyChanges(pWP);
                }
                return false;
            }

            // Content filtering: mark filtered-out changes irrelevant so gap
            // detection still advances.
            if (get_content_filter() && !change_is_relevant_for_filter(*change, m_guid, get_content_filter())) {
                if (pWP) {
                    pWP->irrelevant_change_set(change->sequenceNumber);
                    NotifyChanges(pWP);
                }
                // Change was filtered out, so there isn't anything else to do
                return true;
            }
            {
                std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
                // Ask the pool for a cache change
                CacheChange_t* change_to_add = nullptr;
                if (!change_pool_->reserve_cache(change_to_add)) {
                    logWarning(RTPS_MSG_IN, IDSTRING  //LCOV_EXCL_START
                                                "Reached the maximum number of samples allowed by "
                                                "this reader's QoS. Rejecting change for reader: "
                                                << m_guid);
                    return false;
                }  //LCOV_EXCL_STOP

                // Copy metadata to reserved change
                change_to_add->copy_not_memcpy(change);

                // Ask payload pool to copy the payload
                IPayloadPool* payload_owner = change->payload_owner();

                if (payload_pool_->get_payload(change->serializedPayload, payload_owner, *change_to_add)) {
                    change->payload_owner(payload_owner);
                } else {
                    logWarning(  //LCOV_EXCL_START
                        RTPS_MSG_IN, IDSTRING "Problem copying CacheChange, received data is: "
                                         << change->serializedPayload.length << " bytes and max size in reader "
                                         << m_guid << " is "
                                         << ((fixed_payload_size_ > 0U) ? fixed_payload_size_
                                                                        : std::numeric_limits<uint32_t>::max()));
                    (void)change_pool_->release_cache(change_to_add);
                    return false;
                }  //LCOV_EXCL_STOP

                // Perform reception of cache change
                // (the history lock is released first; change_received takes
                // its own locks).
                history_lock.unlock();
                if (!change_received(change_to_add, pWP, unknown_missing_changes_up_to)) {
                    logDebug(RTPS_MSG_IN, IDSTRING change_to_add->writerGUID << " Change "
                                                                             << change_to_add->sequenceNumber
                                                                             << " not added to history");
                    if (change_to_add->payload_owner()) {
                        history_lock.lock();
                        (void)change_to_add->payload_owner()->release_payload(*change_to_add);
                        (void)change_pool_->release_cache(change_to_add);
                        history_lock.unlock();
                    }
                    return false;
                }
            }
        } else {
            logDebug(RTPS_MSG_IN, IDSTRING "Topic " << getTopicName() << " Reader " << getGuid() << " Change "
                                                    << change->sequenceNumber << " is received not added to history");
        }

        return true;
    }

    return false;
}

/**
 * @brief Processes an incoming DATA_FRAG submessage for this reader.
 *
 * Finds (or reserves) a cache change large enough for the complete sample,
 * copies the received fragments into it and, once the sample is fully
 * reassembled, completes the change in the history and notifies the user.
 * Writer liveliness is always asserted on exit via a scope guard.
 *
 * @param incomingChange        Change holding the received fragment payload (borrowed, not owned).
 * @param sampleSize            Total serialized size of the complete sample.
 * @param fragmentStartingNum   1-based index of the first fragment carried by this submessage.
 * @param fragmentsInSubmessage Number of consecutive fragments carried by this submessage.
 * @return false when the reader is dead or the change cannot/can never be added; true otherwise.
 */
bool StatefulReader::processDataFragMsg(CacheChange_t* incomingChange, uint32_t sampleSize,
                                        uint32_t fragmentStartingNum, uint16_t fragmentsInSubmessage) {
    WriterProxy* pWP = nullptr;

    assert(incomingChange);

    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    if (!is_alive_) {
        return false;
    }

    // TODO: see if we need manage framework fragmented DATA message
    if (acceptMsgFrom(incomingChange->writerGUID, &pWP) && pWP) {
        // Always assert liveliness on scope exit
        auto assert_liveliness_lambda = [&lock, this, incomingChange](void*) {
            lock.unlock();  // Avoid deadlock with LivelinessManager.
            assert_writer_liveliness(incomingChange->writerGUID);
        };
        std::function<void(void*)> assert_liveliness = assert_liveliness_lambda;
        std::unique_ptr<void, decltype(assert_liveliness)> p {this, assert_liveliness};

        // Check if CacheChange was received.
        if (!pWP->change_was_received(incomingChange->sequenceNumber)) {
            size_t changes_up_to = pWP->unknown_missing_changes_up_to(incomingChange->sequenceNumber);
            bool will_never_be_accepted = false;
            if (!mp_history->can_change_be_added_nts(incomingChange->writerGUID, sampleSize, changes_up_to,
                                                     will_never_be_accepted)) {
                if (will_never_be_accepted) {
                    // Give up on this sequence number so the proxy stops waiting for it.
                    (void)pWP->irrelevant_change_set(incomingChange->sequenceNumber);
                    NotifyChanges(pWP);
                }
                return false;
            }

            CacheChange_t* change_to_add = incomingChange;

            // work_change is the change under reassembly; change_created is non-null
            // only when this call reserved a brand new change for the sample.
            CacheChange_t* change_created = nullptr;
            CacheChange_t* work_change = nullptr;
            if (!mp_history->get_change(change_to_add->sequenceNumber, change_to_add->writerGUID, &work_change)) {
                // A new change should be reserved
                if (reserveCache(&work_change, sampleSize)) {
                    if (work_change->serializedPayload.max_size < sampleSize) {
                        releaseCache(work_change);
                        work_change = nullptr;
                    } else {
                        // Copy metadata only; the payload is assembled fragment by fragment.
                        work_change->copy_not_memcpy(change_to_add);
                        work_change->serializedPayload.length = sampleSize;
                        work_change->instanceHandle.clear();
                        work_change->setFragmentSize(change_to_add->getFragmentSize(), true);
                        change_created = work_change;
                    }
                }
            }

            if (work_change != nullptr) {
                // Set the instanceHandle only when fragment number 1 is received
                if (!work_change->instanceHandle.isDefined() && fragmentStartingNum == 1) {
                    work_change->instanceHandle = change_to_add->instanceHandle;
                }

                work_change->add_fragments(change_to_add->serializedPayload, fragmentStartingNum,
                                           fragmentsInSubmessage);
            }

            // If this is the first time we have received fragments for this change, add it to
            // history
            if (change_created != nullptr) {
                if (!change_received(change_created, pWP, changes_up_to)) {
                    logDebug(RTPS_MSG_IN, IDSTRING change_created->writerGUID
                                              << " MessageReceiver not add change "
                                              << change_created->sequenceNumber.to64long());
                    releaseCache(change_created);
                    work_change = nullptr;
                }
            }

            // If change has been fully reassembled, mark as received and add notify user
            if ((work_change != nullptr) && (work_change->is_fully_assembled())) {
                edds::dds::SampleRejectedStatusKind rejection_reason;
                uint32_t payload_size;
                payload_size = work_change->serializedPayload.length;
                if (payload_size >= 4) {
                    // Stash the encapsulation options byte and zero it in the payload.
                    // It is restored into data[3] when the change is notified (see
                    // NotifyChanges) — presumably so E2E checking sees a normalized
                    // header; TODO confirm against the E2E profile spec.
                    work_change->serializedPayload.encapsulationCompletion =
                        static_cast<uint16_t>(work_change->serializedPayload.data[3]);
                    work_change->serializedPayload.data[3] = 0;
                }

                if (mp_history->completed_change(work_change, changes_up_to, rejection_reason)) {
                    (void)pWP->received_change_set(work_change->sequenceNumber);

                    // Temporarilly assign the inline qos while evaluating the data filter
                    work_change->inline_qos = std::move(incomingChange->inline_qos);
                    bool filtered_out = get_content_filter() &&
                                        !change_is_relevant_for_filter(*work_change, m_guid, get_content_filter());
                    incomingChange->inline_qos = std::move(work_change->inline_qos);
                    work_change->inline_qos.data = nullptr;
                    if (filtered_out) {
                        mp_history->remove_change(work_change);
                    }

                    NotifyChanges(pWP);
                } else {
                    // The completed change was rejected by the history.
                    bool has_to_notify = false;
                    if (edds::dds::NOT_REJECTED != rejection_reason &&
                        edds::dds::REJECTED_BY_OWERSHIP != rejection_reason) {
                        if (getListener()) {
                            // Release the reader mutex while invoking user code.
                            lock.unlock();
                            getListener()->on_sample_rejected(rejection_reason, work_change);
                            lock.lock();
                        }

                        /* Special case: rejected by REJECTED_BY_INSTANCES_LIMIT should never be
                         * received again.
                         */
                        if (edds::dds::REJECTED_BY_INSTANCES_LIMIT == rejection_reason ||
                            edds::dds::REJECTED_BY_DEPTH_LIMIT == rejection_reason ||
                            edds::dds::REJECTED_BY_OWERSHIP == rejection_reason) {
                            (void)pWP->irrelevant_change_set(work_change->sequenceNumber);
                            has_to_notify = true;
                        }
                    }
                    if (edds::dds::REJECTED_BY_OWERSHIP != rejection_reason) {
                        // Remove the rejected change from the history (it must still be there).
                        std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
                        History::const_iterator chit = mp_history->find_change_nts(work_change);
                        if (chit != mp_history->changesEnd()) {
                            (void)mp_history->remove_change_nts(chit);
                        } else {
                            elogError(RTPS_READER, RetCode_t::RETCODE_ERROR, "Change should exist but didn't find it");
                        }
                    }

                    if (has_to_notify) {
                        NotifyChanges(pWP);
                    }
                }
            }
        }
    }

    return true;
}

/**
 * @brief Processes an incoming HEARTBEAT submessage from a matched writer.
 *
 * Delegates the state update to the writer proxy; when the heartbeat carries
 * new information, drops stale fragmented changes, reports sample-lost to the
 * listener, notifies newly available changes and asserts writer liveliness
 * when the proxy requests it.
 *
 * @param writerGUID     GUID of the heartbeating writer.
 * @param hbCount        Heartbeat count (used by the proxy to discard duplicates).
 * @param firstSN        First sequence number available at the writer.
 * @param lastSN         Last sequence number available at the writer.
 * @param finalFlag      RTPS FinalFlag of the submessage.
 * @param livelinessFlag RTPS LivelinessFlag of the submessage.
 * @return true when the message was accepted from a matched writer; false otherwise.
 */
bool StatefulReader::processHeartbeatMsg(const GUID_t& writerGUID, uint32_t hbCount, const SequenceNumber_t& firstSN,
                                         const SequenceNumber_t& lastSN, bool finalFlag, bool livelinessFlag) {
    WriterProxy* writer = nullptr;

    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    if (!is_alive_) {
        return false;
    }

    if (acceptMsgFrom(writerGUID, &writer) && writer) {
        bool assert_liveliness = false;
        int32_t current_sample_lost = 0;
        if (writer->process_heartbeat(hbCount, firstSN, lastSN, finalFlag, livelinessFlag, disable_positive_acks_,
                                      assert_liveliness, current_sample_lost)) {
            // Everything before firstSN is no longer available: drop partial fragments.
            (void)mp_history->remove_fragmented_changes_until(firstSN, writerGUID);

            if (0 < current_sample_lost) {
                if (getListener() != nullptr) {
                    // Release the reader mutex while invoking user code.
                    lock.unlock();
                    getListener()->on_sample_lost(current_sample_lost, LOST_BY_HISTORY_QOS);
                    lock.lock();
                }
#if defined(EDDS_METRICS)
                on_packet_lost(ReliabilityKind_t::RELIABLE, writerGUID, getGuid(),
                               firstSN.to64long() - current_sample_lost - 1U, firstSN.to64long());
#endif
            }

            // Maybe now we have to notify user from new CacheChanges.
            NotifyChanges(writer);

            // Try to assert liveliness if requested by proxy's logic
            if (assert_liveliness) {
                if (liveliness_lease_duration_ < c_TimeInfinite) {
                    if ((liveliness_kind_ == MANUAL_BY_TOPIC_LIVELINESS_QOS) ||
                        (writer->liveliness_kind() == MANUAL_BY_TOPIC_LIVELINESS_QOS)) {
                        auto wlp = this->mp_RTPSParticipant->wlp();
                        if (wlp != nullptr) {
                            lock.unlock();  // Avoid deadlock with LivelinessManager.
                            (void)wlp->sub_liveliness_manager_->assert_liveliness(writerGUID, liveliness_kind_,
                                                                                  liveliness_lease_duration_);
                        } else {
                            elogError(RTPS_LIVELINESS, RetCode_t::RETCODE_NOT_ENABLED,
                                      "Finite liveliness lease duration but WLP not enabled");
                        }
                    }
                }
            }
        }

        return true;
    }

    return false;
}

/**
 * @brief Processes an incoming GAP submessage, marking the announced sequence
 *        numbers as irrelevant so they are no longer awaited from the writer.
 *
 * The GAP covers the contiguous range [gapStart, gapList.base()) plus every
 * sequence number set in the gapList bitmap. For each newly irrelevant number,
 * any partially reassembled change is removed from the history.
 *
 * @param writerGUID GUID of the writer announcing the gap.
 * @param gapStart   First sequence number of the contiguous gap range.
 * @param gapList    Bitmap of additional irrelevant sequence numbers (base ends the range).
 * @return true when the message was accepted from a matched writer; false on a
 *         malformed gap (invalid start, base not beyond start) or unknown writer.
 */
bool StatefulReader::processGapMsg(const GUID_t& writerGUID, const SequenceNumber_t& gapStart,
                                   const SequenceNumberSet_t& gapList) {
    WriterProxy* pWP = nullptr;

    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    if (!is_alive_ || gapStart < SequenceNumber_t(0, 1) || gapList.base() <= gapStart) {
        return false;
    }

    if (acceptMsgFrom(writerGUID, &pWP) && pWP) {
        // TODO (Miguel C): Refactor this inside WriterProxy
        SequenceNumber_t auxSN;
        SequenceNumber_t finalSN = gapList.base();

        std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
        History::const_iterator history_iterator = mp_history->changesBegin();
        // Contiguous part of the gap: [gapStart, gapList.base()).
        for (auxSN = gapStart; auxSN < finalSN; auxSN++) {
            if (pWP->irrelevant_change_set(auxSN)) {
                CacheChange_t* to_remove = nullptr;
                auto ret_iterator = findCacheInFragmentedProcess(auxSN, pWP->guid(), &to_remove, history_iterator);
                if (to_remove != nullptr) {
                    // we called the History version to avoid callbacks
                    history_iterator = mp_history->History::remove_change_nts(ret_iterator);
                } else if (ret_iterator != mp_history->changesEnd()) {
                    history_iterator = ret_iterator;
                }
            }
        }

        // Bitmap part of the gap: each set bit relative to gapList.base().
        gapList.for_each([&](SequenceNumber_t it) {
            if (pWP->irrelevant_change_set(it)) {
                CacheChange_t* to_remove = nullptr;
                // Bugfix: look up the bitmap's own sequence number ('it'). The previous
                // code passed 'auxSN', which at this point always equals gapList.base(),
                // so partially reassembled changes for bitmap gaps were never removed.
                auto ret_iterator = findCacheInFragmentedProcess(it, pWP->guid(), &to_remove, history_iterator);
                if (to_remove != nullptr) {
                    // we called the History version to avoid callbacks
                    history_iterator = mp_history->History::remove_change_nts(ret_iterator);
                } else if (ret_iterator != mp_history->changesEnd()) {
                    history_iterator = ret_iterator;
                }
            }
        });
        history_lock.unlock();
        // Maybe now we have to notify user from new CacheChanges.
        NotifyChanges(pWP);

        return true;
    }

    return false;
}

bool StatefulReader::processE2EMsg(const E2EProfile04_t& e2eHeader) {
    this->e2e_profile04_ = e2eHeader;

    return true;
}
bool StatefulReader::acceptMsgFrom(const GUID_t& writerId, WriterProxy** wp) const {
    assert(wp != nullptr);

    for (WriterProxy* it : matched_writers_) {
        if (it->guid() == writerId && it->is_alive()) {
            *wp = it;
            return true;
        }
    }

    // Check if it's a framework's one. In this case, m_acceptMessagesFromUnkownWriters
    // is an enabler for the trusted entity comparison
    if (m_acceptMessagesFromUnkownWriters && (writerId.entityId == m_trustedWriterEntityId)) {
        *wp = nullptr;
        return true;
    }
    logDebug(RTPS_READER, "Find matched writer fail " << writerId);
    return false;
}

/**
 * @brief Hook invoked by the history when one of this reader's changes is removed.
 *
 * For fully assembled changes, repairs the unread counter when the change had
 * been notified but never read. For incomplete (fragmented) changes, marks the
 * sequence number irrelevant on the owning writer proxy.
 *
 * @param change The change being removed (not owned).
 * @return false when the reader is dead or the proxy of an incomplete change
 *         cannot be found; true otherwise.
 */
bool StatefulReader::change_removed_by_history(const CacheChange_t* const change) {
    const std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);

    if (!is_alive_) {
        return false;
    }

    if (!change->is_fully_assembled()) {
        // Incomplete sample: the proxy must stop expecting this sequence number.
        WriterProxy* proxy;
        if (!findWriterProxy(change->writerGUID, &proxy)) {
            return false;
        }
        (void)proxy->irrelevant_change_set(change->sequenceNumber);
        return true;
    }

    // Fully assembled: if it was notified (<= last notified) but never read,
    // the unread counter still accounts for it — decrement defensively.
    if (!change->isRead && (get_last_notified(change->writerGUID) >= change->sequenceNumber)) {
        if (mp_history->total_unread_ > 0U) {
            --mp_history->total_unread_;
        }
    }
    return true;
}

/**
 * @brief Performs reception of a cache change, adding it to the history and
 *        notifying the user when appropriate.
 *
 * When no proxy is provided (or the listener flags the change for
 * reconstruction), the writer proxy is looked up; messages from unknown
 * non-trusted writers are discarded, while trusted (framework) writers are
 * handled in a stateless fashion. Rejections by the history are reported to
 * the listener and, for terminal rejection reasons, the change is marked
 * irrelevant so it is never awaited again.
 *
 * @param a_change Change to add (ownership semantics handled by caller/history).
 * @param prox     Writer proxy, or nullptr to look it up here.
 * @param unknown_missing_changes_up_to Number of unknown missing changes before this one.
 * @return true when the change was accepted into the history; false otherwise.
 */
bool StatefulReader::change_received(CacheChange_t* a_change, WriterProxy* prox, size_t unknown_missing_changes_up_to) {
    // First look for WriterProxy in case is not provided
    auto listener = getListener();
    if (prox == nullptr || (listener && listener->isReconstructMsg(this, a_change))) {
        if (!findWriterProxy(a_change->writerGUID, &prox) || (listener && listener->isReconstructMsg(this, a_change))) {
            // discard non framework messages from unknown writer
            if (a_change->writerGUID.entityId != m_trustedWriterEntityId) {
                elogError(RTPS_MSG_IN, RetCode_t::RETCODE_ERROR,
                          "Writer Proxy " << a_change->writerGUID << " not matched to this Reader " << m_guid.entityId);
                return false;
            } else if (a_change->kind != vbs::common::ChangeKind_t::ALIVE) {
                elogErrorKeyT(RTPS_MSG_IN, RetCode_t::RETCODE_ERROR, getTopicName(),
                              "Not alive change " << a_change->writerGUID << " has not WriterProxy");
                return false;
            } else {
                // handle framework messages in a stateless fashion
                // Only make visible the change if there is not other with bigger sequence number.
                if (get_last_notified(a_change->writerGUID) < a_change->sequenceNumber ||
                    (listener && listener->isReconstructMsg(this, a_change))) {
                    bool update = builtin ? false : true;
                    if (mp_history->received_change(a_change, 0U, update)) {
                        Time_t::now(a_change->reader_info.receptionTimestamp);
                        // If we use the real a_change->sequenceNumber no DATA(p) with a lower one
                        // will ever be received. That happens because the WriterProxy created when
                        // the listener matches the PDP endpoints is initialized using this
                        // SequenceNumber_t. Note that on a SERVER the own DATA(p) may be in any
                        // position within the WriterHistory preventing effective data exchange.
                        (void)update_last_notified(a_change->writerGUID, SequenceNumber_t(0, 1U));
                        if (listener != nullptr) {
                            bool notify_single = false;
                            auto guid = a_change->writerGUID;
                            auto seq = a_change->sequenceNumber;
                            // Release the reader mutex while invoking user code.
                            mp_mutex.unlock();
                            listener->on_data_available(guid, seq, seq, notify_single);
                            mp_mutex.lock();
                            if (notify_single) {
                                listener->onNewCacheChangeAdded(this, a_change);
                            }
                            // Re-find the proxy: it may have been created/changed while unlocked.
                            if (findWriterProxy(guid, &prox) && prox) {
                                (void)prox->received_change_set(seq);
                            }
                        }

                        return true;
                    }
                }

                logDebug(RTPS_READER, "Change received from "
                                          << a_change->writerGUID
                                          << " with sequence number: " << a_change->sequenceNumber
                                          << " skipped. Higher sequence numbers have been received.");
                return false;
            }
        } else {
            unknown_missing_changes_up_to = prox->unknown_missing_changes_up_to(a_change->sequenceNumber);
        }
    }

    // Update Ownership strength.
    if (EXCLUSIVE_OWNERSHIP_QOS == m_att.ownershipKind) {
        a_change->reader_info.writer_ownership_strength = prox->ownership_strength();
    } else {
        // Shared ownership: every writer wins, so use the maximum strength.
        a_change->reader_info.writer_ownership_strength = std::numeric_limits<uint32_t>::max();
    }

    if (enable_e2e_protection_) {
        // Mirror the change's E2E header into the reader-level profile state.
        a_change->e2e_header.counter_ = a_change->sequenceNumber;
        this->e2e_profile04_.counter_ = a_change->sequenceNumber;
        this->e2e_profile04_.crc_ = a_change->e2e_header.crc_;
        this->e2e_profile04_.length_ = a_change->e2e_header.length_;
    }

    // NOTE: Depending on QoS settings, one change can be removed from history
    // inside the call to mp_history->received_change
    edds::dds::SampleRejectedStatusKind rejection_reason;
    Time_t::now(a_change->reader_info.receptionTimestamp);
    bool update = builtin ? false : true;
    if (!a_change->is_fully_assembled()) {
        // Do not update instance bookkeeping for partially assembled samples.
        update = false;
    }
    if (mp_history->received_change(a_change, unknown_missing_changes_up_to, rejection_reason, update)) {
        auto payload_length = a_change->serializedPayload.length;

        if (a_change->is_fully_assembled()) {
            (void)prox->received_change_set(a_change->sequenceNumber);
        } else {
            /* Search if the first fragment was stored, because it may have been discarded due to
             * being older and KEEP_LAST policy. In this case this samples should be set as
             * irrelevant.
             */
            if (mp_history->changesEnd() == mp_history->find_change(a_change)) {
                (void)prox->irrelevant_change_set(a_change->sequenceNumber);
            }
        }
#if defined(EDDS_METRICS)
        // statistics callback
        on_receive_throughput(ReliabilityKind_t::RELIABLE, a_change->writerGUID, getGuid(), payload_length);
#else
        (void)payload_length;
#endif
#if !defined(_WIN32)
        usdt_in_stateful_reader(a_change, getTopicName(), getTracePointStatus(), getRTPSParticipant()->get_domain_id());
#endif
        // WARNING! This method could destroy a_change
        NotifyChanges(prox);
        return true;
    } else {
        if (edds::dds::NOT_REJECTED != rejection_reason) {
            if (edds::dds::REJECTED_BY_DEPTH_LIMIT != rejection_reason &&
                edds::dds::REJECTED_BY_OWERSHIP != rejection_reason) {
                if (getListener() && (a_change->is_fully_assembled() || (a_change->contains_first_fragment()))) {
                    // Release the reader mutex while invoking user code.
                    mp_mutex.unlock();
                    getListener()->on_sample_rejected(rejection_reason, a_change);
                    mp_mutex.lock();
                }
            }

            /* Special case: rejected by REJECTED_BY_INSTANCES_LIMIT should never be received again.
             */
            if (edds::dds::REJECTED_BY_INSTANCES_LIMIT == rejection_reason ||
                edds::dds::REJECTED_BY_DEPTH_LIMIT == rejection_reason ||
                edds::dds::REJECTED_BY_OWERSHIP == rejection_reason) {
                (void)prox->irrelevant_change_set(a_change->sequenceNumber);
                NotifyChanges(prox);
            }
        }
    }

    return false;
}

#ifdef BATCH_SEND_ENABLE
/**
 * @brief Batched variant of change_received: adds a change to the history
 *        without notifying immediately.
 *
 * Instead of calling NotifyChanges, this sets @p shouldNotify so the caller
 * can issue a single notification for the whole batch. Unlike
 * change_received, unknown-writer messages are only logged at debug level.
 *
 * @param a_change Change to add.
 * @param prox     Writer proxy, or nullptr to look it up here.
 * @param unknown_missing_changes_up_to Number of unknown missing changes before this one.
 * @param shouldNotify [out] Set to true when the caller must notify the user afterwards.
 * @return true when the change was accepted into the history; false otherwise.
 */
bool StatefulReader::change_batch_received(CacheChange_t* a_change, WriterProxy* prox,
                                           size_t unknown_missing_changes_up_to, bool& shouldNotify) {
    // First look for WriterProxy in case is not provided
    if (prox == nullptr) {
        if (!findWriterProxy(a_change->writerGUID, &prox)) {
            // discard non framework messages from unknown writer
            if (a_change->writerGUID.entityId != m_trustedWriterEntityId) {
                logDebug(RTPS_READER,
                         "Writer Proxy " << a_change->writerGUID << " not matched to this Reader " << m_guid.entityId);
                return false;
            } else if (a_change->kind != vbs::common::ChangeKind_t::ALIVE) {
                logDebug(RTPS_READER, "Not alive change " << a_change->writerGUID << " has not WriterProxy");
                return false;
            } else {
                // handle framework messages in a stateless fashion
                // Only make visible the change if there is not other with bigger sequence number.
                if (get_last_notified(a_change->writerGUID) < a_change->sequenceNumber) {
                    Time_t::now(a_change->reader_info.receptionTimestamp);
                    if (mp_history->received_change(a_change, 0U)) {

                        // If we use the real a_change->sequenceNumber no DATA(p) with a lower one
                        // will ever be received. That happens because the WriterProxy created when
                        // the listener matches the PDP endpoints is initialized using this
                        // SequenceNumber_t. Note that on a SERVER the own DATA(p) may be in any
                        // position within the WriterHistory preventing effective data exchange.
                        (void)update_last_notified(a_change->writerGUID, SequenceNumber_t(0, 1U));
                        shouldNotify = true;
                        return true;
                    }
                }

                logDebug(RTPS_READER, "Change received from "
                                          << a_change->writerGUID
                                          << " with sequence number: " << a_change->sequenceNumber
                                          << " skipped. Higher sequence numbers have been received.");
                return false;
            }
        } else {
            unknown_missing_changes_up_to = prox->unknown_missing_changes_up_to(a_change->sequenceNumber);
        }
    }

    // Update Ownership strength.
    if (EXCLUSIVE_OWNERSHIP_QOS == m_att.ownershipKind) {
        a_change->reader_info.writer_ownership_strength = prox->ownership_strength();
    } else {
        // Shared ownership: every writer wins, so use the maximum strength.
        a_change->reader_info.writer_ownership_strength = std::numeric_limits<uint32_t>::max();
    }

    // NOTE: Depending on QoS settings, one change can be removed from history
    // inside the call to mp_history->received_change
    edds::dds::SampleRejectedStatusKind rejection_reason;
    Time_t::now(a_change->reader_info.receptionTimestamp);
    if (mp_history->received_change(a_change, unknown_missing_changes_up_to, rejection_reason)) {
        (void)prox->received_change_set(a_change->sequenceNumber);
        shouldNotify = true;
        return true;
    } else {
        if (edds::dds::NOT_REJECTED != rejection_reason) {
            if (getListener() && (a_change->is_fully_assembled() || (a_change->contains_first_fragment()))) {
                // NOTE(review): unlike change_received, the mutex is NOT released
                // around this user callback — confirm this is intentional.
                getListener()->on_sample_rejected(rejection_reason, a_change);
            }

            /* Special case: rejected by REJECTED_BY_INSTANCES_LIMIT should never be received again.
             */
            if (edds::dds::REJECTED_BY_INSTANCES_LIMIT == rejection_reason) {
                (void)prox->irrelevant_change_set(a_change->sequenceNumber);
                shouldNotify = true;
            }
        }
    }
    return false;
}
#endif

/**
 * @brief Notifies the user of every change from @p prox that has become
 *        available (all preceding changes received or declared irrelevant).
 *
 * Walks the history from the proxy's next-to-be-notified sequence number up
 * to its maximum available one, performing per-change E2E checking when
 * enabled, then invokes the listener callbacks and wakes threads blocked
 * waiting for unread data.
 *
 * @param prox Writer proxy whose newly available changes are to be notified. Must not be null.
 */
void StatefulReader::NotifyChanges(WriterProxy* prox) {
    CacheChange_t* aux_ch = nullptr;
    GUID_t proxGUID = prox->guid();
    SequenceNumber_t max_seq = prox->available_changes_max();
    SequenceNumber_t first_seq = prox->next_cache_change_to_be_notified();

    bool new_data_available = false;
    // Update state before notifying
    (void)update_last_notified(proxGUID, max_seq);

    std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
    History::const_iterator it = mp_history->changesBegin();
    SequenceNumber_t next_seq = first_seq;

    if (this->m_guid.is_builtin() && prox->get_reliability_kind() == BEST_EFFORT_RELIABILITY_QOS &&
        it != mp_history->changesEnd()) {  // VBS-MVBS command-line compatibility adaptation
        // Packets arriving here are normally assumed to be reliable, but messages from a
        // best-effort builtin writer can also reach this point. Notify each packet
        // immediately instead of waiting for consecutive sequence numbers.
        aux_ch = *it;
        assert(false == aux_ch->isRead);
        auto listener = getListener();
        if (nullptr != listener) {
            aux_ch->bCanRead = true;
            // Release the history mutex while invoking user code.
            history_lock.unlock();
            listener->onNewCacheChangeAdded(this, aux_ch);
            history_lock.lock();
            new_notification_cv_.notify_all();
        }
        return;
    }

    // Mark every contiguous available change in [first_seq, max_seq] as readable.
    while (next_seq != c_SequenceNumber_Unknown &&
           mp_history->changesEnd() != (it = mp_history->get_change_nts(next_seq, proxGUID, &aux_ch, it)) &&
           (*it)->sequenceNumber <= max_seq) {
        aux_ch = *it;
        assert(false == aux_ch->isRead);
        new_data_available = true;
        mp_history->total_unread_increase_nts();

        if (enable_e2e_protection_) {
            auto listener = getListener();
            E2EProfile04_t e2eHeader = aux_ch->e2e_header;
            edds::dds::E2EExceptionStatus e2e_status;
            // Per-writer E2E state is keyed by the raw 16-byte GUID
            // (12-byte prefix + 4-byte entity id).
            std::string guidPrefixStr(reinterpret_cast<char*>(aux_ch->writerGUID.guidPrefix.value), 12);
            std::string entityIdStr(reinterpret_cast<char*>(aux_ch->writerGUID.entityId.value), 4);
            std::string guidStr = guidPrefixStr + entityIdStr;
            SequenceNumber_t counter = counter_map_[guidStr];
            bool isNotFirstReceive = isNotFirstReceive_map_[guidStr];

            // Temporarily restore the encapsulation options byte (zeroed during
            // reassembly) so the E2E check sees the original payload bytes.
            aux_ch->serializedPayload.data[3] = static_cast<octet>(aux_ch->serializedPayload.encapsulationCompletion);
            unsigned int status =
                e2eCommon::doCheckP04(e2eHeader, aux_ch, &counter, e2e_p04_min_data_length_, e2e_p04_max_data_length_,
                                      e2e_p04_max_delta_counter_, isNotFirstReceive);
            counter_map_[guidStr] = counter;
            isNotFirstReceive_map_[guidStr] = isNotFirstReceive;
            aux_ch->serializedPayload.data[3] = 0;
            e2e_status.status = (E2E_StatusType)status;
            e2e_status.counter = (uint16_t)e2eHeader.counter_.to64long();
            if (e2e_status.status) {
                listener->on_e2e_exception(e2e_status);
            }
            aux_ch->e2eCounter = e2e_status.counter;
            aux_ch->e2eStatus = e2e_status.status;
        }
#if defined(EDDS_METRICS)
        // Get current timestamp
        ertps::rtps::Time_t current_time;
        ertps::rtps::Time_t::now(current_time);
        // Calc latency
        auto ns = (current_time - aux_ch->sourceTimestamp).to_ns();
        on_history_latency(ReliabilityKind_t::RELIABLE, proxGUID, getGuid(), static_cast<float>(ns));
#endif
        aux_ch->bCanRead = true;
        ++it;

        // Advance the proxy past sequence numbers already covered by this change.
        do {
            next_seq = prox->next_cache_change_to_be_notified();
        } while (next_seq != c_SequenceNumber_Unknown && next_seq <= aux_ch->sequenceNumber);
    }
    // Ensure correct state of proxy when max_seq is not present in history
    while (c_SequenceNumber_Unknown != prox->next_cache_change_to_be_notified()) {}

    // Notify listener if new data is available
    auto listener = getListener();
    if (new_data_available && (nullptr != listener)) {
        bool notify_individual = true;
        if (!builtin) {
            // Release both mutexes while invoking user code.
            history_lock.unlock();
            mp_mutex.unlock();
            listener->on_data_available(proxGUID, first_seq, max_seq, notify_individual);
            mp_mutex.lock();
            history_lock.lock();
        }
        if (notify_individual) {
            it = mp_history->changesBegin();
            next_seq = first_seq;
            while (next_seq <= max_seq &&
                   mp_history->changesEnd() != (it = mp_history->get_change_nts(next_seq, proxGUID, &aux_ch, it)) &&
                   (*it)->sequenceNumber <= max_seq) {
                aux_ch = *it;
                next_seq = aux_ch->sequenceNumber + 1U;
                history_lock.unlock();
                listener->onNewCacheChangeAdded(this, aux_ch);
                history_lock.lock();

                // Reset the iterator to the beginning, since it may be invalidated inside the
                // callback
                it = mp_history->changesBegin();
            }
        }
    }
    // Notify in case someone is waiting for unread messages
    if (new_data_available) {
        new_notification_cv_.notify_all();
    }
}

/**
 * @brief Returns the next cache change that is available to be taken by the user.
 *
 * Scans the history in order; changes whose writer is no longer matched are
 * removed on the fly, and changes beyond the writer's available maximum are
 * skipped.
 *
 * @param change [out] The next takeable change.
 * @param wpout  [out, optional] Proxy of the writer that produced it.
 * @return true when a takeable change was found; false otherwise.
 */
bool StatefulReader::nextUntakenCache(CacheChange_t** change, WriterProxy** wpout) {
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    if (!is_alive_) {
        return false;
    }

    WriterProxy* proxy;
    std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
    auto iter = mp_history->changesBegin();
    while (iter != mp_history->changesEnd()) {
        if (!this->findWriterProxy((*iter)->writerGUID, &proxy)) {
            // Orphaned change: its writer was unmatched, so drop it from the history.
            logWarning(RTPS_READER, "Removing change " << (*iter)->sequenceNumber << " from " << (*iter)->writerGUID
                                                       << " because is no longer paired");
            iter = mp_history->remove_change_nts(iter);
            continue;
        }

        // TODO Revisar la comprobacion
        if (proxy->available_changes_max() < (*iter)->sequenceNumber) {
            // Not yet available from this writer; try the next change.
            ++iter;
            continue;
        }

        *change = *iter;
        if (wpout != nullptr) {
            *wpout = proxy;
        }
        return true;
    }

    return false;
}

bool StatefulReader::updateTimes(const ReaderTimes& ti) {
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    if (is_alive_) {
        if (times_.heartbeatResponseDelay != ti.heartbeatResponseDelay) {
            times_ = ti;
            for (WriterProxy* writer : matched_writers_) {
                writer->update_heartbeat_response_interval(times_.heartbeatResponseDelay);
            }
        }
    }
    return true;
}

bool StatefulReader::is_in_clean_state() {
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);

    if (is_alive_) {
        for (WriterProxy* wp : matched_writers_) {
            if (wp->number_of_changes_from_writer() != 0) {
                return false;
            }
        }
    }

    return true;
}

/**
 * @brief Prepares access to a sample, determining whether it is a "future" change.
 *
 * A change is future when its sequence number is beyond the maximum the owning
 * writer proxy currently makes available.
 *
 * @param change           Change about to be accessed.
 * @param is_future_change [out] Set to true when the change is not yet available.
 * @return always true.
 */
bool StatefulReader::begin_sample_access_nts(CacheChange_t* change, bool& is_future_change) {
    is_future_change = false;
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    WriterProxy* proxy = nullptr;
    if (findWriterProxy(change->writerGUID, &proxy)) {
        is_future_change = proxy->available_changes_max() < change->sequenceNumber;
    }

    return true;
}

/**
 * @brief Finishes access to a sample, optionally marking it as read.
 * @param change       Change whose access is ending.
 * @param mark_as_read Whether the change must be flagged as read (updates the unread counter).
 */
void StatefulReader::end_sample_access_nts(CacheChange_t* change, /*WriterProxy*& wp,*/ bool mark_as_read) {
    change_read_by_user(change, nullptr, mark_as_read);
}

void StatefulReader::change_read_by_user(CacheChange_t* const change, const WriterProxy* const writer,
                                         const bool mark_as_read) {
    //assert(!writer || change->writerGUID == writer->guid());
    UNUSED_PARAMETER(writer);

    // Nothing to do if the caller did not request marking, or the change was
    // already accounted for as read.
    if (!mark_as_read || change->isRead) {
        return;
    }

    change->isRead = true;
    // Keep the history's unread counter consistent; guard against underflow.
    if (mp_history->total_unread_ > 0U) {
        --mp_history->total_unread_;
    }
}

// Collect the GUIDs of all currently matched remote writers.
// Takes the reader mutex so the matched-writers list cannot change underneath.
// @return list of matched writer GUIDs (possibly empty).
evbs::edds::dds::builtin::StatisticMatchGuids StatefulReader::get_remote_guids() {
    evbs::edds::dds::builtin::StatisticMatchGuids guids;

    std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);
    // Range-for instead of uint32_t index loop: avoids repeated operator[]
    // calls and the signed/unsigned size comparison.
    for (WriterProxy* const writer : matched_writers_) {
        guids.push_back(writer->guid());
    }
    return guids;
}

evbs::edds::dds::builtin::StatisticProxyInfos StatefulReader::get_proxy_infos() {
    evbs::edds::dds::builtin::StatisticProxyInfos infos;

    std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);
    // Build one ProxyInfo snapshot per matched writer proxy.
    for (WriterProxy* const writer : matched_writers_) {
        evbs::edds::dds::builtin::ProxyInfo info;
        info.type = evbs::edds::dds::builtin::STATICTIC_ENTITY_WRITER;
        info.is_alive = writer->is_alive();
        info.start_time = writer->get_start_time();
        info.last_heartbeat_count = writer->get_last_heartbeart_count();
        info.last_notified = writer->get_last_notified().to64long();
        info.available_changes_max = writer->available_changes_max().to64long();
        info.max_sequence_number = writer->max_sequence_number_.to64long();
        info.writer_proxy_locators_entry = *writer->get_locator_entry();
        infos.push_back(std::move(info));
    }
    return infos;
}

void StatefulReader::send_acknack(const WriterProxy* writer, const SequenceNumberSet_t& sns,
                                  RTPSMessageSenderInterface* sender, bool is_final) {
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);

    // No ACKNACK for writers that are gone, nor for writers reachable inside
    // the same process (no wire traffic needed there).
    if ((!writer->is_alive()) || writer->is_on_same_process()) {
        return;
    }

    ++acknack_count_;

    RTPSMessageGroup group(getRTPSParticipant(), this, sender);
    (void)group.add_acknack(sns, static_cast<int32_t>(acknack_count_), is_final);
}

// Respond to a received HEARTBEAT: send an ACKNACK (and NACKFRAGs for
// partially received fragmented samples) back to the writer.
// Holds the reader mutex for the whole operation; additionally takes the
// history mutex while scanning for incomplete changes. NOTE(review): the
// history lock is temporarily released around add_nackfrag inside the
// for_each lambda — keep this exact lock ordering when modifying.
void StatefulReader::send_acknack(const WriterProxy* writer, RTPSMessageSenderInterface* sender,
                                  bool heartbeat_was_final) {
    // Protect reader
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    if (!writer->is_alive()) {
        return;
    }

    SequenceNumberSet_t missing_changes = writer->missing_changes();

    try {
        RTPSMessageGroup group(getRTPSParticipant(), this, sender);
        // Only answer when there is something to request, or the writer
        // explicitly asked for a response (heartbeat without FINAL flag).
        if ((!missing_changes.empty()) || (!heartbeat_was_final)) {
            GUID_t guid = sender->remote_guids().at(0U);
            // Base of the ACKNACK set: everything below it is acknowledged.
            SequenceNumberSet_t sns(writer->available_changes_max() + 1U);
            {
                std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
                History::const_iterator history_iterator = mp_history->changesBegin();
                missing_changes.for_each([&](const SequenceNumber_t& seq) {
                    // Check if the CacheChange_t is uncompleted.
                    CacheChange_t* uncomplete_change = nullptr;
                    auto ret_iterator = findCacheInFragmentedProcess(seq, guid, &uncomplete_change, history_iterator);
                    if (ret_iterator != mp_history->changesEnd()) {
                        // Resume the next search from here; missing_changes is
                        // iterated in increasing sequence order.
                        history_iterator = ret_iterator;
                    }
                    if (uncomplete_change == nullptr) {
                        // Fully missing sample: request it via the ACKNACK set.
                        if (!sns.add(seq)) {
                            logDebug(RTPS_READER,
                                     "Sequence number "
                                         << seq << " exceeded bitmap limit of AckNack. SeqNumSet Base: " << sns.base());
                        }
                    } else {
                        // Partially received sample: request only the missing
                        // fragments. Release the history lock while building
                        // the NACKFRAG submessage to avoid lock-order issues.
                        history_lock.unlock();
                        FragmentNumberSet_t frag_sns;
                        uncomplete_change->get_missing_fragments(frag_sns);
                        ++nackfrag_count_;
                        logDebug(RTPS_READER, "Sending NACKFRAG for sample" << seq << ": " << frag_sns;);
                        (void)group.add_nackfrag(seq, frag_sns, static_cast<int32_t>(nackfrag_count_));
                        history_lock.lock();
                    }
                });
            }

            acknack_count_++;

            // FINAL flag set when nothing is requested (pure acknowledgement).
            bool final = sns.empty();
            (void)group.add_acknack(sns, acknack_count_, final);
        }
    } catch (const RTPSMessageGroup::timeout&) {  //LCOV_EXCL_START
        elogError(RTPS_READER, RetCode_t::RETCODE_TIMEOUT, "Topic " << getTopicName() << " max blocking time reached");
    }  //LCOV_EXCL_STOP
}

bool StatefulReader::send_sync_nts(CDRMessage_t* message, const Locators& locators_begin,
                                   const Locators& locators_end) {
    // Delegate the actual wire send to the participant implementation.
    const bool send_multi = this->getSendMulti();
    return mp_RTPSParticipant->sendSync(message, m_guid, locators_begin, locators_end, send_multi);
}

// Filter and transform the remote writer's unicast locators, keeping only
// those usable through this participant's registered transports.
// DSF locators are kept untouched; UDS/UDPv4 locators are transformed via the
// network factory and validated before being announced back on wdata.
// @param wdata writer proxy data whose unicast locator list is rewritten.
// @param wp    matched writer proxy (currently unused).
void StatefulReader::FilterWriterLocators(WriterProxyData& wdata, WriterProxy* wp) {
    UNUSED_PARAMETER(wp);
    LocatorList tmp_unicast;
    // Iterate by const reference: Locator_t is non-trivial, avoid copies.
    for (const auto& loc : wdata.remote_locators().unicast) {
        vbs::RemoteEndpointInfo::LocatorType locator_type;
        if (loc.kind == LOCATOR_KIND_UDS) {
            locator_type = vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS;
        } else if (loc.kind == LOCATOR_KIND_UDPv4) {
            locator_type = vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP;
        } else if (loc.kind == LOCATOR_KIND_DSF) {
            // DSF locators pass through unmodified.
            tmp_unicast.push_back(loc);
            continue;
        } else {
            // Fix: any other kind previously fell through with locator_type
            // left uninitialized (undefined behavior when passed to
            // registed_user_transport). Such locators are not supported here.
            continue;
        }
        Locator_t temp_locator;
        if ((this->m_guid.is_builtin() || mp_RTPSParticipant->registed_user_transport(locator_type)) &&
            mp_RTPSParticipant->network_factory()->transform_remote_locator(mp_RTPSParticipant->get_transport_name(),
                                                                            loc, temp_locator) &&
            mp_RTPSParticipant->is_valid_remote_locator(loc)) {
            tmp_unicast.push_back(temp_locator);
        }
    }
    wdata.set_announced_unicast_locators(tmp_unicast);
}
