// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ------------------------------------------------------------------
// Modification history:
// feature: listener code decoupling
// feature: change E2E packet format
// feature: add SetTrustRemoteEndpoint API to dispatcherReader and add locator to dispatcherWriter
// feature: DServer support simple entity
// feature: support auth token
// feature: RemoteEndpointInfo support reliabilityKind
// feature: develop dispatcher
// feature: resolve android compile problem
// feature: Split history as an independent common module
// feature: interval report metrics
// feature: another method of counting packet lost
// feature: destination order support by reception timestamp
// feature: E2E New Requirement Development
// feature: Development of new requirements for E2E functionality
// ------------------------------------------------------------------

#include <edds/rtps/reader/StatelessReader.h>
#include <history/RemoteEndpointInfo.h>
#include <history/ReaderHistory.h>
#include <history/ReaderListener.h>
#include <elog/log/Log.h>
#include <history/CacheChange.h>
#include <edds/rtps/builtin/BuiltinProtocols.h>
#include <edds/rtps/builtin/liveliness/WLP.h>
#include <edds/rtps/writer/LivelinessManager.h>
#include <rtps/participant/RTPSParticipantImpl.h>
#include <rtps/reader/reader_utils.hpp>

#include <mutex>
#include <thread>

#include <cassert>
#include <map>

#include <deps/core/status/E2EExceptionStatus.hpp>

#include "rtps/RTPSDomainImpl.hpp"

#define IDSTRING "(ID:" << std::this_thread::get_id() << ") " <<

using namespace evbs::ertps::rtps;
using namespace vbs::common;

#if !defined(_WIN32)

#include <StaticTracepoint.h>
#include <tracepoint/TracePointManager.hpp>
#include <deps/common/SystemInfo.hpp>

#define SEC(name) __attribute__((section(name), used))
unsigned short ertps_ertps_less_reader_udp_recv_semaphore SEC(".probes");

// Emit a USDT (user statically-defined tracing) probe for a cache change
// received by a stateless reader, provided the trace point for this topic is
// open and the probe semaphore is armed.
//
// The runtime status passed in may be overridden by a persisted per-topic
// status kept in TracePointManager (the higher value wins).
void usdt_in_stateless_reader(const CacheChange_t* change, const std::string& topic_name, uint8_t trace_point_status,
                              uint32_t domain_id) {
    const auto persisted_status =
        vbsutil::tracepoint::TracePointManager::get_persistent_trace_point_status(topic_name);
    if ((persisted_status != vbsutil::tracepoint::INVALID) && (persisted_status > trace_point_status)) {
        trace_point_status = persisted_status;
    }

    const bool status_open = (trace_point_status == vbsutil::tracepoint::ON_ENTRANCE) ||
                             (trace_point_status == vbsutil::tracepoint::ON_EXPORT);
    if (!status_open || !ertps_ertps_less_reader_udp_recv_semaphore) {
        logDebug(RTPS_READER, "Trace point status is NOT_OPEN");
        return;
    }

    logDebug(RTPS_READER, "Trace point status is OPEN");

    // Pack the four VBS version bytes into a single little-endian 32-bit word.
    const uint32_t vbs_version = static_cast<uint32_t>(c_Version_vbs[0]) |
                                 (static_cast<uint32_t>(c_Version_vbs[1]) << 8) |
                                 (static_cast<uint32_t>(c_Version_vbs[2]) << 16) |
                                 (static_cast<uint32_t>(c_Version_vbs[3]) << 24);
    const uint64_t timestamp_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
                                      std::chrono::high_resolution_clock::now().time_since_epoch())
                                      .count();

    // Field order matches the UsdtInfo aggregate layout.
    vbsutil::tracepoint::UsdtInfo usdt_info = {change->sequenceNumber.to64long(),
                                               timestamp_ns,
                                               vbs_version,
                                               domain_id,
                                               change->src_ip,
                                               change->dst_ip,
                                               change->mask_len,
                                               vbsutil::tracepoint::ROLE_SUB};

    FOLLY_SDT_WITH_SEMAPHORE4(ertps, ertps_less_reader_udp_recv, reinterpret_cast<unsigned char*>(&usdt_info),
                              topic_name.c_str(), change->serializedPayload.data, change->serializedPayload.length);
}
#endif
// Destructor. Resource teardown (history, listener, pools) is handled by the
// RTPSReader base class; this only logs the removal of the reader's GUID.
StatelessReader::~StatelessReader() {
    logDebug(RTPS_READER, "Removing reader " << m_guid);
}

// Construct a stateless reader with default payload/change pools.
// @param pimpl  Owning participant implementation.
// @param guid   GUID assigned to this reader.
// @param att    Reader attributes; matched_writers_allocation sizes the
//               matched-writers proxy vector.
// @param hist   History where received changes are stored.
// @param listen Listener for user callbacks (may be nullptr).
StatelessReader::StatelessReader(RTPSParticipantImpl* pimpl, const GUID_t& guid, const ReaderAttributes& att,
                                 ReaderHistory* hist, ReaderListener* listen)
    : RTPSReader(pimpl, guid, att, hist, listen), matched_writers_(att.matched_writers_allocation) {
    // AppID = 0;
}

// Construct a stateless reader with a caller-provided change pool.
// Parameters as in the base constructor; change_pool supplies CacheChange_t
// instances instead of the default pool.
StatelessReader::StatelessReader(RTPSParticipantImpl* pimpl, const GUID_t& guid, const ReaderAttributes& att,
                                 const std::shared_ptr<IChangePool>& change_pool, ReaderHistory* hist,
                                 ReaderListener* listen)
    : RTPSReader(pimpl, guid, att, change_pool, hist, listen), matched_writers_(att.matched_writers_allocation) {
    // AppID = 0;
}

// Construct a stateless reader with a caller-provided payload pool.
// Parameters as in the base constructor; payload_pool supplies serialized
// payload storage instead of the default pool.
StatelessReader::StatelessReader(RTPSParticipantImpl* pimpl, const GUID_t& guid, const ReaderAttributes& att,
                                 const std::shared_ptr<IPayloadPool>& payload_pool, ReaderHistory* hist,
                                 ReaderListener* listen)
    : RTPSReader(pimpl, guid, att, payload_pool, hist, listen), matched_writers_(att.matched_writers_allocation) {
    // AppID = 0;
}

// Construct a stateless reader with both payload and change pools provided by
// the caller. Parameters as in the base constructor.
StatelessReader::StatelessReader(RTPSParticipantImpl* pimpl, const GUID_t& guid, const ReaderAttributes& att,
                                 const std::shared_ptr<IPayloadPool>& payload_pool,
                                 const std::shared_ptr<IChangePool>& change_pool, ReaderHistory* hist,
                                 ReaderListener* listen)
    : RTPSReader(pimpl, guid, att, payload_pool, change_pool, hist, listen),
      matched_writers_(att.matched_writers_allocation) {
    // AppID = 0;
}

/**
 * Register (or update) a remote writer proxy on this reader.
 *
 * If the writer is already matched, only its ownership strength is refreshed
 * and the listener (if any) is notified with CHANGED_QOS_WRITER; returns
 * false. Otherwise a new RemoteWriterInfo_t is stored, liveliness tracking is
 * hooked up when a finite lease duration is configured, and the listener is
 * notified with DISCOVERED_WRITER; returns true.
 *
 * Locking: mp_mutex is taken while the proxy list is inspected/modified and
 * deliberately released before every listener callback.
 *
 * @param wdata Discovery data of the remote writer.
 * @return true when a new writer was added; false when the writer already
 *         existed or there was no room to store it.
 */
bool StatelessReader::matched_writer_add(WriterProxyData& wdata) {
    ReaderListener* listener = nullptr;

    {
        std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);
        listener = mp_listener;

        // Update path: the writer is already known.
        for (RemoteWriterInfo_t& writer : matched_writers_) {
            if (writer.guid == wdata.guid()) {
                logDebug(RTPS_READER, "Attempting to add existing writer, updating information");

                // Propagate an ownership-strength change to the history so
                // exclusive-ownership arbitration stays consistent.
                if ((EXCLUSIVE_OWNERSHIP_QOS == m_att.ownershipKind) &&
                    (writer.ownership_strength != wdata.m_qos.m_ownershipStrength.value)) {
                    mp_history->writer_update_its_ownership_strength(writer.guid,
                                                                     wdata.m_qos.m_ownershipStrength.value);
                }
                writer.ownership_strength = wdata.m_qos.m_ownershipStrength.value;

                if (nullptr != listener) {
                    // call the listener without the lock taken
                    guard.unlock();

                    // Snapshot of the remote endpoint for the callback.
                    // NOTE(review): reliability is hard-coded BEST_EFFORT here
                    // while the DISCOVERED_WRITER path below derives it from
                    // the writer QoS — confirm this asymmetry is intended.
                    vbs::RemoteEndpointInfo winfo;
                    winfo.guid(wdata.guid());
                    winfo.topicName(wdata.topicName());
                    winfo.typeName(wdata.typeName());
                    winfo.topicKind(wdata.topicKind());
                    winfo.reliabilityKind(ReliabilityKind_t::BEST_EFFORT);
                    winfo.host_id(wdata.get_host_id());
                    winfo.process_id(wdata.get_pid());

                    uint16_t local_host_id = m_guid.guidPrefix.is_guid_static() ? vbs::SystemInfo::instance().host_id()
                                                                                : m_guid.guidPrefix.get_host_id();
                    // Pick the locator type: DSF wins (and stops the scan) when
                    // the writer lives on the same host; otherwise the last
                    // seen UDP/UDS locator kind is recorded.
                    for (auto loc : wdata.remote_locators().unicast) {
                        if (loc.kind == LOCATOR_KIND_DSF &&
                            m_guid.is_on_same_host_as(local_host_id, wdata.guid(), wdata.get_host_id())) {
                            winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_DSF);
                            winfo.locator(loc);
                            break;
                        } else if (loc.kind == LOCATOR_KIND_UDPv4) {
                            winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP);
                        } else if (loc.kind == LOCATOR_KIND_UDS) {
                            winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS);
                        }
                    }

                    if (!this->m_guid.is_builtin()) {
                        elogInfo(RTPS_READER, "Remote writer locator when CHANGED_QOS_WRITER: "
                                                  << wdata.remote_locators() << " guid: " << wdata.guid());
                    }
                    listener->on_writer_discovery(WRITER_DISCOVERY_STATUS::CHANGED_QOS_WRITER, wdata.guid(), &winfo);
                }
                return false;
            }
        }

        // Add path: build a new proxy entry for this writer.
        RemoteWriterInfo_t info;
        info.guid = wdata.guid();
        info.persistence_guid = wdata.persistence_guid();
        info.has_manual_topic_liveliness = (MANUAL_BY_TOPIC_LIVELINESS_QOS == wdata.m_qos.m_liveliness.kind);
        info.ownership_strength = wdata.m_qos.m_ownershipStrength.value;
        info.remote_locator_list = wdata.remote_locators();
        evbs::ertps::rtps::Time_t::now(info.start_time);

        // emplace_back returns nullptr when the resource-limited vector is full.
        if (matched_writers_.emplace_back(info) == nullptr) {
            logWarning(RTPS_READER, "No space to add writer " << wdata.guid() << " to reader " << m_guid);
            return false;
        }
        logDebug(RTPS_READER, "Writer " << wdata.guid() << " added to reader " << m_guid);

        add_persistence_guid(info.guid, info.persistence_guid);

        // Once at least one writer is matched, stop accepting anonymous data.
        m_acceptMessagesFromUnkownWriters = false;
    }

    // Finite lease duration => track this writer's liveliness through WLP.
    if (liveliness_lease_duration_ < c_TimeInfinite) {
        auto wlp = mp_RTPSParticipant->wlp();
        if (wlp != nullptr) {
            (void)wlp->sub_liveliness_manager_->add_writer(wdata.guid(), liveliness_kind_, liveliness_lease_duration_);
        } else {
            elogError(RTPS_LIVELINESS, RetCode_t::RETCODE_NOT_ENABLED,
                      "Finite liveliness lease duration but WLP not enabled");
        }
    }

    if (nullptr != listener) {
        vbs::RemoteEndpointInfo winfo;
        winfo.guid(wdata.guid());
        winfo.topicName(wdata.topicName());
        winfo.typeName(wdata.typeName());
        winfo.topicKind(wdata.topicKind());
        winfo.host_id(wdata.get_host_id());
        winfo.process_id(wdata.get_pid());

        if (wdata.m_qos.m_reliability.kind == ReliabilityQosPolicyKind::BEST_EFFORT_RELIABILITY_QOS) {
            winfo.reliabilityKind(ReliabilityKind_t::BEST_EFFORT);
        } else {
            winfo.reliabilityKind(ReliabilityKind_t::RELIABLE);
        }
        // NOTE(review): unlike the CHANGED_QOS path above, this scan does not
        // break on a DSF locator and does not check same-host — confirm.
        for (auto loc : wdata.remote_locators().unicast) {
            if (loc.kind == LOCATOR_KIND_DSF) {
                winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_DSF);
                winfo.locator(loc);
            } else if (loc.kind == LOCATOR_KIND_UDPv4) {
                winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDP);
            } else if (loc.kind == LOCATOR_KIND_UDS) {
                winfo.set_locator_valid(vbs::RemoteEndpointInfo::LocatorType::LOCATOR_TYPE_UDS);
            }
        }
        if (!this->m_guid.is_builtin()) {
            elogInfo(RTPS_READER, "Remote writer locator when DISCOVERED_WRITER: " << wdata.remote_locators()
                                                                                   << " guid: " << wdata.guid());
        }
        // lock not held here
        listener->on_writer_discovery(WRITER_DISCOVERY_STATUS::DISCOVERED_WRITER, wdata.guid(), &winfo);
    }

    return true;
}

/**
 * Unmatch a remote writer from this reader.
 *
 * Removes the writer from the liveliness manager (when a finite lease is
 * configured), purges its cache changes from the history, erases its proxy
 * entry, and notifies the listener with REMOVED_WRITER.
 *
 * @param writer_guid      GUID of the writer to remove.
 * @param removed_by_lease true when removal was triggered by a liveliness
 *                         lease expiring (forwarded to remove_persistence_guid).
 * @return true when the writer was found and removed.
 */
bool StatelessReader::matched_writer_remove(const GUID_t& writer_guid, bool removed_by_lease) {
    if (liveliness_lease_duration_ < c_TimeInfinite) {
        auto wlp = mp_RTPSParticipant->wlp();
        if (wlp != nullptr) {
            (void)wlp->sub_liveliness_manager_->remove_writer(writer_guid, liveliness_kind_,
                                                              liveliness_lease_duration_);
        } else {
            elogError(RTPS_LIVELINESS, RetCode_t::RETCODE_NOT_ENABLED,
                      "Finite liveliness lease duration but WLP not enabled, cannot remove writer");
        }
    }
    {
        std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);

        // Remove cachechanges belonging to the unmatched writer
        mp_history->writer_unmatched(writer_guid, get_last_notified(writer_guid));

        ResourceLimitedVector<RemoteWriterInfo_t>::iterator it;
        for (it = matched_writers_.begin(); it != matched_writers_.end(); ++it) {
            if (it->guid == writer_guid) {
                logDebug(RTPS_READER, "Writer " << writer_guid << " removed from " << m_guid);

                remove_persistence_guid(it->guid, it->persistence_guid, removed_by_lease);
                (void)matched_writers_.erase(it);
                if (nullptr != mp_listener) {
                    // call the listener without lock
                    ReaderListener* listener = mp_listener;
                    guard.unlock();
                    // lock not held here
                    listener->on_writer_discovery(WRITER_DISCOVERY_STATUS::REMOVED_WRITER, writer_guid, nullptr);
                }
                return true;
            }
        }
#if HAVE_SECURITY
        // NOTE(review): this is only reached when the writer was NOT found in
        // matched_writers_ (the found path returns early above) — confirm the
        // auth-token removal is intended for that case only.
        if (mp_RTPSParticipant->security_manager().authTokenAlwaysOn || mp_RTPSParticipant->is_secure()) {
            mp_RTPSParticipant->security_manager().removeRemoteAuthToken(writer_guid);
        }
#endif
    }
    return false;
}

bool StatelessReader::matched_writer_is_matched(const GUID_t& writer_guid) {
    std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
    if (std::any_of(matched_writers_.begin(), matched_writers_.end(),
                    [writer_guid](const RemoteWriterInfo_t& item) { return item.guid == writer_guid; })) {
        return true;
    }

    return false;
}

// Core reception path for a change already owned by this reader: decides
// visibility, stores the change in the history, updates last-notified
// bookkeeping, reports sample-lost gaps, runs the optional E2E profile-04
// check and fires listener callbacks.
//
// Locking: the caller holds mp_mutex; it is explicitly unlocked/relocked
// around each listener callback (on_sample_lost, on_data_available,
// report_sample_lost) to avoid calling user code under the reader lock.
//
// @param change Reader-owned cache change to process.
// @return true when the change was accepted into the history.
inline bool StatelessReader::change_received(CacheChange_t* change) {
    // Extra tracing for the security handshake built-in reader.
    if (getGuid().entityId == participant_stateless_message_reader_entity_id) {
        std::ostringstream oss;
        oss << change->writerGUID << " " << getGuid();
        elogInfoKeyT(SECURITY, oss.str(),
                     "Change received handshake message from " << change->writerGUID << " to " << getGuid()
                                                               << " length " << change->serializedPayload.length);
    }
    // Only make the change visible if there is not another with a bigger sequence number.
    // TODO: review whether it should be included.
    auto listener = getListener();
    change->isPDPRecived = thereIsUpperRecordOf(change->writerGUID, change->sequenceNumber);
    // Accept the change when it is new, when this is one of the builtin
    // discovery/security readers, or when the listener asks to reconstruct it.
    if ((!change->isPDPRecived) || this->getGuid().entityId == ENTITYID_SPDP_BUILTIN_RTPSParticipant_READER ||
        this->getGuid().entityId == ENTITYID_P2P_BUILTIN_PARTICIPANT_STATELESS_READER ||
        (listener && listener->isReconstructMsg(this, change))) {
        // Update Ownership strength.
        if (EXCLUSIVE_OWNERSHIP_QOS == m_att.ownershipKind) {
            auto writer =
                std::find_if(matched_writers_.begin(), matched_writers_.end(),
                             [change](const RemoteWriterInfo_t& item) { return item.guid == change->writerGUID; });
            if (matched_writers_.end() == writer) {
                elogErrorKeyT(RTPS_READER_HISTORY, RetCode_t::RETCODE_ERROR, getTopicName(),  //LCOV_EXCL_START
                              "Topic " << getTopicName() << " change_received cannot find matched_writers_");
                return false;
            }  //LCOV_EXCL_STOP
            change->reader_info.writer_ownership_strength = writer->ownership_strength;
        } else {
            // Shared ownership: every writer wins arbitration.
            change->reader_info.writer_ownership_strength = std::numeric_limits<uint32_t>::max();
        }

        Time_t::now(change->reader_info.receptionTimestamp);
        // Builtin readers skip the update flag — presumably to avoid
        // last-notified side effects in the history; TODO confirm.
        bool update = builtin ? false : true;
        change->bCanRead = true;
        if (mp_history->received_change(change, 0U, update)) {
            // Cache identifying data before any callback can destroy `change`.
            auto guid = change->writerGUID;
            auto payload_length = change->serializedPayload.length;
            auto seq = change->sequenceNumber;

            SequenceNumber_t previous_seq = update_last_notified(change->writerGUID, change->sequenceNumber);
            mp_history->total_unread_increase();
#if defined(EDDS_METRICS)
            // Get current timestamp
            ertps::rtps::Time_t current_time;
            ertps::rtps::Time_t::now(current_time);
            // Calc latency
            auto ns = (current_time - change->sourceTimestamp).to_ns();
            on_history_latency(ReliabilityKind_t::BEST_EFFORT, guid, getGuid(), static_cast<float>(ns));
#endif
            auto listener = getListener();
            if (listener != nullptr) {
                // Detect sequence-number gaps (lost samples), except for SPDP
                // readers whose samples are periodic re-announcements.
                if (SequenceNumber_t {0, 0U} != previous_seq &&
                    this->getGuid().entityId != ENTITYID_SPDP_BUILTIN_RTPSParticipant_READER &&
                    this->getGuid().entityId != ENTITYID_SPDP_BUILTIN_RTPSParticipant_COMPATIBLE_READER) {
                    // Clamp the 64-bit gap into the int32_t callback argument.
                    uint64_t tmp = (seq - previous_seq).to64long() - 1U;
                    int32_t lost_samples = (tmp > static_cast<uint64_t>(std::numeric_limits<int32_t>::max()))
                                               ? std::numeric_limits<int32_t>::max()
                                               : static_cast<int32_t>(tmp);
                    if (0 < lost_samples) {
                        mp_mutex.unlock();
                        listener->on_sample_lost(lost_samples, LOST_BY_TRANSPORT_OR_QOS);
                        mp_mutex.lock();
#if defined(EDDS_METRICS)
                        on_packet_lost(ReliabilityKind_t::BEST_EFFORT, guid, this->getGuid(), previous_seq.to64long(),
                                       seq.to64long());
#endif
                    }
                }

                // Optional E2E profile-04 integrity check. Counter and
                // first-receive state are tracked per writer GUID.
                if (enable_e2e_protection_) {
                    this->e2e_profile04_.counter_ = change->sequenceNumber;
                    this->e2e_profile04_.crc_ = change->e2e_header.crc_;
                    this->e2e_profile04_.length_ = change->e2e_header.length_;
                    edds::dds::E2EExceptionStatus e2e_status;
                    E2EProfile04_t e2eHeader = this->e2e_profile04_;
                    std::string guidPrefixStr(reinterpret_cast<char*>(change->writerGUID.guidPrefix.value), 12);
                    std::string entityIdStr(reinterpret_cast<char*>(change->writerGUID.entityId.value), 4);
                    std::string guidStr = guidPrefixStr + entityIdStr;
                    SequenceNumber_t counter = counter_map_[guidStr];
                    bool isNotFirstReceive = isNotFirstReceive_map_[guidStr];

                    // Temporarily restore encapsulation byte 3 (stashed in
                    // processDataMsg) so the E2E check sees the original
                    // header, then zero it again afterwards.
                    change->serializedPayload.data[3] =
                        static_cast<octet>(change->serializedPayload.encapsulationCompletion);
                    unsigned int status =
                        e2eCommon::doCheckP04(e2eHeader, change, &counter, e2e_p04_min_data_length_,
                                              e2e_p04_max_data_length_, e2e_p04_max_delta_counter_, isNotFirstReceive);
                    counter_map_[guidStr] = counter;
                    isNotFirstReceive_map_[guidStr] = isNotFirstReceive;
                    change->serializedPayload.data[3] = 0;
                    e2e_status.status = (E2E_StatusType)status;
                    e2e_status.counter = (uint16_t)e2eHeader.counter_.to64long();
                    if (e2e_status.status) {
                        listener->on_e2e_exception(e2e_status);
                    }
                    change->e2eCounter = e2e_status.counter;
                    change->e2eStatus = e2e_status.status;
                }
#if !defined(_WIN32)
                usdt_in_stateless_reader(change, getTopicName(), getTracePointStatus(),
                                         getRTPSParticipant()->get_domain_id());
#endif
                mp_mutex.unlock();
                // When notifying the user listener, all handling drops the endpoint lock.
                // WARNING! These methods could destroy the change
                bool notify_single = false;
                listener->on_data_available(guid, seq, seq, notify_single);
                mp_mutex.lock();
                if (notify_single) {
                    listener->onNewCacheChangeAdded(this, change);
                }
            }

            // Wake any thread blocked waiting for new data.
            new_notification_cv_.notify_all();
#if defined(EDDS_METRICS)
            on_receive_throughput(ReliabilityKind_t::BEST_EFFORT, guid, getGuid(), payload_length);
#else
            (void)payload_length;
#endif
            return true;
        } else {
            // History rejected the change (sample limit reached).
            mp_mutex.unlock();
            report_sample_lost(LOST_BY_SAMPLES_LIMIT);
            mp_mutex.lock();
        }
    }
    return false;
}

// Fetch the untaken change with the lowest sequence number from the history.
// The writer-proxy output parameter is meaningless for stateless readers.
// Locking intentionally omitted here (a lock_guard was removed; see the
// repository history) — presumably callers hold mp_mutex; TODO confirm.
bool StatelessReader::nextUntakenCache(CacheChange_t** change, WriterProxy** /*wpout*/) {
    return mp_history->get_min_change(change);
}

// Begin access to a sample without taking the mutex ("nts" = not thread
// safe). A stateless reader never holds future changes, so every change is
// immediately accessible.
bool StatelessReader::begin_sample_access_nts(CacheChange_t* /*change*/, bool& is_future_change) {
    is_future_change = false;
    return true;
}

// Finish access to a sample ("nts" = not thread safe), optionally marking it
// as read via change_read_by_user. No writer proxy applies here.
void StatelessReader::end_sample_access_nts(CacheChange_t* change, bool mark_as_read) {
    change_read_by_user(change, /* writer */ nullptr, mark_as_read);
}
// Mark a change as read by the user and keep the history's unread counter in
// sync. The writer proxy is unused for stateless readers. Changes already
// marked read (or calls with mark_as_read == false) are no-ops.
void StatelessReader::change_read_by_user(CacheChange_t* const change, const WriterProxy* /*writer*/,
                                          const bool mark_as_read) {
    if (!mark_as_read || change->isRead) {
        return;
    }
    change->isRead = true;
    // Guard against underflow if the counter is already zero.
    if (mp_history->total_unread_ > 0U) {
        --mp_history->total_unread_;
    }
}

// Collect the GUIDs of all currently matched writers (statistics support).
// Takes the reader mutex while iterating the proxy list.
evbs::edds::dds::builtin::StatisticMatchGuids StatelessReader::get_remote_guids() {
    evbs::edds::dds::builtin::StatisticMatchGuids guids;

    std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);
    for (const RemoteWriterInfo_t& writer : matched_writers_) {
        guids.push_back(writer.guid);
    }
    return guids;
}

// Build a statistics entry for every matched writer proxy. Heartbeat/acknack
// counters and sequence-number fields are always zero for stateless readers.
// Takes the reader mutex while iterating the proxy list.
evbs::edds::dds::builtin::StatisticProxyInfos StatelessReader::get_proxy_infos() {
    evbs::edds::dds::builtin::StatisticProxyInfos infos;

    std::unique_lock<RecursiveTimedMutex> guard(mp_mutex);
    for (const RemoteWriterInfo_t& writer : matched_writers_) {
        evbs::edds::dds::builtin::ProxyInfo info {};
        info.type = evbs::edds::dds::builtin::STATICTIC_ENTITY_WRITER;
        info.is_alive = true;
        info.start_time = writer.start_time.to_ns();
        info.last_heartbeat_count = 0;
        info.last_acknack_count = 0;
        info.last_nackfrag_count = 0;
        info.last_notified = get_last_notified(writer.guid).to64long();
        info.available_changes_max = 0;
        info.max_sequence_number = 0;
        info.writer_proxy_locators_entry.enabled = true;
        info.writer_proxy_locators_entry.remote_guid = writer.guid;
        info.writer_proxy_locators_entry.unicast = writer.remote_locator_list.unicast;
        info.writer_proxy_locators_entry.multicast = writer.remote_locator_list.multicast;
        infos.push_back(std::move(info));
    }
    return infos;
}

// Entry point for a DATA submessage delivered to this reader.
//
// Checks writer acceptance and duplicate/ordering state, reserves a
// reader-owned cache change, copies metadata and payload into it, and hands
// it to change_received(). Writer liveliness is asserted on every scope exit
// via the unique_ptr custom-deleter trick below (which also releases the
// reader lock first to avoid deadlocking with the LivelinessManager).
//
// @param change Transport-owned change describing the received DATA.
// @return false only when the sample had to be dropped; true otherwise
//         (including the filtered-out case).
bool StatelessReader::processDataMsg(CacheChange_t* change) {
    assert(change);
    // Extra tracing for security handshake traffic.
    if (getGuid().entityId == participant_stateless_message_reader_entity_id &&
        change->writerGUID.entityId == participant_stateless_message_writer_entity_id) {
        std::ostringstream oss;
        oss << change->writerGUID << " " << getGuid();
        elogInfoKeyT(SECURITY, oss.str(),
                     "Process data handshake message from " << change->writerGUID << " to " << getGuid() << " length "
                                                            << change->serializedPayload.length);
    }

    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);

    // Stash encapsulation byte 3 and zero it in place; change_received()
    // restores it temporarily for the E2E profile-04 check.
    uint32_t payload_size;
    payload_size = change->serializedPayload.length;
    if (payload_size >= 4) {
        change->serializedPayload.encapsulationCompletion = static_cast<uint16_t>(change->serializedPayload.data[3]);
        change->serializedPayload.data[3] = 0;
    }

    if (acceptMsgFrom(change->writerGUID, change->kind)) {
        // Always assert liveliness on scope exit
        auto assert_liveliness_lambda = [&lock, this, change](void*) {
            lock.unlock();  // Avoid deadlock with LivelinessManager.
            assert_writer_liveliness(change->writerGUID);
        };
        std::unique_ptr<void, decltype(assert_liveliness_lambda)> p {this, assert_liveliness_lambda};

        // Check rejection by history
        auto listener = getListener();
        // Process when the sample is new, when this is a builtin
        // discovery/security reader, or when the listener wants to
        // reconstruct the message.
        if ((!thereIsUpperRecordOf(change->writerGUID, change->sequenceNumber)) ||
            this->getGuid().entityId == ENTITYID_SPDP_BUILTIN_RTPSParticipant_READER ||
            this->getGuid().entityId == ENTITYID_P2P_BUILTIN_PARTICIPANT_STATELESS_READER ||
            (listener && listener->isReconstructMsg(this, change))) {
            bool will_never_be_accepted = false;
            if (!mp_history->can_change_be_added_nts(change->writerGUID, change->serializedPayload.length, 0U,
                                                     will_never_be_accepted)) {
                // Advance last-notified so a permanently rejected sample is
                // not reported lost again later.
                if (will_never_be_accepted) {
                    (void)update_last_notified(change->writerGUID, change->sequenceNumber);
                }
                return false;
            }

            // Content filtering: an irrelevant sample is consumed silently.
            if (get_content_filter() && !change_is_relevant_for_filter(*change, m_guid, get_content_filter())) {
                update_last_notified(change->writerGUID, change->sequenceNumber);
                // Change was filtered out, so there isn't anything else to do
                return true;
            }
            {
                std::unique_lock<RecursiveTimedMutex> history_lock(mp_history->getMutex());
                // Ask the pool for a cache change
                CacheChange_t* change_to_add = nullptr;
                if (!change_pool_->reserve_cache(change_to_add)) {
                    report_sample_lost(LOST_BY_SAMPLES_LIMIT);  //LCOV_EXCL_START
                    logWarning(RTPS_MSG_IN, IDSTRING
                                                "Reached the maximum number of samples allowed by "
                                                "this reader's QoS. Rejecting change for reader: "
                                                << m_guid);
                    return false;
                }  //LCOV_EXCL_STOP

                // Copy metadata to reserved change
                change_to_add->copy_not_memcpy(change);

                // Ask payload pool to copy the payload
                IPayloadPool* payload_owner = change->payload_owner();

                // get_payload may update payload_owner; write it back so the
                // incoming change keeps a consistent owner.
                if (payload_pool_->get_payload(change->serializedPayload, payload_owner, *change_to_add)) {
                    change->payload_owner(payload_owner);
                } else {
                    report_sample_lost(LOST_BY_OUT_OF_MEMORY);  //LCOV_EXCL_START
                    logWarning(RTPS_MSG_IN, IDSTRING "Problem copying CacheChange, received data is: "
                                                << change->serializedPayload.length << " bytes and max size in reader "
                                                << m_guid << " is "
                                                << ((fixed_payload_size_ > 0U) ? fixed_payload_size_
                                                                               : std::numeric_limits<uint32_t>::max()));
                    (void)change_pool_->release_cache(change_to_add);
                    return false;
                }  //LCOV_EXCL_STOP

                // Perform reception of cache change
                history_lock.unlock();
                if (!change_received(change_to_add)) {
                    logWarning(RTPS_MSG_IN,
                               IDSTRING "MessageReceiver not add change " << change_to_add->sequenceNumber);
                    // Return payload and cache change to their pools under
                    // the history lock.
                    if (change_to_add->payload_owner()) {
                        history_lock.lock();
                        (void)change_to_add->payload_owner()->release_payload(*change_to_add);
                        (void)change_pool_->release_cache(change_to_add);
                        history_lock.unlock();
                    }
                    return false;
                }
            }
        }
    }
    return true;
}

bool StatelessReader::processDataBatchMsg(CacheChange_t* change) {  //LCOV_EXCL_START
#ifdef BATCH_SEND_ENABLE
    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);

    if (!acceptMsgFrom(change->writerGUID, change->kind)) {
        return false;
    }

    // Always assert liveliness on scope exit
    auto assert_liveliness_lambda = [&lock, this, change](void*) {
        lock.unlock();  // Avoid deadlock with LivelinessManager.
        assert_writer_liveliness(change->writerGUID);
    };
    std::unique_ptr<void, decltype(assert_liveliness_lambda)> p {this, assert_liveliness_lambda};

    SequenceNumber_t batch_max_seq = change->sequenceNumber + change->batchSampleCount - 1U;

    // Check rejection by history
    if (thereIsUpperRecordOf(change->writerGUID, batch_max_seq) &&
        (this->getGuid().entityId != ENTITYID_SPDP_BUILTIN_RTPSParticipant_READER) &&
        (this->getGuid().entityId != ENTITYID_SPDP_BUILTIN_RTPSParticipant_COMPATIBLE_READER)) {
        return false;
    }

    SequenceNumber_t previous_seq = update_last_notified(change->writerGUID, batch_max_seq);

    CacheChange_t* change_to_add = nullptr;
    uint32_t payload_length = 0U;
    auto guid = change->writerGUID;
    uint32_t sample_pos = 0U, data_pos = 0U;
    SequenceNumber_t lastest_seq = change->sequenceNumber;
    SequenceNumber_t first_seq = change->sequenceNumber;
    for (uint32_t i = 0U; i < change->batchSampleCount; i++) {
        // Ask the pool for a cache change
        if (!change_pool_->reserve_cache(change_to_add)) {
            report_sample_lost(LOST_BY_SAMPLES_LIMIT, change->batchSampleCount - i);
            logWarning(RTPS_MSG_IN, IDSTRING
                                        "Reached the maximum number of samples allowed by "
                                        "this reader's QoS. Rejecting change for reader: "
                                        << m_guid);
            return false;
        }

        (void)parseChangeFromBatch(change_to_add, change, sample_pos, data_pos);

        bool will_never_be_accepted = false;
        if (!mp_history->can_change_be_added_nts(change->writerGUID, change_to_add->serializedPayload.length, 0U,
                                                 will_never_be_accepted)) {
            report_sample_lost(LOST_BY_SAMPLES_LIMIT);
            (void)change_pool_->release_cache(change_to_add);
            continue;
        }

        uint32_t payload_size;
        payload_size = change_to_add->serializedPayload.length;
        if (payload_size >= 4) {
            change_to_add->serializedPayload.encapsulationCompletion =
                static_cast<uint16_t>(change_to_add->serializedPayload.data[3]);
            change_to_add->serializedPayload.data[3] = 0;
        }

        change_to_add->sequenceNumber = change->sequenceNumber + i;
        SerializedPayload_t payload = change_to_add->serializedPayload;
        if (!payload_pool_->get_payload(payload, *change_to_add)) {
            report_sample_lost(LOST_BY_OUT_OF_MEMORY, change->batchSampleCount - i);
            logWarning(RTPS_MSG_IN,
                       IDSTRING "Problem copying CacheChange, received data is: "
                           << change->serializedPayload.length << " bytes and max size in reader " << m_guid << " is "
                           << (fixed_payload_size_ > 0 ? fixed_payload_size_ : std::numeric_limits<uint32_t>::max()));
            (void)change_pool_->release_cache(change_to_add);
            payload.data = nullptr;
            return false;
        }
        payload.data = nullptr;

        if (EXCLUSIVE_OWNERSHIP_QOS == m_att.ownershipKind) {
            auto writer = std::find_if(
                matched_writers_.begin(), matched_writers_.end(),
                [change_to_add](const RemoteWriterInfo_t& item) { return item.guid == change_to_add->writerGUID; });
            assert(matched_writers_.end() != writer);
            change_to_add->reader_info.writer_ownership_strength = writer->ownership_strength;
        } else {
            change_to_add->reader_info.writer_ownership_strength = std::numeric_limits<uint32_t>::max();
        }

        Time_t::now(change_to_add->reader_info.receptionTimestamp);
        if (mp_history->received_change(change_to_add, 0U)) {
            payload_length += change_to_add->serializedPayload.length;

            lastest_seq = change_to_add->sequenceNumber;

            ++mp_history->total_unread_;
#if defined(EDDS_METRICS)
            // Get current timestamp
            ertps::rtps::Time_t current_time;
            ertps::rtps::Time_t::now(current_time);
            // Calc latency
            auto ns = (current_time - change_to_add->sourceTimestamp).to_ns();
            on_history_latency(ReliabilityKind_t::BEST_EFFORT, guid, getGuid(), static_cast<float>(ns));
#endif
        } else {
            report_sample_lost(LOST_BY_SAMPLES_LIMIT, change->batchSampleCount - i);
            logDebug(RTPS_MSG_IN, IDSTRING "MessageReceiver not add change " << change_to_add->sequenceNumber);
            if (change_to_add->payload_owner()) {
                (void)change_to_add->payload_owner()->release_payload(*change_to_add);
                (void)change_pool_->release_cache(change_to_add);
            }
            break;
        }
    }

    if (payload_length > 0U) {
        auto listener = getListener();
        if (listener != nullptr) {
            if (SequenceNumber_t {0, 0U} != previous_seq &&
                this->getGuid().entityId != ENTITYID_SPDP_BUILTIN_RTPSParticipant_READER &&
                this->getGuid().entityId != ENTITYID_SPDP_BUILTIN_RTPSParticipant_COMPATIBLE_READER) {
                uint64_t tmp = (lastest_seq - previous_seq).to64long() - 1U;
                int32_t lost_samples = tmp > static_cast<uint64_t>(std::numeric_limits<int32_t>::max())
                                           ? std::numeric_limits<int32_t>::max()
                                           : static_cast<int32_t>(tmp);
                if (0 < lost_samples) {
                    lock.unlock();
                    listener->on_sample_lost(lost_samples, LOST_BY_TRANSPORT_OR_QOS);
                    lock.lock();
#if defined(EDDS_METRICS)
                    on_packet_lost(ReliabilityKind_t::BEST_EFFORT, guid, this->getGuid(), previous_seq.to64long(),
                                   lastest_seq.to64long());
#endif
                }
            }

            if (enable_e2e_protection_) {
                this->e2e_profile04_.counter_ = change->sequenceNumber;
                this->e2e_profile04_.crc_ = change->e2e_header.crc_;
                this->e2e_profile04_.length_ = change->e2e_header.length_;
                edds::dds::E2EExceptionStatus e2e_status;
                E2EProfile04_t e2eHeader = this->e2e_profile04_;
                std::string guidPrefixStr(reinterpret_cast<char*>(change->writerGUID.guidPrefix.value), 12);
                std::string entityIdStr(reinterpret_cast<char*>(change->writerGUID.entityId.value), 4);
                std::string guidStr = guidPrefixStr + entityIdStr;
                SequenceNumber_t counter = counter_map_[guidStr];
                bool isNotFirstReceive = isNotFirstReceive_map_[guidStr];

                change->serializedPayload.data[3] =
                    static_cast<octet>(change->serializedPayload.encapsulationCompletion);
                unsigned int status =
                    e2eCommon::doCheckP04(e2eHeader, change, &counter, e2e_p04_min_data_length_,
                                          e2e_p04_max_data_length_, e2e_p04_max_delta_counter_, isNotFirstReceive);
                counter_map_[guidStr] = counter;
                isNotFirstReceive_map_[guidStr] = isNotFirstReceive;
                change->serializedPayload.data[3] = 0;
                e2e_status.status = (E2E_StatusType)status;
                e2e_status.counter = (uint16_t)e2eHeader.counter_.to64long();
                if (e2e_status.status) {
                    listener->on_e2e_exception(e2e_status);
                }
                change->e2eCounter = e2e_status.counter;
                change->e2eStatus = e2e_status.status;
            }

            // WARNING! These methods could destroy the change
            bool notify_single = false;
            listener->on_data_available(guid, first_seq, lastest_seq, notify_single);
        }
        new_notification_cv_.notify_all();
#if defined(EDDS_METRICS)
        // statistics callback
        on_receive_throughput(ReliabilityKind_t::BEST_EFFORT, guid, getGuid(), payload_length);
#endif
    }
#else
    (void)change;
    elogError(RTPS_HISTORY, RetCode_t::RETCODE_NOT_ENABLED, "Compile macro BATCH_SEND_ENABLE off.");
#endif
    return true;
}  //LCOV_EXCL_STOP

// Process one DATA_FRAG submessage for this best-effort reader.
//
// Fragments are accumulated into a single per-writer pending change
// (writer.fragmented_change) until all 'sampleSize' bytes have been received,
// at which point the reassembled change is content-filtered and handed to
// change_received(). Out-of-order fragmented samples cause the older pending
// change to be dropped and reported as lost.
//
// @param incomingChange       Change carrying the fragment payload (owned by caller).
// @param sampleSize           Total serialized size of the complete sample.
// @param fragmentStartingNum  1-based number of the first fragment in this submessage.
// @param fragmentsInSubmessage Number of consecutive fragments carried.
// @return false only when the change can never be stored due to resource
//         limits; fragments from unknown writers or stale sequences return true.
bool StatelessReader::processDataFragMsg(CacheChange_t* incomingChange, uint32_t sampleSize,
                                         uint32_t fragmentStartingNum, uint16_t fragmentsInSubmessage) {
    assert(incomingChange);
    // Trace security handshake fragments exchanged over the participant
    // stateless message endpoints.
    if (getGuid().entityId == participant_stateless_message_reader_entity_id &&
        incomingChange->writerGUID.entityId == participant_stateless_message_writer_entity_id) {
        std::ostringstream oss;
        oss << incomingChange->writerGUID << " " << getGuid();
        elogInfoKeyT(SECURITY, oss.str(),
                     "Process data handshake message(frag) from " << incomingChange->writerGUID << " to " << getGuid()
                                                                  << " length "
                                                                  << incomingChange->serializedPayload.length);
    }

    GUID_t writer_guid = incomingChange->writerGUID;

    std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
    for (RemoteWriterInfo_t& writer : matched_writers_) {
        if (writer.guid == writer_guid) {
            // Always assert liveliness on scope exit
            auto assert_liveliness_lambda = [&lock, this, &writer_guid](void*) {
                lock.unlock();  // Avoid deadlock with LivelinessManager.
                assert_writer_liveliness(writer_guid);
            };
            // Scope guard: runs assert_liveliness_lambda (unlock + liveliness
            // assertion) when 'p' is destroyed, on every exit path below.
            std::unique_ptr<void, decltype(assert_liveliness_lambda)> p {this, assert_liveliness_lambda};

            // Check if CacheChange was received.
            if (!thereIsUpperRecordOf(writer_guid, incomingChange->sequenceNumber)) {

                // Early return if we already know about a greater sequence number
                CacheChange_t* work_change = writer.fragmented_change;
                if ((work_change != nullptr) && (work_change->sequenceNumber > incomingChange->sequenceNumber)) {
                    return true;
                }

                // Ask the history whether a change of this size could be stored.
                bool will_never_be_accepted = false;
                if (!mp_history->can_change_be_added_nts(writer_guid, sampleSize, 0U, will_never_be_accepted)) {
                    if (will_never_be_accepted) {
                        // Permanently rejected: advance last-notified so this
                        // sequence is never retried.
                        (void)update_last_notified(writer_guid, incomingChange->sequenceNumber);
                    }
                    report_sample_lost(LOST_BY_OUT_OF_MEMORY);
                    return false;
                }

                CacheChange_t* change_to_add = incomingChange;

                // Check if pending fragmented change should be dropped
                if (work_change != nullptr) {
                    if (work_change->sequenceNumber < change_to_add->sequenceNumber) {
                        // A newer sample arrived before the pending one was
                        // completed: the pending one is lost.
                        SequenceNumber_t updated_seq = work_change->sequenceNumber;
                        SequenceNumber_t previous_seq {0, 0};
                        previous_seq = update_last_notified(writer_guid, updated_seq);

                        // Notify lost samples
                        auto listener = getListener();
                        if (listener != nullptr) {
                            if (SequenceNumber_t {0, 0} != previous_seq && previous_seq < updated_seq) {
                                // Clamp the 64-bit gap into the int32_t the
                                // listener API expects.
                                uint64_t tmp = (updated_seq - previous_seq).to64long();
                                int32_t lost_samples = tmp > static_cast<uint64_t>(std::numeric_limits<int32_t>::max())
                                                           ? std::numeric_limits<int32_t>::max()
                                                           : static_cast<int32_t>(tmp);
                                if (0 < lost_samples) {
                                    // Release the lock while invoking user code,
                                    // then reacquire it before continuing.
                                    lock.unlock();
                                    listener->on_sample_lost(lost_samples, LOST_BY_TRANSPORT_OR_QOS);
                                    lock.lock();
#if defined(EDDS_METRICS)
                                    // updated_seq is the sequence number of the unfinished fragmented sample,
                                    // previous_seq the last fully received one; this flow drops the
                                    // updated_seq sample.
                                    uint64_t temp_seq =
                                        (updated_seq).to64long() + 1;  // updated_seq + 1 is the right boundary for the packet-loss computation
                                    on_packet_lost(ReliabilityKind_t::BEST_EFFORT, writer_guid, this->getGuid(),
                                                   previous_seq.to64long(), temp_seq);
#endif
                                }
                            } else {
                                logWarning(RTPS_MSG_IN, IDSTRING "Reader " << m_guid << " previous seq " << previous_seq
                                                                           << " is zero or >=  fragmented change seq "
                                                                           << updated_seq << " from writer "
                                                                           << writer_guid);
                            }
                        }

                        // Pending change should be dropped. Check if it can be reused
                        if (sampleSize <= work_change->serializedPayload.max_size) {
                            // Sample fits inside pending change. Reuse it.
                            work_change->copy_not_memcpy(change_to_add);
                            work_change->serializedPayload.length = sampleSize;
                            work_change->instanceHandle.clear();
                            work_change->setFragmentSize(change_to_add->getFragmentSize(), true);
                        } else {
                            // Release change, and let it be reserved later
                            releaseCache(work_change);
                            work_change = nullptr;
                        }
                    }
                }

                // Check if a new change should be reserved
                if (work_change == nullptr) {
                    if (reserveCache(&work_change, sampleSize)) {
                        if (work_change->serializedPayload.max_size < sampleSize) {
                            // Reserved payload is too small for the full sample.
                            releaseCache(work_change);
                            work_change = nullptr;
                            report_sample_lost(LOST_BY_OUT_OF_MEMORY);
                        } else {
                            work_change->copy_not_memcpy(change_to_add);
                            work_change->serializedPayload.length = sampleSize;
                            work_change->instanceHandle.clear();
                            work_change->setFragmentSize(change_to_add->getFragmentSize(), true);
                        }
                    }
                }

                // Process fragment and set change_completed if it is fully reassembled
                CacheChange_t* change_completed = nullptr;
                if (work_change != nullptr) {
                    // Set the instanceHandle only when fragment number 1 is received
                    if (!work_change->instanceHandle.isDefined() && fragmentStartingNum == 1) {
                        work_change->instanceHandle = change_to_add->instanceHandle;
                    }

                    if (work_change->add_fragments(change_to_add->serializedPayload, fragmentStartingNum,
                                                   fragmentsInSubmessage)) {
                        change_completed = work_change;
                        work_change = nullptr;
                    }
                }

                // Store (or clear) the still-incomplete change for this writer.
                writer.fragmented_change = work_change;

                // If the change was completed, process it.
                if (change_completed != nullptr) {
                    uint32_t payload_size;
                    payload_size = change_completed->serializedPayload.length;
                    if (payload_size >= 4) {
                        // Stash the 4th encapsulation-header byte aside and zero it
                        // in the stored payload; the E2E check path restores it
                        // from encapsulationCompletion before running the check.
                        change_completed->serializedPayload.encapsulationCompletion =
                            static_cast<uint16_t>(change_completed->serializedPayload.data[3]);
                        change_completed->serializedPayload.data[3] = 0;
                    }
                    // Temporarilly assign the inline qos while evaluating the data filter
                    change_completed->inline_qos = std::move(incomingChange->inline_qos);
                    bool filtered_out = get_content_filter() &&
                                        !change_is_relevant_for_filter(*change_completed, m_guid, get_content_filter());
                    // Hand the inline_qos buffer back to the caller's change and
                    // null our copy so releaseCache() cannot free borrowed memory.
                    incomingChange->inline_qos = std::move(change_completed->inline_qos);
                    change_completed->inline_qos.data = nullptr;
                    change_completed->e2e_header = incomingChange->e2e_header;
                    if (filtered_out) {
                        // Irrelevant for this reader: mark as notified and drop.
                        update_last_notified(change_completed->writerGUID, change_completed->sequenceNumber);
                        releaseCache(change_completed);
                    } else if (!change_received(change_completed)) {
                        logDebug(RTPS_MSG_IN, IDSTRING "MessageReceiver not add change "
                                                  << change_completed->sequenceNumber.to64long());
                        // Release CacheChange_t.
                        releaseCache(change_completed);
                    }
                }
            }
            return true;
        }
    }

    logWarning(RTPS_MSG_IN, IDSTRING "Reader " << m_guid << " received DATA_FRAG from unknown writer" << writer_guid);
    return true;
}

// A stateless (best-effort) reader does not implement the reliable protocol,
// so HEARTBEAT submessages are intentionally ignored.
// Always returns true so message processing continues normally.
bool StatelessReader::processHeartbeatMsg(const GUID_t& /*writerGUID*/, uint32_t /*hbCount*/,
                                          const SequenceNumber_t& /*firstSN*/, const SequenceNumber_t& /*lastSN*/,
                                          bool /*finalFlag*/, bool /*livelinessFlag*/) {
    return true;
}

// GAP submessages only carry information for reliable readers; a stateless
// reader intentionally ignores them. Always returns true.
bool StatelessReader::processGapMsg(const GUID_t& /*writerGUID*/, const SequenceNumber_t& /*gapStart*/,
                                    const SequenceNumberSet_t& /*gapList*/) {
    return true;
}

bool StatelessReader::processE2EMsg(const E2EProfile04_t& e2eHeader) {
    this->e2e_profile04_ = e2eHeader;
    return true;
}

inline bool StatelessReader::acceptMsgFrom(const GUID_t& writerId, ChangeKind_t change_kind) {
    if (change_kind == ChangeKind_t::ALIVE) {
        if (m_acceptMessagesFromUnkownWriters) {
            return true;
        } else if (writerId.entityId == m_trustedWriterEntityId) {
            return true;
        }
    }

    bool ret = std::any_of(matched_writers_.begin(), matched_writers_.end(),
                           [&writerId](const RemoteWriterInfo_t& writer) { return writer.guid == writerId; });
    if (ret == false && writerId.entityId == participant_stateless_message_writer_entity_id) {
        std::ostringstream oss;
        oss << writerId << " " << getGuid();
        elogInfoKeyT(
            SECURITY, oss.str(),
            "Accept handshake message from " << writerId << " to " << getGuid() << " not found matched writer");
    }
    return ret;
}

inline bool StatelessReader::thereIsUpperRecordOf(const GUID_t& guid, const SequenceNumber_t& seq) {
    return get_last_notified(guid) >= seq;
}

// Assert liveliness of 'writer' on the participant's WLP liveliness manager.
// A writer with an infinite lease duration needs no assertion; a finite lease
// with WLP disabled is a configuration error and is logged.
inline void StatelessReader::assert_writer_liveliness(const GUID_t& writer) const {
    if (!(liveliness_lease_duration_ < c_TimeInfinite)) {
        // Infinite lease: nothing to assert.
        return;
    }

    auto* const wlp = mp_RTPSParticipant->wlp();
    if (wlp == nullptr) {
        elogError(RTPS_LIVELINESS, RetCode_t::RETCODE_NOT_ENABLED,
                  "Finite liveliness lease duration but WLP not enabled");
        return;
    }

    (void)wlp->sub_liveliness_manager_->assert_liveliness(writer, liveliness_kind_, liveliness_lease_duration_);
}

bool StatelessReader::writer_has_manual_liveliness(const GUID_t& guid) {
    for (const RemoteWriterInfo_t& writer : matched_writers_) {
        if (writer.guid == guid) {
            return writer.has_manual_topic_liveliness;
        }
    }
    return false;
}

// Notify the attached listener (if any) that 'lost_num' samples were lost for
// reason 'statusKind'. The listener pointer is fetched exactly once: calling
// getListener() separately for the null check and the invocation could race
// with a concurrent listener change between the two calls.
inline void StatelessReader::report_sample_lost(SampleLostStatusKind statusKind, uint32_t lost_num) {
    auto* listener = getListener();
    if (listener != nullptr) {
        listener->on_sample_lost(lost_num, statusKind);
    }
}
