#pragma once

#include "compi/context.h"
#include "compi/fixed_type.h"
#include "compi/frame.h"
#include "compi/serializer.h"
#include "compi/tag.h"

#include <cstddef>
#include <cstring>
#include <limits>
#include <mpi.h>
#include <stdexcept>
#include <type_traits>
#include <utility>
#include <vector>

namespace compi
{

// Point-to-point send mode: Default (standard MPI_Send/Isend) or
// Synchronous (MPI_Ssend/MPI_Issend — send completes only after the receiver matches).
enum class SendMode
{
    Default,
    Synchronous
};

// Synchronization strategy for collectives / exchanges:
// - Default: whatever the existing implementation chooses (usually the counts+displs path)
// - AlltoallCounts: exchange sizes first, then move bytes with Alltoallv
// - NBX: lightweight non-blocking exchange (paired size+payload Isend/Irecv + Waitall)
enum class SyncType
{
    Default,
    AlltoallCounts,
    NBX
};

namespace collectives
{

// C++17 detection idiom: member `value` is true iff `T{} + T{}` is well-formed.
namespace detail
{
// Primary template: selected when the operator+ expression below is ill-formed.
template <typename T, typename = void>
struct has_plus_op : std::false_type
{
};

// Partial specialization: selected when T supports binary operator+.
template <typename T>
struct has_plus_op<T, std::void_t<decltype(std::declval<T>() + std::declval<T>())>>
    : std::true_type
{
};
}

/**
 * TLV 兼容与消息结构统一
 *
 * - 非固定类型统一采用双帧：METADATA(int payload_bytes) + 数据帧（SERIALIZED_VECTOR/RAW_BYTES）。
 * - 所有 gather/scatter/alltoall(v) 的变长路径均以帧为单位发布，NBX 路径也以帧为单位。
 * - 接口参数统一为 `Context&`；如出现 `Communicator&`，其为 `Context` 的别名，语义一致。
 * - 发送端：先构造 METADATA 帧（声明后续数据负载大小），再发送数据帧。
 * - 接收端：先接收并解析 METADATA，根据声明的大小接收数据帧，再通过 `frame::*` 解包元素。
 *
 * 多帧传输：
 * - 大型数据可分割为多个帧进行传输
 * - 每个帧独立包含完整的帧头和元素数信息
 * - 接收端按帧循环解包，直到处理完所有字节
 */

// -----------------------------
// 内部辅助：counts + displs 计算
// -----------------------------

/**
 * Safely narrow a size_t to int.
 * @param size value to convert
 * @return the same value as an int
 * @throws std::overflow_error when the value does not fit in an int
 */
inline int
safe_size_to_int(size_t size)
{
    if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
    {
        throw std::overflow_error("Size exceeds INT_MAX, cannot convert to int safely");
    }
    return static_cast<int>(size);
}

/**
 * Verify that adding `increment` to `current_total` keeps the total within int range.
 * @param current_total accumulated value so far (wide type to avoid intermediate overflow)
 * @param increment amount about to be added
 * @throws std::overflow_error when the sum would exceed INT_MAX
 */
inline void
check_int_overflow_for_accumulation(MPI_Aint current_total, int increment)
{
    // Rearranged as `total > INT_MAX - increment` so the comparison itself cannot overflow.
    const MPI_Aint headroom = static_cast<MPI_Aint>(std::numeric_limits<int>::max()) - increment;
    if (current_total > headroom)
    {
        throw std::overflow_error(
            "Integer overflow detected in count accumulation: total would exceed INT_MAX");
    }
}

/**
 * 安全地计算 counts 数组的总和，使用 MPI_Aint 避免溢出
 * @param counts 计数数组
 * @return 总和，如果超过 INT_MAX 则抛出异常
 * @throws std::overflow_error 如果总和超过 INT_MAX
 */
inline int
safe_sum_counts(const std::vector<int> & counts)
{
    MPI_Aint total = 0;
    for (int c : counts)
    {
        check_int_overflow_for_accumulation(total, c);
        total += c;
    }
    return static_cast<int>(total);
}

/**
 * 从 counts 数组计算 displs 数组，使用 MPI_Aint 避免溢出
 * @param counts 每个段的元素计数
 * @return displs 每个段的起始偏移
 * @throws std::overflow_error 如果累计偏移超过 INT_MAX
 */
inline std::vector<int>
make_displs_from_counts(const std::vector<int> & counts)
{
    std::vector<int> displs(counts.size(), 0);
    MPI_Aint acc = 0; // 使用 MPI_Aint 避免中间累加溢出
    for (size_t i = 0; i < counts.size(); ++i)
    {
        check_int_overflow_for_accumulation(acc, counts[i]);
        displs[i] = static_cast<int>(acc);
        acc += counts[i];
    }
    return displs;
}

// -----------------------------
// 打包/解包：packed-range 协议（使用 serializer 批量编解码）
// -----------------------------

/**
 * Pack a single frame's elements into a byte stream (unified TLV format).
 * Thin forwarding wrapper over frame::pack_frame.
 * @param values elements to pack
 * @return byte vector containing the frame header plus serialized data
 */
template <typename T>
inline std::vector<std::byte>
pack_frame(const std::vector<T> & values)
{
    return frame::pack_frame(values);
}

// NOTE: the legacy compatibility wrapper that packed an element vector into a
// single frame has been removed; call pack_frame(values) directly instead.

/**
 * Parse a single frame from a byte stream (unified frame handler; supports
 * both the TLV and the legacy format). Thin forwarding wrapper.
 * @param bytes byte vector containing frame data
 * @param offset offset at which to start parsing
 * @param frame_elements output: elements decoded from this frame
 * @return offset of the next frame, or bytes.size() when no frames remain
 */
template <typename T>
inline size_t
unpack_single_frame(const std::vector<std::byte> & bytes,
                    size_t offset,
                    std::vector<T> & frame_elements)
{
    return frame::unpack_single_frame(bytes, offset, frame_elements);
}

/**
 * Parse every frame in a byte stream and return all decoded elements
 * (unified frame handler). Loops frame by frame until all bytes are consumed.
 * @param bytes byte vector containing one or more frames
 * @return all elements from all frames, in order
 */
template <typename T>
inline std::vector<T>
unpack_range_by_frames(const std::vector<std::byte> & bytes)
{
    return frame::unpack_range<T>(bytes);
}

/**
 * Decode a byte stream into elements (compatibility entry point; prefer the
 * frame-by-frame path). Uses the unified frame handler, which supports both
 * the TLV and the legacy format.
 * @param bytes byte vector containing serialized data
 * @return decoded elements
 */
template <typename T>
inline std::vector<T>
unpack_range(const std::vector<std::byte> & bytes)
{
    return frame::unpack_range<T>(bytes);
}

/**
 * Iterate over a byte stream frame by frame, handing each frame's elements to
 * a callback (unified frame handler). Avoids unpacking all data into memory
 * at once.
 * @param bytes byte vector containing one or more frames
 * @param frame_handler callback invoked with each frame's element vector
 */
template <typename T, typename FrameHandler>
inline void
unpack_range_iteratively(const std::vector<std::byte> & bytes, FrameHandler && frame_handler)
{
    frame::unpack_range_iteratively<T>(bytes, std::forward<FrameHandler>(frame_handler));
}

// -----------------------------
// counts+displs 字节级工具：仅 root 收集（Gatherv）与所有进程收集（Allgatherv）
// -----------------------------
/**
 * Gather every rank's byte buffer onto the root rank.
 * On root, the result is re-framed as one frame pair per rank:
 * METADATA(size:int) followed by RAW_BYTES(segment).
 * @param comm communicator context
 * @param root root rank
 * @param local_bytes this rank's byte payload
 * @return concatenated METADATA+RAW_BYTES frames on root; empty on other ranks
 * @throws std::overflow_error if the local size or the total exceeds INT_MAX
 */
inline std::vector<std::byte>
gatherv_bytes(Context & comm, int root, const std::vector<std::byte> & local_bytes)
{
    int world_size = comm.size();
    int rank = comm.rank();
    int n = safe_size_to_int(local_bytes.size()); // checked narrowing; throws on overflow
    std::vector<int> counts(world_size, 0);
    {
        const int rc = MPI_Gather(&n, 1, MPI_INT, counts.data(), 1, MPI_INT, root, comm.comm());
        (void)call_mpi(rc, "MPI_Gather failed (gatherv_bytes counts)", comm.comm());
    }
    std::vector<std::byte> out;
    std::vector<int> displs;
    if (rank == root)
    {
        displs = make_displs_from_counts(counts);
        int total = safe_sum_counts(counts); // checked sum; throws on overflow
        out.resize(total);
    }
    {
        const int rc = MPI_Gatherv(local_bytes.data(),
                                   n,
                                   MPI_BYTE,
                                   rank == root ? out.data() : nullptr,
                                   rank == root ? counts.data() : nullptr,
                                   rank == root ? displs.data() : nullptr,
                                   MPI_BYTE,
                                   root,
                                   comm.comm());
        (void)call_mpi(rc, "MPI_Gatherv failed (gatherv_bytes data)", comm.comm());
    }
    // Re-emit as dual-frame output: METADATA(size:int) + RAW_BYTES(segment) per rank
    if (rank != root)
        return {};
    std::vector<std::byte> frames;
    frames.reserve(static_cast<std::size_t>(out.size()) +
                   world_size * (frame::TLV_FRAME_HEADER_SIZE * 2));
    for (int i = 0; i < world_size; ++i)
    {
        int sz = counts[i];
        // METADATA frame declaring the size of rank i's segment
        auto meta_payload = serialize_to_byte_vector(sz);
        frame::FrameBuilder meta_builder(
            frame::DataType::METADATA, frame::Flags::NONE, std::move(meta_payload));
        auto meta_frame = meta_builder.to_bytes();
        frames.insert(frames.end(), meta_frame.begin(), meta_frame.end());

        // RAW_BYTES frame carrying rank i's segment (payload may be empty)
        std::vector<std::byte> seg;
        if (sz > 0)
        {
            seg.resize(sz);
            std::memcpy(seg.data(), out.data() + displs[i], static_cast<std::size_t>(sz));
        }
        frame::FrameBuilder raw_builder(
            frame::DataType::RAW_BYTES, frame::Flags::NONE, std::move(seg));
        auto raw_frame = raw_builder.to_bytes();
        frames.insert(frames.end(), raw_frame.begin(), raw_frame.end());
    }
    return frames;
}

// -----------------------------
// broadcast: single-value broadcast (fixed type or serialized)
// -----------------------------
/**
 * Broadcast a single value from root to all ranks.
 * Fixed-width types go through MPI_Bcast directly; other types are serialized
 * on root, the byte count is broadcast first, then the payload bytes.
 * @param comm communicator context
 * @param root root rank (source of the value)
 * @param value input on root, output on every other rank
 * @throws std::overflow_error if the serialized size exceeds INT_MAX
 */
template <typename T>
inline void
broadcast(Context & comm, int root, T & value)
{
    if constexpr (types::is_fixed_type_v<T>)
    {
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        const int rc = MPI_Bcast(&value, 1, dt, root, comm.comm());
        (void)call_mpi(rc, "MPI_Bcast failed (broadcast fixed)", comm.comm());
    }
    else
    {
        int rank = comm.rank();
        std::vector<std::byte> bytes;
        if (rank == root)
        {
            bytes = serialize_to_byte_vector(value);
        }
        // Checked narrowing: consistent with gatherv_bytes and guards against
        // >INT_MAX payloads silently truncating (bytes is empty on non-root).
        int n = safe_size_to_int(bytes.size());
        {
            const int rc = MPI_Bcast(&n, 1, MPI_INT, root, comm.comm());
            (void)call_mpi(rc, "MPI_Bcast failed (broadcast varlen size)", comm.comm());
        }
        bytes.resize(n);
        if (n > 0)
        {
            const int rc = MPI_Bcast(bytes.data(), n, MPI_BYTE, root, comm.comm());
            (void)call_mpi(rc, "MPI_Bcast failed (broadcast varlen data)", comm.comm());
        }
        if (rank != root)
        {
            value = deserialize<T>(bytes);
        }
    }
}

// -----------------------------
// gather: collect one value per rank onto root (fixed type or serialized)
// Returns: a world_size-element vector on root; an empty vector elsewhere
// -----------------------------
/**
 * Gather one value from every rank onto root.
 * Fixed-width types use MPI_Gather directly; other types are serialized per
 * rank, sizes gathered first, then payloads moved with MPI_Gatherv.
 * @param comm communicator context
 * @param root root rank
 * @param send_value this rank's contribution
 * @return on root: world_size values ordered by rank; elsewhere: empty
 * @throws std::overflow_error if a payload size or the total exceeds INT_MAX
 */
template <typename T>
inline std::vector<T>
gather(Context & comm, int root, const T & send_value)
{
    int world_size = comm.size();
    int rank = comm.rank();
    if constexpr (types::is_fixed_type_v<T>)
    {
        std::vector<T> recvbuf;
        if (rank == root)
            recvbuf.resize(world_size);
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        {
            const int rc =
                MPI_Gather((void *)&send_value, 1, dt, recvbuf.data(), 1, dt, root, comm.comm());
            (void)call_mpi(rc, "MPI_Gather failed (gather fixed)", comm.comm());
        }
        return recvbuf;
    }
    else
    {
        // Each rank packs its own value
        auto bytes = serialize_to_byte_vector(send_value);
        int n = safe_size_to_int(bytes.size()); // checked narrowing (was static_cast)
        std::vector<int> counts(world_size);
        {
            const int rc = MPI_Gather(&n, 1, MPI_INT, counts.data(), 1, MPI_INT, root, comm.comm());
            (void)call_mpi(rc, "MPI_Gather failed (gather varlen counts)", comm.comm());
        }

        std::vector<std::byte> all_bytes;
        if (rank == root)
        {
            // Checked accumulation (replaces an unchecked `total += c` loop)
            int total = safe_sum_counts(counts);
            all_bytes.resize(total);
        }
        auto displs = (rank == root) ? make_displs_from_counts(counts) : std::vector<int>{};
        {
            const int rc = MPI_Gatherv(bytes.data(),
                                       n,
                                       MPI_BYTE,
                                       rank == root ? all_bytes.data() : nullptr,
                                       rank == root ? counts.data() : nullptr,
                                       rank == root ? displs.data() : nullptr,
                                       MPI_BYTE,
                                       root,
                                       comm.comm());
            (void)call_mpi(rc, "MPI_Gatherv failed (gather varlen data)", comm.comm());
        }

        std::vector<T> result;
        if (rank == root)
        {
            // Unpack each rank's segment in rank order
            result.reserve(world_size);
            for (int i = 0; i < world_size; ++i)
            {
                int offset = displs[i];
                int len = counts[i];
                std::vector<std::byte> chunk(len);
                if (len > 0)
                {
                    std::memcpy(chunk.data(), all_bytes.data() + offset, len);
                }
                result.emplace_back(deserialize<T>(chunk));
            }
        }
        return result;
    }
}

// -----------------------------
// scatter: root distributes world_size values of T, one per rank (fixed or serialized)
// -----------------------------
/**
 * Scatter one value per rank from root.
 * Fixed-width types use MPI_Scatter directly. Other types use the TLV
 * dual-frame protocol: a fixed-length METADATA frame (declaring the data
 * frame's payload size) is scattered first, then the variable-length data
 * frames are scattered with MPI_Scatterv.
 * @param comm communicator context
 * @param root root rank (only root reads send_values)
 * @param send_values values to distribute, indexed by destination rank (root only)
 * @param recv_value output: the value destined for this rank
 */
template <typename T>
inline void
scatter(Context & comm, int root, const std::vector<T> & send_values, T & recv_value)
{
    int world_size = comm.size();
    int rank = comm.rank();
    if constexpr (types::is_fixed_type_v<T>)
    {
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        {
            const int rc = MPI_Scatter(rank == root ? (void *)send_values.data() : nullptr,
                                       1,
                                       dt,
                                       &recv_value,
                                       1,
                                       dt,
                                       root,
                                       comm.comm());
            (void)call_mpi(rc, "MPI_Scatter failed (scatter fixed)", comm.comm());
        }
    }
    else
    {
        // TLV dual-frame protocol: scatter the METADATA(size:int) frame first,
        // then the data frame (SERIALIZED_VECTOR).
        const int meta_len = frame::TLV_FRAME_HEADER_SIZE + static_cast<int>(sizeof(int));
        std::vector<std::byte> meta_recv(static_cast<std::size_t>(meta_len));
        if (rank == root)
        {
            // Build, for each destination, the data frame and its metadata frame
            std::vector<int> data_counts(world_size, 0);
            std::vector<std::byte> meta_flat(static_cast<std::size_t>(world_size * meta_len));
            std::vector<std::byte> data_flat;
            std::vector<std::vector<std::byte>> data_frames(world_size);
            for (int i = 0; i < world_size; ++i)
            {
                data_frames[i] = pack_frame(std::vector<T>{send_values[i]});
                int payload_size =
                    static_cast<int>(data_frames[i].size()) - frame::TLV_FRAME_HEADER_SIZE;
                auto meta_payload = serialize_to_byte_vector(payload_size);
                frame::FrameBuilder meta_builder(
                    frame::DataType::METADATA, frame::Flags::NONE, std::move(meta_payload));
                auto meta_frame = meta_builder.to_bytes();
                std::memcpy(meta_flat.data() + static_cast<std::size_t>(i * meta_len),
                            meta_frame.data(),
                            static_cast<std::size_t>(meta_len));
                data_counts[i] = static_cast<int>(data_frames[i].size());
            }
            auto data_displs = make_displs_from_counts(data_counts);
            int total = safe_sum_counts(data_counts);
            data_flat.resize(static_cast<std::size_t>(total));
            for (int i = 0; i < world_size; ++i)
            {
                if (data_counts[i] > 0)
                {
                    std::memcpy(data_flat.data() + static_cast<std::size_t>(data_displs[i]),
                                data_frames[i].data(),
                                static_cast<std::size_t>(data_counts[i]));
                }
            }
            // Scatter the fixed-length METADATA frames
            {
                const int rc = MPI_Scatter(meta_flat.data(),
                                           meta_len,
                                           MPI_BYTE,
                                           meta_recv.data(),
                                           meta_len,
                                           MPI_BYTE,
                                           root,
                                           comm.comm());
                (void)call_mpi(rc, "MPI_Scatter failed (scatter TLV metadata root)", comm.comm());
            }
            // Root also receives its own slot; decode the declared payload size
            int recv_payload = 0;
            {
                frame::FrameReader mr(meta_recv);
                auto mb = mr.boundary();
                if (mb.is_valid() && mb.data_type == frame::DataType::METADATA)
                    recv_payload = deserialize<int>(mr.payload());
            }
            int recv_frame_size = recv_payload + frame::TLV_FRAME_HEADER_SIZE;
            std::vector<std::byte> recv_frame(static_cast<std::size_t>(recv_frame_size));
            // Scatter the variable-length data frames
            {
                const int rc = MPI_Scatterv(data_flat.data(),
                                            data_counts.data(),
                                            data_displs.data(),
                                            MPI_BYTE,
                                            recv_frame.data(),
                                            recv_frame_size,
                                            MPI_BYTE,
                                            root,
                                            comm.comm());
                (void)call_mpi(rc, "MPI_Scatterv failed (scatter TLV data root)", comm.comm());
            }
            auto elems = frame::unpack_range<T>(recv_frame);
            if (!elems.empty())
                recv_value = elems.front();
        }
        else
        {
            // Receive the fixed-length METADATA frame to learn the upcoming data frame size
            {
                const int rc = MPI_Scatter(nullptr,
                                           meta_len,
                                           MPI_BYTE,
                                           meta_recv.data(),
                                           meta_len,
                                           MPI_BYTE,
                                           root,
                                           comm.comm());
                (void)call_mpi(
                    rc, "MPI_Scatter failed (scatter TLV metadata non-root)", comm.comm());
            }
            int recv_payload = 0;
            {
                frame::FrameReader mr(meta_recv);
                auto mb = mr.boundary();
                if (mb.is_valid() && mb.data_type == frame::DataType::METADATA)
                    recv_payload = deserialize<int>(mr.payload());
            }
            int recv_frame_size = recv_payload + frame::TLV_FRAME_HEADER_SIZE;
            std::vector<std::byte> recv_frame(static_cast<std::size_t>(recv_frame_size));
            {
                const int rc = MPI_Scatterv(nullptr,
                                            nullptr,
                                            nullptr,
                                            MPI_BYTE,
                                            recv_frame.data(),
                                            recv_frame_size,
                                            MPI_BYTE,
                                            root,
                                            comm.comm());
                (void)call_mpi(rc, "MPI_Scatterv failed (scatter TLV data non-root)", comm.comm());
            }
            auto elems = frame::unpack_range<T>(recv_frame);
            if (!elems.empty())
                recv_value = elems.front();
        }
    }
}

// -----------------------------
// allgather: collect one value per rank onto every rank (fixed or serialized)
// -----------------------------
/**
 * Gather one value from every rank onto all ranks.
 * Fixed-width types use MPI_Allgather directly; other types serialize per
 * rank, exchange sizes via MPI_Allgather, then payloads via MPI_Allgatherv.
 * @param comm communicator context
 * @param send_value this rank's contribution
 * @return world_size values ordered by rank (identical on every rank)
 * @throws std::overflow_error if a payload size or the total exceeds INT_MAX
 */
template <typename T>
inline std::vector<T>
allgather(Context & comm, const T & send_value)
{
    int world_size = comm.size();
    if constexpr (types::is_fixed_type_v<T>)
    {
        std::vector<T> recvbuf(world_size);
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        {
            const int rc =
                MPI_Allgather((void *)&send_value, 1, dt, recvbuf.data(), 1, dt, comm.comm());
            (void)call_mpi(rc, "MPI_Allgather failed (allgather fixed)", comm.comm());
        }
        return recvbuf;
    }
    else
    {
        auto bytes = serialize_to_byte_vector(send_value);
        int n = safe_size_to_int(bytes.size()); // checked narrowing (was static_cast)
        std::vector<int> counts(world_size, 0);
        {
            const int rc = MPI_Allgather(&n, 1, MPI_INT, counts.data(), 1, MPI_INT, comm.comm());
            (void)call_mpi(rc, "MPI_Allgather failed (allgather varlen counts)", comm.comm());
        }
        auto displs = make_displs_from_counts(counts);
        // Checked accumulation (replaces an unchecked `total += c` loop)
        int total = safe_sum_counts(counts);
        std::vector<std::byte> all_bytes(total);
        {
            const int rc = MPI_Allgatherv(bytes.data(),
                                          n,
                                          MPI_BYTE,
                                          all_bytes.data(),
                                          counts.data(),
                                          displs.data(),
                                          MPI_BYTE,
                                          comm.comm());
            (void)call_mpi(rc, "MPI_Allgatherv failed (allgather varlen data)", comm.comm());
        }
        std::vector<T> result;
        result.reserve(world_size);
        for (int i = 0; i < world_size; ++i)
        {
            int offset = displs[i];
            int len = counts[i];
            std::vector<std::byte> chunk(len);
            if (len > 0)
            {
                std::memcpy(chunk.data(), all_bytes.data() + offset, len);
            }
            result.emplace_back(deserialize<T>(chunk));
        }
        return result;
    }
}

// -----------------------------
// alltoall: 每进程向每个目标发送一个值（固定或序列化）
// 固定类型：send_values.size() == world_size
// 非固定类型：使用两阶段大小交换 + Alltoallv 字节路径
// -----------------------------
template <typename T>
inline std::vector<T>
alltoall(Context & comm,
         const std::vector<T> & send_values,
         SyncType sync = SyncType::Default,
         SendMode mode = SendMode::Default)
{
    int world_size = comm.size();
    if constexpr (types::is_fixed_type_v<T>)
    {
        std::vector<T> recv_values(world_size);
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        if (sync == SyncType::NBX)
        {
            // 轻量 NBX：点对点非阻塞交换（可选同步发送）
            auto & alloc = comm.tag_allocator();
            auto ns = alloc.create_namespace("alltoall_nbx_fixed");
            MessageTag data_tag = ns.request();

            std::vector<MPI_Request> send_reqs;
            std::vector<MPI_Request> recv_reqs;
            send_reqs.reserve(world_size);
            recv_reqs.reserve(world_size);

            int rank = comm.rank();
            for (int i = 0; i < world_size; ++i)
            {
                if (i == rank)
                {
                    recv_values[i] = send_values[i];
                    continue;
                }
                // 接收来自 i 的一个值
                MPI_Request rreq = MPI_REQUEST_NULL;
                const int rrc =
                    MPI_Irecv(&recv_values[i], 1, dt, i, data_tag.value, comm.comm(), &rreq);
                (void)call_mpi(rrc, "MPI_Irecv failed (alltoall fixed NBX)", comm.comm());
                recv_reqs.push_back(rreq);

                // 发送给 i 一个值（标准或同步）
                MPI_Request sreq = MPI_REQUEST_NULL;
                int src = rank; // for clarity
                const int src_rc =
                    (mode == SendMode::Synchronous)
                        ? MPI_Issend(
                              (void *)&send_values[i], 1, dt, i, data_tag.value, comm.comm(), &sreq)
                        : MPI_Isend((void *)&send_values[i],
                                    1,
                                    dt,
                                    i,
                                    data_tag.value,
                                    comm.comm(),
                                    &sreq);
                (void)call_mpi(src_rc, "MPI_Isend/Issend failed (alltoall fixed NBX)", comm.comm());
                send_reqs.push_back(sreq);
            }
            if (!recv_reqs.empty())
            {
                const int wrc = MPI_Waitall(
                    safe_size_to_int(recv_reqs.size()), recv_reqs.data(), MPI_STATUSES_IGNORE);
                (void)call_mpi(wrc, "MPI_Waitall failed (alltoall fixed NBX recv)", comm.comm());
            }
            if (!send_reqs.empty())
            {
                const int wrc2 = MPI_Waitall(
                    safe_size_to_int(send_reqs.size()), send_reqs.data(), MPI_STATUSES_IGNORE);
                (void)call_mpi(wrc2, "MPI_Waitall failed (alltoall fixed NBX send)", comm.comm());
            }
            ns.release(data_tag);
            // 销毁临时命名空间，避免后续重复创建同名命名空间导致异常
            alloc.destroy_namespace(ns.id());
            return recv_values;
        }
        else
        {
            // 默认/AlltoallCounts：固定类型直接使用 Alltoall
            const int rc = MPI_Alltoall(
                (void *)send_values.data(), 1, dt, recv_values.data(), 1, dt, comm.comm());
            (void)call_mpi(rc, "MPI_Alltoall failed (alltoall fixed blocking)", comm.comm());
            return recv_values;
        }
    }
    else
    {
        // 非固定类型
        if (sync == SyncType::NBX)
        {
            // 轻量 NBX：以帧为单位发布与接收（METADATA + 数据帧）
            auto & alloc = comm.tag_allocator();
            auto ns = alloc.create_namespace("alltoall_nbx_varlen");
            MessageTag counts_tag = ns.request();
            MessageTag data_tag = ns.request();

            int rank = comm.rank();
            // 预打包每个目标的帧
            const int meta_len = frame::TLV_FRAME_HEADER_SIZE + static_cast<int>(sizeof(int));
            std::vector<std::vector<std::byte>> data_frames(world_size);
            std::vector<std::vector<std::byte>> meta_frames(world_size);
            std::vector<int> send_counts(world_size, 0);
            for (int i = 0; i < world_size; ++i)
            {
                data_frames[i] = pack_frame(std::vector<T>{send_values[i]});
                send_counts[i] = safe_size_to_int(data_frames[i].size());
                int payload_size = send_counts[i] - frame::TLV_FRAME_HEADER_SIZE;
                auto meta_payload = serialize_to_byte_vector(payload_size);
                frame::FrameBuilder meta_builder(
                    frame::DataType::METADATA, frame::Flags::NONE, std::move(meta_payload));
                meta_frames[i] = meta_builder.to_bytes();
            }

            // 第一阶段：交换 METADATA 帧
            std::vector<std::vector<std::byte>> recv_meta(
                world_size, std::vector<std::byte>(static_cast<std::size_t>(meta_len)));
            std::vector<MPI_Request> meta_sreqs;
            std::vector<MPI_Request> meta_rreqs;
            meta_sreqs.reserve(world_size);
            meta_rreqs.reserve(world_size);
            for (int i = 0; i < world_size; ++i)
            {
                if (i == rank)
                    continue;
                MPI_Request rreq = MPI_REQUEST_NULL;
                const int rrc = MPI_Irecv(recv_meta[i].data(),
                                          meta_len,
                                          MPI_BYTE,
                                          i,
                                          counts_tag.value,
                                          comm.comm(),
                                          &rreq);
                (void)call_mpi(rrc, "MPI_Irecv failed (alltoall varlen NBX metadata)", comm.comm());
                meta_rreqs.push_back(rrc == MPI_SUCCESS ? rreq : MPI_REQUEST_NULL);

                MPI_Request sreq = MPI_REQUEST_NULL;
                const int src_rc = (mode == SendMode::Synchronous)
                                       ? MPI_Issend(meta_frames[i].data(),
                                                    meta_len,
                                                    MPI_BYTE,
                                                    i,
                                                    counts_tag.value,
                                                    comm.comm(),
                                                    &sreq)
                                       : MPI_Isend(meta_frames[i].data(),
                                                   meta_len,
                                                   MPI_BYTE,
                                                   i,
                                                   counts_tag.value,
                                                   comm.comm(),
                                                   &sreq);
                (void)call_mpi(
                    src_rc, "MPI_Isend/Issend failed (alltoall varlen NBX metadata)", comm.comm());
                meta_sreqs.push_back(sreq);
            }
            if (!meta_rreqs.empty())
            {
                const int wrc_meta_recv = MPI_Waitall(
                    static_cast<int>(meta_rreqs.size()), meta_rreqs.data(), MPI_STATUSES_IGNORE);
                (void)call_mpi(wrc_meta_recv,
                               "MPI_Waitall failed (alltoall varlen NBX metadata recv)",
                               comm.comm());
            }
            if (!meta_sreqs.empty())
            {
                const int wrc_meta_send = MPI_Waitall(
                    static_cast<int>(meta_sreqs.size()), meta_sreqs.data(), MPI_STATUSES_IGNORE);
                (void)call_mpi(wrc_meta_send,
                               "MPI_Waitall failed (alltoall varlen NBX metadata send)",
                               comm.comm());
            }

            // 第二阶段：按 METADATA 声明的大小交换数据帧
            std::vector<std::vector<std::byte>> recv_frames(world_size);
            std::vector<MPI_Request> data_sreqs;
            std::vector<MPI_Request> data_rreqs;
            data_sreqs.reserve(world_size);
            data_rreqs.reserve(world_size);
            for (int i = 0; i < world_size; ++i)
            {
                if (i == rank)
                {
                    // 本地：直接复制打包好的帧
                    recv_frames[i] = data_frames[i];
                    continue;
                }
                int payload = 0;
                {
                    frame::FrameReader mr(recv_meta[i]);
                    auto mb = mr.boundary();
                    if (mb.is_valid() && mb.data_type == frame::DataType::METADATA)
                        payload = deserialize<int>(mr.payload());
                }
                int frame_size = payload + frame::TLV_FRAME_HEADER_SIZE;
                recv_frames[i].resize(static_cast<std::size_t>(frame_size));
                MPI_Request rreq = MPI_REQUEST_NULL;
                const int rrc =
                    (frame_size > 0)
                        ? MPI_Irecv(recv_frames[i].data(),
                                    frame_size,
                                    MPI_BYTE,
                                    i,
                                    data_tag.value,
                                    comm.comm(),
                                    &rreq)
                        : MPI_Irecv(nullptr, 0, MPI_BYTE, i, data_tag.value, comm.comm(), &rreq);
                (void)call_mpi(rrc, "MPI_Irecv failed (alltoall varlen NBX data)", comm.comm());
                data_rreqs.push_back(rreq);

                MPI_Request sreq = MPI_REQUEST_NULL;
                const int src_rc = (mode == SendMode::Synchronous)
                                       ? MPI_Issend(data_frames[i].data(),
                                                    send_counts[i],
                                                    MPI_BYTE,
                                                    i,
                                                    data_tag.value,
                                                    comm.comm(),
                                                    &sreq)
                                       : MPI_Isend(data_frames[i].data(),
                                                   send_counts[i],
                                                   MPI_BYTE,
                                                   i,
                                                   data_tag.value,
                                                   comm.comm(),
                                                   &sreq);
                (void)call_mpi(
                    src_rc, "MPI_Isend/Issend failed (alltoall varlen NBX data)", comm.comm());
                data_sreqs.push_back(sreq);
            }
            if (!data_rreqs.empty())
            {
                const int wrc_data_recv = MPI_Waitall(
                    safe_size_to_int(data_rreqs.size()), data_rreqs.data(), MPI_STATUSES_IGNORE);
                (void)call_mpi(wrc_data_recv,
                               "MPI_Waitall failed (alltoall varlen NBX data recv)",
                               comm.comm());
            }
            if (!data_sreqs.empty())
            {
                const int wrc_data_send = MPI_Waitall(
                    safe_size_to_int(data_sreqs.size()), data_sreqs.data(), MPI_STATUSES_IGNORE);
                (void)call_mpi(wrc_data_send,
                               "MPI_Waitall failed (alltoall varlen NBX data send)",
                               comm.comm());
            }

            // 解包每段（单元素）
            std::vector<T> result(world_size);
            for (int i = 0; i < world_size; ++i)
            {
                auto elems = frame::unpack_range<T>(recv_frames[i]);
                result[i] = elems.empty() ? T{} : elems.front();
            }
            ns.release(counts_tag);
            ns.release(data_tag);
            // 销毁临时命名空间，避免后续重复创建同名命名空间导致异常
            alloc.destroy_namespace(ns.id());
            return result;
        }
        else
        {
            // Default/AlltoallCounts：统一为双帧方案（先交换 METADATA，再 Alltoallv 数据帧）
            const int meta_len = frame::TLV_FRAME_HEADER_SIZE + static_cast<int>(sizeof(int));
            // 零拷贝优化：直接将 METADATA 帧写入扁平缓冲，并记录数据帧大小
            std::vector<int> send_counts(world_size, 0);
            std::vector<std::byte> send_meta_flat(static_cast<std::size_t>(world_size * meta_len));
            for (int i = 0; i < world_size; ++i)
            {
                // 序列化单元素向量为数据载荷，使用 writer 的 span
                auto payload_writer = serialize_to_vector_writer(std::vector<T>{send_values[i]});
                kp::span<const std::byte> payload_span = payload_writer.written_data();
                const int payload_size = safe_size_to_int(payload_span.size());
                // 数据帧总长度 = TLV 头 + 载荷
                send_counts[i] = frame::TLV_FRAME_HEADER_SIZE + payload_size;

                // 构造并写入 METADATA 帧（固定长度）：头 + payload_size(int, 小端)
                auto hdr = frame::make_header_bytes(frame::DataType::METADATA,
                                                    frame::Flags::NONE,
                                                    static_cast<std::uint64_t>(sizeof(int)));
                std::byte * meta_dst =
                    send_meta_flat.data() + static_cast<std::size_t>(i) * meta_len;
                std::memcpy(meta_dst, hdr.data(), hdr.size());
                meta_dst[frame::TLV_FRAME_HEADER_SIZE + 0] =
                    static_cast<std::byte>(payload_size & 0xFF);
                meta_dst[frame::TLV_FRAME_HEADER_SIZE + 1] =
                    static_cast<std::byte>((payload_size >> 8) & 0xFF);
                meta_dst[frame::TLV_FRAME_HEADER_SIZE + 2] =
                    static_cast<std::byte>((payload_size >> 16) & 0xFF);
                meta_dst[frame::TLV_FRAME_HEADER_SIZE + 3] =
                    static_cast<std::byte>((payload_size >> 24) & 0xFF);
            }

            // 交换固定长度 METADATA 帧（Alltoall）
            std::vector<std::byte> recv_meta_flat(static_cast<std::size_t>(world_size * meta_len));
            {
                const int rc = MPI_Alltoall(send_meta_flat.data(),
                                            meta_len,
                                            MPI_BYTE,
                                            recv_meta_flat.data(),
                                            meta_len,
                                            MPI_BYTE,
                                            comm.comm());
                (void)call_mpi(
                    rc, "MPI_Alltoall failed (alltoall varlen TLV metadata)", comm.comm());
            }

            // 根据接收的 METADATA 解析各源数据帧大小
            std::vector<int> recv_counts(world_size, 0);
            for (int i = 0; i < world_size; ++i)
            {
                std::vector<std::byte> meta_buf(static_cast<std::size_t>(meta_len));
                std::memcpy(meta_buf.data(),
                            recv_meta_flat.data() + static_cast<std::size_t>(i * meta_len),
                            static_cast<std::size_t>(meta_len));
                frame::FrameReader mr(meta_buf);
                auto b = mr.boundary();
                int payload = 0;
                if (b.is_valid() && b.data_type == frame::DataType::METADATA)
                    payload = deserialize<int>(mr.payload());
                recv_counts[i] = payload + frame::TLV_FRAME_HEADER_SIZE;
            }

            // 就地写入数据帧头和载荷到扁平缓冲，执行 Alltoallv
            auto send_displs = make_displs_from_counts(send_counts);
            auto recv_displs = make_displs_from_counts(recv_counts);
            int send_total = safe_sum_counts(send_counts);
            int recv_total = safe_sum_counts(recv_counts);
            std::vector<std::byte> send_big(static_cast<std::size_t>(send_total));
            for (int i = 0; i < world_size; ++i)
            {
                if (send_counts[i] <= 0)
                    continue;
                // 为该目标重新获取payload span（避免构建中间data_frames）
                auto payload_writer = serialize_to_vector_writer(std::vector<T>{send_values[i]});
                kp::span<const std::byte> payload_span = payload_writer.written_data();
                // 写 TLV 头
                auto hdr =
                    frame::make_header_bytes(frame::DataType::SERIALIZED_VECTOR,
                                             frame::Flags::NONE,
                                             static_cast<std::uint64_t>(payload_span.size()));
                std::byte * dst = send_big.data() + static_cast<std::size_t>(send_displs[i]);
                std::memcpy(dst, hdr.data(), hdr.size());
                // 写载荷
                if (!payload_span.empty())
                    std::memcpy(dst + frame::TLV_FRAME_HEADER_SIZE,
                                payload_span.data(),
                                payload_span.size());
            }
            std::vector<std::byte> recv_big(static_cast<std::size_t>(recv_total));
            {
                const int rc = MPI_Alltoallv(send_big.data(),
                                             send_counts.data(),
                                             send_displs.data(),
                                             MPI_BYTE,
                                             recv_big.data(),
                                             recv_counts.data(),
                                             recv_displs.data(),
                                             MPI_BYTE,
                                             comm.comm());
                (void)call_mpi(rc, "MPI_Alltoallv failed (alltoall varlen TLV data)", comm.comm());
            }

            // 解包每段数据帧，提取单元素
            std::vector<T> result(world_size);
            for (int i = 0; i < world_size; ++i)
            {
                int off = recv_displs[i];
                int len = recv_counts[i];
                std::vector<std::byte> frame_buf(static_cast<std::size_t>(len));
                if (len > 0)
                    std::memcpy(frame_buf.data(),
                                recv_big.data() + static_cast<std::size_t>(off),
                                static_cast<std::size_t>(len));
                auto elems = frame::unpack_range<T>(frame_buf);
                result[i] = elems.empty() ? T{} : elems.front();
            }
            return result;
        }
    }
}

// -----------------------------
// reduce/allreduce：固定类型用 MPI_Op；非固定类型使用用户二元操作并走 gather/allgather 路径
// -----------------------------
/**
 * Reduce a single value onto the root rank.
 *
 * Fixed-width types map directly onto MPI_Reduce with the supplied MPI_Op.
 * All other types are gathered to root and folded there with operator+ when
 * the type provides one — note that `op` is NOT applied on that path.
 *
 * @param comm       communicator context
 * @param root       rank that receives the reduced result
 * @param send_value this rank's contribution
 * @param recv_value written only at root; other ranks leave it untouched
 * @param op         MPI reduction operator (fixed-type path only)
 */
template <typename T>
inline void
reduce(Context & comm, int root, const T & send_value, T & recv_value, MPI_Op op = MPI_SUM)
{
    if constexpr (types::is_fixed_type_v<T>)
    {
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        // Named casts instead of a C-style cast: legacy MPI bindings take a
        // non-const send buffer, but MPI only reads from it here.
        const int rc = MPI_Reduce(const_cast<void *>(static_cast<const void *>(&send_value)),
                                  &recv_value,
                                  1,
                                  dt,
                                  op,
                                  root,
                                  comm.comm());
        (void)call_mpi(rc, "MPI_Reduce failed (reduce fixed)", comm.comm());
    }
    else
    {
        // Non-fixed types: gather all contributions to root, fold locally.
        auto gathered = gather(comm, root, send_value);
        if (comm.rank() == root)
        {
            if constexpr (detail::has_plus_op<T>::value)
            {
                // Default strategy: sequential left fold via operator+.
                T acc = gathered.empty() ? T{} : gathered.front();
                for (size_t i = 1; i < gathered.size(); ++i)
                    acc = acc + gathered[i];
                recv_value = acc;
            }
            else
            {
                // No operator+: keep the first contribution (rank 0's value).
                recv_value = gathered.empty() ? T{} : gathered.front();
            }
        }
    }
}

/**
 * Reduce a single value and deliver the result to every rank.
 *
 * Fixed-width types map directly onto MPI_Allreduce with the supplied MPI_Op.
 * All other types are allgathered and folded locally with operator+ when the
 * type provides one — note that `op` is NOT applied on that path.
 *
 * @param comm       communicator context
 * @param send_value this rank's contribution
 * @param op         MPI reduction operator (fixed-type path only)
 * @return the reduced value (identical on every rank)
 */
template <typename T>
inline T
allreduce(Context & comm, const T & send_value, MPI_Op op = MPI_SUM)
{
    if constexpr (types::is_fixed_type_v<T>)
    {
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        T recv{};
        // Named casts instead of a C-style cast: legacy MPI bindings take a
        // non-const send buffer, but MPI only reads from it here.
        const int rc = MPI_Allreduce(const_cast<void *>(static_cast<const void *>(&send_value)),
                                     &recv,
                                     1,
                                     dt,
                                     op,
                                     comm.comm());
        (void)call_mpi(rc, "MPI_Allreduce failed (allreduce fixed)", comm.comm());
        return recv;
    }
    else
    {
        // Non-fixed types: gather to every rank, then fold locally.
        auto all = allgather(comm, send_value);
        if constexpr (detail::has_plus_op<T>::value)
        {
            // Default strategy: sequential left fold via operator+.
            T acc = all.empty() ? T{} : all.front();
            for (size_t i = 1; i < all.size(); ++i)
                acc = acc + all[i];
            return acc;
        }
        else
        {
            // No operator+: keep the first contribution (rank 0's value).
            return all.empty() ? T{} : all.front();
        }
    }
}

// User-supplied binary fold (non-fixed types): op(lhs, rhs) -> T.
// The result is deposited in recv_value at root; other ranks do not touch it.
template <typename T, typename Fn>
inline void
reduce(Context & comm, int root, const T & send_value, T & recv_value, Fn op)
{
    if constexpr (types::is_fixed_type_v<T>)
    {
        // For fixed types the MPI_Op overload is preferred; this overload
        // still works via the gather-and-fold fallback below.
    }
    auto contributions = gather(comm, root, send_value);
    if (comm.rank() != root)
        return;
    if (contributions.empty())
    {
        recv_value = T{};
        return;
    }
    T folded = contributions.front();
    for (size_t idx = 1; idx < contributions.size(); ++idx)
        folded = op(folded, contributions[idx]);
    recv_value = folded;
}

// User-supplied binary fold, all-reduce flavour: every rank gets the result.
template <typename T, typename Fn>
inline T
allreduce(Context & comm, const T & send_value, Fn op)
{
    if constexpr (types::is_fixed_type_v<T>)
    {
        // For fixed types the MPI_Op overload is preferred; this overload
        // still works via the allgather-and-fold fallback below.
    }
    auto contributions = allgather(comm, send_value);
    if (contributions.empty())
        return T{};
    T folded = contributions.front();
    for (size_t idx = 1; idx < contributions.size(); ++idx)
        folded = op(folded, contributions[idx]);
    return folded;
}

// -----------------------------
// counts+displs 路径：显式传入各进程大小并进行 gatherv/allgatherv/scatterv/alltoallv（字节级）
// 适合大规模变长集合，配合 packed-range 使用
// -----------------------------
/**
 * All-gather variable-length byte buffers from every rank.
 *
 * The raw segments are exchanged via MPI_Allgather/MPI_Allgatherv, then
 * re-emitted as a dual-frame TLV stream: for each rank, in rank order, a
 * METADATA frame carrying the segment size (int) followed by a RAW_BYTES
 * frame carrying the segment itself.
 *
 * @param comm        communicator context
 * @param local_bytes this rank's byte payload
 * @return concatenated METADATA + RAW_BYTES frames for all ranks
 * @throws std::overflow_error if the local size or the total size exceeds INT_MAX
 */
inline std::vector<std::byte>
allgatherv_bytes(Context & comm, const std::vector<std::byte> & local_bytes)
{
    int world_size = comm.size();
    int n = safe_size_to_int(local_bytes.size()); // reject payloads > INT_MAX
    std::vector<int> counts(world_size);
    {
        const int rc = MPI_Allgather(&n, 1, MPI_INT, counts.data(), 1, MPI_INT, comm.comm());
        (void)call_mpi(rc, "MPI_Allgather failed (allgatherv_bytes counts)", comm.comm());
    }
    auto displs = make_displs_from_counts(counts);
    int total = safe_sum_counts(counts); // throws on int overflow
    std::vector<std::byte> out(total);
    {
        const int rc = MPI_Allgatherv(local_bytes.data(),
                                      n,
                                      MPI_BYTE,
                                      out.data(),
                                      counts.data(),
                                      displs.data(),
                                      MPI_BYTE,
                                      comm.comm());
        (void)call_mpi(rc, "MPI_Allgatherv failed (allgatherv_bytes data)", comm.comm());
    }
    // Re-emit as the dual-frame stream: METADATA(size:int) + RAW_BYTES(segment).
    std::vector<std::byte> frames;
    // Exact per-rank overhead: two TLV headers plus the int metadata payload.
    // Computed in size_t up front so the estimate cannot overflow int (the
    // previous estimate multiplied in int and omitted the metadata payload).
    const std::size_t per_rank_overhead =
        static_cast<std::size_t>(frame::TLV_FRAME_HEADER_SIZE) * 2 + sizeof(int);
    frames.reserve(out.size() + static_cast<std::size_t>(world_size) * per_rank_overhead);
    for (int i = 0; i < world_size; ++i)
    {
        int sz = counts[i];
        // METADATA frame announcing the size of the following RAW_BYTES frame.
        auto meta_payload = serialize_to_byte_vector(sz);
        frame::FrameBuilder meta_builder(
            frame::DataType::METADATA, frame::Flags::NONE, std::move(meta_payload));
        auto meta_frame = meta_builder.to_bytes();
        frames.insert(frames.end(), meta_frame.begin(), meta_frame.end());

        // RAW_BYTES frame carrying this rank's segment (possibly empty).
        std::vector<std::byte> seg;
        if (sz > 0)
        {
            seg.resize(sz);
            std::memcpy(seg.data(), out.data() + displs[i], static_cast<std::size_t>(sz));
        }
        frame::FrameBuilder raw_builder(
            frame::DataType::RAW_BYTES, frame::Flags::NONE, std::move(seg));
        auto raw_frame = raw_builder.to_bytes();
        frames.insert(frames.end(), raw_frame.begin(), raw_frame.end());
    }
    return frames;
}

// All-gather a packed range: every rank contributes METADATA(size) +
// RAW_BYTES(payload); the combined stream is walked frame-by-frame and all
// decoded elements are concatenated in rank order.
template <typename T>
inline std::vector<T>
allgather_packed_range(Context & comm, const std::vector<T> & local_values)
{
    auto packed = pack_frame(local_values);
    auto stream = allgatherv_bytes(comm, packed);

    std::vector<T> merged;
    std::size_t cursor = 0;
    int declared_size = -1;
    while (cursor < stream.size())
    {
        frame::FrameReader reader(stream, cursor);
        auto boundary = reader.boundary();
        if (!boundary.is_valid())
            break;
        auto body = reader.payload();
        if (boundary.data_type == frame::DataType::RAW_BYTES)
        {
            (void)declared_size; // declared size is not cross-checked yet
            auto decoded = frame::unpack_range<T>(body);
            merged.insert(merged.end(), decoded.begin(), decoded.end());
            declared_size = -1;
        }
        else if (boundary.data_type == frame::DataType::METADATA)
        {
            declared_size = deserialize<int>(body);
        }
        cursor = boundary.end_offset();
    }
    return merged;
}

// -----------------------------
// 泛型 Gatherv/Allgatherv（按类型自动打包/解包）
// -----------------------------
/**
 * Gather variable-length vectors from every rank onto `root`.
 *
 * Fixed-width element types use MPI_Gather (per-rank counts) followed by
 * MPI_Gatherv (data). Other types are serialized with the dual-frame scheme
 * (METADATA + RAW_BYTES per rank) and unpacked frame-by-frame at root.
 *
 * @param comm         communicator context
 * @param root         destination rank
 * @param local_values this rank's elements
 * @return at root: all elements concatenated in rank order; empty elsewhere
 * @throws std::overflow_error if any size exceeds INT_MAX
 */
template <typename T>
inline std::vector<T>
gatherv(Context & comm, int root, const std::vector<T> & local_values)
{
    int rank = comm.rank();
    int world_size = comm.size();
    if constexpr (types::is_fixed_type_v<T>)
    {
        int count = safe_size_to_int(local_values.size()); // reject > INT_MAX
        std::vector<int> counts(world_size, 0);
        {
            const int rc =
                MPI_Gather(&count, 1, MPI_INT, counts.data(), 1, MPI_INT, root, comm.comm());
            (void)call_mpi(rc, "MPI_Gather failed (gatherv fixed counts)", comm.comm());
        }
        std::vector<int> displs;
        std::vector<T> out;
        if (rank == root)
        {
            // Receive layout only matters at root.
            displs = make_displs_from_counts(counts);
            int total = safe_sum_counts(counts); // throws on int overflow
            out.resize(total);
        }
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        {
            // Named casts: legacy MPI bindings want a non-const send buffer,
            // but MPI only reads from it.
            const int rc =
                MPI_Gatherv(const_cast<void *>(static_cast<const void *>(local_values.data())),
                            count,
                            dt,
                            rank == root ? static_cast<void *>(out.data()) : nullptr,
                            rank == root ? counts.data() : nullptr,
                            rank == root ? displs.data() : nullptr,
                            dt,
                            root,
                            comm.comm());
            (void)call_mpi(rc, "MPI_Gatherv failed (gatherv fixed data)", comm.comm());
        }
        return out;
    }
    else
    {
        // Variable-length path: dual-frame protocol, unpacked at root only.
        auto bytes = pack_frame(local_values);
        auto all_bytes = gatherv_bytes(comm, root, bytes);
        if (rank != root)
            return {};

        std::vector<T> out;
        std::size_t offset = 0;
        int pending_size = -1; // size announced by the preceding METADATA frame
        while (offset < all_bytes.size())
        {
            frame::FrameReader reader(all_bytes, offset);
            auto b = reader.boundary();
            if (!b.is_valid())
                break;
            auto payload = reader.payload();
            if (b.data_type == frame::DataType::METADATA)
            {
                pending_size = deserialize<int>(payload);
            }
            else if (b.data_type == frame::DataType::RAW_BYTES)
            {
                (void)pending_size; // declared size is not cross-checked yet
                auto elems = frame::unpack_range<T>(payload);
                if (!elems.empty())
                    out.insert(out.end(), elems.begin(), elems.end());
                pending_size = -1;
            }
            offset = b.end_offset();
        }
        return out;
    }
}

/**
 * All-gather variable-length vectors from every rank to every rank.
 *
 * Fixed-width element types use MPI_Allgather (per-rank counts) followed by
 * MPI_Allgatherv (data). Other types are serialized with the dual-frame
 * scheme (METADATA + RAW_BYTES per rank) and unpacked on every rank.
 *
 * @param comm         communicator context
 * @param local_values this rank's elements
 * @return all elements from all ranks, concatenated in rank order
 * @throws std::overflow_error if any size exceeds INT_MAX
 */
template <typename T>
inline std::vector<T>
allgatherv(Context & comm, const std::vector<T> & local_values)
{
    int world_size = comm.size();
    if constexpr (types::is_fixed_type_v<T>)
    {
        int count = safe_size_to_int(local_values.size()); // reject > INT_MAX
        std::vector<int> counts(world_size, 0);
        {
            const int rc =
                MPI_Allgather(&count, 1, MPI_INT, counts.data(), 1, MPI_INT, comm.comm());
            (void)call_mpi(rc, "MPI_Allgather failed (allgatherv fixed counts)", comm.comm());
        }
        auto displs = make_displs_from_counts(counts);
        int total = safe_sum_counts(counts); // throws on int overflow
        std::vector<T> out(total);
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        {
            // Named casts: legacy MPI bindings want a non-const send buffer,
            // but MPI only reads from it.
            const int rc = MPI_Allgatherv(
                const_cast<void *>(static_cast<const void *>(local_values.data())),
                count,
                dt,
                out.data(),
                counts.data(),
                displs.data(),
                dt,
                comm.comm());
            (void)call_mpi(rc, "MPI_Allgatherv failed (allgatherv fixed data)", comm.comm());
        }
        return out;
    }
    else
    {
        // Variable-length path: dual-frame protocol, unpacked on every rank.
        auto local_bytes = pack_frame(local_values);
        auto all_bytes = allgatherv_bytes(comm, local_bytes);
        std::vector<T> out;
        std::size_t offset = 0;
        int pending_size = -1; // size announced by the preceding METADATA frame
        while (offset < all_bytes.size())
        {
            frame::FrameReader reader(all_bytes, offset);
            auto b = reader.boundary();
            if (!b.is_valid())
                break;
            auto payload = reader.payload();
            if (b.data_type == frame::DataType::METADATA)
            {
                pending_size = deserialize<int>(payload);
            }
            else if (b.data_type == frame::DataType::RAW_BYTES)
            {
                (void)pending_size; // declared size is not cross-checked yet
                auto elems = frame::unpack_range<T>(payload);
                if (!elems.empty())
                    out.insert(out.end(), elems.begin(), elems.end());
                pending_size = -1;
            }
            offset = b.end_offset();
        }
        return out;
    }
}

// -----------------------------
// 泛型 Scatterv（按类型自动打包/解包）
// 1) 扁平 + counts：root 将 flat_values 切分并分发
// 2) 每目标独立 range：root 为每个 rank 指定一个向量
// -----------------------------
/**
 * Scatter a variable-length flat vector from `root` to all ranks.
 *
 * Fixed-width element types: per-rank counts are distributed with MPI_Scatter
 * and the data with MPI_Scatterv. Other types use the dual-frame protocol:
 * root builds, per destination, a fixed-size METADATA frame declaring the
 * data-frame payload size plus a serialized data frame; METADATA frames go
 * out via MPI_Scatter, data frames via MPI_Scatterv, and every rank unpacks
 * its own frame into `recv_values`.
 *
 * @param comm        communicator context
 * @param root        rank holding `flat_values` and `counts`
 * @param flat_values all elements concatenated per destination (read at root)
 * @param counts      element count per destination rank (read at root)
 * @param recv_values output: this rank's slice
 * @throws std::overflow_error if a frame size or total exceeds INT_MAX
 */
template <typename T>
inline void
scatterv(Context & comm,
         int root,
         const std::vector<T> & flat_values,
         const std::vector<int> & counts,
         std::vector<T> & recv_values)
{
    int world_size = comm.size();
    int rank = comm.rank();
    if constexpr (types::is_fixed_type_v<T>)
    {
        // Computed on every rank, but only root's displs are read by MPI_Scatterv.
        auto displs = make_displs_from_counts(counts);
        int recv_count = 0;
        {
            // Send buffer (counts) is significant at root only.
            const int rc =
                MPI_Scatter(counts.data(), 1, MPI_INT, &recv_count, 1, MPI_INT, root, comm.comm());
            (void)call_mpi(rc, "MPI_Scatter failed (scatterv fixed sizes)", comm.comm());
        }
        recv_values.resize(recv_count);
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        {
            const int rc = MPI_Scatterv(rank == root ? (void *)flat_values.data() : nullptr,
                                        rank == root ? counts.data() : nullptr,
                                        rank == root ? displs.data() : nullptr,
                                        dt,
                                        recv_values.data(),
                                        recv_count,
                                        dt,
                                        root,
                                        comm.comm());
            (void)call_mpi(rc, "MPI_Scatterv failed (scatterv fixed data)", comm.comm());
        }
    }
    else
    {
        // Variable-length path: dual-frame protocol (METADATA + data frame).
        // Every METADATA frame has the same fixed size: TLV header + one int.
        const int meta_len = frame::TLV_FRAME_HEADER_SIZE + static_cast<int>(sizeof(int));
        std::vector<std::byte> meta_recv(static_cast<std::size_t>(meta_len));
        if (rank == root)
        {
            // Build, per destination, the data frame and its METADATA frame.
            auto displs = make_displs_from_counts(counts);
            std::vector<std::vector<T>> per_rank(world_size);
            for (int i = 0; i < world_size; ++i)
            {
                int off = displs[i];
                int len = counts[i];
                per_rank[i].assign(flat_values.begin() + off, flat_values.begin() + off + len);
            }
            std::vector<int> data_counts(world_size, 0);
            std::vector<std::vector<std::byte>> data_frames(world_size);
            std::vector<std::byte> meta_flat(static_cast<std::size_t>(world_size * meta_len));
            for (int i = 0; i < world_size; ++i)
            {
                data_frames[i] = pack_frame(per_rank[i]);
                data_counts[i] = safe_size_to_int(data_frames[i].size());
                // Declared payload = data frame size minus its TLV header.
                int payload_size = data_counts[i] - frame::TLV_FRAME_HEADER_SIZE;
                auto meta_payload = serialize_to_byte_vector(payload_size);
                frame::FrameBuilder mb(
                    frame::DataType::METADATA, frame::Flags::NONE, std::move(meta_payload));
                auto meta_frame = mb.to_bytes();
                // NOTE(review): assumes mb.to_bytes() is exactly meta_len bytes
                // for an int payload — confirm against FrameBuilder.
                std::memcpy(meta_flat.data() + static_cast<std::size_t>(i * meta_len),
                            meta_frame.data(),
                            static_cast<std::size_t>(meta_len));
            }
            // Concatenate the data frames and distribute.
            auto data_displs = make_displs_from_counts(data_counts);
            int total = safe_sum_counts(data_counts);
            std::vector<std::byte> data_flat(static_cast<std::size_t>(total));
            for (int i = 0; i < world_size; ++i)
            {
                if (data_counts[i] > 0)
                    std::memcpy(data_flat.data() + static_cast<std::size_t>(data_displs[i]),
                                data_frames[i].data(),
                                static_cast<std::size_t>(data_counts[i]));
            }
            // Distribute the fixed-length METADATA frames.
            {
                const int rc = MPI_Scatter(meta_flat.data(),
                                           meta_len,
                                           MPI_BYTE,
                                           meta_recv.data(),
                                           meta_len,
                                           MPI_BYTE,
                                           root,
                                           comm.comm());
                (void)call_mpi(rc, "MPI_Scatter failed (scatterv TLV metadata root)", comm.comm());
            }
            // Root also receives its own METADATA frame; parse the declared size.
            int recv_payload = 0;
            {
                frame::FrameReader mr(meta_recv);
                auto b = mr.boundary();
                if (b.is_valid() && b.data_type == frame::DataType::METADATA)
                    recv_payload = deserialize<int>(mr.payload());
            }
            int recv_frame_size = recv_payload + frame::TLV_FRAME_HEADER_SIZE;
            std::vector<std::byte> recv_frame(static_cast<std::size_t>(recv_frame_size));
            // Distribute the variable-length data frames.
            {
                const int rc = MPI_Scatterv(data_flat.data(),
                                            data_counts.data(),
                                            data_displs.data(),
                                            MPI_BYTE,
                                            recv_frame.data(),
                                            recv_frame_size,
                                            MPI_BYTE,
                                            root,
                                            comm.comm());
                (void)call_mpi(rc, "MPI_Scatterv failed (scatterv TLV data root)", comm.comm());
            }
            recv_values = frame::unpack_range<T>(recv_frame);
        }
        else
        {
            // Receive the fixed-length METADATA frame announcing the data-frame size.
            {
                const int rc = MPI_Scatter(nullptr,
                                           meta_len,
                                           MPI_BYTE,
                                           meta_recv.data(),
                                           meta_len,
                                           MPI_BYTE,
                                           root,
                                           comm.comm());
                (void)call_mpi(
                    rc, "MPI_Scatter failed (scatterv TLV metadata non-root)", comm.comm());
            }
            // Parse the declared payload size; defaults to 0 on an invalid frame.
            int recv_payload = 0;
            {
                frame::FrameReader mr(meta_recv);
                auto b = mr.boundary();
                if (b.is_valid() && b.data_type == frame::DataType::METADATA)
                    recv_payload = deserialize<int>(mr.payload());
            }
            int recv_frame_size = recv_payload + frame::TLV_FRAME_HEADER_SIZE;
            std::vector<std::byte> recv_frame(static_cast<std::size_t>(recv_frame_size));
            // Receive this rank's data frame and unpack it.
            {
                const int rc = MPI_Scatterv(nullptr,
                                            nullptr,
                                            nullptr,
                                            MPI_BYTE,
                                            recv_frame.data(),
                                            recv_frame_size,
                                            MPI_BYTE,
                                            root,
                                            comm.comm());
                (void)call_mpi(rc, "MPI_Scatterv failed (scatterv TLV data non-root)", comm.comm());
            }
            recv_values = frame::unpack_range<T>(recv_frame);
        }
    }
}

/**
 * Scatter one variable-length range of T to each rank from `root`.
 *
 * Fixed-size types use the classic counts+displs MPI_Scatterv path.
 * Variable-size types use the file's two-frame TLV protocol: a fixed-length
 * METADATA frame (declaring the data-frame payload size in bytes) is scattered
 * first, then the per-rank data frames are scattered via MPI_Scatterv and
 * unpacked with frame::unpack_range.
 *
 * @param comm               Communication context (rank/size/MPI_Comm).
 * @param root               Rank that owns `per_target_values` and scatters.
 * @param per_target_values  At root: one vector per destination rank
 *                           (must have at least comm.size() entries).
 *                           Ignored on non-root ranks.
 * @param recv_values        Output: the range addressed to the calling rank.
 * @throws std::invalid_argument if root supplies fewer ranges than ranks.
 * @throws std::overflow_error   if any size does not fit in int (safe_size_to_int).
 * @throws std::runtime_error    if a received METADATA frame is malformed.
 */
template <typename T>
inline void
scatterv_ranges(Context & comm,
                int root,
                const std::vector<std::vector<T>> & per_target_values,
                std::vector<T> & recv_values)
{
    const int world_size = comm.size();
    const int rank = comm.rank();
    if (rank == root && per_target_values.size() < static_cast<std::size_t>(world_size))
    {
        throw std::invalid_argument(
            "scatterv_ranges: root must supply one range per rank in per_target_values");
    }
    if constexpr (types::is_fixed_type_v<T>)
    {
        // Fixed-size element path: scatter element counts, then the flattened data.
        // Buffers are only populated (and only meaningful) at root.
        std::vector<int> counts;
        std::vector<int> displs;
        std::vector<T> flat;
        if (rank == root)
        {
            counts.assign(static_cast<std::size_t>(world_size), 0);
            for (int i = 0; i < world_size; ++i)
                counts[i] = safe_size_to_int(per_target_values[i].size()); // overflow-checked
            displs = make_displs_from_counts(counts);
            const int total = safe_sum_counts(counts); // overflow-checked accumulation
            flat.resize(static_cast<std::size_t>(total));
            for (int i = 0; i < world_size; ++i)
            {
                if (counts[i] > 0)
                    std::memcpy(flat.data() + displs[i],
                                per_target_values[i].data(),
                                static_cast<std::size_t>(counts[i]) * sizeof(T));
            }
        }
        int recv_count = 0;
        {
            const int rc = MPI_Scatter(rank == root ? counts.data() : nullptr,
                                       1,
                                       MPI_INT,
                                       &recv_count,
                                       1,
                                       MPI_INT,
                                       root,
                                       comm.comm());
            (void)call_mpi(rc, "MPI_Scatter failed (scatterv_ranges counts)", comm.comm());
        }
        recv_values.resize(static_cast<std::size_t>(recv_count));
        MPI_Datatype dt = types::mpi_datatype_of<T>();
        {
            const int rc = MPI_Scatterv(rank == root ? (void *)flat.data() : nullptr,
                                        rank == root ? counts.data() : nullptr,
                                        rank == root ? displs.data() : nullptr,
                                        dt,
                                        recv_values.data(),
                                        recv_count,
                                        dt,
                                        root,
                                        comm.comm());
            (void)call_mpi(rc, "MPI_Scatterv failed (scatterv_ranges data)", comm.comm());
        }
    }
    else
    {
        // Variable-length path: two-frame protocol (METADATA + data frame).
        const int meta_len = frame::TLV_FRAME_HEADER_SIZE + static_cast<int>(sizeof(int));
        std::vector<std::byte> meta_recv(static_cast<std::size_t>(meta_len));

        // Parse a received METADATA frame; a malformed frame is a protocol
        // error and must not silently degrade into a zero-size receive.
        const auto parse_meta_payload = [](const std::vector<std::byte> & meta) -> int {
            frame::FrameReader mr(meta);
            auto b = mr.boundary();
            if (!b.is_valid() || b.data_type != frame::DataType::METADATA)
                throw std::runtime_error("scatterv_ranges: malformed METADATA frame received");
            return deserialize<int>(mr.payload());
        };

        if (rank == root)
        {
            std::vector<int> data_counts(static_cast<std::size_t>(world_size), 0);
            std::vector<std::vector<std::byte>> data_frames(static_cast<std::size_t>(world_size));
            // Widen before multiplying to avoid int overflow for large world sizes.
            std::vector<std::byte> meta_flat(static_cast<std::size_t>(world_size) *
                                             static_cast<std::size_t>(meta_len));
            for (int i = 0; i < world_size; ++i)
            {
                data_frames[i] = pack_frame(per_target_values[i]);
                data_counts[i] = safe_size_to_int(data_frames[i].size());
                const int payload_size = data_counts[i] - frame::TLV_FRAME_HEADER_SIZE;
                auto meta_payload = serialize_to_byte_vector(payload_size);
                frame::FrameBuilder mb(
                    frame::DataType::METADATA, frame::Flags::NONE, std::move(meta_payload));
                auto meta_frame = mb.to_bytes();
                std::memcpy(meta_flat.data() +
                                static_cast<std::size_t>(i) * static_cast<std::size_t>(meta_len),
                            meta_frame.data(),
                            static_cast<std::size_t>(meta_len));
            }
            auto data_displs = make_displs_from_counts(data_counts);
            const int total = safe_sum_counts(data_counts);
            std::vector<std::byte> data_flat(static_cast<std::size_t>(total));
            for (int i = 0; i < world_size; ++i)
            {
                if (data_counts[i] > 0)
                    std::memcpy(data_flat.data() + static_cast<std::size_t>(data_displs[i]),
                                data_frames[i].data(),
                                static_cast<std::size_t>(data_counts[i]));
            }
            // Scatter the fixed-length METADATA frames (root also receives its own).
            {
                const int rc = MPI_Scatter(meta_flat.data(),
                                           meta_len,
                                           MPI_BYTE,
                                           meta_recv.data(),
                                           meta_len,
                                           MPI_BYTE,
                                           root,
                                           comm.comm());
                (void)call_mpi(
                    rc, "MPI_Scatter failed (scatterv_ranges TLV metadata root)", comm.comm());
            }
            const int recv_payload = parse_meta_payload(meta_recv);
            const int recv_frame_size = recv_payload + frame::TLV_FRAME_HEADER_SIZE;
            std::vector<std::byte> recv_frame(static_cast<std::size_t>(recv_frame_size));
            // Scatter the variable-length data frames.
            {
                const int rc = MPI_Scatterv(data_flat.data(),
                                            data_counts.data(),
                                            data_displs.data(),
                                            MPI_BYTE,
                                            recv_frame.data(),
                                            recv_frame_size,
                                            MPI_BYTE,
                                            root,
                                            comm.comm());
                (void)call_mpi(
                    rc, "MPI_Scatterv failed (scatterv_ranges TLV data root)", comm.comm());
            }
            recv_values = frame::unpack_range<T>(recv_frame);
        }
        else
        {
            // Receive the METADATA frame to learn this rank's data-frame size.
            {
                const int rc = MPI_Scatter(nullptr,
                                           meta_len,
                                           MPI_BYTE,
                                           meta_recv.data(),
                                           meta_len,
                                           MPI_BYTE,
                                           root,
                                           comm.comm());
                (void)call_mpi(
                    rc, "MPI_Scatter failed (scatterv_ranges TLV metadata non-root)", comm.comm());
            }
            const int recv_payload = parse_meta_payload(meta_recv);
            const int recv_frame_size = recv_payload + frame::TLV_FRAME_HEADER_SIZE;
            std::vector<std::byte> recv_frame(static_cast<std::size_t>(recv_frame_size));
            {
                const int rc = MPI_Scatterv(nullptr,
                                            nullptr,
                                            nullptr,
                                            MPI_BYTE,
                                            recv_frame.data(),
                                            recv_frame_size,
                                            MPI_BYTE,
                                            root,
                                            comm.comm());
                (void)call_mpi(
                    rc, "MPI_Scatterv failed (scatterv_ranges TLV data non-root)", comm.comm());
            }
            recv_values = frame::unpack_range<T>(recv_frame);
        }
    }
}

} // namespace collectives
} // namespace compi