#include "compi/channel.h"
#include "compi/result.h"

#include <mutex>
#include <algorithm>
#include <thread>
#include <unordered_map>
#include <vector>

namespace compi
{

Result<bool>
FramedChannel::send_frame(const Endpoint & dst,
                          const std::vector<std::byte> & frame_bytes,
                          int tag) noexcept
{
    // Blocking send of one raw frame to dst using an explicit MPI tag.
    Context & ctx = Context::for_comm(dst.comm());
    const int byte_count = static_cast<int>(frame_bytes.size());
    const auto send_res = ctx.call_mpi(
        [&dst, &frame_bytes, byte_count, tag]() -> int
        {
            return MPI_Send(
                frame_bytes.data(), byte_count, MPI_BYTE, dst.rank(), tag, dst.comm());
        },
        "MPI_Send (FramedChannel::send_frame)",
        _failure_override);
    if (send_res.is_error())
    {
        return Result<bool>::error(send_res.error_code(), send_res.error_message());
    }
    return Result<bool>::success(true);
}

Result<bool>
FramedChannel::recv_frame(const Endpoint & src,
                          std::vector<std::byte> & out_frame_bytes,
                          int tag) noexcept
{
    // Blocking receive of one frame: probe for the payload size, size the
    // output buffer, then receive the bytes.
    Context & ctx = Context::for_comm(src.comm());
    MPI_Status status{};

    // Blocking probe on the requested tag to learn the incoming message size.
    const auto probe_res = ctx.call_mpi(
        [&src, tag, &status]() -> int
        { return MPI_Probe(src.rank(), tag, src.comm(), &status); },
        "MPI_Probe (FramedChannel::recv_frame)",
        _failure_override);
    if (probe_res.is_error())
        return Result<bool>::error(probe_res.error_code(), probe_res.error_message());

    int payload_size = 0;
    const auto size_res = ctx.call_mpi<int>(
        [&status, &payload_size]() -> int
        { return MPI_Get_count(&status, MPI_BYTE, &payload_size); },
        "MPI_Get_count (FramedChannel::recv_frame)",
        [&payload_size]() -> int { return payload_size; },
        _failure_override);
    if (size_res.is_error())
        return Result<bool>::error(size_res.error_code(), size_res.error_message());

    out_frame_bytes.resize(static_cast<std::size_t>(payload_size));
    const auto recv_res = ctx.call_mpi(
        [&src, &status, &out_frame_bytes, payload_size, tag]() -> int
        {
            // `status` is reused as the output status of the receive.
            return MPI_Recv(out_frame_bytes.data(),
                            payload_size,
                            MPI_BYTE,
                            src.rank(),
                            tag,
                            src.comm(),
                            &status);
        },
        "MPI_Recv (FramedChannel::recv_frame)",
        _failure_override);
    if (recv_res.is_error())
        return Result<bool>::error(recv_res.error_code(), recv_res.error_message());
    return Result<bool>::success(true);
}

Result<bool>
FramedChannel::send_frame(const Endpoint & dst,
                          const std::vector<std::byte> & frame_bytes,
                          TagNamespace ns) noexcept
{
    // Lease a tag from the namespace for this one send, and always give it
    // back — even when the send fails — so tags are never leaked.
    const MessageTag leased = ns.request();
    const Result<bool> outcome = send_frame(dst, frame_bytes, static_cast<int>(leased));
    ns.release(leased);
    return outcome;
}

Result<bool>
FramedChannel::recv_frame(const Endpoint & src,
                          std::vector<std::byte> & out_frame_bytes,
                          TagNamespace ns) noexcept
{
    // ANY_TAG filtered receive: only permitted when the channel explicitly
    // allows ANY_TAG; otherwise reject the request.
    if (!_allow_any_tag)
    {
        return Result<bool>::error(ResultCode::InvalidRequest,
                                   "ANY_TAG is disabled by FramedChannel options");
    }

    Context & ctx = Context::for_comm(src.comm());
    MPI_Status status{};
    int probed = 0;

    // Non-blocking probe + namespace filter to avoid head-of-line blocking.
    // FIX: MPI_Iprobe is now routed through Context::call_mpi like every other
    // MPI call in this file, so the real MPI error code/message and the
    // channel's failure-policy override apply, instead of collapsing every
    // probe failure into a generic MpiError with a fixed message.
    while (true)
    {
        auto probe_res = ctx.call_mpi(
            [&src, &probed, &status]() -> int
            { return MPI_Iprobe(src.rank(), MPI_ANY_TAG, src.comm(), &probed, &status); },
            "MPI_Iprobe (FramedChannel::recv_frame/ns)",
            _failure_override);
        if (probe_res.is_error())
            return Result<bool>::error(probe_res.error_code(), probe_res.error_message());
        if (probed && ns.owns(status.MPI_TAG))
            break;
        // Light yield: avoid hard busy-polling without introducing sleeps that
        // would hurt MPI progress.
        std::this_thread::yield();
    }

    int count_bytes = 0;
    auto count_res = ctx.call_mpi<int>([&status, &count_bytes]() -> int
                                       { return MPI_Get_count(&status, MPI_BYTE, &count_bytes); },
                                       "MPI_Get_count (FramedChannel::recv_frame/ns)",
                                       [&count_bytes]() -> int { return count_bytes; },
                                       _failure_override);
    if (count_res.is_error())
        return Result<bool>::error(count_res.error_code(), count_res.error_message());

    out_frame_bytes.resize(static_cast<std::size_t>(count_bytes));
    // Receive exactly the probed message by matching on its concrete tag.
    auto recv_res = ctx.call_mpi(
        [&src, &status, &out_frame_bytes, count_bytes]() -> int
        {
            return MPI_Recv(out_frame_bytes.data(),
                            count_bytes,
                            MPI_BYTE,
                            src.rank(),
                            status.MPI_TAG,
                            src.comm(),
                            &status);
        },
        "MPI_Recv (FramedChannel::recv_frame/ns)",
        _failure_override);
    if (recv_res.is_error())
        return Result<bool>::error(recv_res.error_code(), recv_res.error_message());
    return Result<bool>::success(true);
}

TransportRequest
FramedChannel::isend_frame(const Endpoint & dst,
                           const std::vector<std::byte> & frame_bytes,
                           TagNamespace ns,
                           SendMode mode) noexcept
{
    // Non-blocking namespace-tagged send; tag handling is delegated to the
    // serialization helper.
    return isend_serialized(Context::for_comm(dst.comm()), dst.rank(), frame_bytes, ns, mode);
}

TransportRequest
FramedChannel::isend_frame(const Endpoint & dst,
                           const std::vector<std::byte> & frame_bytes,
                           const TagLease & lease,
                           SendMode mode) noexcept
{
    // Non-blocking send with an already-leased tag (the caller owns the lease
    // lifetime).
    return isend_serialized(Context::for_comm(dst.comm()), dst.rank(), frame_bytes, lease, mode);
}

TransportRequest
FramedChannel::irecv_frame(const Endpoint & src,
                           std::vector<std::byte> & out_frame_bytes,
                           TagNamespace ns) noexcept
{
    // Non-blocking namespace-tagged receive; the buffer is filled on
    // completion of the returned request.
    return irecv_serialized(Context::for_comm(src.comm()), src.rank(), out_frame_bytes, ns);
}

// Channel implementation

Result<std::shared_ptr<Channel>>
Channel::create(const ChannelId & id, MPI_Comm parent_comm, bool use_dedicated_comm) noexcept
{
    // Factory: optionally duplicates the parent communicator (for isolated
    // message matching), then constructs the Channel and initializes its
    // communicator properties.
    Context * ctx = &Context::for_comm(parent_comm);

    MPI_Comm comm_for_channel = parent_comm;
    bool owns = false;

    if (use_dedicated_comm)
    {
        // Duplicate the communicator through the Context wrapper.
        auto dup_res = ctx->call_mpi<MPI_Comm>(
            [parent_comm, &comm_for_channel]() -> int
            { return MPI_Comm_dup(parent_comm, &comm_for_channel); },
            "MPI_Comm_dup",
            [&comm_for_channel]() -> MPI_Comm { return comm_for_channel; });

        if (dup_res.is_error())
        {
            return Result<std::shared_ptr<Channel>>::error(
                dup_res.error_code(),
                "Failed to create dedicated communicator: " + dup_res.error_message());
        }

        comm_for_channel = dup_res.value();
        owns = true;
    }

    // `new` is required here: the constructor is private, so std::make_shared
    // cannot be used.
    std::shared_ptr<Channel> channel(new Channel(id, comm_for_channel, ctx, owns));

    // Validate/query communicator properties. On failure the shared_ptr goes
    // out of scope and ~Channel releases the duplicated communicator if owned.
    auto init_res = channel->initialize_comm_properties();
    if (init_res.is_error())
    {
        return Result<std::shared_ptr<Channel>>::error(init_res.error_code(),
                                                       "Failed to initialize channel properties: " +
                                                           init_res.error_message());
    }

    return Result<std::shared_ptr<Channel>>::success(channel);
}

// Private constructor: stores the wiring only. Communicator properties are
// initialized separately via initialize_comm_properties() in Channel::create().
Channel::Channel(const ChannelId & id, MPI_Comm comm, Context * context, bool owns_comm) noexcept
    : _id(id), _comm(comm), _context(context), _owns_comm(owns_comm), _state(ChannelState::Created)
{
}

Channel::~Channel()
{
    // Release the default tag namespace first (best-effort; result discarded).
    (void)release_default_namespace();
    if (_owns_comm && _comm != MPI_COMM_NULL)
    {
        // Safely free the dedicated communicator through the Context so the
        // channel's failure-policy override (if configured) governs handling.
        auto policy_override = _options.override_failure_policy
                                    ? std::optional<FailurePolicy>(_options.failure_policy)
                                    : std::nullopt;
        // Result intentionally discarded: a destructor must not fail.
        _context->call_mpi([this]() -> int { return MPI_Comm_free(&_comm); },
                           "MPI_Comm_free",
                           policy_override);
    }
}

Result<void>
Channel::initialize_comm_properties() noexcept
{
    // Queries rank and size of the channel communicator via the Context
    // wrapper. NOTE(review): the queried rank/size values are never stored in
    // any member here — as written this only validates that the communicator
    // answers both queries. Presumably the values are cached elsewhere or this
    // is intentionally validation-only; TODO confirm against the class header.
    auto rank_result = _context->call_mpi<int>(
        [this]() -> int
        {
            int rank;
            int result = MPI_Comm_rank(_comm, &rank);
            return result;
        },
        "MPI_Comm_rank",
        [this]() -> int
        {
            // Value extractor: re-queries the rank after a successful call.
            int rank;
            MPI_Comm_rank(_comm, &rank);
            return rank;
        });

    if (rank_result.is_error())
    {
        return Result<void>::error(rank_result.error_code(),
                                   "Failed to get communicator rank: " +
                                       rank_result.error_message());
    }

    auto size_result = _context->call_mpi<int>(
        [this]() -> int
        {
            int size;
            int result = MPI_Comm_size(_comm, &size);
            return result;
        },
        "MPI_Comm_size",
        [this]() -> int
        {
            // Value extractor: re-queries the size after a successful call.
            int size;
            MPI_Comm_size(_comm, &size);
            return size;
        });

    if (size_result.is_error())
    {
        return Result<void>::error(size_result.error_code(),
                                   "Failed to get communicator size: " +
                                       size_result.error_message());
    }

    return COMPI_SUCCESS_VOID();
}

Result<void>
Channel::ensure_default_namespace() noexcept
{
    // Rebuild the cached default namespace from the current options. A blank
    // name (or missing context) means "no default namespace" and succeeds.
    _default_namespace.reset();
    _owns_default_namespace = false;
    if (_context == nullptr || _options.default_namespace_name.empty())
    {
        return COMPI_SUCCESS_VOID();
    }

    try
    {
        auto & allocator = _context->tag_allocator();
        try
        {
            // Prefer reusing an existing namespace with this name; in that
            // case this channel is not responsible for destroying it.
            _default_namespace = allocator.find_namespace(_options.default_namespace_name);
            _owns_default_namespace = false;
        }
        catch (const std::runtime_error &)
        {
            // Lookup failed: create the namespace and take ownership of it.
            _default_namespace = allocator.create_namespace(_options.default_namespace_name);
            _owns_default_namespace = true;
        }
    }
    catch (const std::exception & e)
    {
        return Result<void>::error(ResultCode::ResourceExhausted,
                                   std::string("Failed to ensure default namespace: ") +
                                       e.what(),
                                   "Channel::ensure_default_namespace");
    }

    return COMPI_SUCCESS_VOID();
}

Result<void>
Channel::release_default_namespace() noexcept
{
    // Destroy the namespace only when this channel created it; namespaces
    // merely looked up in ensure_default_namespace() are left alone.
    const bool must_destroy =
        _default_namespace.has_value() && _owns_default_namespace && _context != nullptr;
    if (must_destroy)
    {
        try
        {
            _context->tag_allocator().destroy_namespace(_default_namespace->id());
        }
        catch (const std::exception & e)
        {
            // Keep the cached namespace on failure so a retry is possible.
            return Result<void>::error(ResultCode::ResourceExhausted,
                                       std::string("Failed to destroy default namespace: ") +
                                           e.what(),
                                       "Channel::release_default_namespace");
        }
    }
    _default_namespace.reset();
    _owns_default_namespace = false;
    return COMPI_SUCCESS_VOID();
}

void
Channel::prune_completed_requests_locked() noexcept
{
    auto it = std::remove_if(_pending_requests.begin(),
                              _pending_requests.end(),
                              [](TransportRequest & req) { return req.completed(); });
    _pending_requests.erase(it, _pending_requests.end());
}

TransportRequest
Channel::track_request(TransportRequest req) noexcept
{
    // Register an in-flight request so close() can later wait on it; finished
    // requests are pruned opportunistically to bound the vector's growth.
    std::lock_guard<std::mutex> guard(_pending_mutex);
    prune_completed_requests_locked();
    _pending_requests.emplace_back(req);
    return req;
}

Result<void>
Channel::activate() noexcept
{
    // Only a freshly created channel may be activated.
    if (_state != ChannelState::Created)
    {
        return Result<void>::error(ResultCode::InvalidRequest,
                                   "Channel can only be activated from Created state");
    }

    if (auto ns_res = ensure_default_namespace(); ns_res.is_error())
    {
        return ns_res;
    }

    // Propagate the channel options into the framing layer.
    _framed.set_allow_any_tag(_options.allow_any_tag);
    _framed.set_failure_policy_override(
        _options.override_failure_policy
            ? std::optional<FailurePolicy>(_options.failure_policy)
            : std::nullopt);

    _state = ChannelState::Active;
    return COMPI_SUCCESS_VOID();
}

Result<void>
Channel::close() noexcept
{
    if (_state == ChannelState::Closed)
    {
        return COMPI_SUCCESS_VOID(); // idempotent: already closed
    }

    _state = ChannelState::Closing;

    // Drain every tracked in-flight request, remembering the first failure.
    Result<void> first_error = COMPI_SUCCESS_VOID();
    {
        std::lock_guard<std::mutex> guard(_pending_mutex);
        for (auto & pending : _pending_requests)
        {
            auto wait_res = pending.wait();
            if (wait_res.is_error() && !first_error.is_error())
            {
                first_error = wait_res;
            }
        }
        _pending_requests.clear();
    }
    if (first_error.is_error())
    {
        _state = ChannelState::Active; // roll back so the caller may retry
        return first_error;
    }

    if (auto ns_release = release_default_namespace(); ns_release.is_error())
    {
        _state = ChannelState::Active;
        return ns_release;
    }

    // Free the dedicated communicator, if we own one, via the Context so the
    // channel's failure-policy override governs error handling.
    if (_owns_comm && _comm != MPI_COMM_NULL)
    {
        const auto policy = _options.override_failure_policy
                                ? std::optional<FailurePolicy>(_options.failure_policy)
                                : std::nullopt;
        auto free_res = _context->call_mpi([this]() -> int { return MPI_Comm_free(&_comm); },
                                           "MPI_Comm_free (Channel::close)",
                                           policy);
        if (free_res.is_error())
        {
            _state = ChannelState::Active;
            return free_res;
        }
        _owns_comm = false;
        _comm = MPI_COMM_NULL;
    }

    _state = ChannelState::Closed;
    return COMPI_SUCCESS_VOID();
}

// Note: stats() and reset_stats() are already defined inline in the header;
// no duplicate out-of-line definitions are provided here, to avoid ODR conflicts.

// Resource management: the real per-communicator singleton allocator
MessageTagAllocator &
Channel::tag_allocator() noexcept
{
    // Thin accessor: the allocator is owned by the per-communicator Context.
    return _context->tag_allocator();
}

// Framing: send a unified frame byte stream (explicit tag)
Result<bool>
Channel::send_frame(int dest_rank, const std::vector<std::byte> & frame_bytes, int tag) noexcept
{
    // Tag 0 with a configured default namespace is routed through the
    // namespace-based overload instead of being sent verbatim.
    if (auto * default_ns = default_namespace(); default_ns && tag == 0)
    {
        return send_frame(dest_rank, frame_bytes, *default_ns);
    }

    auto res = _framed.send_frame(Endpoint(_comm, dest_rank), frame_bytes, tag);
    if (res.is_error())
        return res;

    // Stats are updated only on success.
    _stats.messages_sent++;
    _stats.bytes_sent += frame_bytes.size();
    return Result<bool>::success(true);
}

// Framing: receive a unified frame byte stream (explicit tag; blocking probe to learn the size)
Result<bool>
Channel::recv_frame(int src_rank, std::vector<std::byte> & out_frame_bytes, int tag) noexcept
{
    // Tag 0 with a configured default namespace is routed through the
    // namespace-based overload instead of matching tag 0 literally.
    if (auto * default_ns = default_namespace(); default_ns && tag == 0)
    {
        return recv_frame(src_rank, out_frame_bytes, *default_ns);
    }

    auto res = _framed.recv_frame(Endpoint(_comm, src_rank), out_frame_bytes, tag);
    if (res.is_error())
        return res;

    // Stats are updated only on success.
    _stats.messages_received++;
    _stats.bytes_received += out_frame_bytes.size();
    return Result<bool>::success(true);
}

// Framing (namespace): send a unified frame (lease and release a tag)
Result<bool>
Channel::send_frame(int dest_rank,
                    const std::vector<std::byte> & frame_bytes,
                    TagNamespace ns) noexcept
{
    // Namespace overload: FramedChannel leases a tag for the duration of the
    // send and releases it afterwards.
    auto res = _framed.send_frame(Endpoint(_comm, dest_rank), frame_bytes, ns);
    if (res.is_error())
        return res;

    _stats.messages_sent++;
    _stats.bytes_sent += frame_bytes.size();
    return Result<bool>::success(true);
}

// Framing (namespace + ANY_TAG): receive a unified frame (namespace filtering)
Result<bool>
Channel::recv_frame(int src_rank,
                    std::vector<std::byte> & out_frame_bytes,
                    TagNamespace ns) noexcept
{
    // ANY_TAG filtered receive through FramedChannel. Keep the framing layer's
    // ANY_TAG switch in sync with the channel options before receiving.
    _framed.set_allow_any_tag(_options.allow_any_tag);

    auto res = _framed.recv_frame(Endpoint(_comm, src_rank), out_frame_bytes, ns);
    if (res.is_error())
        return res;

    _stats.messages_received++;
    _stats.bytes_received += out_frame_bytes.size();
    return Result<bool>::success(true);
}

TransportRequest
Channel::isend_frame_async(int dest_rank,
                           const std::vector<std::byte> & frame_bytes,
                           TagNamespace ns,
                           SendMode mode) noexcept
{
    // Stats are counted at initiation time, before the transfer completes.
    auto request = _framed.isend_frame(Endpoint(_comm, dest_rank), frame_bytes, ns, mode);
    _stats.messages_sent++;
    _stats.bytes_sent += frame_bytes.size();
    return track_request(std::move(request));
}

TransportRequest
Channel::isend_frame_async(int dest_rank,
                           const std::vector<std::byte> & frame_bytes,
                           const TagLease & lease,
                           SendMode mode) noexcept
{
    // Lease variant: the caller controls the tag lifetime. Stats are counted
    // at initiation time, before the transfer completes.
    auto request = _framed.isend_frame(Endpoint(_comm, dest_rank), frame_bytes, lease, mode);
    _stats.messages_sent++;
    _stats.bytes_sent += frame_bytes.size();
    return track_request(std::move(request));
}

TransportRequest
Channel::irecv_frame_async(int src_rank,
                           std::vector<std::byte> & out_frame_bytes,
                           TagNamespace ns) noexcept
{
    // Receive stats are not counted here: the transfer completes later.
    auto request = _framed.irecv_frame(Endpoint(_comm, src_rank), out_frame_bytes, ns);
    return track_request(std::move(request));
}

// ChannelManager implementation

// Global manager registry (held uniformly via shared_ptr). Every read and
// write of g_managers must hold g_manager_mutex.
static std::mutex g_manager_mutex;
static std::unordered_map<MPI_Comm, std::shared_ptr<ChannelManager>> g_managers;

ChannelManager &
ChannelManager::for_comm(MPI_Comm comm)
{
    // Lazily create (and then reuse) one manager per communicator.
    std::lock_guard<std::mutex> guard(g_manager_mutex);

    if (auto found = g_managers.find(comm); found != g_managers.end())
    {
        return *found->second;
    }

    // `new` is required because the constructor is private.
    std::shared_ptr<ChannelManager> fresh(new ChannelManager(comm));
    ChannelManager & ref = *fresh;
    g_managers.emplace(comm, std::move(fresh));
    return ref;
}

void
ChannelManager::destroy_for_comm(MPI_Comm comm)
{
    // Dropping the registry's shared_ptr runs ~ChannelManager (which closes
    // its channels) unless another owner still holds it.
    std::lock_guard<std::mutex> guard(g_manager_mutex);
    g_managers.erase(comm);
}

// Binds the manager to its communicator; channels are created on demand.
ChannelManager::ChannelManager(MPI_Comm comm) : _comm(comm) {}

// Closes and drops every remaining channel on destruction.
ChannelManager::~ChannelManager() { cleanup_all(); }

Result<std::shared_ptr<Channel>>
ChannelManager::create_channel(const ChannelId & id, bool use_dedicated_comm)
{
    std::lock_guard<std::mutex> guard(_mutex);

    // Reject duplicate ids.
    if (_channels.find(id) != _channels.end())
    {
        return Result<std::shared_ptr<Channel>>::error(
            ResultCode::InvalidRequest, "Channel with id '" + id.name + "' already exists");
    }

    // Build the channel and register it on success.
    auto created = Channel::create(id, _comm, use_dedicated_comm);
    if (created.is_error())
    {
        return Result<std::shared_ptr<Channel>>::error(created.error_code(),
                                                       "Failed to create channel: " +
                                                           created.error_message());
    }

    auto channel = created.value();
    _channels.emplace(id, channel);
    return Result<std::shared_ptr<Channel>>::success(channel);
}

Result<std::shared_ptr<Channel>>
ChannelManager::create_channel(const ChannelId & id, const ChannelOptions & opts)
{
    std::lock_guard<std::mutex> guard(_mutex);

    // Reject duplicate ids.
    if (_channels.find(id) != _channels.end())
    {
        return Result<std::shared_ptr<Channel>>::error(
            ResultCode::InvalidRequest, "Channel with id '" + id.name + "' already exists");
    }

    // Build the channel; the options decide whether to duplicate the comm.
    auto created = Channel::create(id, _comm, opts.use_dedicated_comm);
    if (created.is_error())
    {
        return Result<std::shared_ptr<Channel>>::error(created.error_code(),
                                                       "Failed to create channel: " +
                                                           created.error_message());
    }

    auto channel = created.value();
    // Apply the caller's full option set before publishing the channel.
    channel->set_options(opts);
    _channels.emplace(id, channel);
    return Result<std::shared_ptr<Channel>>::success(channel);
}

// ChannelGroup implementation

// A named collection of channels that can be activated/closed as a unit.
ChannelGroup::ChannelGroup(const std::string & group_name) : _name(group_name) {}

// Best-effort close of all member channels; the success count is discarded.
ChannelGroup::~ChannelGroup() { (void)close_all(); }

bool
ChannelGroup::add_channel(std::shared_ptr<Channel> channel) noexcept
{
    if (!channel)
        return false;
    std::lock_guard<std::mutex> lock(_mutex);
    _channels[channel->id()] = std::move(channel);
    return true;
}

bool
ChannelGroup::remove_channel(const ChannelId & id) noexcept
{
    // True only when an entry was actually removed.
    std::lock_guard<std::mutex> guard(_mutex);
    return _channels.erase(id) != 0;
}

std::shared_ptr<Channel>
ChannelGroup::get_channel(const ChannelId & id) const noexcept
{
    // Returns the shared handle, or nullptr when the id is unknown.
    std::lock_guard<std::mutex> guard(_mutex);
    const auto found = _channels.find(id);
    return found == _channels.end() ? nullptr : found->second;
}

std::size_t
ChannelGroup::activate_all() noexcept
{
    // Activates every member channel; returns how many succeeded.
    std::lock_guard<std::mutex> guard(_mutex);
    std::size_t activated = 0;
    for (auto & entry : _channels)
    {
        if (!entry.second->activate().is_error())
            ++activated;
    }
    return activated;
}

std::size_t
ChannelGroup::close_all() noexcept
{
    // Closes every member channel; returns how many succeeded.
    std::lock_guard<std::mutex> guard(_mutex);
    std::size_t closed = 0;
    for (auto & entry : _channels)
    {
        if (!entry.second->close().is_error())
            ++closed;
    }
    return closed;
}

std::size_t
ChannelGroup::size() const noexcept
{
    // Snapshot of the current member count.
    std::lock_guard<std::mutex> guard(_mutex);
    return _channels.size();
}
std::shared_ptr<Channel>
ChannelManager::get_channel(const ChannelId & id) const
{
    // Returns the shared handle, or nullptr when the id is unknown.
    std::lock_guard<std::mutex> guard(_mutex);
    const auto found = _channels.find(id);
    return found == _channels.end() ? nullptr : found->second;
}

Result<void>
ChannelManager::destroy_channel(const ChannelId & id)
{
    std::lock_guard<std::mutex> guard(_mutex);

    auto found = _channels.find(id);
    if (found == _channels.end())
    {
        return Result<void>::error(ResultCode::InvalidRequest,
                                   "Channel with id '" + id.name + "' not found");
    }

    // Close first; keep the entry registered if close fails so the caller can
    // retry the destroy later.
    if (auto close_res = found->second->close(); close_res.is_error())
    {
        return close_res;
    }

    _channels.erase(found);
    return COMPI_SUCCESS_VOID();
}

std::size_t
ChannelManager::active_channel_count() const
{
    // Counts channels currently in the Active state.
    std::lock_guard<std::mutex> guard(_mutex);
    return static_cast<std::size_t>(
        std::count_if(_channels.begin(),
                      _channels.end(),
                      [](const auto & entry) { return entry.second->is_active(); }));
}

void
ChannelManager::cleanup_all()
{
    // Best-effort close of every channel; failures are ignored because this
    // also runs from the destructor.
    std::lock_guard<std::mutex> guard(_mutex);
    for (auto & entry : _channels)
    {
        (void)entry.second->close();
    }
    _channels.clear();
}

} // namespace compi
