#include "compi/compi.h"

#include <gtest/gtest.h>

#include <chrono>
#include <cstddef>
#include <cstring>
#include <string>
#include <thread>
#include <vector>

using namespace compi;

// Lightweight in-test wrapper: state for a namespace-based ANY_TAG non-blocking
// receive. The actual probe/post/test driving happens in wait_with_timeout().
struct RecvPlan
{
    Context * ctx{};
    int src{-1};
    const TagNamespace * ns{nullptr};
    bool is_string{false};
    int * out_int{nullptr};
    std::string * out_str{nullptr};
    MPI_Request req{MPI_REQUEST_NULL};
    std::vector<std::byte> buf; // staging buffer for non-fixed-size payloads
    bool posted{false};

    // Field setup shared by both factories below.
    static RecvPlan make(Context & ctx, int src, const TagNamespace & ns, bool is_string)
    {
        RecvPlan plan;
        plan.ctx = &ctx;
        plan.src = src;
        plan.ns = &ns;
        plan.is_string = is_string;
        return plan;
    }

    // Plan the receive of a single int from `src` within namespace `ns`;
    // the result is written to `out` once the request completes.
    static RecvPlan for_int(Context & ctx, int src, int & out, const TagNamespace & ns)
    {
        RecvPlan plan = make(ctx, src, ns, /*is_string=*/false);
        plan.out_int = &out;
        return plan;
    }
    // Plan the receive of a serialized std::string from `src` within namespace `ns`;
    // the deserialized result is written to `out` once the request completes.
    static RecvPlan for_string(Context & ctx, int src, std::string & out, const TagNamespace & ns)
    {
        RecvPlan plan = make(ctx, src, ns, /*is_string=*/true);
        plan.out_str = &out;
        return plan;
    }
};

// State for one non-blocking, namespace-tagged send plus its tag lifetime
// (retain on post, release on wait).
struct SendPlan
{
    Context * ctx{};
    int dest{-1};
    const TagNamespace * ns{nullptr};
    MessageTag tag{0, 0};
    MPI_Request req{MPI_REQUEST_NULL};
    bool is_bytes{false};
    // Heap-backed payload staging area. MPI_Isend requires the send buffer to
    // remain valid until the request completes; a std::vector keeps its data
    // pointer stable across moves, so a SendPlan can safely be returned by
    // value and stored in containers while the request is in flight.
    std::vector<std::byte> bytes;

    // Start a non-blocking send of one int to `dest`, tagged via `ns`.
    static SendPlan send_int(Context & ctx, int dest, int value, const TagNamespace & ns)
    {
        SendPlan p;
        p.ctx = &ctx;
        p.dest = dest;
        p.ns = &ns;
        p.is_bytes = false;
        // BUG FIX: the original posted MPI_Isend from `&value`, a by-value stack
        // parameter that dies when this factory returns while the request may
        // still be pending — a use-after-return of the send buffer. Stage the
        // payload in the plan's heap buffer instead (same approach as the
        // string path below).
        p.bytes.resize(sizeof(int));
        std::memcpy(p.bytes.data(), &value, sizeof(int));
        p.tag = ns.request();
        p.ns->retain(p.tag);
        const int rc = MPI_Isend(
            p.bytes.data(), 1, MPI_INT, dest, p.tag.value, ctx.comm(), &p.req);
        (void)call_mpi(rc, "MPI_Isend failed (int)", ctx.comm());
        return p;
    }
    // Start a non-blocking send of a serialized string to `dest`, tagged via `ns`.
    static SendPlan
    send_string(Context & ctx, int dest, const std::string & s, const TagNamespace & ns)
    {
        SendPlan p;
        p.ctx = &ctx;
        p.dest = dest;
        p.ns = &ns;
        p.is_bytes = true;
        // The serialized bytes live in the plan, so the buffer outlives the call.
        p.bytes = serialize_to_byte_vector(s);
        p.tag = ns.request();
        p.ns->retain(p.tag);
        const int rc = MPI_Isend(p.bytes.data(),
                                 static_cast<int>(p.bytes.size()),
                                 MPI_BYTE,
                                 dest,
                                 p.tag.value,
                                 ctx.comm(),
                                 &p.req);
        (void)call_mpi(rc, "MPI_Isend failed (string bytes)", ctx.comm());
        return p;
    }
    // Block until the send completes, then release the tag back to the
    // namespace. Returns false if MPI_Wait reported an error.
    bool wait()
    {
        MPI_Status st{};
        const int rc = MPI_Wait(&req, &st);
        bool ok = call_mpi(rc, "MPI_Wait failed (send)", ctx->comm());
        ns->release(tag);
        return ok;
    }
};

// Forward declarations: the helpers are defined at the bottom of the file but
// must be visible inside the test bodies above them.
static bool ns_consistent(MPI_Comm comm, TagNamespace ns);
static bool wait_with_timeout(RecvPlan & plan, int timeout_ms);

// Global test environment; the custom main() below manages the MPI lifetime explicitly.
static Environment * g_test_env = nullptr;

// Fixture: binds a non-owning Context to MPI_COMM_WORLD for each test and tears
// down the per-communicator resources afterwards. The Environment itself is
// owned by main() and outlives every test.
class AnyTagConcurrencyTest : public ::testing::Test
{
protected:
    void SetUp() override
    {
        env_ = g_test_env;
        // Context::for_comm yields a non-owning context instance for this communicator.
        comm_ = &Context::for_comm(MPI_COMM_WORLD, *env_);
    }
    void TearDown() override
    {
        // Clear the non-owning pointers first (the global Environment stays
        // alive until process exit), then drop the communicator-associated
        // resources through the Context registry.
        comm_ = nullptr;
        env_ = nullptr;
        Context::destroy_for_comm(MPI_COMM_WORLD);
    }
    Environment * env_{};
    Context * comm_{};
};

// Custom main: owns MPI initialization and finalization explicitly so that
// mpiexec does not misreport a missing Finalize during gtest teardown.
int
main(int argc, char ** argv)
{
    ::testing::InitGoogleTest(&argc, argv);
    g_test_env = new Environment();
    const int result = RUN_ALL_TESTS();
    delete g_test_env;
    g_test_env = nullptr;
    return result;
}

// Concurrency race: interleaved namespaces (int payloads). Each rank posts all
// receives first, then sends messages alternating between two namespaces to
// force tag contention and out-of-order queue matching.
TEST_F(AnyTagConcurrencyTest, MixedNamespacesRaceInt)
{
    auto & alloc = comm_->tag_allocator();
    auto nsA = alloc.create_namespace("any_tag_ns_A_int");
    auto nsB = alloc.create_namespace("any_tag_ns_B_int");

    // Namespace IDs must agree across ranks; otherwise the ANY_TAG filter can
    // never match the correct tag and the receives would hang.
    if (!ns_consistent(comm_->comm(), nsA) || !ns_consistent(comm_->comm(), nsB))
        GTEST_SKIP() << "TagNamespace IDs are inconsistent across ranks; skipping to avoid hang.";

    const int rank = comm_->rank();
    const int size = comm_->size();
    const int dest = (rank + 1) % size;
    const int src = (rank - 1 + size) % size;

    const int total_msgs = 8;
    const int half = total_msgs / 2;

    std::vector<int> recvA(half, -1), recvB(half, -1);
    std::vector<RecvPlan> rreqA, rreqB;
    rreqA.reserve(half);
    rreqB.reserve(half);
    // Create the receive plans sequentially: avoids concurrent MPI calls when
    // MPI_THREAD_MULTIPLE has not been requested.
    for (int i = 0; i < half; ++i)
        rreqA.emplace_back(RecvPlan::for_int(*comm_, src, recvA[i], nsA));
    for (int i = 0; i < half; ++i)
        rreqB.emplace_back(RecvPlan::for_int(*comm_, src, recvB[i], nsB));

    // Synchronize all ranks before sending so every receiver is ready.
    MPI_Barrier(comm_->comm());

    // Alternate namespaces on send to create tag races and queue disorder.
    std::vector<SendPlan> sreqs;
    sreqs.reserve(total_msgs);
    for (int k = 0; k < total_msgs; ++k)
    {
        int payload = rank * 1000 + k;
        if (k % 2 == 0)
            sreqs.emplace_back(SendPlan::send_int(*comm_, dest, payload, nsA));
        else
            sreqs.emplace_back(SendPlan::send_int(*comm_, dest, payload, nsB));
    }

    // Wait for all receives first (ANY_TAG matching happens inside the wait loop).
    for (int i = 0; i < half; ++i)
        ASSERT_TRUE(wait_with_timeout(rreqA[i], 10000));
    for (int i = 0; i < half; ++i)
        ASSERT_TRUE(wait_with_timeout(rreqB[i], 10000));
    // Then wait for all sends to complete.
    for (auto & p : sreqs)
        ASSERT_TRUE(p.wait());

    // Only verifies absence of deadlock and timely completion of all requests.
    SUCCEED();
}

// Concurrency race: interleaved namespaces (string payloads). Same structure as
// the int variant, exercising the variable-length (serialized bytes) path.
TEST_F(AnyTagConcurrencyTest, MixedNamespacesRaceString)
{
    auto & alloc = comm_->tag_allocator();
    auto nsA = alloc.create_namespace("any_tag_ns_A_str");
    auto nsB = alloc.create_namespace("any_tag_ns_B_str");

    // Skip when namespace IDs disagree across ranks — ANY_TAG matching would hang.
    if (!ns_consistent(comm_->comm(), nsA) || !ns_consistent(comm_->comm(), nsB))
        GTEST_SKIP() << "TagNamespace IDs are inconsistent across ranks; skipping to avoid hang.";

    const int rank = comm_->rank();
    const int size = comm_->size();
    const int dest = (rank + 1) % size;
    const int src = (rank - 1 + size) % size;

    const int total_msgs = 6;
    const int half = total_msgs / 2;

    std::vector<std::string> recvA(half), recvB(half);
    std::vector<RecvPlan> rreqA, rreqB;
    rreqA.reserve(half);
    rreqB.reserve(half);
    for (int i = 0; i < half; ++i)
        rreqA.emplace_back(RecvPlan::for_string(*comm_, src, recvA[i], nsA));
    for (int i = 0; i < half; ++i)
        rreqB.emplace_back(RecvPlan::for_string(*comm_, src, recvB[i], nsB));

    // Synchronize before sending so every rank's receives are prepared.
    MPI_Barrier(comm_->comm());

    std::vector<SendPlan> sreqs;
    sreqs.reserve(total_msgs);
    for (int k = 0; k < total_msgs; ++k)
    {
        std::string payload =
            std::string("rank_") + std::to_string(rank) + "_k_" + std::to_string(k);
        if (k % 2 == 0)
            sreqs.emplace_back(SendPlan::send_string(*comm_, dest, payload, nsA));
        else
            sreqs.emplace_back(SendPlan::send_string(*comm_, dest, payload, nsB));
    }

    // Wait for all receives first.
    for (int i = 0; i < half; ++i)
        ASSERT_TRUE(wait_with_timeout(rreqA[i], 10000));
    for (int i = 0; i < half; ++i)
        ASSERT_TRUE(wait_with_timeout(rreqB[i], 10000));
    // Then wait for all sends to complete.
    for (auto & p : sreqs)
        ASSERT_TRUE(p.wait());

    // Only verifies absence of deadlock and timely completion of all requests.
    SUCCEED();
}
// Helper: check that the TagNamespace ID agrees on every rank of `comm`.
// Returns true when all gathered IDs are identical.
static bool
ns_consistent(MPI_Comm comm, TagNamespace ns)
{
    int ranks = 0;
    MPI_Comm_size(comm, &ranks);
    const int local_id = static_cast<int>(ns.id());
    std::vector<int> gathered(ranks, 0);
    MPI_Allgather(&local_id, 1, MPI_INT, gathered.data(), 1, MPI_INT, comm);
    const int reference = gathered.empty() ? 0 : gathered[0];
    for (const int id : gathered)
        if (id != reference)
            return false;
    return true;
}
// Wait helper: drives a RecvPlan to completion with a timeout so a lost or
// mismatched message fails the test instead of hanging it.
//
// Each loop iteration does one of two things:
//   - receive already posted: poll it with MPI_Test and, on completion,
//     deserialize string payloads from the staging buffer;
//   - not yet posted: probe the namespace for a matching message (ANY_TAG
//     filtered by namespace) and, when one is ready, post the real receive.
// Returns false on any MPI error or when `timeout_ms` elapses first.
static bool
wait_with_timeout(RecvPlan & plan, int timeout_ms)
{
    using namespace std::chrono;
    const auto deadline = steady_clock::now() + milliseconds(timeout_ms);
    MPI_Status status{};
    while (true)
    {
        if (plan.posted)
        {
            int flag = 0;
            const int trc = MPI_Test(&plan.req, &flag, &status);
            if (!call_mpi(trc, "MPI_Test failed (recv)", plan.ctx->comm()))
                return false;
            if (flag)
            {
                // Completed: unpack non-fixed-size payloads from the byte buffer.
                if (plan.is_string)
                {
                    *(plan.out_str) = deserialize<std::string>(plan.buf);
                }
                return true;
            }
        }
        else
        {
            // ANY_TAG non-blocking probe via the namespace helper; post the
            // receive once a matching message is available.
            // NOTE(review): assumes ns->iprobe fills `tag` with the concrete
            // matched tag on success — confirm against the TagNamespace API,
            // since `tag.value` is reused for Improbe/Irecv below.
            MessageTag tag(0, plan.ns->id());
            if (plan.ns->iprobe(plan.ctx->comm(), plan.src, tag))
            {
                if (plan.is_string)
                {
                    // Use Improbe/Imrecv with the concrete tag so the payload
                    // size can be queried before posting the receive.
                    MPI_Message msg = MPI_MESSAGE_NULL;
                    int ready = 0;
                    const int irc =
                        MPI_Improbe(plan.src, tag.value, plan.ctx->comm(), &ready, &msg, &status);
                    if (!call_mpi(irc, "MPI_Improbe failed (string)", plan.ctx->comm()) || !ready)
                        return false;
                    int count_bytes = 0;
                    const int crc = MPI_Get_count(&status, MPI_BYTE, &count_bytes);
                    if (!call_mpi(crc, "MPI_Get_count failed (string)", plan.ctx->comm()))
                        return false;
                    plan.buf.resize(static_cast<std::size_t>(count_bytes));
                    const int rrc =
                        MPI_Imrecv(plan.buf.data(), count_bytes, MPI_BYTE, &msg, &plan.req);
                    if (!call_mpi(rrc, "MPI_Imrecv failed (string)", plan.ctx->comm()))
                        return false;
                    plan.posted = true;
                }
                else
                {
                    const int rrc = MPI_Irecv(
                        plan.out_int, 1, MPI_INT, plan.src, tag.value, plan.ctx->comm(), &plan.req);
                    if (!call_mpi(rrc, "MPI_Irecv failed (int)", plan.ctx->comm()))
                        return false;
                    plan.posted = true;
                }
            }
        }
        if (steady_clock::now() > deadline)
            return false;
        std::this_thread::sleep_for(milliseconds(2));
    }
}