#include <cuda_runtime.h>
#include "PCFG.h"
using namespace std;

// 优化后的核函数：减少循环次数，合并内存操作
// Builds one guess per thread: output[idx] = prefix[idx] + value[idx] + '\0'.
// Memory layout: output is row-major with a fixed stride of max_guess_len
// bytes per guess; values_pool / prefix_pool are concatenated char pools,
// addressed via offsets/lengths and a fixed max_prefix stride respectively.
// Launch with any 1-D grid covering total_count threads; excess threads exit.
__global__ void generate_guesses_kernel_optimized(
    char* output,                     // [total_count * max_guess_len]
    const char* values_pool,          // concatenation of all segment values
    const int* offsets,               // [total_count] start of each value in values_pool
    const int* lengths,               // [total_count] length of each value
    const char* prefix_pool,          // concatenated prefixes, fixed stride max_prefix (may be nullptr)
    const int* prefix_lengths,        // [total_count] per-guess prefix length (may be nullptr)
    int max_prefix,                   // bytes reserved per prefix slot
    int total_count,
    int max_guess_len
) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= total_count) return;

    char* out = output + idx * max_guess_len;

    // Fix: the single-segment launch path passes prefix_lengths == nullptr,
    // and the original code dereferenced it unconditionally (illegal device
    // memory access). Treat a null array as "no prefix".
    int plen = (prefix_lengths != nullptr) ? prefix_lengths[idx] : 0;

    // Copy the prefix (if any). Per-guess lengths are tiny, so a plain
    // byte loop is adequate here.
    if (plen > 0) {
        const char* pre = prefix_pool + idx * max_prefix;
        for (int i = 0; i < plen; i++) {
            out[i] = pre[i];
        }
    }

    // Append the last-segment value and terminate so the host can construct
    // a std::string directly from the slot.
    int val_start = offsets[idx];
    int vlen = lengths[idx];
    for (int j = 0; j < vlen; j++) {
        out[plen + j] = values_pool[val_start + j];
    }
    out[plen + vlen] = '\0';
}

// Expands one PT into concrete guesses on the GPU and appends them to
// `guesses`. Single-segment PTs emit every value of that segment; multi-
// segment PTs emit (fixed prefix from current indices) + (each value of the
// last segment). Uses the default stream: the blocking cudaMemcpy back to
// the host implicitly waits for the kernel.
void PriorityQueue::Generate(PT pt) {
    CalProb(pt);

    if (pt.content.size() == 1) {
        // Single-segment PT: each guess is just one value of the segment.
        segment* a = nullptr;
        auto& seg = pt.content[0];
        if (seg.type == 1) a = &m.letters[m.FindLetter(seg)];
        if (seg.type == 2) a = &m.digits[m.FindDigit(seg)];
        if (seg.type == 3) a = &m.symbols[m.FindSymbol(seg)];

        int count = pt.max_indices[0];

        // Only the first `count` values are generated, so size every buffer
        // from those alone (the original scanned and uploaded *all* values).
        // Concatenating on the host also replaces the original one-cudaMemcpy-
        // per-value loop with a single bulk transfer.
        int max_val_len = 0;
        string all_vals;
        vector<int> offsets_host(count), lengths_host(count);
        int pos = 0;
        for (int i = 0; i < count; i++) {
            const string& s = a->ordered_values[i];
            offsets_host[i] = pos;
            lengths_host[i] = (int)s.size();
            all_vals += s;
            pos += (int)s.size();
            if ((int)s.size() > max_val_len) max_val_len = (int)s.size();
        }
        int max_guess_len = max_val_len + 1;   // +1 for the '\0' terminator

        char* d_vals;
        char* d_output;
        int* d_offsets, * d_lengths, * d_prefix_lengths;
        cudaMalloc(&d_vals, all_vals.size());
        cudaMalloc(&d_output, (size_t)count * max_guess_len);
        cudaMalloc(&d_offsets, count * sizeof(int));
        cudaMalloc(&d_lengths, count * sizeof(int));
        cudaMalloc(&d_prefix_lengths, count * sizeof(int));

        cudaMemcpy(d_vals, all_vals.data(), all_vals.size(), cudaMemcpyHostToDevice);
        cudaMemcpy(d_offsets, offsets_host.data(), count * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_lengths, lengths_host.data(), count * sizeof(int), cudaMemcpyHostToDevice);

        // Fix: the kernel reads prefix_lengths[idx] for every guess, so the
        // original nullptr here was an illegal device access. Upload a
        // zero-filled array instead; with plen == 0 the (null) prefix pool
        // is never dereferenced.
        vector<int> zero_prefix_lens(count, 0);
        cudaMemcpy(d_prefix_lengths, zero_prefix_lens.data(), count * sizeof(int), cudaMemcpyHostToDevice);

        dim3 block(256);
        dim3 grid((count + block.x - 1) / block.x);
        generate_guesses_kernel_optimized << <grid, block >> > (
            d_output,
            d_vals,
            d_offsets,
            d_lengths,
            nullptr,            // no prefixes for a single-segment PT
            d_prefix_lengths,   // all zeros
            0,                  // max_prefix
            count,
            max_guess_len
            );

        // Blocking copy on the default stream: waits for the kernel, then
        // brings the fixed-stride guess buffer back.
        vector<char> host_out((size_t)count * max_guess_len);
        cudaMemcpy(host_out.data(), d_output, host_out.size(), cudaMemcpyDeviceToHost);

        for (int i = 0; i < count; ++i) {
            // Each slot is '\0'-terminated by the kernel.
            guesses.emplace_back(host_out.data() + (size_t)i * max_guess_len);
            ++total_guesses;
        }

        cudaFree(d_vals);
        cudaFree(d_output);
        cudaFree(d_offsets);
        cudaFree(d_lengths);
        cudaFree(d_prefix_lengths);
    }
    else {
        // Multi-segment PT: build the fixed prefix from all segments except
        // the last, using the PT's current value indices.
        string prefix;
        int seg_idx = 0;
        for (int idx : pt.curr_indices) {
            const auto& segc = pt.content[seg_idx];
            if (segc.type == 1) prefix += m.letters[m.FindLetter(segc)].ordered_values[idx];
            if (segc.type == 2) prefix += m.digits[m.FindDigit(segc)].ordered_values[idx];
            if (segc.type == 3) prefix += m.symbols[m.FindSymbol(segc)].ordered_values[idx];
            if (++seg_idx == (int)pt.content.size() - 1) break;
        }
        segment* a = nullptr;
        auto& last = pt.content.back();
        if (last.type == 1) a = &m.letters[m.FindLetter(last)];
        if (last.type == 2) a = &m.digits[m.FindDigit(last)];
        if (last.type == 3) a = &m.symbols[m.FindSymbol(last)];

        int count = pt.max_indices.back();

        // Flatten the first `count` last-segment values into one pool and
        // track the longest one for the output stride.
        int max_val_len = 0;
        string all_vals;
        vector<int> offsets_host(count), lengths_host(count);
        int pos = 0;
        for (int i = 0; i < count; i++) {
            const string& s = a->ordered_values[i];
            offsets_host[i] = pos;
            lengths_host[i] = (int)s.size();
            all_vals += s;
            pos += (int)s.size();
            if ((int)s.size() > max_val_len) max_val_len = (int)s.size();
        }

        int max_prefix = (int)prefix.size();
        int max_guess_len = max_prefix + max_val_len + 1;  // +1 for '\0'

        // Every guess shares the same prefix, so each prefix slot is exactly
        // max_prefix bytes — no padding needed (the original's padding branch
        // was dead code for that reason).
        vector<int> prefix_lengths_host(count, max_prefix);
        string prefix_pool;
        prefix_pool.reserve((size_t)count * max_prefix);
        for (int i = 0; i < count; i++) {
            prefix_pool += prefix;
        }

        char* d_vals, * d_output, * d_prefix_pool;
        int* d_offsets, * d_lengths, * d_prefix_lengths;

        cudaMalloc(&d_vals, all_vals.size());
        cudaMalloc(&d_offsets, count * sizeof(int));
        cudaMalloc(&d_lengths, count * sizeof(int));
        cudaMalloc(&d_prefix_pool, prefix_pool.size());
        cudaMalloc(&d_prefix_lengths, count * sizeof(int));
        cudaMalloc(&d_output, (size_t)count * max_guess_len);

        cudaMemcpy(d_vals, all_vals.data(), all_vals.size(), cudaMemcpyHostToDevice);
        cudaMemcpy(d_offsets, offsets_host.data(), count * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_lengths, lengths_host.data(), count * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_prefix_pool, prefix_pool.data(), prefix_pool.size(), cudaMemcpyHostToDevice);
        cudaMemcpy(d_prefix_lengths, prefix_lengths_host.data(), count * sizeof(int), cudaMemcpyHostToDevice);

        dim3 block(256);
        dim3 grid((count + block.x - 1) / block.x);
        generate_guesses_kernel_optimized << <grid, block >> > (
            d_output,
            d_vals,
            d_offsets,
            d_lengths,
            d_prefix_pool,
            d_prefix_lengths,
            max_prefix,
            count,
            max_guess_len
            );

        // Blocking copy doubles as the kernel synchronization point.
        vector<char> host_out((size_t)count * max_guess_len);
        cudaMemcpy(host_out.data(), d_output, host_out.size(), cudaMemcpyDeviceToHost);

        for (int i = 0; i < count; ++i) {
            guesses.emplace_back(host_out.data() + (size_t)i * max_guess_len);
            ++total_guesses;
        }

        cudaFree(d_vals);
        cudaFree(d_offsets);
        cudaFree(d_lengths);
        cudaFree(d_prefix_pool);
        cudaFree(d_prefix_lengths);
        cudaFree(d_output);
    }
}

// Pops up to batch_size PTs from the priority queue, expands all of them in
// one fused GPU launch, appends the guesses, then re-inserts each PT's
// descendants sorted by probability. All device work runs on one private
// stream; a single cudaStreamSynchronize at the end orders host reads.
void PriorityQueue::PopNextBatch(size_t batch_size) {
    // 1. Number of PTs actually popped this round.
    size_t real_batch = min(batch_size, priority.size());
    if (real_batch == 0) return;

    // 2. Snapshot the batch (we erase from `priority` later).
    vector<PT> batch_pts(priority.begin(), priority.begin() + real_batch);

    // 3. One generation task per PT: fixed prefix + varying last segment.
    struct GenTask {
        string prefix;   // concatenation of all segments except the last
        segment* a;      // last segment (supplies the varying values)
        int count;       // number of last-segment values to expand
    };
    vector<GenTask> tasks;
    tasks.reserve(real_batch);

    for (auto& pt : batch_pts) {
        string prefix;
        for (int i = 0; i + 1 < (int)pt.content.size(); ++i) {
            auto& c = pt.content[i];
            int idx = pt.curr_indices[i];
            if (c.type == 1) prefix += m.letters[m.FindLetter(c)].ordered_values[idx];
            else if (c.type == 2) prefix += m.digits[m.FindDigit(c)].ordered_values[idx];
            else             prefix += m.symbols[m.FindSymbol(c)].ordered_values[idx];
        }
        auto& last = pt.content.back();
        segment* a = (last.type == 1 ? &m.letters[m.FindLetter(last)]
            : last.type == 2 ? &m.digits[m.FindDigit(last)]
            : &m.symbols[m.FindSymbol(last)]);
        tasks.push_back({ move(prefix), a, pt.max_indices.back() });
    }

    // 4. Flatten all tasks into contiguous host-side arrays.
    size_t total_count = 0;
    int max_prefix = 0;
    for (auto& t : tasks) {
        max_prefix = max(max_prefix, (int)t.prefix.size());
        total_count += t.count;
    }

    string all_vals;
    vector<int> offsets_host, lengths_host, prefix_lengths_host;
    offsets_host.reserve(total_count);
    lengths_host.reserve(total_count);
    prefix_lengths_host.reserve(total_count);

    int max_val_len = 0;
    int pos = 0;
    for (auto& t : tasks) {
        for (int i = 0; i < t.count; ++i) {
            const string& s = t.a->ordered_values[i];
            offsets_host.push_back(pos);
            lengths_host.push_back((int)s.size());
            all_vals += s;
            pos += (int)s.size();
            max_val_len = max(max_val_len, (int)s.size());
        }
        // All guesses of one task share the same prefix length.
        prefix_lengths_host.insert(prefix_lengths_host.end(), t.count, (int)t.prefix.size());
    }

    int max_guess_len = max_prefix + max_val_len + 1;  // +1 for '\0'

    // Prefix pool: each guess gets a max_prefix-byte slot, '\0'-padded so
    // shorter prefixes still occupy a fixed stride.
    string prefix_pool;
    prefix_pool.reserve(total_count * (size_t)max_prefix);
    for (auto& t : tasks) {
        for (int i = 0; i < t.count; i++) {
            prefix_pool += t.prefix;
            if ((int)t.prefix.size() < max_prefix) {
                prefix_pool.append(max_prefix - t.prefix.size(), '\0');
            }
        }
    }

    // 5. One device allocation carved into sub-buffers.
    // Fix: the int sub-buffers must be sizeof(int)-aligned. The original
    // placed them after the char pools at arbitrary byte offsets (vals_size,
    // prefix_pool_size), misaligning them whenever those sizes were not
    // multiples of 4 — undefined behavior on device loads. Putting all three
    // int arrays first keeps them aligned: cudaMalloc's base address is
    // well-aligned and int-array byte sizes preserve int alignment.
    size_t ints_size = 3 * total_count * sizeof(int);
    size_t vals_size = all_vals.size();
    size_t prefix_pool_size = total_count * (size_t)max_prefix;
    size_t output_size = total_count * (size_t)max_guess_len;
    size_t total_mem_size = ints_size + vals_size + prefix_pool_size + output_size;

    char* d_gpu_mem;
    cudaMalloc(&d_gpu_mem, total_mem_size);

    int* d_offsets = reinterpret_cast<int*>(d_gpu_mem);
    int* d_lengths = d_offsets + total_count;
    int* d_prefix_lengths = d_lengths + total_count;
    char* d_vals = d_gpu_mem + ints_size;
    char* d_prefix_pool = d_vals + vals_size;
    char* d_output = d_prefix_pool + prefix_pool_size;

    // 6. Upload on a private stream (avoids the legacy default stream's
    // device-wide synchronization).
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    cudaMemcpyAsync(d_vals, all_vals.data(), vals_size, cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(d_offsets, offsets_host.data(), total_count * sizeof(int), cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(d_lengths, lengths_host.data(), total_count * sizeof(int), cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(d_prefix_pool, prefix_pool.data(), prefix_pool_size, cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(d_prefix_lengths, prefix_lengths_host.data(), total_count * sizeof(int), cudaMemcpyHostToDevice, stream);

    // 7. Launch the fused kernel over every guess in the batch.
    dim3 threads(256);
    dim3 blocks((total_count + threads.x - 1) / threads.x);
    generate_guesses_kernel_optimized << <blocks, threads, 0, stream >> > (
        d_output,
        d_vals,
        d_offsets,
        d_lengths,
        d_prefix_pool,
        d_prefix_lengths,
        max_prefix,
        (int)total_count,
        max_guess_len
        );

    // 8. Copy results back. Stream ordering already makes this copy wait for
    // the kernel, so one synchronize after it suffices (the original's extra
    // pre-copy synchronize was redundant).
    vector<char> host_out(output_size);
    cudaMemcpyAsync(host_out.data(), d_output, output_size, cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    // 9. Materialize guesses (each slot is '\0'-terminated by the kernel).
    for (size_t i = 0; i < total_count; ++i) {
        guesses.emplace_back(host_out.data() + i * max_guess_len);
        ++total_guesses;
    }

    // 10. Release device resources.
    cudaFree(d_gpu_mem);
    cudaStreamDestroy(stream);

    // 11. Remove the processed PTs from the queue.
    priority.erase(priority.begin(), priority.begin() + real_batch);

    // 12. Expand each popped PT and insert descendants sorted by probability
    // (descending, so upper_bound with prob > keeps the order stable).
    for (auto& pt : batch_pts) {
        auto new_pts = pt.NewPTs();
        for (auto& npt : new_pts) {
            CalProb(npt);
            auto it = upper_bound(
                priority.begin(), priority.end(), npt,
                [](auto const& a, auto const& b) { return a.prob > b.prob; }
            );
            priority.insert(it, npt);
        }
    }
}