// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <vector>

#include <turbo/log/logging.h>
#include <nebula/future/memory.h>
#include <nebula/future/thread_pool.h>

namespace nebula::internal {

    // Return `address` with only the bits selected by `bits` kept, i.e. the
    // bitwise AND of the address value and the mask (used to round an address
    // down to a power-of-two block boundary). The constness of the input is
    // deliberately dropped in the result.
    inline uint8_t *pointer_logical_and(const uint8_t *address, uintptr_t bits) {
        const uintptr_t masked = reinterpret_cast<uintptr_t>(address) & bits;
        return reinterpret_cast<uint8_t *>(masked);
    }

    // This function is just for avoiding MinGW-w64 32bit crash.
    // See also: https://sourceforge.net/p/mingw-w64/bugs/767/
    void *wrap_memcpy(void *dst, const void *src, size_t n) {
        memcpy(dst, src, n);
        return dst;  // memcpy returns its destination; do so explicitly here
    }

    void parallel_memcopy(uint8_t *dst, const uint8_t *src, int64_t nbytes,
                          uintptr_t block_size, int num_threads) {
        // XXX This function is really using `num_threads + 1` threads.
        auto pool = get_cpu_thread_pool();

        uint8_t *left = pointer_logical_and(src + block_size - 1, ~(block_size - 1));
        uint8_t *right = pointer_logical_and(src + nbytes, ~(block_size - 1));
        int64_t num_blocks = (right - left) / block_size;

        // Update right address
        right = right - (num_blocks % num_threads) * block_size;

        // Now we divide these blocks between available threads. The remainder is
        // handled separately.
        size_t chunk_size = (right - left) / num_threads;
        int64_t prefix = left - src;
        int64_t suffix = src + nbytes - right;
        // Now the data layout is | prefix | k * num_threads * block_size | suffix |.
        // We have chunk_size = k * block_size, therefore the data layout is
        // | prefix | num_threads * chunk_size | suffix |.
        // Each thread gets a "chunk" of k blocks.

        // Start all parallel memcpy tasks and handle leftovers while threads run.
        std::vector<Future<void *>> futures;

        for (int i = 0; i < num_threads; i++) {
            futures.push_back(*pool->submit(wrap_memcpy, dst + prefix + i * chunk_size,
                                            left + i * chunk_size, chunk_size));
        }
        memcpy(dst, src, prefix);
        memcpy(dst + prefix + num_threads * chunk_size, right, suffix);

        for (auto &fut: futures) {
            KCHECK_OK(fut.status());
        }
    }

}  // namespace nebula::internal
