#include "doca_dma_common.h"
#include "doca_utils.h"
#include "timer.h"
#include "common_unit.h"

#include <atomic>
#include <cstring>
#include <ctime>
#include <future>
#include <map>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#define SLEEP_IN_NANOS (10 * 1000)

// PCIe address of the DOCA-capable NIC. The default differs per platform:
// aarch64 (DPU Arm cores) vs x86 host see the device at different addresses.
// NOTE(review): "pcei" in the flag name looks like a typo for "pcie", but
// renaming the flag would break existing command lines — leaving as-is.
#if defined(__aarch64__)
DEFINE_string(doca_nic_pcei_addr, "03:00.0", "doca nic PCIe address");
#else
DEFINE_string(doca_nic_pcei_addr, "c3:00.0", "doca nic PCIe address");
#endif

template<typename T, typename... Args>
std::unique_ptr<T> make_unique(Args&&... args) {
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

class DmaUnit{
public:
    class PromiseWrapper {
    public:
        PromiseWrapper() : promise_(make_unique<std::promise<int>>()) {}
            std::future<int> GetFuture() {
            return promise_->get_future();
        }
        void SetValue(int value) {
            promise_->set_value(value);
        }
        void Reset() {
            promise_ = make_unique<std::promise<int>>();
        }

    private:
        std::unique_ptr<std::promise<int>> promise_;
    };

    enum TaskStatus {
        SUCCESS=11,
        FAIL=-11
    };

    void * get_ptr() {
        return DmaMemory.ptr;
    }

    DmaUnit(int device_type, int device_id, int mmap_type, int task_num, std::vector<size_t> offsets, void * premalloc_ptr = nullptr) {
        this->device_id = device_id;
        this->mmap_type = mmap_type;
        this->device_type = device_type;
        this->task_num = task_num;
        this->task_offsets = offsets;

        //calc total malloc size
        DmaMemory.nbytes = offsets[(int)offsets.size() - 1];

        // 初始化 handler
        DmaConfig.nic_pcie_addr = FLAGS_doca_nic_pcei_addr;

        DmaConfig.complet_cb = [](struct doca_dma_task_memcpy *task,
                                union doca_data task_user_data,
                                union doca_data ctx_user_data) {
            (void)task;
            auto *p = (PromiseWrapper*)task_user_data.ptr;
            DmaUnit *handler = (DmaUnit *)ctx_user_data.ptr;
            handler->left_task_num.fetch_sub(1);
            p->SetValue(TaskStatus::SUCCESS);
        };

        DmaConfig.error_cb = [](struct doca_dma_task_memcpy *task,
                                union doca_data task_user_data,
                                union doca_data ctx_user_data) {
            (void)task;
            auto *p = (PromiseWrapper*)task_user_data.ptr;
            DmaUnit *handler = (DmaUnit *)ctx_user_data.ptr;
            handler->left_task_num.fetch_sub(1);
            p->SetValue(TaskStatus::FAIL);
        };

        DmaConfig.task_num = task_num;
        DmaConfig.ctx_user_data = {(void*)this};
        // 初始化 doca-device doca-dma dpdk
        DOCA_CHECK_FATAL(Dma_create(&DmaHandler, &DmaConfig, mmap_type) );

        // add: if ptr already malloc, use it
        if (premalloc_ptr == nullptr) {
            DmaMemory.ptr = CommonUtils::prepare_host_memory(DmaMemory.nbytes);
        }
        else {
            DmaMemory.ptr = premalloc_ptr;
        }

        DmaMemory.dev = DmaHandler.dev;
        CHECK(DmaMemory.ptr);
        // 这部分初始化在 host 和 dpu 侧类似
        DOCA_CHECK_FATAL(doca_mmap_create(&DmaMemory.mmap));
        DOCA_CHECK_FATAL(doca_mmap_add_dev(DmaMemory.mmap, DmaMemory.dev));
        DOCA_CHECK_FATAL(doca_mmap_set_memrange(DmaMemory.mmap, DmaMemory.ptr, DmaMemory.nbytes));
        DOCA_CHECK_FATAL(doca_mmap_set_permissions(DmaMemory.mmap, DOCA_ACCESS_FLAG_PCI_READ_WRITE));
        DOCA_CHECK_FATAL(doca_mmap_start(DmaMemory.mmap));
        DOCA_CHECK_FATAL(doca_mmap_export_pci(DmaMemory.mmap, DmaMemory.dev, (const void**)&DmaMemory.export_desc, &DmaMemory.export_desc_len));
    
        pe_thread = std::thread([this]() {this->RunningProgressEngine();} );
    }

    void ExportFromDesc(std::string desc) {
        EnemyMemory.enemy_export_desc = desc;
        EnemyMemory.dev = DmaMemory.dev;

        LOG(INFO) << "recieve desc from host" << desc;
    
        DOCA_CHECK_FATAL(
            doca_mmap_create_from_export(
                NULL, EnemyMemory.enemy_export_desc.data(), EnemyMemory.enemy_export_desc.length(), EnemyMemory.dev, &EnemyMemory.mmap));

        DOCA_CHECK_FATAL(doca_mmap_get_memrange(EnemyMemory.mmap, (void**)&EnemyMemory.ptr, &EnemyMemory.nbytes) );
        // 创建 inventory
        DOCA_CHECK_FATAL(doca_buf_inventory_create(task_num * 2, &dma_inv) );
        DOCA_CHECK_FATAL(doca_buf_inventory_start(dma_inv) );
        
        dma_src_bufs.resize(task_num);
        dma_dst_bufs.resize(task_num);
        promise_wrappers_.resize(task_num);
        dma_memcpy_tasks.resize(task_num);
        dma_tasks.resize(task_num);

        for (int i = 0; i < task_num; ++i) {
            // void * cur_ptr = DmaMemory.ptr + task_offsets[i];
            // void * cur_enemy_ptr = EnemyMemory.ptr + task_offsets[i];
            
            void* cur_ptr = static_cast<void*>(static_cast<char*>(DmaMemory.ptr) + task_offsets[i]);
            void* cur_enemy_ptr = static_cast<void*>(static_cast<char*>(EnemyMemory.ptr) + task_offsets[i]);

            DOCA_CHECK_FATAL(doca_buf_inventory_buf_get_by_data(dma_inv, DmaMemory.mmap, cur_ptr, task_offsets[i+1] - task_offsets[i], &dma_src_bufs[i]));
            DOCA_CHECK_FATAL(doca_buf_inventory_buf_get_by_addr(dma_inv, EnemyMemory.mmap, cur_enemy_ptr, task_offsets[i+1] - task_offsets[i], &dma_dst_bufs[i]));

            doca_data memcpy_task_user_data = {.ptr=(void*)(&promise_wrappers_[i])};
            DOCA_CHECK_FATAL(doca_dma_task_memcpy_alloc_init(
                DmaHandler.dma, dma_src_bufs[i], dma_dst_bufs[i], memcpy_task_user_data, &dma_memcpy_tasks[i]));
            // 这里是不是alloc_init一次，后续都不需要init了，但是为了能够重复DMA，需要做什么操作呢？
            dma_tasks[i] = doca_dma_task_memcpy_as_task(dma_memcpy_tasks[i]);
        }
    }

    void RunningProgressEngine() {
        timespec ts = {
            .tv_sec = 0,
            .tv_nsec = SLEEP_IN_NANOS,
        };
        LOG(INFO) << "Start RunningProcessEngine";
        while(!exit_flag) {
            if (left_task_num.load() > 0) {
                doca_pe_progress(DmaHandler.pe);
            }
            nanosleep(&ts, &ts);
        }
    }

    void StartDmaCopy(const std::vector<int> & task_ids) {
        // xmh::Timer time_calcer("dma_time");

        for (auto & task_id : task_ids) {
            promise_wrappers_[task_id].Reset();
            left_task_num.fetch_add(1);
            DOCA_CHECK_FATAL(doca_buf_reset_data_len(dma_dst_bufs[task_id]) );
        }

        for (auto & task_id : task_ids) {
            DOCA_CHECK_FATAL(doca_task_submit(dma_tasks[task_id]) );
        }

        for (auto & task_id : task_ids) {
            auto cur_future = promise_wrappers_[task_id].GetFuture();
            cur_future.get();
        }
        // time_calcer.end();
    }

    std::string GetExportDesc() { //把自己的export信息复制搞过去
        std::string desc;
        desc.resize(DmaMemory.export_desc_len);
        memcpy(const_cast<char*>(desc.data()), DmaMemory.export_desc, DmaMemory.export_desc_len);
        return desc;
    }

    ~DmaUnit() {
        exit_flag = true;
        pe_thread.join();
        doca_mmap_stop(DmaMemory.mmap);
        doca_mmap_destroy(DmaMemory.mmap);
        free(get_ptr());
        doca_ctx_stop(DmaHandler.ctx);
        doca_dma_destroy(DmaHandler.dma);
        doca_pe_destroy(DmaHandler.pe);
    }

public:
    int task_num; //任务数量
    // int task_bytes; // 每个任务大小，如果每个任务长度不一样，可以改成vector。
    std::vector<size_t> task_offsets; // 每个task起始内存位置相对于memory_start_ptr的偏移
    int device_type; // 0/1 cpu/gpu
    int device_id; //
    int mmap_type; // 0/1 from/to

    dma_handler DmaHandler;
    dma_config DmaConfig;
    dma_memory DmaMemory;
    dma_memory EnemyMemory; // 对面的，需要export from desc的memory信息。

    doca_buf_inventory * dma_inv;
    std::atomic_int32_t left_task_num; // 还剩多少dma task没做完。
    std::vector<doca_buf*> dma_src_bufs;
    std::vector<doca_buf*> dma_dst_bufs;
    std::vector<doca_task*> dma_tasks;
    std::vector<doca_dma_task_memcpy*> dma_memcpy_tasks;
    std::vector<PromiseWrapper> promise_wrappers_; //用于查看某个task是否完成
    bool exit_flag = false;
    std::thread pe_thread;

    // new add for pipeline_dma_task; 
    std::map<int , std::vector<int> > task_id_map; // task_id : task_id_list (some task size exceed 2000000)
};