#include "memory.h"
#include "util.h"

namespace sp{

/**
 * @brief Shared-memory array of a basic (fixed-size) element type
 *
 */
template<typename T>
SharedMemory<T>::SharedMemory(std::string name, long capacity){
    this->name = name;
    this->element_size = sizeof(T);
    this->capacity = capacity;
    // Segment layout: [capacity occupancy flags (1 byte each)][capacity elements of T].
    static_assert(sizeof(std::atomic<bool>) == 1, "occupancy layout assumes 1-byte atomics");
    size_t size = capacity + element_size*capacity;
    shm_id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0666);
    assert(shm_id >= 0);
    // Attach. shmat() signals failure with (char*)-1, not NULL; the original
    // never checked the result at all.
    ptr = (char*)shmat(shm_id, NULL, 0);
    assert(ptr != (char*)-1);
    occ_ptr = (std::atomic<bool>*)ptr;
    data_ptr = ptr + capacity;
    // Construct the occupancy flags in place; () value-initializes them to false.
    occ_ptr = new (occ_ptr)std::atomic<bool>[capacity]();
    // Zero only the data area. The original memset(occ_ptr, 0, size) also
    // overwrote the flags that were just constructed above.
    memset(ptr + capacity, 0, element_size*capacity);
}

template<typename T>
SharedMemory<T>::~SharedMemory(){
    // Detach from the segment, then ask the kernel to remove it.
    shmdt(ptr);
    shmid_ds info;
    shmctl(shm_id, IPC_RMID, &info);
}

template<typename T>
void SharedMemory<T>::setValue(long pos, T value){
    pos = pos%capacity;
    bool expected = false;
    // Writer/writer exclusion: claim the slot's occupancy flag via CAS.
    if(occ_ptr[pos].compare_exchange_strong(expected, true)){
        data_ptr[pos] = value;
        // BUG FIX: release the flag only when WE acquired it. The original
        // reset occ_ptr[pos] unconditionally, clobbering a lock another
        // thread was still holding whenever the CAS failed.
        occ_ptr[pos] = false;
    }
    // On CAS failure the write is silently dropped (same as the original).
}

template<typename T>
T SharedMemory<T>::getValue(long pos){
    pos = pos%capacity;
    // BUG FIX: value-initialize; the original returned an uninitialized T
    // (undefined behavior) whenever the slot was busy.
    T res{};
    bool expected = false;
    // Reader/writer exclusion: claim the slot's occupancy flag via CAS.
    if(occ_ptr[pos].compare_exchange_strong(expected, true)){
        res = data_ptr[pos];
        // BUG FIX: release only when we acquired; the original reset the flag
        // unconditionally, releasing a lock held by another thread.
        occ_ptr[pos] = false;
    }
    return res;
}

/**
 * @brief Contiguous shared-memory pool of equally-sized elements
 *
 */
SharedMemoryPool::SharedMemoryPool(uint8_t type, std::string name, long capacity){
    this->type = type;
    this->name = name;
    this->element_size = typeToElementSize(type); // element size derived from the pool type
    this->capacity = capacity;
    // Segment layout: [capacity occupancy flags (1 byte each)][capacity elements].
    static_assert(sizeof(std::atomic<bool>) == 1, "occupancy layout assumes 1-byte atomics");
    size_t size = capacity + element_size*capacity;
    shm_id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0666);
    assert(shm_id >= 0);
    // Attach. shmat() signals failure with (char*)-1, not NULL; the original
    // never checked the result.
    ptr = (char*)shmat(shm_id, NULL, 0);
    assert(ptr != (char*)-1);
    occ_ptr = (std::atomic<bool>*)ptr;
    data_ptr = ptr + capacity;
    // Construct the occupancy flags in place; () value-initializes them to false.
    occ_ptr = new (occ_ptr)std::atomic<bool>[capacity]();
    memset(occ_ptr, 0, capacity);
}

SharedMemoryPool::SharedMemoryPool(uint8_t type, std::string name, size_t element_size, long capacity){
    this->type = type;
    this->name = name;
    this->element_size = element_size; // explicit element size (overrides the type's default)
    this->capacity = capacity;
    // Segment layout: [capacity occupancy flags (1 byte each)][capacity elements].
    static_assert(sizeof(std::atomic<bool>) == 1, "occupancy layout assumes 1-byte atomics");
    size_t size = capacity + element_size*capacity;
    shm_id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0666);
    assert(shm_id >= 0);
    // Attach. shmat() signals failure with (char*)-1, not NULL; the original
    // never checked the result.
    ptr = (char*)shmat(shm_id, NULL, 0);
    assert(ptr != (char*)-1);
    occ_ptr = (std::atomic<bool>*)ptr;
    data_ptr = ptr + capacity;
    // Construct the occupancy flags in place; () value-initializes them to false.
    occ_ptr = new (occ_ptr)std::atomic<bool>[capacity]();
    memset(occ_ptr, 0, capacity);
}

SharedMemoryPool::~SharedMemoryPool(){
    // Detach from the segment, then ask the kernel to remove it.
    shmdt(ptr);
    shmid_ds info;
    shmctl(shm_id, IPC_RMID, &info);
}

void SharedMemoryPool::bindFileOut(std::ofstream& fout){
    this->fout_ptr = &fout;
}

size_t SharedMemoryPool::typeToElementSize(uint8_t type){
    // Map a pool type tag to the byte size of one element
    // (image pools are 3 bytes per pixel).
    if(type == Data_Memory){
        return sizeof(Data);
    }else if(type == Cv_Mat_Data_3840_2160_Memory){
        return 3840*2160*3;
    }else if(type == Cv_Mat_Data_1920_1080_Memory){
        return 1920*1080*3;
    }else if(type == Cv_Mat_Data_1280_720_Memory){
        return 1280*720*3;
    }else if(type == Cv_Mat_Data_960_540_Memory){
        return 960*540*3;
    }else{
        std::cout << " type not support." << std::endl;
        assert(false);
        // BUG FIX: with NDEBUG the assert vanishes and the original fell off
        // the end of a non-void function (undefined behavior).
        return 0;
    }
}

uint8_t SharedMemoryPool::getType(){
    return type;
}

bool SharedMemoryPool::contain(char* addr){
    // True if addr lies in this pool's data area. The valid range is the
    // half-open interval [data_ptr, data_ptr + element_size*capacity).
    // BUG FIX: the original used <=, accepting the one-past-the-end address
    // which belongs to no element of this pool.
    return addr >= data_ptr && addr < data_ptr + element_size*capacity;
}

char* SharedMemoryPool::malloc(){
    // Claim the first free slot (CAS on its occupancy flag), zero it, return it.
    for(long i = 0; i < capacity; i++){
        bool expected = false;
        if(occ_ptr[i].compare_exchange_strong(expected, true)){
            // clear memory for the new owner
            memset(data_ptr + element_size*i, 0, element_size);
            return data_ptr + element_size*i;
        }
    }
    std::cout << name << " bad malloc: no memory." << std::endl;
    assert(false);
    // BUG FIX: with NDEBUG the assert vanishes and the original fell off the
    // end of a non-void function (undefined behavior).
    return nullptr;
}

void SharedMemoryPool::free(char* addr){
    // Return a slot to the pool; rejects addresses that are not slot starts.
    long offset = addr - data_ptr;
    // Guard offset >= 0 before the modulo: a negative ptrdiff mixed with the
    // unsigned element_size would be converted to a huge unsigned value.
    if(offset >= 0 && offset % element_size == 0){
        long target = offset / element_size;
        if(target < capacity){
            occ_ptr[target] = false;
            return;
        }
    }
    // BUG FIX: message said "invaild" and lacked the space after the name.
    std::cout << name << " bad free: invalid ptr." << std::endl;
}

long SharedMemoryPool::getFreeCount(){
    // Count free slots; in debug mode, log every still-occupied slot as a
    // potential memory leak to the bound CSV stream.
    long count = 0;
    for(long i = 0; i < capacity; i++){
        if(occ_ptr[i] == false){
            count++;
        }else if(DEBUG && fout_ptr != nullptr){
            // BUG FIX: guard fout_ptr — it is unset until bindFileOut() is
            // called, and the original dereferenced it unconditionally.
            *(fout_ptr) << (void*)(data_ptr+i*element_size) << "," << SharedMemoryManager::typeToString(type);
            if(type == 0x00){
                // Data pools also log the payload's own type tag.
                *(fout_ptr) << "," << Util::DataTypeToString(((Data*)(data_ptr+i*element_size))->type);
            }
            *(fout_ptr) << std::endl;
        }
    }
    return count;
}

long SharedMemoryPool::getCapacity(){
    return capacity;
}

char* SharedMemoryPool::getDataPtr(){
    return data_ptr;
}

/**
 * @brief Host-side shared-memory management
 *
 */

SharedMemoryManager::SharedMemoryManager(){
    // Fixed, general-purpose shared-memory pools.
    n_pools_map["data"] = new SharedMemoryPool(Data_Memory, "Data", sizeof(Data), 20000);
    n_pools_map["img2k"] = new SharedMemoryPool(Cv_Mat_Data_3840_2160_Memory, "Mat 3840*2160", 3840*2160*3, 500);
    n_pools_map["img1080p"] = new SharedMemoryPool(Cv_Mat_Data_1920_1080_Memory, "Mat 1920*1080", 1920*1080*3, 500);
    n_pools_map["img720p"] = new SharedMemoryPool(Cv_Mat_Data_1280_720_Memory, "Mat 1280*720", 1280*720*3, 500);
    n_pools_map["img540p"] = new SharedMemoryPool(Cv_Mat_Data_960_540_Memory, "Mat 960*540", 960*540*3, 500);
    n_pools_map["tensor1K"] = new SharedMemoryPool(Tensor_Data_1K_Memory, "tensor below 1K", 1*1024, 500);
    n_pools_map["tensor10K"] = new SharedMemoryPool(Tensor_Data_10K_Memory, "tensor below 10K", 10*1024, 500);
    n_pools_map["tensor100K"] = new SharedMemoryPool(Tensor_Data_100K_Memory, "tensor below 100K", 100*1024, 500);
    n_pools_map["tensor1M"] = new SharedMemoryPool(Tensor_Data_1M_Memory, "tensor below 1M", 1*1024*1024, 500);
    n_pools_map["tensor10M"] = new SharedMemoryPool(Tensor_Data_10M_Memory, "tensor below 10M", 10*1024*1024, 500);
    n_pools_map["tensor100M"] = new SharedMemoryPool(Tensor_Data_100M_Memory, "tensor below 100M", 100*1024*1024, 500);
    // BUG FIX: this pool serves malloc(size) requests up to 500MB (see
    // SharedMemoryManager::malloc), but was created with 100MB elements, so
    // any allocation between 100MB and 500MB overran its slot.
    n_pools_map["tensor500M"] = new SharedMemoryPool(Tensor_Data_500M_Memory, "tensor below 500M", 500L*1024*1024, 5);
    // Open the leak-log file and bind it to every pool (debug builds only).
    if(DEBUG){
        fout.open("./logs/shared_memory.csv");
        for(auto& entry : n_pools_map){
            entry.second->bindFileOut(fout);
        }
    }
}

SharedMemoryManager::~SharedMemoryManager(){
    // Tear down every pool this manager owns (fixed and user-created),
    // then close the debug log file.
    for(auto& entry : n_pools_map)
        delete entry.second;
    for(auto& entry : o_pools_map)
        delete entry.second;
    if(DEBUG)
        fout.close();
}

SharedMemoryPool SharedMemoryManager::createSharedMemoryPool(std::string name, uint8_t type, long cnt){
    // Register a user pool (element size derived from the type tag).
    // BUG FIX: delete any pool previously registered under this name;
    // the original simply overwrote the pointer and leaked it.
    auto it = o_pools_map.find(name);
    if(it != o_pools_map.end()){
        delete it->second;
    }
    o_pools_map[name] = new SharedMemoryPool(type, name, cnt);
    // NOTE(review): returning by value COPIES the pool; when that copy is
    // destroyed, its destructor runs shmdt/IPC_RMID on the same segment the
    // registered pool still uses. Callers must keep only the registered
    // instance — consider changing the header to return SharedMemoryPool&.
    return *o_pools_map[name];
}

SharedMemoryPool SharedMemoryManager::createSharedMemoryPool(std::string name, uint8_t type, size_t size, long cnt){
    // Register a user pool with an explicit element size.
    // BUG FIX: delete any pool previously registered under this name;
    // the original simply overwrote the pointer and leaked it.
    auto it = o_pools_map.find(name);
    if(it != o_pools_map.end()){
        delete it->second;
    }
    o_pools_map[name] = new SharedMemoryPool(type, name, size, cnt);
    // NOTE(review): returning by value COPIES the pool; the copy's destructor
    // runs shmdt/IPC_RMID on the shared segment. Callers must keep only the
    // registered instance — consider returning SharedMemoryPool& instead.
    return *o_pools_map[name];
}

char* SharedMemoryManager::malloc(uint8_t type){
    // Allocate one slot from the first fixed pool whose type tag matches.
    for(auto& entry : n_pools_map){
        if(entry.second->getType() == type){
            return entry.second->malloc();
        }
    }
    std::cout << "malloc type error: " << (int)type << std::endl;
    assert(false);
    // BUG FIX: with NDEBUG the assert vanishes and the original fell off the
    // end of a non-void function (undefined behavior).
    return nullptr;
}

char* SharedMemoryManager::malloc(size_t size){
    // Route the request to the smallest tensor pool that can hold `size` bytes.
    if(size <= 1*1024){
        return n_pools_map["tensor1K"]->malloc();
    }else if(size <= 10*1024){
        return n_pools_map["tensor10K"]->malloc();
    }else if(size <= 100*1024){
        return n_pools_map["tensor100K"]->malloc();
    }else if(size <= 1*1024*1024){
        return n_pools_map["tensor1M"]->malloc();
    }else if(size <= 10*1024*1024){
        return n_pools_map["tensor10M"]->malloc();
    }else if(size <= 100*1024*1024){
        return n_pools_map["tensor100M"]->malloc();
    }else if(size <= 500UL*1024*1024){
        return n_pools_map["tensor500M"]->malloc();
    }else{
        // BUG FIX: the limit enforced above is 500MB, but the message said 100MB.
        std::cout << " malloc size cannot exceed 500MB: " << size/1024/1024 << " MB" << std::endl;
        assert(false);
        // BUG FIX: with NDEBUG the assert vanishes and the original fell off
        // the end of a non-void function (undefined behavior).
        return nullptr;
    }
}

void SharedMemoryManager::free(char* ptr){
    for(auto i : n_pools_map){
        if(i.second->contain(ptr)){
            i.second->free(ptr);
            break;
        }
    }
}

void SharedMemoryManager::check(){
    // 固定内存池的使用情况
    std::cout << std::endl << "========================= MEMORY CHECK ========================= " << std::endl;
    std::cout << "                   data remain: " << n_pools_map["data"]->getFreeCount() << " / " << n_pools_map["data"]->getCapacity() << std::endl;
    std::cout << "               mat 3840*2160 remain: " << n_pools_map["img2k"]->getFreeCount() << " / " << n_pools_map["img2k"]->getCapacity() << std::endl;
    std::cout << "               mat 1920*1080 remain: " << n_pools_map["img1080p"]->getFreeCount() << " / " << n_pools_map["img1080p"]->getCapacity() << std::endl;
    std::cout << "                mat 1280*720 remain: " << n_pools_map["img720p"]->getFreeCount() << " / " << n_pools_map["img720p"]->getCapacity() << std::endl;
    std::cout << "                mat 960*540 remain: " << n_pools_map["img540p"]->getFreeCount() << " / " << n_pools_map["img540p"]->getCapacity() << std::endl;
    std::cout << "                   tensor 1k remain: " << n_pools_map["tensor1K"]->getFreeCount() << " / " << n_pools_map["tensor1K"]->getCapacity() << std::endl;
    std::cout << "                  tensor 10k remain: " << n_pools_map["tensor10K"]->getFreeCount() << " / " << n_pools_map["tensor10K"]->getCapacity() << std::endl;
    std::cout << "                 tensor 100k remain: " << n_pools_map["tensor100K"]->getFreeCount() << " / " << n_pools_map["tensor100K"]->getCapacity() << std::endl;
    std::cout << "                   tensor 1m remain: " << n_pools_map["tensor1M"]->getFreeCount() << " / " << n_pools_map["tensor1M"]->getCapacity() << std::endl;
    std::cout << "                 tensor 10m remain: " << n_pools_map["tensor10M"]->getFreeCount() << " / " << n_pools_map["tensor10M"]->getCapacity() << std::endl;
    std::cout << "                  tensor 100m remain: " << n_pools_map["tensor100M"]->getFreeCount() << " / " << n_pools_map["tensor100M"]->getCapacity() << std::endl;
    std::cout << "                  tensor 500m remain: " << n_pools_map["tensor500M"]->getFreeCount() << " / " << n_pools_map["tensor500M"]->getCapacity() << std::endl;
    std::cout << "================================================================ " << std::endl;
}

uint8_t SharedMemoryManager::addrToType(void* ptr){
    // Find which pool (fixed first, then user-created) owns ptr and return
    // that pool's type tag.
    for(auto& entry : n_pools_map){
        if(entry.second->contain((char*)ptr))
            return entry.second->getType();
    }
    for(auto& entry : o_pools_map){
        if(entry.second->contain((char*)ptr))
            return entry.second->getType();
    }
    // BUG FIX: the original simply fell off the end here — undefined behavior
    // for a non-void function — whenever the address was in no pool.
    std::cout << "addrToType: address not in any pool: " << ptr << std::endl;
    assert(false);
    return 0xff;
}

std::string SharedMemoryManager::typeToString(uint8_t type){
    // Human-readable name for a pool type tag (used by the leak log).
    switch(type){
        case 0x00: return "Data";
        case 0x01: return "Mat 3840*2160";
        case 0x02: return "Mat 1920*1080";
        case 0x03: return "Mat 1280*720";
        case 0x04: return "Mat 960*540";
        case 0x05: return "Tensor 1k";
        case 0x06: return "Tensor 10k";
        case 0x07: return "Tensor 100k";
        case 0x08: return "Tensor 1M";
        case 0x09: return "Tensor 10M";
        case 0x0a: return "Tensor 100M";
        case 0x0b: return "Tensor 500M";
        default:
            std::cout << "type error: " << (int)type << std::endl;
            assert(false);
            // BUG FIX: with NDEBUG the assert vanishes and the original fell
            // off the end of a non-void function (undefined behavior).
            return "";
    }
}

void SharedMemoryManager::writeToFile(std::string move, std::string name, void* addr){
    // Append one CSV row (operation, owner, pool type, address) to the debug log.
    if(!DEBUG)
        return;
    fout << move << "," << name << "," << typeToString(addrToType(addr)) << "," << addr << std::endl;
}

/**
 * @brief Device-side GPU memory management
 *
 */
// DONE: flesh out the GPU memory management

Gpu_SharedMemoryManager::Gpu_SharedMemoryManager(){
    TOTAL_SHARED_MEMORY_SIZE = long(9*1024UL*1024UL*1024UL);            // total GPU memory
    DATA_SHARED_MEMORY_SIZE = long(2UL*1024UL*1024UL*1024UL);           // DATA region size
    DATA_STREAM_COUNT = 8;                                              // streams the DATA region is split into
    MAX_DATA_STREAM_COUNT = 256;                                        // upper bound on streams per GPU
    GPU_NUMBER = 4;                                                     // number of GPUs
    DATA_EVERY_STREAM_SIZE = DATA_SHARED_MEMORY_SIZE/DATA_STREAM_COUNT; // size of one DATA stream
    MODEL_SHARED_MEMORY_SIZE = long(1UL*1024UL*1024UL*1024UL);          // MODEL region size
    COMPUTION_SHARED_MEMORY_SIZE = long(6*1024UL*1024UL*1024UL);        // COMPUTATION region size
    // Memory map:
    /**
    |   2GB(data)   |   1GB(model)  |                               6GB(calculate)                                  |
    */
    // The three regions must exactly tile the total.
    assert(TOTAL_SHARED_MEMORY_SIZE == DATA_SHARED_MEMORY_SIZE + MODEL_SHARED_MEMORY_SIZE + COMPUTION_SHARED_MEMORY_SIZE);
    // Build blocks_map: one (offset, size) entry per data stream, plus the
    // model region under key -1 and the computation region under key -2.
    assert(DATA_SHARED_MEMORY_SIZE%DATA_STREAM_COUNT == 0); // must divide evenly
    for(int i = 0; i < DATA_STREAM_COUNT; i++){
        blocks_map[i] = std::make_pair(i*DATA_EVERY_STREAM_SIZE, DATA_EVERY_STREAM_SIZE); // data blocks
    }
    blocks_map[-1] = std::make_pair(DATA_SHARED_MEMORY_SIZE, MODEL_SHARED_MEMORY_SIZE); // model parameter region
    blocks_map[-2] = std::make_pair(DATA_SHARED_MEMORY_SIZE + MODEL_SHARED_MEMORY_SIZE, COMPUTION_SHARED_MEMORY_SIZE); // computation region
    // Occupancy block: MAX_DATA_STREAM_COUNT one-byte flags per GPU.
    static_assert(sizeof(std::atomic<bool>) == 1, "occupancy layout assumes 1-byte atomics");
    stream_occ_shm_id = shmget(IPC_PRIVATE, MAX_DATA_STREAM_COUNT*GPU_NUMBER, IPC_CREAT | 0666);
    assert(stream_occ_shm_id >= 0);
    // BUG FIX: shmat() signals failure with (char*)-1, not NULL, and must be
    // checked BEFORE use; the original asserted != nullptr, after the fact.
    stream_occ_shm_ptr = (char*)shmat(stream_occ_shm_id, NULL, 0);
    assert(stream_occ_shm_ptr != (char*)-1);
    // One occupancy-flag array per GPU.
    stream_occ_ptr = new std::atomic<bool>*[GPU_NUMBER];
    for(int i = 0; i < GPU_NUMBER; i++){
        // BUG FIX: give each GPU its own MAX_DATA_STREAM_COUNT-byte stripe of
        // the segment. The original placement-new'd every GPU's array at the
        // SAME base address, so all GPUs shared one occupancy array and
        // clobbered each other's flags.
        stream_occ_ptr[i] = new (stream_occ_shm_ptr + i*MAX_DATA_STREAM_COUNT)std::atomic<bool>[DATA_STREAM_COUNT]();
    }
}

Gpu_SharedMemoryManager::~Gpu_SharedMemoryManager(){
    // Detach the occupancy segment, drop our pointers, then remove the segment.
    shmdt(stream_occ_shm_ptr);
    stream_occ_shm_ptr = nullptr;
    delete[] stream_occ_ptr;
    stream_occ_ptr = nullptr;
    shmid_ds info;
    shmctl(stream_occ_shm_id, IPC_RMID, &info);
}

void Gpu_SharedMemoryManager::cuda_env_init(short id){
    // Restrict the embedded Python interpreter to one visible GPU, then
    // initialise torch's CUDA state on it.
    PyRun_SimpleString("import os");
    const std::string select_gpu =
        "os.environ['CUDA_VISIBLE_DEVICES'] = '" + std::to_string(id) + "'";
    PyRun_SimpleString(select_gpu.c_str());
    PyRun_SimpleString("import torch");
    PyRun_SimpleString("torch.cuda.init()");
}

void Gpu_SharedMemoryManager::allotSharedCache(){
    // Run torch.cuda.allot_shared_cache(total_size) in the embedded interpreter.
    const std::string cmd =
        "torch.cuda.allot_shared_cache(" + std::to_string(TOTAL_SHARED_MEMORY_SIZE) + ")";
    PyRun_SimpleString(cmd.c_str());
}

void Gpu_SharedMemoryManager::freeSharedCache(){
    // Run torch.cuda.free_shared_cache(1) in the embedded interpreter.
    PyRun_SimpleString("torch.cuda.free_shared_cache(1)");
}

void Gpu_SharedMemoryManager::sendSharedCache(){
    // Run torch.cuda.send_shared_cache() in the embedded interpreter.
    PyRun_SimpleString("torch.cuda.send_shared_cache()");
}

void Gpu_SharedMemoryManager::recvSharedCache(){
    // Run torch.cuda.recv_shared_cache() in the embedded interpreter.
    PyRun_SimpleString("torch.cuda.recv_shared_cache()");
}

short Gpu_SharedMemoryManager::getStreamId(size_t pos){
    // Reverse-map a region start offset to its stream id:
    // 0..DATA_STREAM_COUNT-1 for data streams, -1 for model, -2 for computation.
    for(int i = 0; i < DATA_STREAM_COUNT; i++){
        if(pos == i*DATA_EVERY_STREAM_SIZE)
            return i;
    }
    if(pos == DATA_SHARED_MEMORY_SIZE + MODEL_SHARED_MEMORY_SIZE)
        return -2;
    else if(pos == DATA_SHARED_MEMORY_SIZE)
        return -1;
    assert(false);
    // BUG FIX: with NDEBUG the assert vanishes and the original fell off the
    // end of a non-void function (undefined behavior). -3 is not a valid id.
    return -3;
}

size_t Gpu_SharedMemoryManager::getPos(short stream_id){
    // Start offset of a stream's region (-2 = computation, -1 = model,
    // 0..DATA_STREAM_COUNT-1 = data streams).
    assert(stream_id >= -2 && stream_id < DATA_STREAM_COUNT);
    switch(stream_id){
        case -2: return DATA_SHARED_MEMORY_SIZE + MODEL_SHARED_MEMORY_SIZE;
        case -1: return DATA_SHARED_MEMORY_SIZE;
        default: return stream_id*DATA_EVERY_STREAM_SIZE;
    }
}

size_t Gpu_SharedMemoryManager::getSize(short stream_id){
    // Byte size of a stream's region (-2 = computation, -1 = model,
    // 0..DATA_STREAM_COUNT-1 = data streams, all equally sized).
    assert(stream_id >= -2 && stream_id < DATA_STREAM_COUNT);
    switch(stream_id){
        case -2: return COMPUTION_SHARED_MEMORY_SIZE;
        case -1: return MODEL_SHARED_MEMORY_SIZE;
        default: return DATA_EVERY_STREAM_SIZE;
    }
}

short Gpu_SharedMemoryManager::malloc(short gpu_id){
    // Non-blocking: claim the first free data stream on this GPU via CAS.
    // Returns the stream id on success, -1 when every stream is taken.
    for(short stream = 0; stream < DATA_STREAM_COUNT; stream++){
        bool expected = false;
        if(stream_occ_ptr[gpu_id][stream].compare_exchange_strong(expected, true))
            return stream;
    }
    return -1;
}

void Gpu_SharedMemoryManager::free(short gpu_id, short stream_id){
    // Release a previously claimed data stream on the given GPU.
    assert(stream_id >= 0 && stream_id < DATA_STREAM_COUNT);
    stream_occ_ptr[gpu_id][stream_id].store(false);
}

};