/*
 * Copyright (C) 2025. Huawei Technologies Co., Ltd. All rights reserved.
 */
#include <cstring>
#include <iomanip>
#include "base.h"
#include "ascend.h"

/*
 * Initialize every descriptor field of the tensor.
 *
 * A rank-0 (empty) shape denotes an element-less tensor, so numel becomes 0;
 * otherwise numel is the product of all dimensions. No memory is allocated
 * here: `ptr` is stored as-is and is owned by the caller.
 */
void XTensor::Init(std::vector<long> shape, enum XDtype dtype, void *ptr, enum XTensorType type)
{
    size_t count = shape.empty() ? 0 : 1;
    for (long dim : shape) {
        count *= dim;
    }

    this->numel = count;
    this->shape = shape;
    this->dtype = dtype;
    this->ptr = ptr;
    this->type = type;
}

// Construct a static (non-pool-managed) tensor; delegates to Init().
XTensor::XTensor(std::vector<long> shape, enum XDtype dtype, void *ptr)
{
    Init(shape, dtype, ptr, XTENSOR_STATIC);
}

// Three-argument convenience overload: (re)initialize as a static tensor.
void XTensor::Init(std::vector<long> shape, enum XDtype dtype, void *ptr)
{
    Init(shape, dtype, ptr, XTENSOR_STATIC);
}

/*
 * Print the single element at index `off` of host buffer `p`, formatted
 * according to `dtype`.
 *
 * Formatting conventions:
 *  - integer/float types print a leading space for non-negative values so
 *    columns line up with negative values' minus sign;
 *  - FP16/BF16/FP32 use fixed notation, CPLXF uses scientific notation;
 *  - the fixed/scientific flag is cleared afterwards, but the precision(4)
 *    setting intentionally is not restored (every branch re-sets it anyway).
 * Unknown dtypes print nothing.
 */
void XTensor::PrintMemoryVal(void *p, uint64_t off, XDtype dtype)
{
    switch (dtype) {
        case BIT1: {
            // Bits are packed into 64-bit words: word off/64, bit off%64.
            uint64_t *raw = (uint64_t *)p + off / 64;
            uint32_t val = ((*raw) & (1ull << (off % 64))) ? 1 : 0;
            std::cout << val;
            break;
        } case INT8: {
            // Widen to int32 so the stream prints a number, not a character.
            int32_t val = ((int8_t *)p)[off];
            if (val >= 0) {
                std::cout << " ";
            }
            std::cout << val;
            break;
        } case INT32: {
            int32_t val = ((int32_t *)p)[off];
            if (val >= 0) {
                std::cout << " ";
            }
            std::cout << val;
            break;
        } case INT64: {
            int64_t val = ((int64_t *)p)[off];
            if (val >= 0) {
                std::cout << " ";
            }
            std::cout << val;
            break;
        } case FP16: {
            // __fp16 is a compiler-specific (ARM) half-precision type; it is
            // promoted to float by operator<<.
            __fp16 val = ((__fp16 *)p)[off];
            if (val >= 0) {
                std::cout << " ";
            }
            std::cout << std::fixed << std::setprecision(4);
            std::cout << val;
            std::cout.unsetf(std::ios::fixed);
            break;
        } case BF16: {
            // bfloat16 is the upper 16 bits of an IEEE float32: shift into
            // the high half and bit-copy into a float via memcpy.
            uint16_t data = ((uint16_t *)p)[off];
            uint32_t float32Data = (static_cast<uint32_t>(data) << 16);
            float val;
            std::memcpy(&val, &float32Data, sizeof(float));
            if (val >= 0) {
                std::cout << " ";
            }
            std::cout << std::fixed << std::setprecision(4);
            std::cout << val;
            std::cout.unsetf(std::ios::fixed);
            break;
        } case FP32: {
            float val = ((float *)p)[off];
            if (val >= 0) {
                std::cout << " ";
            }
            std::cout << std::fixed << std::setprecision(4);
            std::cout << val;
            std::cout.unsetf(std::ios::fixed);
            break;
        } case CPLXF: {
            // Printed as "a + bj" / "a - bj"; leading space only when both
            // parts are non-negative.
            std::complex<float> val = ((std::complex<float> *)p)[off];
            if (val.real() >= 0 && val.imag() >= 0) {
                std::cout << " ";
            }
            std::cout << std::scientific << std::setprecision(4);
            std::cout << val.real();
            if (val.imag() >= 0) {
                std::cout << " + ";
            } else {
                std::cout << " - ";
            }
            std::cout << std::abs(val.imag()) << "j";
            std::cout.unsetf(std::ios::scientific);
            break;
        } default:
            break;
    }
}

/*
 * Copy the tensor from device to host and pretty-print it, folding all
 * leading dimensions into rows and using the last dimension as columns.
 *
 * nRow/nCol bound how much is shown: up to half of each budget is printed
 * from the start and half from the end, with "..." marking the elided
 * middle. Prints nothing if the tensor is empty or the device copy fails.
 */
void XTensor::Print(uint32_t nRow, uint32_t nCol)
{
    uint32_t i, j;
    uint32_t hRow = DIV_ROUND_UP(nRow, 2);  // rows shown at head and at tail
    uint32_t hCol = DIV_ROUND_UP(nCol, 2);  // cols shown at head and at tail
    // Round bits up to whole bytes: for sub-byte dtypes (e.g. BIT1) with a
    // numel that is not a multiple of 8, "bits / 8" would silently drop the
    // final partial byte from the device-to-host copy.
    size_t size = DIV_ROUND_UP(numel * XDtypeBit(dtype), 8);
    aclError err;

    if (size == 0) {
        return;
    }

    // RAII host staging buffer (no manual malloc/free on each exit path).
    std::vector<uint8_t> host(size);
    void *p = host.data();

    err = aclrtMemcpy(p, size, ptr, size, ACL_MEMCPY_DEVICE_TO_HOST);
    if (err != ACL_ERROR_NONE) {
        return;
    }

    std::cout << "XTensor(";
    for (uint32_t i = 0; i < shape.size(); i++) {
        std::cout << "[";
    }

    size_t col = shape[shape.size() - 1];
    size_t row = numel / col;
    // Head rows (first hRow of them, or all if the tensor is short).
    for (j = 0; j < row && j < hRow; j++) {
        for (i = 0; i < col && i < hCol; i++) {
            PrintMemoryVal(p, j * col + i, dtype);
            if (i != col - 1) {
                std::cout << ", ";
            }
        }

        // Elide the middle columns of this row, then print the tail columns.
        if (col > hCol && i < col - hCol) {
            std::cout << " ..., ";
            i = col - hCol;
        }

        for (; i < col; i++) {
            PrintMemoryVal(p, j * col + i, dtype);
            if (i != col - 1) {
                std::cout << ", ";
            }
        }
        if (j != row - 1) {
            std::cout << "]," << std::endl << "        ";
            for (uint32_t i = 0; i < shape.size() - 1; i++) {
                std::cout << " ";
            }
            // Open the next row's bracket unless the row-ellipsis line will.
            if (j != hRow - 1 || j >= row - hRow - 1) {
                std::cout << "[";
            }
        }
    }

    // Elide the middle rows, then continue with the tail rows.
    if (row > hRow && j < row - hRow) {
        std::cout << "...," << std::endl << "        ";
        for (uint32_t i = 0; i < shape.size() - 1; i++) {
            std::cout << " ";
        }
        std::cout << "[";
        j = row - hRow;
    }

    for (; j < row; j++) {
        for (i = 0; i < col && i < hCol; i++) {
            PrintMemoryVal(p, j * col + i, dtype);
            if (i != col - 1) {
                std::cout << ", ";
            }
        }

        if (col > hCol && i < col - hCol) {
            std::cout << " ..., ";
            i = col - hCol;
        }

        for (; i < col; i++) {
            PrintMemoryVal(p, j * col + i, dtype);
            if (i != col - 1) {
                std::cout << ", ";
            }
        }
        if (j != row - 1) {
            std::cout << "]," << std::endl << "        ";
            for (uint32_t i = 0; i < shape.size() - 1; i++) {
                std::cout << " ";
            }
            std::cout << "[";
        }
    }

    // Close all brackets, then append shape and dtype metadata.
    for (uint32_t i = 0; i < shape.size(); i++) {
        std::cout << "]";
    }
    std::cout << ", shape=(";
    for (uint32_t i = 0; i < shape.size(); i++) {
        std::cout << shape[i];
        if (i != shape.size() - 1) {
            std::cout << ", ";
        }
    }
    std::cout << "), dtype=" << XDtypeStr(dtype) << ")" << std::endl;
}

/*
 * Stream a compact one-line summary of a tensor:
 * "[(d0, d1, ...) <dtype> (<tensor type>)]". Does not print element data.
 */
std::ostream& operator<<(std::ostream& os, const XTensor& p)
{
    os << "[(";
    const size_t rank = p.shape.size();
    for (size_t idx = 0; idx < rank; idx++) {
        if (idx != 0) {
            os << ", ";
        }
        os << p.shape[idx];
    }
    os << ") " << XDtypeStr(p.dtype) << " (" << XTensorTypeStr(p.type) << ")]";
    return os;
}

int XTensorPool::Init(void)
{
    CHECK_ACL_RET(aclrtMalloc(&_ptr, _size, ACL_MEM_MALLOC_HUGE_FIRST), -ENOMEM);
    for (int i = 0; i < XLITE_MAX_NUM_DYNAMIC_TENSOR; i++) {
        _free.push_back(this->_t[i]);
    }
    return 0;
}

// Release the pool's device arena. Any dynamic tensor still pointing into it
// becomes invalid, so callers should PutTensor() everything first.
XTensorPool::~XTensorPool(void)
{
    CHECK_ACL(aclrtFree(_ptr));
}

/*
 * Carve a dynamic tensor of the given shape/dtype out of the pool arena.
 *
 * _used is kept ordered by device address; allocation is first-fit: scan the
 * gap in front of each used tensor and take the first one big enough,
 * otherwise fall back to the free tail of the arena.
 *
 * Throws std::invalid_argument on an empty shape, std::runtime_error when no
 * descriptor slot is free or no large-enough gap exists.
 */
XTensor& XTensorPool::GetTensor(std::vector<long> shape, enum XDtype dtype)
{
    size_t numel = 1, size;
    void *ptr = _ptr;

    if (shape.size() == 0) {
        std::cerr << __FILE__ << ":" << __LINE__ << ": size is 0" << std::endl;
        throw std::invalid_argument("get tensor shape size is 0");
    }

    if (_free.empty()) {
        std::cerr << __FILE__ << ":" << __LINE__ << ": dynamic tensor too many, please put after use" << std::endl;
        throw std::runtime_error("dynamic tensor too many, please put after use");
    }
    XTensor &t = _free.front();

    for (uint64_t i = 0; i < shape.size(); i++) {
        numel *= shape[i];
    }
    // Round bits up to whole bytes BEFORE aligning: the previous
    // "bits / 8 then ROUND_UP" truncated the final partial byte of sub-byte
    // dtypes (e.g. BIT1), which could let the next tensor overlap this one.
    size = ROUND_UP(DIV_ROUND_UP(numel * XDtypeBit(dtype), 8), XLITE_TENSOR_ALIGN);

    // First-fit over the gaps between consecutive used tensors.
    for (auto it = _used.begin(); it != _used.end(); it++) {
        XTensor &use = it->get();
        size_t gap = reinterpret_cast<uintptr_t>(use.ptr) - reinterpret_cast<uintptr_t>(ptr);
        if (gap >= size) {
            t.Init(shape, dtype, ptr, XTENSOR_DYNAMIC);
            _free.pop_front();
            _used.insert(it, t);  // keep _used sorted by device address
            return t;
        }
        // Advance past this tensor's (aligned, byte-rounded) footprint.
        ptr = reinterpret_cast<void *>(
            reinterpret_cast<uintptr_t>(use.ptr) +
            ROUND_UP(DIV_ROUND_UP(use.numel * XDtypeBit(use.dtype), 8), XLITE_TENSOR_ALIGN));
    }
    // No interior gap fits: try the free tail of the arena.
    if (reinterpret_cast<uintptr_t>(_ptr) + _size - reinterpret_cast<uintptr_t>(ptr) >= size) {
        t.Init(shape, dtype, ptr, XTENSOR_DYNAMIC);
        _free.pop_front();
        _used.push_back(t);
        return t;
    }

    std::cerr << __FILE__ << ":" << __LINE__ << ": get " << size << " B failed, no free tensor";
    std::cerr << ", shape=(";
    for (uint32_t i = 0; i < shape.size(); i++) {
        std::cerr << shape[i];
        if (i != shape.size() - 1) {
            std::cerr << ", ";
        }
    }
    std::cerr << "), dtype=" << XDtypeStr(dtype) << std::endl;
    throw std::runtime_error("no free tensor");
}

/*
 * Return a dynamic tensor to the pool: remove its descriptor from _used and
 * make it available again. Static tensors and tensors not found in _used are
 * ignored.
 */
void XTensorPool::PutTensor(XTensor &t)
{
    if (t.type != XTENSOR_DYNAMIC) {
        return;  // static tensors are not pool-managed
    }

    // Match by identity (address), not by value.
    auto it = _used.begin();
    while (it != _used.end()) {
        if (&it->get() == &t) {
            _used.erase(it);
            _free.push_back(t);
            return;
        }
        ++it;
    }
}
