/*
 * Copyright (C) 2023 Coder.AN
 * Email: an.hongjun@foxmail.com
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */
#include <assert.h>
#include "edge_tensor.h"
#include "edge_tensor_kernel.h"
using namespace tensor;


// Return the size in bytes of one element of the given dtype,
// or 0 for an unrecognized dtype.
size_t get_dtype_size(DType dtype)
{
    switch (dtype)
    {
        case uint8:
        case int8:
            return 1;
        case uint16:
        case int16:
            return 2;
        case uint32:
        case int32:
        case float32:
            return 4;
        case uint64:
        case int64:
        case float64:
            return 8;
        default:
            return 0;
    }
}

// Construct an empty tensor. All size bookkeeping is zeroed so that
// methods that read it unconditionally (e.g. print()) never see
// indeterminate values — the original left these uninitialized.
Tensor::Tensor()
{
    empty = true;
    cpu_buffer = NULL;
    gpu_buffer = NULL;
    unit_size = 0;
    unit_count = 0;
    total_size = 0;
    dim = Dim(0, 0, 0, 0);
}

// Construct a tensor with the given shape/order/dtype and upload the
// host data in `cpu_buffer` to a freshly allocated device buffer.
Tensor::Tensor(void* cpu_buffer, Dim ddim, DimOrder ddim_order, DType dtype)
{
    unit_size = get_dtype_size(dtype);
    unit_count = ddim.N * ddim.H * ddim.W * ddim.C;
    total_size = unit_count * unit_size;
    dim = ddim;
    dim_order = ddim_order;
    dType = dtype;

    // Bug fix: the member cpu_buffer (shadowed by the parameter) was left
    // uninitialized, so ~Tensor() could free() a garbage pointer.
    this->cpu_buffer = NULL;

    CHECK(cudaMalloc(&gpu_buffer, total_size));
    CHECK(cudaMemcpy(gpu_buffer, cpu_buffer, total_size, cudaMemcpyHostToDevice));

    empty = false;
}

// Construct a tensor with the given shape/order/dtype; device memory is
// allocated but left uninitialized (fill it via update_from_cpu/gpu).
Tensor::Tensor(Dim ddim, DimOrder ddim_order, DType dtype)
{
    unit_size = get_dtype_size(dtype);
    unit_count = ddim.N * ddim.H * ddim.W * ddim.C;
    total_size = unit_count * unit_size;
    dim = ddim;
    dim_order = ddim_order;
    dType = dtype;
    // Bug fix: cpu_buffer was never initialized here, so ~Tensor() could
    // free() a garbage pointer.
    cpu_buffer = NULL;
    CHECK(cudaMalloc(&gpu_buffer, total_size));
    empty = false;
}

// Release both the host staging buffer and the device buffer, if present.
Tensor::~Tensor()
{
    if (cpu_buffer != NULL)
    {
        free(cpu_buffer);
        cpu_buffer = NULL;
    }
    if (gpu_buffer != NULL)
    {
        CHECK(cudaFree(gpu_buffer));
    }
}

// init & clear
void Tensor::update_from_cpu(void* cpu_buffer)
{
    if (empty)
    {
        std::cout << "You should set the shape of Tensor at first." << std::endl;
        return;
    }
    CHECK(cudaMemcpy(gpu_buffer, cpu_buffer, this->total_size, cudaMemcpyHostToDevice));
}

void Tensor::update_from_gpu(void* new_gpu_buffer)
{
    if (empty)
    {
        std::cout << "You should set the shape of Tensor at first." << std::endl;
        return;
    }
    CHECK(cudaMemcpy(gpu_buffer, new_gpu_buffer, this->total_size, cudaMemcpyDeviceToDevice));
}

// Give an empty tensor a shape/order/dtype and allocate its device buffer.
// Refuses to run on a tensor that already owns storage.
void Tensor::set_shape(Dim ddim, DimOrder ddim_order, DType dtype)
{
    if (!empty)
    {
        std::cout << "The Tensor is not empty!" << std::endl;
        return;
    }
    dim = ddim;
    dim_order = ddim_order;
    dType = dtype;
    unit_size = get_dtype_size(dtype);
    unit_count = ddim.N * ddim.H * ddim.W * ddim.C;
    total_size = unit_count * unit_size;
    CHECK(cudaMalloc(&gpu_buffer, total_size));
    empty = false;
}

// Free host and device buffers and mark the tensor as empty, so it can be
// given a new shape via set_shape().
void Tensor::clear()
{
    if (cpu_buffer != NULL)
    {
        free(cpu_buffer);
        cpu_buffer = NULL;
    }
    if (gpu_buffer != NULL)
    {
        CHECK(cudaFree(gpu_buffer));
        gpu_buffer = NULL;
    }
    empty = true;
}

// basic algorithm
// basic algorithm
// Resize the spatial dimensions (H/W) of a uint8 tensor on the device.
// Batch and channel counts must be unchanged.
void Tensor::resize(Dim new_dim)
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    assert(new_dim.N == dim.N);
    assert(new_dim.C == dim.C);
    assert(dType == uint8);   // only uint8 resize is implemented
    unit_count = new_dim.N * new_dim.C * new_dim.W * new_dim.H;
    total_size = unit_count * unit_size;
    uint8_t* new_gpu_buffer;
    CHECK(cudaMalloc(&new_gpu_buffer, total_size));
    Resize((uint8_t*)gpu_buffer, new_gpu_buffer, dim, new_dim, dim_order, unit_count);
    // Adopt the resized buffer directly. The original code allocated a third
    // buffer and performed an extra device-to-device copy for no benefit.
    CHECK(cudaFree(gpu_buffer));
    gpu_buffer = new_gpu_buffer;
    dim.W = new_dim.W;
    dim.H = new_dim.H;
}

// Crop the tensor to the axis-aligned rectangle spanned by p1 and p2
// (inclusive of both corners). Both points must lie inside the image.
void Tensor::rect(Point p1, Point p2)
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    assert(p1.w >= 0 && p2.w >= 0);
    assert(p1.h >= 0 && p2.h >= 0);
    assert(p1.w < dim.W && p2.w < dim.W);
    assert(p1.h < dim.H && p2.h < dim.H);

    int min_w = p1.w < p2.w ? p1.w : p2.w;
    int max_w = p1.w > p2.w ? p1.w : p2.w;
    int min_h = p1.h < p2.h ? p1.h : p2.h;
    int max_h = p1.h > p2.h ? p1.h : p2.h;

    // Inclusive crop extent and top-left offset of the crop window.
    int new_w = max_w - min_w + 1;
    int new_h = max_h - min_h + 1;

    int delta_w = min_w;
    int delta_h = min_h;

    unit_count = dim.N * dim.C * new_h * new_w;
    total_size = unit_size * unit_count;
    void* new_gpu_buffer;
    CHECK(cudaMalloc(&new_gpu_buffer, total_size));
    Rect(gpu_buffer, new_gpu_buffer, delta_w, delta_h, new_w, new_h,
            dim, dim_order, unit_size, unit_count);
    // Adopt the cropped buffer directly. The original code allocated a third
    // buffer and performed an extra device-to-device copy for no benefit.
    CHECK(cudaFree(gpu_buffer));
    gpu_buffer = new_gpu_buffer;

    dim.W = new_w;
    dim.H = new_h;
}

// Reorder the tensor's memory layout to `target_order` (e.g. NHWC <-> NCHW)
// via the Transpose kernel. Total size is unchanged.
void Tensor::transpose(DimOrder target_order)
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    void* new_gpu_buffer;
    CHECK(cudaMalloc(&new_gpu_buffer, total_size));
    Transpose(gpu_buffer, new_gpu_buffer, dim_order, target_order,
                dim, unit_size, unit_count);
    // Adopt the transposed buffer directly. The original code allocated a
    // third buffer and performed an extra device-to-device copy for no benefit.
    CHECK(cudaFree(gpu_buffer));
    gpu_buffer = new_gpu_buffer;
    dim_order = target_order;
}

// Convert the element type of the tensor on the device. Currently supported
// conversions: uint8 -> float32 and int32 -> uint8. Unsupported conversions
// print a warning and leave the tensor completely untouched.
void Tensor::convertType(DType target_type)
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }

    size_t new_unit_size = get_dtype_size(target_type);
    size_t new_total_size = unit_count * new_unit_size;
    void* new_gpu_buffer = NULL;

    if (target_type == float32 && dType == uint8)
    {
        CHECK(cudaMalloc(&new_gpu_buffer, new_total_size));
        ConvertUint8ToFloat32((uint8_t*)gpu_buffer, (float*)new_gpu_buffer, unit_count);
    }
    else if (target_type == uint8 && dType == int32)
    {
        CHECK(cudaMalloc(&new_gpu_buffer, new_total_size));
        ConvertInt32ToUint8((int32_t*)gpu_buffer, (uint8_t*)new_gpu_buffer, unit_count);
    }
    else
    {
        // Bug fix: the original freed gpu_buffer and replaced it with an
        // uninitialized allocation even when no conversion kernel ran (and
        // also updated unit_size/dType), silently destroying the tensor's
        // data. Bail out without modifying any state instead.
        std::cout << "Warning! Not support!!!" << std::endl;
        return;
    }

    // Conversion succeeded: adopt the new buffer and commit the bookkeeping.
    CHECK(cudaFree(gpu_buffer));
    gpu_buffer = new_gpu_buffer;
    unit_size = new_unit_size;
    total_size = new_total_size;
    dType = target_type;
}

// Convert the tensor between BGR / RGB / GRAY color layouts on the device.
// Preconditions: data must be uint8; BGR/RGB sources require C == 3, GRAY
// requires C == 1. Identity conversions are no-ops; unsupported combinations
// print a warning and leave the tensor unchanged.
// NOTE(review): for color->gray, dim.C/unit_count/total_size are shrunk
// BEFORE the kernel launches, while for gray->color they are grown AFTER —
// presumably to match each kernel's expectations; confirm against the
// kernel implementations in edge_tensor_kernel.h.
void Tensor::convertColor(DColor src_color, DColor dst_color)
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    if (src_color == COLOR_BGR)
    {
        assert(dim.C == 3);
        assert(dType == uint8);
        uint8_t* new_gpu_buffer;
        if (dst_color == COLOR_BGR)
        {
            // Identity conversion: nothing to do.
            goto ret;
        }
        else if(dst_color == COLOR_RGB)
        {
            // Channel swap only; size is unchanged.
            CHECK(cudaMalloc(&new_gpu_buffer, total_size));
            ConvertBGRToRGB((uint8_t*)gpu_buffer, new_gpu_buffer, unit_count, dim, dim_order);
        }
        else if (dst_color == COLOR_GRAY)
        {
            // Shrink bookkeeping to 1 channel before launching the kernel.
            dim.C = 1;
            unit_count = dim.N * dim.C * dim.W * dim.H;
            total_size = unit_size * unit_count;
            CHECK(cudaMalloc(&new_gpu_buffer, total_size));
            ConvertBGRToGray((uint8_t*)gpu_buffer, new_gpu_buffer, unit_count, dim, dim_order);
        }
        else
        {
            std::cout << "Warning! Not support!!!" << std::endl;
            goto ret;
        }
        // Swap the converted buffer in (via a fresh allocation and copy).
        CHECK(cudaFree(gpu_buffer));
        gpu_buffer = NULL;
        CHECK(cudaMalloc(&gpu_buffer, total_size));
        CHECK(cudaMemcpy(gpu_buffer, new_gpu_buffer, total_size, cudaMemcpyDeviceToDevice));
        CHECK(cudaFree(new_gpu_buffer));
    }
    else if (src_color == COLOR_RGB)
    {
        assert(dim.C == 3);
        assert(dType == uint8);
        uint8_t* new_gpu_buffer;
        if (dst_color == COLOR_BGR)
        {
            // Channel swap only; size is unchanged.
            CHECK(cudaMalloc(&new_gpu_buffer, total_size));
            ConvertRGBToBGR((uint8_t*)gpu_buffer, new_gpu_buffer, unit_count, dim, dim_order);
        }
        else if(dst_color == COLOR_RGB)
        {
            // Identity conversion: nothing to do.
            goto ret;
        }
        else if (dst_color == COLOR_GRAY)
        {
            // Shrink bookkeeping to 1 channel before launching the kernel.
            dim.C = 1;
            unit_count = dim.N * dim.C * dim.W * dim.H;
            total_size = unit_size * unit_count;
            CHECK(cudaMalloc(&new_gpu_buffer, total_size));
            ConvertRGBToGray((uint8_t*)gpu_buffer, new_gpu_buffer, unit_count, dim, dim_order);
        }
        else
        {
            std::cout << "Warning! Not support!!!" << std::endl;
            goto ret;
        }
        // Swap the converted buffer in (via a fresh allocation and copy).
        CHECK(cudaFree(gpu_buffer));
        gpu_buffer = NULL;
        CHECK(cudaMalloc(&gpu_buffer, total_size));
        CHECK(cudaMemcpy(gpu_buffer, new_gpu_buffer, total_size, cudaMemcpyDeviceToDevice));
        CHECK(cudaFree(new_gpu_buffer));
    }
    else if (src_color == COLOR_GRAY)
    {
        assert(dim.C == 1);
        assert(dType == uint8);
        uint8_t* new_gpu_buffer;
        if (dst_color == COLOR_BGR)
        {
            // Triple the byte size for 3 output channels; the kernel is
            // launched with the old (1-channel) unit_count and dim.
            total_size *= 3;
            CHECK(cudaMalloc(&new_gpu_buffer, total_size));
            ConvertGrayToBGR((uint8_t*)gpu_buffer, new_gpu_buffer, unit_count, dim, dim_order);
            dim.C = 3;
            unit_count = dim.N * dim.C * dim.W * dim.H;
        }
        else if(dst_color == COLOR_RGB)
        {
            // Triple the byte size for 3 output channels; the kernel is
            // launched with the old (1-channel) unit_count and dim.
            total_size *= 3;
            CHECK(cudaMalloc(&new_gpu_buffer, total_size));
            ConvertGrayToRGB((uint8_t*)gpu_buffer, new_gpu_buffer, unit_count, dim, dim_order);
            dim.C = 3;
            unit_count = dim.N * dim.C * dim.W * dim.H;
        }
        else if (dst_color == COLOR_GRAY)
        {
            // Identity conversion: nothing to do.
            goto ret;
        }
        else
        {
            std::cout << "Warning! Not support!!!" << std::endl;
            goto ret;
        }
        // Swap the converted buffer in (via a fresh allocation and copy).
        CHECK(cudaFree(gpu_buffer));
        gpu_buffer = NULL;
        CHECK(cudaMalloc(&gpu_buffer, total_size));
        CHECK(cudaMemcpy(gpu_buffer, new_gpu_buffer, total_size, cudaMemcpyDeviceToDevice));
        CHECK(cudaFree(new_gpu_buffer));
    }
    else
    {
        std::cout << "Warning! Not support!!!" << std::endl;
    }
ret:
    return;
}

// Normalize the tensor's values in place on the device.
// Only floating-point tensors (float32 / float64) are supported.
void Tensor::normalize()
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    assert(dType == float32 || dType == float64);
    if (dType == float64)
    {
        NormalizeFloat64((double*)gpu_buffer, (double*)gpu_buffer, unit_count);
    }
    else if (dType == float32)
    {
        NormalizeFloat32((float*)gpu_buffer, (float*)gpu_buffer, unit_count);
    }
}

// Concatenate tensor `t` to the right of this tensor (along W).
// Both tensors must be non-empty with matching dtype, dim order, N, C and H.
void Tensor::hconcat(Tensor& t)
{
    assert(!empty && !t.isEmpty());     // both tensors must hold data
    assert(dType == t.get_dtype());     // matching data types
    assert(dim_order == t.get_order()); // matching dimension order

    Dim dim2 = t.get_dim();
    assert(dim.N == dim2.N);
    assert(dim.C == dim2.C);
    assert(dim.H == dim2.H);

    // Combined element count; dim still holds the OLD W for the kernel.
    unit_count = dim.N * dim.C * dim.H * dim.W + dim2.N * dim2.C * dim2.H * dim2.W;
    total_size = unit_count * unit_size;

    void* new_gpu_buffer;
    CHECK(cudaMalloc(&new_gpu_buffer, total_size));
    HConcat(gpu_buffer, t.get_val(), new_gpu_buffer, unit_count, unit_size, dim, dim2, dim_order);
    // Adopt the concatenated buffer directly. The original code allocated a
    // third buffer and performed an extra device-to-device copy for no benefit.
    CHECK(cudaFree(gpu_buffer));
    gpu_buffer = new_gpu_buffer;

    dim.W += dim2.W;
}

// Concatenate tensor `t` below this tensor (along H).
// Both tensors must be non-empty with matching dtype, dim order, N, C and W.
void Tensor::vconcat(Tensor& t)
{
    assert(!empty && !t.isEmpty());     // both tensors must hold data
    assert(dType == t.get_dtype());     // matching data types
    assert(dim_order == t.get_order()); // matching dimension order

    Dim dim2 = t.get_dim();
    assert(dim.N == dim2.N);
    assert(dim.C == dim2.C);
    assert(dim.W == dim2.W);

    // Combined element count; dim still holds the OLD H for the kernel.
    unit_count = dim.N * dim.C * dim.H * dim.W + dim2.N * dim2.C * dim2.H * dim2.W;
    total_size = unit_count * unit_size;

    void* new_gpu_buffer;
    CHECK(cudaMalloc(&new_gpu_buffer, total_size));
    VConcat(gpu_buffer, t.get_val(), new_gpu_buffer, unit_count, unit_size, dim, dim2, dim_order);
    // Adopt the concatenated buffer directly. The original code allocated a
    // third buffer and performed an extra device-to-device copy for no benefit.
    CHECK(cudaFree(gpu_buffer));
    gpu_buffer = new_gpu_buffer;

    dim.H += dim2.H;
}

// Flip the tensor horizontally (mirror along W) on the device.
void Tensor::hflip()
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    void* new_gpu_buffer;
    CHECK(cudaMalloc(&new_gpu_buffer, total_size));
    HFlip(gpu_buffer, new_gpu_buffer, dim, dim_order, unit_size, unit_count);
    // Adopt the flipped buffer directly. The original code allocated a third
    // buffer and performed an extra device-to-device copy for no benefit.
    CHECK(cudaFree(gpu_buffer));
    gpu_buffer = new_gpu_buffer;
}

// Flip the tensor vertically (mirror along H) on the device.
void Tensor::vflip()
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    void* new_gpu_buffer;
    CHECK(cudaMalloc(&new_gpu_buffer, total_size));
    VFlip(gpu_buffer, new_gpu_buffer, dim, dim_order, unit_size, unit_count);
    // Adopt the flipped buffer directly. The original code allocated a third
    // buffer and performed an extra device-to-device copy for no benefit.
    CHECK(cudaFree(gpu_buffer));
    gpu_buffer = new_gpu_buffer;
}

// draw
// draw
// Draw a grayscale line segment of half-thickness r onto a uint8 image.
// 3-channel images are delegated to the color overload.
void Tensor::draw_line(Point start, Point end, uint8_t grayVal, int r)
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    assert(dim.C == 1 || dim.C == 3);
    assert(dType == uint8);
    if (dim.C == 3)
    {
        draw_line(start, end, Color(grayVal, grayVal, grayVal), r);
        return;
    }
    // Bounding box of the segment (h = rows, w = cols), padded by r.
    int h_lo = (start.h < end.h) ? start.h : end.h;
    int h_hi = (start.h < end.h) ? end.h : start.h;
    int w_lo = (start.w < end.w) ? start.w : end.w;
    int w_hi = (start.w < end.w) ? end.w : start.w;
    h_lo -= r;
    h_hi += r;
    w_lo -= r;
    w_hi += r;
    // Coefficients of the implicit line equation through start and end.
    int A = start.w - end.w;
    int B = end.h - start.h;
    int C = start.h * end.w - end.h * start.w;
    DrawGrayLine((uint8_t*)gpu_buffer, unit_count, dim, dim_order,
                    grayVal, h_lo, h_hi, w_lo, w_hi, A, B, C, r);
}

// Draw a colored line segment of half-thickness r onto a 3-channel
// uint8 image.
void Tensor::draw_line(Point start, Point end, Color colorVal, int r)
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    assert(dim.C == 3);
    assert(dType == uint8);
    // Bounding box of the segment (h = rows, w = cols), padded by r.
    int h_lo = (start.h < end.h) ? start.h : end.h;
    int h_hi = (start.h < end.h) ? end.h : start.h;
    int w_lo = (start.w < end.w) ? start.w : end.w;
    int w_hi = (start.w < end.w) ? end.w : start.w;
    h_lo -= r;
    h_hi += r;
    w_lo -= r;
    w_hi += r;
    // Coefficients of the implicit line equation through start and end.
    int A = start.w - end.w;
    int B = end.h - start.h;
    int C = start.h * end.w - end.h * start.w;
    DrawColorLine((uint8_t*)gpu_buffer, unit_count, dim, dim_order,
                    colorVal, h_lo, h_hi, w_lo, w_hi, A, B, C, r);
}

// Draw a filled grayscale circle of radius r onto a uint8 image.
// 3-channel images are delegated to the color overload.
void Tensor::draw_circle(Point center, uint8_t grayVal, int r)
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    assert(dim.C == 1 || dim.C == 3);
    assert(dType == uint8);
    if (dim.C == 3)
    {
        Color c(grayVal, grayVal, grayVal);
        draw_circle(center, c, r);
        return;
    }
    // Kernel receives the squared radius — presumably it compares squared
    // distances to avoid a sqrt per pixel.
    int r_sq = r * r;
    DrawGrayCircle((uint8_t*)gpu_buffer, unit_count, dim, dim_order,
                        center, grayVal, r_sq);
}

// Draw a filled colored circle of radius r onto a 3-channel uint8 image.
void Tensor::draw_circle(Point center, Color colorVal, int r)
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return;
    }
    assert(dim.C == 3);
    assert(dType == uint8);
    // Kernel receives the squared radius — presumably it compares squared
    // distances to avoid a sqrt per pixel.
    int r_sq = r * r;
    DrawColorCircle((uint8_t*)gpu_buffer, unit_count, dim, dim_order,
                        center, colorVal, r_sq);
}

// export
// export
// Download the device buffer into a freshly allocated host buffer and
// return it. The buffer is owned by the Tensor (freed on destruction,
// clear(), or the next cpu() call). Returns NULL on failure.
void* Tensor::cpu()
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return NULL;
    }
    // Drop any previous host snapshot before allocating a new one.
    if (cpu_buffer)
    {
        free(cpu_buffer);
        cpu_buffer = NULL;
    }
    cpu_buffer = (uint8_t*)malloc(total_size);
    // Bug fix: the original never checked malloc; on failure cudaMemcpy
    // would have copied into a NULL host pointer.
    if (cpu_buffer == NULL)
    {
        std::cout << "Failed to allocate host buffer!" << std::endl;
        return NULL;
    }
    CHECK(cudaMemcpy(cpu_buffer, gpu_buffer, total_size, cudaMemcpyDeviceToHost));
    return (void*)cpu_buffer;
}

// Dump shape, dim order, dtype and size bookkeeping to stdout for debugging.
// NOTE(review): the %ld specifiers assume the Dim fields are long-sized —
// confirm against the Dim declaration in edge_tensor.h.
void Tensor::print()
{
    printf("Dim(%ld,%ld,%ld,%ld)-%d dtype:%d unit_size:%lu unit_count:%lu total_size:%lu empty:%d\n",
            dim.N, dim.C, dim.W, dim.H, dim_order, dType, unit_size, unit_count, total_size, empty);
}

// Deep-copy this tensor into `t`: clear it, give it our shape/order/dtype,
// then clone the device data with a device-to-device copy.
void Tensor::copyTo(Tensor* t)
{
    if (!empty)
    {
        t->clear();
        t->set_shape(dim, dim_order, dType);
        t->update_from_gpu(gpu_buffer);
        return;
    }
    std::cout << "The Tensor is empty!" << std::endl;
}

// tools for segmentation
// tools for segmentation
// Remap the values of a uint8 tensor in place via MapSegRes — presumably
// `map_list` acts as a lookup table over segmentation class ids.
void Tensor::map_seg_res(std::vector<uint8_t> map_list)
{
    if (!empty)
    {
        assert(dType == uint8);
        MapSegRes((uint8_t*)gpu_buffer, (uint8_t*)gpu_buffer, unit_count, map_list);
        return;
    }
    std::cout << "The Tensor is empty!" << std::endl;
}

// get tensor info
void* Tensor::get_val()
{
    if (empty)
    {
        std::cout << "The Tensor is empty!" << std::endl;
        return NULL;
    }
    return this->gpu_buffer;
}

// Return the tensor's shape; an all-zero Dim signals an empty tensor.
Dim Tensor::get_dim()
{
    if (!empty)
    {
        return dim;
    }
    std::cout << "The Tensor is empty!" << std::endl;
    return Dim(0, 0, 0, 0);
}

// Return the element type; err_type signals an empty tensor.
DType Tensor::get_dtype()
{
    if (!empty)
    {
        return dType;
    }
    std::cout << "The Tensor is empty!" << std::endl;
    return err_type;
}

// Return the dimension ordering; err_order signals an empty tensor.
DimOrder Tensor::get_order()
{
    if (!empty)
    {
        return dim_order;
    }
    std::cout << "The Tensor is empty!" << std::endl;
    return err_order;
}

// True when the tensor owns no storage (shape never set, or clear()ed).
bool Tensor::isEmpty()
{
    return empty;
}
