#pragma once

#include <string>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "core.h"

namespace uzu
{
    
    // Owning container for an interleaved (HWC) image that can live on the
    // host, the device, or both. `device_type` records which copy is current.
    template<typename T>
    class Image
    {
    public:
        uint32_t width;          // width in pixels
        uint32_t height;         // height in pixels
        uint32_t channels;       // interleaved channels per pixel
        T* data = nullptr;       // owning host buffer (new[]), may be null
        T* dataGpu = nullptr;    // owning device buffer (cudaMalloc), may be null
        DeviceType device_type;  // where the authoritative copy currently lives

    public:
        // NOTE(review): this class owns raw pointers but the implicit copy
        // ctor/assignment are not deleted — copying an Image double-frees both
        // buffers. Deleting them here would break existing callers, so callers
        // must avoid copies until this is made non-copyable.

        Image() : width(0), height(0), channels(0), data(nullptr), dataGpu(nullptr), device_type(DeviceType::CPU) {}

        // Deep-copies `d` (w*h*c interleaved elements) into a fresh host buffer.
        Image(T* d, uint32_t w, uint32_t h, uint32_t c)
            : width(0), height(0), channels(0),  // was uninitialized: Alloc() read garbage dims (UB)
              data(nullptr), dataGpu(nullptr), device_type(DeviceType::CPU)
        {
            // allocate host memory and copy the caller's pixels in
            Alloc(w, h, c);
            size_t count = size_t(w) * h * c;
            memcpy(data, d, count * sizeof(T));
        }

        ~Image()
        {
            delete[] data;  // was `delete` — UB for new[] allocations; delete[] nullptr is safe
            if (dataGpu != nullptr)
                cudaFree(dataGpu);
        }

        uint32_t Width() { return width; }
        uint32_t Height() { return height; }
        // Pixel count (intentionally excludes channels).
        uint32_t Size() { return width * height; }

        // Pointer to the first element of `row` in the host buffer.
        T* RowPtr(int row) { return data + size_t(row) * width * channels; }
        // Pointer to the pixel at (row, col) in the host buffer.
        T* PixelPtr(int row, int col) { return data + (size_t(row) * width + size_t(col)) * channels; }

        T* DataGpu() { return dataGpu; }
        T* Data() { return data; }

        // Allocates (or reuses) storage on the requested device and records it
        // as the active device. Existing contents are not preserved.
        void Create(uint32_t w, uint32_t h, uint32_t c, DeviceType device_type)
        {
            if (device_type == DeviceType::CPU)
                Alloc(w, h, c);
            else
                AllocGpu(w, h, c);
            this->device_type = device_type;
        }

        bool IsValid() const
        {
            return (width > 0) && (height > 0) && (channels > 0);
        }

        // Uploads the host buffer to the GPU, allocating the device buffer on
        // first use. No-op if the image is already resident on the device.
        void ToDevice()
        {
            if (device_type == DeviceType::CUDA)
                return;
            assert(IsValid());
            assert(data != nullptr);
            // size_t: the old `int` byte count could overflow for large images
            size_t data_size = size_t(width) * height * channels * sizeof(T);
            if (dataGpu == nullptr)
                cudaMalloc((void**)&dataGpu, data_size);
            cudaMemcpy(dataGpu, data, data_size, cudaMemcpyHostToDevice);
            device_type = DeviceType::CUDA;
        }

        // Downloads the device buffer to the host, allocating the host buffer
        // on first use. No-op if the image is already resident on the host.
        void ToHost()
        {
            if (device_type == DeviceType::CPU)
                return;
            assert(IsValid());
            assert(dataGpu != nullptr);
            if (data == nullptr)
                data = new T[size_t(width) * height * channels];
            size_t data_size = size_t(width) * height * channels * sizeof(T);
            cudaMemcpy(data, dataGpu, data_size, cudaMemcpyDeviceToHost);
            device_type = DeviceType::CPU;
        }

    private:
        // (Re)allocates the device buffer for w*h*c elements, reusing the old
        // allocation when the total element count is unchanged.
        void AllocGpu(uint32_t w, uint32_t h, uint32_t c)
        {
            size_t count = size_t(w) * h * c;
            if (size_t(width) * height * channels == count)
            {
                // Same footprint: keep the allocation, but still record the new
                // dimensions (was missing here — dims went stale on reshape,
                // inconsistent with Alloc()).
                width = w;
                height = h;
                channels = c;
                if (dataGpu == nullptr)
                    cudaMalloc((void**)&dataGpu, count * sizeof(T));
                return;
            }
            // size mismatch: release and reallocate
            if (dataGpu != nullptr) cudaFree(dataGpu);
            width = w;
            height = h;
            channels = c;
            cudaMalloc((void**)&dataGpu, count * sizeof(T));
        }

        // (Re)allocates the host buffer for w*h*c elements, reusing the old
        // allocation when the total element count is unchanged.
        void Alloc(uint32_t w, uint32_t h, uint32_t c)
        {
            size_t count = size_t(w) * h * c;
            if (size_t(width) * height * channels == count)
            {
                width = w;
                height = h;
                channels = c;
                if (data == nullptr)
                    data = new T[count];
                return;
            }
            // size mismatch: release and reallocate
            delete[] data;  // was `delete` — UB for new[] allocations
            width = w;
            height = h;
            channels = c;
            data = new T[count];
        }
    };

    // 8-bit-per-channel image, the common case for file I/O.
    typedef Image<uint8_t> ImageU8;

    // Loads the file at `filepath` into `image` with `desired_channels`
    // channels; defined in the corresponding implementation file.
    void ReadImage(const std::string& filepath, ImageU8& image, uint32_t desired_channels=3);
    // Writes `image` to `filepath` (format presumably chosen from the file
    // extension — confirm against the implementation); defined elsewhere.
    void WriteImage(const std::string& filepath, ImageU8& image);
}
