#![allow(dead_code, unused)]

use std::{
    ffi::*, ptr::{null, null_mut, copy, copy_nonoverlapping}, sync::{Arc, Mutex}
};
use ash::vk::{self, Handle};
use getset::{CloneGetters, CopyGetters, Getters, MutGetters, Setters, WithSetters};
use crate::engine::njm_device::NjmDevice;

pub const DEFAULT_MIN_OFFSET_ALIGN: vk::DeviceSize = 1;
pub const DEFAULT_MAP_SIZE: vk::DeviceSize = vk::WHOLE_SIZE;
pub const DEFAULT_MAP_OFFSET: vk::DeviceSize = 0;

/// Owning wrapper around a [`vk::Buffer`] and its backing [`vk::DeviceMemory`].
///
/// The allocation is laid out as `ins_cnt` instances of `ins_size` bytes, each
/// placed at a stride of `align_size` (the instance size rounded up to the
/// requested min-offset alignment). Resources are released in `Drop`.
#[derive(Getters, Setters, WithSetters, MutGetters, CopyGetters, CloneGetters)]
pub struct NjmBuffer {
    // Shared handle to the logical device; locked for create/map/flush/destroy calls.
    dev: Arc<Mutex<NjmDevice>>,

    /// Raw Vulkan buffer handle.
    #[getset(get = "pub")]
    buf: vk::Buffer,

    /// Device memory bound to `buf`.
    #[getset(get = "pub")]
    mem: vk::DeviceMemory,

    /// Host pointer to the mapped range; null while unmapped.
    #[getset(get = "pub")]
    mapped: *mut c_void,

    /// Total allocation size in bytes (`align_size * ins_cnt`).
    #[getset(get = "pub")]
    buf_size: vk::DeviceSize,

    /// Number of instances stored in the buffer.
    #[getset(get = "pub")]
    ins_cnt: u32,

    /// Unaligned size of one instance, in bytes.
    #[getset(get = "pub")]
    ins_size: vk::DeviceSize,

    /// Usage flags the buffer was created with.
    #[getset(get = "pub")]
    buf_usage_flags: vk::BufferUsageFlags,

    /// Memory property flags the allocation was created with.
    #[getset(get = "pub")]
    mem_property_flags: vk::MemoryPropertyFlags,

    /// Per-instance stride: `ins_size` rounded up to the min-offset alignment.
    #[getset(get = "pub")]
    align_size: vk::DeviceSize,
}

impl NjmBuffer {

    pub fn get_align(ins_sz: vk::DeviceSize, min_offset_align: vk::DeviceSize) -> vk::DeviceSize {
        if min_offset_align > 0 {
            return (ins_sz + min_offset_align - 1) & !(min_offset_align - 1);
        }
        return ins_sz;
    }

    pub fn new( 
        device: Arc<Mutex<NjmDevice>>,
        ins_sz: vk::DeviceSize,
        ins_cnt: u32,
        usage_flags: vk::BufferUsageFlags,
        mem_prop_flags: vk::MemoryPropertyFlags,
        min_offset_align: vk::DeviceSize,
    ) -> Self {
        let align_sz = Self::get_align(ins_sz, min_offset_align);
        let buf_sz = align_sz * ins_cnt as vk::DeviceSize;
        let njm_dev = device.lock().unwrap();
        let (buf, mem) = njm_dev.create_buffer(buf_sz, usage_flags, mem_prop_flags).unwrap(); 
        return Self {
            dev: device.clone(),
            buf: buf,
            mem: mem,
            mapped: null_mut(),
            buf_size: buf_sz,
            ins_cnt: ins_cnt,
            ins_size: ins_sz,
            buf_usage_flags: usage_flags,
            mem_property_flags: mem_prop_flags,
            align_size: align_sz,
        };
    }
    
    pub fn map(&mut self, size: vk::DeviceSize, offset: vk::DeviceSize) {
        assert!(!self.buf.is_null() && !self.mem.is_null(), "call buffer.map before create!");
        let njm_dev = self.dev.lock().unwrap();
        self.mapped = unsafe { njm_dev.device().map_memory(self.mem, offset, size, vk::MemoryMapFlags::empty()).unwrap() };
    }
    pub fn unmap(&mut self) {
        if !self.mapped.is_null() {
            let njm_dev = self.dev.lock().unwrap();
            unsafe { njm_dev.device().unmap_memory(self.mem) };
            self.mapped = null_mut();
        }
    }

    pub fn write_to_buffer(&self, data: *const c_void, size: vk::DeviceSize, offset: vk::DeviceSize) {
        assert!(!self.mapped.is_null(), "can't copy data to unmapped buffer");
        if size == vk::WHOLE_SIZE {
            unsafe { copy_nonoverlapping(data, self.mapped, self.buf_size as usize) };
        } else {
            let start = self.mapped;
            unsafe { start.byte_add(offset as usize) };
            unsafe { copy_nonoverlapping(data, start, size as usize) };
        }
    }

    pub fn flush(&self, size: vk::DeviceSize, offset: vk::DeviceSize) {
        let mapped_range = vk::MappedMemoryRange::default()
            .memory(self.mem)
            .offset(offset)
            .size(size);
        let ranges = [mapped_range];
        let njm_dev = self.dev.lock().unwrap();
        let r = unsafe { njm_dev.device().flush_mapped_memory_ranges(&ranges).unwrap() };
    }

    pub fn invalidate(&self, size: vk::DeviceSize, offset: vk::DeviceSize) {
        let mapped_range = vk::MappedMemoryRange::default()
            .memory(self.mem)
            .offset(offset)
            .size(size);
        let ranges = [mapped_range];
        let njm_dev = self.dev.lock().unwrap();
        let r = unsafe { njm_dev.device().invalidate_mapped_memory_ranges(&ranges).unwrap() };
    }

    pub fn descriptor_info(&self, size: vk::DeviceSize, offset: vk::DeviceSize) -> vk::DescriptorBufferInfo {
        return vk::DescriptorBufferInfo::default()
            .buffer(self.buf)
            .offset(offset)
            .range(size);
    }

    // index based fn
    pub fn write_to_index(&self, data: *const c_void, index: usize) {
        self.write_to_buffer(data, self.ins_size, self.align_size * index as vk::DeviceSize);
    }

    pub fn flush_index(&self, index: usize) {
        self.flush(self.ins_size, self.align_size * index as vk::DeviceSize);
    }

    pub fn descriptor_info_for_index(&self, index: usize) -> vk::DescriptorBufferInfo {
        return self.descriptor_info(self.ins_size, self.align_size * index as vk::DeviceSize);
    }

    pub fn invalidate_index(&self, index: usize) {
        return self.invalidate(self.ins_size, self.align_size * index as vk::DeviceSize);
    }
  
}

impl Drop for NjmBuffer {
    /// Unmaps (if mapped) and releases the Vulkan buffer and its memory.
    fn drop(&mut self) {
        self.unmap();
        let guard = self.dev.lock().unwrap();
        let device = guard.device();
        // SAFETY: this struct owns `buf` and `mem`; after drop no further
        // use of either handle is possible through this wrapper.
        unsafe {
            device.destroy_buffer(self.buf, None);
            device.free_memory(self.mem, None);
        }
    }
}