// Copyright (c) 2025 Shenzhen Kaihong Digital Industry Development Co., Ltd.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use arrow::datatypes::ToByteSlice;
use bytes::{BufMut, Bytes, BytesMut};
use rerun::{components::TensorData, external::{arrow2, log::error}, TensorBuffer};
use std::convert::TryFrom;

pub mod trajectory_utils;
pub mod dora_data;

/// Wire tag identifying the element type of a serialized tensor buffer.
///
/// The numeric discriminants are part of the serialization format: they are
/// written as a single `u8` by `tensor_data_to_bytes_with_shape` and parsed
/// back by `TryFrom<u8>`. They are spelled out explicitly so that inserting
/// or reordering a variant cannot silently renumber the wire format.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum TensorBufferType {
    U8 = 0,
    U16 = 1,
    U32 = 2,
    U64 = 3,
    I8 = 4,
    I16 = 5,
    I32 = 6,
    I64 = 7,
    F16 = 8,
    F32 = 9,
    F64 = 10,
    /// JPEG-compressed image payload.
    Jpeg = 11,
    /// NV12 planar YUV image payload.
    Nv12 = 12,
    /// YUY2 packed YUV image payload.
    Yuy2 = 13,
}

/// Error produced when a `TensorBuffer` cannot be mapped to a
/// [`TensorBufferType`] tag.
///
/// Implements `Display` and `std::error::Error` so it composes with `?`
/// and `Box<dyn Error>` like a conventional Rust error type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TensorBufferTypeError {
    /// The buffer variant has no corresponding wire tag.
    UnsupportedType,
}

impl std::fmt::Display for TensorBufferTypeError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::UnsupportedType => write!(f, "unsupported tensor buffer type"),
        }
    }
}

impl std::error::Error for TensorBufferTypeError {}

impl TryFrom<&TensorBuffer> for TensorBufferType {
    type Error = TensorBufferTypeError;

    /// Maps a `TensorBuffer` variant to its wire tag.
    ///
    /// The match is exhaustive over the current `TensorBuffer` variants, so
    /// this never returns `Err` today; the `Result` signature is kept so
    /// callers remain source-compatible if unsupported variants appear.
    fn try_from(buffer: &TensorBuffer) -> Result<Self, Self::Error> {
        let tag = match buffer {
            TensorBuffer::U8(_) => Self::U8,
            TensorBuffer::U16(_) => Self::U16,
            TensorBuffer::U32(_) => Self::U32,
            TensorBuffer::U64(_) => Self::U64,
            TensorBuffer::I8(_) => Self::I8,
            TensorBuffer::I16(_) => Self::I16,
            TensorBuffer::I32(_) => Self::I32,
            TensorBuffer::I64(_) => Self::I64,
            TensorBuffer::F16(_) => Self::F16,
            TensorBuffer::F32(_) => Self::F32,
            TensorBuffer::F64(_) => Self::F64,
            TensorBuffer::Jpeg(_) => Self::Jpeg,
            TensorBuffer::Nv12(_) => Self::Nv12,
            TensorBuffer::Yuy2(_) => Self::Yuy2,
        };
        Ok(tag)
    }
}


impl TryFrom<u8> for TensorBufferType {
    type Error = &'static str;

    /// Parses a wire tag byte back into a [`TensorBufferType`].
    ///
    /// Returns `Err` for any byte outside the known tag range `0..=13`.
    // NOTE(review): this impl uses `&'static str` as its error while the
    // `TryFrom<&TensorBuffer>` impl uses `TensorBufferTypeError`; unifying
    // them would break callers matching on the string, so it is left as-is.
    fn try_from(tensor_buffer_type: u8) -> Result<Self, Self::Error> {
        // Table index == wire tag value, matching the enum's discriminants.
        const LOOKUP: [TensorBufferType; 14] = [
            TensorBufferType::U8,
            TensorBufferType::U16,
            TensorBufferType::U32,
            TensorBufferType::U64,
            TensorBufferType::I8,
            TensorBufferType::I16,
            TensorBufferType::I32,
            TensorBufferType::I64,
            TensorBufferType::F16,
            TensorBufferType::F32,
            TensorBufferType::F64,
            TensorBufferType::Jpeg,
            TensorBufferType::Nv12,
            TensorBufferType::Yuy2,
        ];
        LOOKUP
            .get(usize::from(tensor_buffer_type))
            .copied()
            .ok_or("Invalid tensor buffer type")
    }
}

/// Serializes a `TensorData` (shape + buffer) into a single self-describing
/// byte packet.
///
/// Layout, in order (multi-byte integers are big-endian, per the `bytes`
/// crate's `BufMut::put_u64`):
///   1. `u64` — total packet size in bytes, including this field
///   2. `u64` — number of dimensions
///   3. per dimension: `u64` size, then `u64` name length + name bytes
///      (length 0 and no bytes when the dimension is unnamed)
///   4. `u8`  — `TensorBufferType` tag of the buffer
///   5. raw buffer bytes
///
/// # Panics
/// Panics (via `expect`/`unwrap`) if the buffer type cannot be tagged or the
/// dimension count does not fit in `u64` — both are invariant violations for
/// the current `TensorBuffer` enum.
pub fn tensor_data_to_bytes_with_shape(tensor_data: TensorData) -> Bytes {
    // Exhaustive over today's TensorBuffer variants, so this cannot fail.
    let tensor_buffer_type = TensorBufferType::try_from(&tensor_data.buffer)
        .expect("read tensor buffer type failed");
    let tensor_buffer_type = tensor_buffer_type as u8; 
    // Precompute the exact packet size so BytesMut never reallocates:
    // total-size field + dim-count field + one u64 per dimension size
    // + the u8 type tag + the raw buffer payload ...
    let mut nbytes = size_of::<u64>() + size_of::<u64>() + tensor_data.shape.len() * size_of::<u64>() + size_of::<u8>() + tensor_data.buffer.size_in_bytes();
    // ... plus, per dimension, the u64 name-length field and the name bytes.
    for dimension in &tensor_data.shape {
        if let Some(name) = &dimension.name {
            nbytes += size_of::<u64>() + name.0.as_slice().len();
        } else {
            nbytes += size_of::<u64>();
        }
    }
    let mut bytes = BytesMut::with_capacity(nbytes);
    // Header: total size (self-inclusive), then dimension count.
    bytes.put_u64(nbytes as u64);
    bytes.put_u64(tensor_data.shape.len().try_into().unwrap());
    // Shape section: size + length-prefixed name for each dimension.
    for dimension in &tensor_data.shape {
        bytes.put_u64(dimension.size);
        if let Some(name) = &dimension.name {
            let name_bytes = name.0.as_slice();
            bytes.put_u64(name_bytes.len() as u64);
            bytes.put_slice(name_bytes);
        } else {
            // NOTE(review): an unnamed dimension is representable in the
            // format (length 0), yet is logged at error severity — confirm
            // whether this is truly an error for downstream consumers.
            error!("dimension name is empty");
            bytes.put_u64(0);
            // No-op write, kept to mirror the named branch's structure.
            bytes.put_slice(b"");
        }
    }
    // Payload section: one-byte type tag, then the raw buffer bytes.
    bytes.put_u8(tensor_buffer_type);
    bytes.put_slice( match &tensor_data.buffer {
        TensorBuffer::U8(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::U16(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::U32(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::U64(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::I8(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::I16(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::I32(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::I64(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::F16(arrow_buffer) => {
            // f16 has no ToByteSlice impl, so reinterpret the element
            // storage as raw bytes instead.
            // SAFETY: the buffer holds `len()` contiguous, initialized f16
            // values, so viewing that storage as `len * size_of::<f16>()`
            // bytes stays within the allocation; u8 has no alignment or
            // validity requirements. The borrow lives only for this call.
            unsafe { 
                std::slice::from_raw_parts(
                    arrow_buffer.as_ptr() as *const u8,
                    arrow_buffer.len() * size_of::<arrow2::types::f16>()
                ) 
            }
        },
        TensorBuffer::F32(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::F64(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::Jpeg(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::Nv12(arrow_buffer) => arrow_buffer.to_byte_slice(),
        TensorBuffer::Yuy2(arrow_buffer) => arrow_buffer.to_byte_slice(),
    });
    bytes.freeze()
}