#include <napi/napi.h>
#include "../CameraDevice.h"
#include <string>
#include <native_window/external_window.h>
#include <OHOSExtensions/OpenGLHelpers.h>
#include <window_manager/oh_display_info.h>
#include <window_manager/oh_display_manager.h>
#include <native_window/graphic_error_code.h>
#include <native_image/native_image.h>
#include <hilog/log.h>
#include <bgfx/bgfx.h>
#include <bgfx/platform.h>
#include <arcana/threading/task.h>
#include <arcana/threading/dispatcher.h>
#include <Babylon/JsRuntimeScheduler.h>
#include <Babylon/Graphics/DeviceContext.h>
#include <memory>
#include "ohcamera/camera.h"
#include "ohcamera/camera_device.h"
#include "ohcamera/camera_input.h"
#include "ohcamera/capture_session.h"
#include "ohcamera/photo_output.h"
#include "ohcamera/preview_output.h"
#include "ohcamera/camera_manager.h"

#undef LOG_DOMAIN
#undef LOG_TAG
#define LOG_DOMAIN 0x3200
#define LOG_TAG "BabylonNative_Camera"

namespace Babylon::Plugins
{
    // Pimpl for CameraTrack: holds the pixel dimensions of one supported
    // camera preview resolution.
    struct CameraTrack::Impl
    {
        int32_t width{};  // preview width in pixels
        int32_t height{}; // preview height in pixels
    };

    // Pimpl for CameraDevice: owns the OpenHarmony camera/session handles, the
    // OES texture plumbing that receives preview frames, and the EGL/GL state
    // used to hand those frames to bgfx/Babylon.
    struct CameraDevice::Impl
    {
        Impl(Napi::Env env)
            : env{env}
        {
        }

        // Creates an external (OES) GL texture with linear filtering, suitable
        // for attaching to an OH_NativeImage. Returns the GL texture id.
        GLuint GenerateOESTexture()
        {
            GLuint oesTexture;
            glGenTextures(1, &oesTexture);
            glBindTexture(GL_TEXTURE_EXTERNAL_OES, oesTexture);
            glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
            return oesTexture;
        }

        // Returns the difference (0/90/180/270 degrees) between the camera
        // sensor's natural orientation and the display's current rotation.
        int GetCurrentSensorRotationDiff()
        {
            // Get the phone's current rotation so we can determine if the camera image needs to be rotated based on the sensor's natural orientation.
            // Value-initialize so a failed query yields a defined rotation of 0 instead of reading uninitialized memory.
            NativeDisplayManager_Rotation rotation{};
            auto displayRet = OH_NativeDisplayManager_GetDefaultDisplayRotation(&rotation);
            if (displayRet != 0)
            {
                OH_LOG_ERROR(LOG_APP, "OH_NativeDisplayManager_GetDefaultDisplayRotation failed, ret:%{public}d", static_cast<int>(displayRet));
            }
            int32_t phoneRotation{static_cast<int32_t>(rotation) * 90};

            // The sensor rotation dictates the orientation of the camera when the phone is in its default orientation.
            // Subtracting the phone's rotation from the camera's rotation gives the current orientation of the
            // sensor; add 360 and take the modulus to keep the result in [0, 360).
            int currentSensorRotationDiff{(sensorRotation - phoneRotation + 360) % 360};
            bool sensorIsPortrait{currentSensorRotationDiff == 90 || currentSensorRotationDiff == 270};
            if (facingUser && !sensorIsPortrait)
            {
                // Compensate for the front facing camera being naturally mirrored. In the portrait orientation
                // the mirrored behavior matches the browser, but in landscape it would result in the image rendering
                // upside down. Rotate the image by 180 to compensate.
                currentSensorRotationDiff = (currentSensorRotationDiff + 180) % 360;
            }

            return currentSensorRotationDiff;
        }

        // Returns the index of this device's cameraID within `cameras`, or -1
        // when the list is absent or does not contain the id.
        int GetCameraIndex()
        {
            if (cameras == nullptr)
            {
                return -1;
            }

            // `size` is unsigned; iterate with a matching type to avoid a signed/unsigned comparison.
            for (uint32_t i = 0; i < size; ++i)
            {
                if (cameraID == cameras[i].cameraId)
                {
                    return static_cast<int>(i);
                }
            }

            return -1;
        }

        // Wraps the given OES texture in an OH_NativeImage and acquires the
        // native window that the camera preview stream will render into.
        // On failure both nativeImage and textureWindow are left unset.
        void InitWithTexture(GLuint textureId)
        {
            OH_NativeImage* image = OH_NativeImage_Create(textureId, GL_TEXTURE_EXTERNAL_OES);
            if (image == nullptr)
            {
                OH_LOG_ERROR(LOG_APP, "OH_NativeImage_Create failed.");
                return;
            }

            OHNativeWindow* window = OH_NativeImage_AcquireNativeWindow(image);
            if (window == nullptr)
            {
                // Destroy the image so a half-initialized state is not leaked.
                OH_NativeImage_Destroy(&image);
                OH_LOG_ERROR(LOG_APP, "OH_NativeImage_AcquireNativeWindow failed.");
                return;
            }

            nativeImage = image;
            textureWindow = window;
        }

        // Pulls the most recent camera frame into the attached OES texture.
        void UpdateTexImage()
        {
            if (nativeImage == nullptr)
            {
                OH_LOG_ERROR(LOG_APP, "nativeImage is nullptr.");
                return;
            }

            int ret = OH_NativeImage_UpdateSurfaceImage(nativeImage);
            if (ret != 0)
            {
                OH_LOG_ERROR(LOG_APP, "OH_NativeImage_UpdateSurfaceImage failed, ret:%{public}d", ret);
            }
        }

        // Sets the pixel geometry of the preview buffer backing textureWindow.
        void SetDefaultBufferSize(int width, int height)
        {
            if (textureWindow == nullptr)
            {
                OH_LOG_ERROR(LOG_APP, "textureWindow is nullptr.");
                return;
            }

            int32_t ret = OH_NativeWindow_NativeWindowHandleOpt(textureWindow, SET_BUFFER_GEOMETRY, width, height);
            if (ret != 0)
            {
                OH_LOG_ERROR(LOG_APP, "OH_NativeWindow_NativeWindowHandleOpt failed, ret:%{public}d", ret);
            }
        }

        // Queries the surface id of nativeImage and stores its decimal string
        // form in surfaceId (the form the camera preview-output API expects).
        void GenerateSurfaceId()
        {
            if (nativeImage == nullptr)
            {
                OH_LOG_ERROR(LOG_APP, "nativeImage is nullptr.");
                return;
            }

            uint64_t surfaceIdNum{};
            int32_t ret = OH_NativeImage_GetSurfaceId(nativeImage, &surfaceIdNum);
            if (ret != NATIVE_ERROR_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_NativeImage_GetSurfaceId failed, ret:%{public}d", ret);
                return;
            }

            surfaceId = std::to_string(surfaceIdNum);
        }

        Napi::Env env;

        // Thread the device context was bound on; set in OpenAsync.
        arcana::affinity threadAffinity{};

        std::vector<CameraTrack> supportedResolutions{};
        std::vector<std::unique_ptr<Capability>> capabilities{};
        std::optional<Plugins::PhotoCapabilities> photoCapabilities{};
        std::optional<Plugins::PhotoSettings> defaultPhotoSettings{};
        std::string cameraID{};
        int32_t sensorRotation{}; // sensor orientation in degrees at the device's default orientation
        bool facingUser{};        // true for a front-facing camera
        CameraDimensions cameraDimensions{};
        int32_t sensorRotationDiff{}; // cached result of GetCurrentSensorRotationDiff
        bool updateTextureDimensions{true};

        Graphics::DeviceContext* deviceContext{};

        // OpenHarmony camera framework handles (non-owning C API pointers).
        Camera_Manager* cameraManager{};
        Camera_Device* cameras{};
        uint32_t size{}; // number of entries in `cameras`
        Camera_OutputCapability* cameraOutputCapability{};
        Camera_Input* cameraInput{};
        Camera_PreviewOutput* previewOutput{};
        Camera_PhotoOutput* photoOutput{};
        Camera_CaptureSession* captureSession{};

        // Preview frame destination: native image + its producer window.
        OH_NativeImage* nativeImage{};
        OHNativeWindow* textureWindow{};
        std::string surfaceId{};

        // GL resources used to convert the OES preview into an RGBA texture.
        GLuint cameraOESTextureId{};
        GLuint cameraRGBATextureId{};
        GLuint cameraShaderProgramId{};
        GLuint frameBufferId{};

        EGLContext context{EGL_NO_CONTEXT};
        EGLDisplay display{};
    };

    // Vertex positions for the camera texture: the four corners of clip space,
    // (-1,-1) through (1,1), as interleaved (x, y) pairs.
    constexpr size_t CAMERA_VERTEX_COUNT{4};
    constexpr GLfloat CAMERA_VERTEX_POSITIONS[CAMERA_VERTEX_COUNT * 2]{-1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, 1.0f};

    // UV mappings to correct for the different orientations of the screen versus the camera sensor.
    // One table per 90-degree rotation step; each holds four (u, v) pairs matching the vertices above.
    constexpr size_t CAMERA_UVS_COUNT{4};
    constexpr GLfloat CAMERA_UVS_ROTATION_0[CAMERA_UVS_COUNT * 2]{0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f};
    constexpr GLfloat CAMERA_UVS_ROTATION_90[CAMERA_UVS_COUNT * 2]{0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f};
    constexpr GLfloat CAMERA_UVS_ROTATION_180[CAMERA_UVS_COUNT * 2]{1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f};
    constexpr GLfloat CAMERA_UVS_ROTATION_270[CAMERA_UVS_COUNT * 2]{1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f};

    // GLES3 vertex shader: positions and UVs are supplied as uniform arrays and
    // indexed by gl_VertexID, so no vertex buffers are needed.
    static constexpr char CAMERA_VERT_SHADER[]{R"(#version 300 es
       precision highp float;
       uniform vec2 positions[4];
       uniform vec2 uvs[4];
       out vec2 uv;
       void main() {
           gl_Position = vec4(positions[gl_VertexID], 0.0, 1.0);
           uv = uvs[gl_VertexID];
       }
   )"};

    // GLES3 fragment shader: samples the external OES camera texture and writes
    // the color to attachment 0 (the destination Babylon texture).
    static constexpr char CAMERA_FRAG_SHADER[]{R"(#version 300 es
       #extension GL_OES_EGL_image_external_essl3 : require
       precision mediump float;
       in vec2 uv;
       uniform samplerExternalOES cameraTexture;
       // Location 0 is GL_COLOR_ATTACHMENT0, which in turn is the babylonTexture
       layout(location = 0) out vec4 oFragColor;
       void main() {
           oFragColor = texture(cameraTexture, uv);
       }
   )"};

    // Capture-session focus-state callback; log-only, state is not consumed.
    void CaptureSessionOnFocusStateChange(Camera_CaptureSession* session, Camera_FocusState focusState)
    {
        OH_LOG_INFO(LOG_APP, "CaptureSessionOnFocusStateChange");
    }

    // Capture-session error callback; logs the error code only.
    void CaptureSessionOnError(Camera_CaptureSession* session, Camera_ErrorCode errorCode)
    {
        OH_LOG_INFO(LOG_APP, "CaptureSessionOnError = %{public}d", errorCode);
    }

    // Returns the capture-session callback table passed to
    // OH_CaptureSession_RegisterCallback. Function-local static: constructed
    // once and valid for the lifetime of the program.
    CaptureSession_Callbacks* GetCaptureSessionRegister(void)
    {
        static CaptureSession_Callbacks s_sessionCallbacks = {
            .onFocusStateChange = CaptureSessionOnFocusStateChange,
            .onError = CaptureSessionOnError,
        };

        return &s_sessionCallbacks;
    }

    // Preview-output frame-start callback; log-only.
    void PreviewOutputOnFrameStart(Camera_PreviewOutput* previewOutput)
    {
        OH_LOG_INFO(LOG_APP, "PreviewOutputOnFrameStart");
    }

    // Preview-output frame-end callback; logs the frame count only.
    void PreviewOutputOnFrameEnd(Camera_PreviewOutput* previewOutput, int32_t frameCount)
    {
        OH_LOG_INFO(LOG_APP, "PreviewOutputOnFrameEnd = %{public}d", frameCount);
    }

    // Preview-output error callback; logs the error code only.
    void PreviewOutputOnError(Camera_PreviewOutput* previewOutput, Camera_ErrorCode errorCode)
    {
        OH_LOG_INFO(LOG_APP, "PreviewOutputOnError = %{public}d", errorCode);
    }

    // Returns the preview-output callback table passed to
    // OH_PreviewOutput_RegisterCallback. Function-local static: constructed
    // once and valid for the lifetime of the program.
    PreviewOutput_Callbacks* GetPreviewOutputListener(void)
    {
        static PreviewOutput_Callbacks s_previewCallbacks = {
            .onFrameStart = PreviewOutputOnFrameStart,
            .onFrameEnd = PreviewOutputOnFrameEnd,
            .onError = PreviewOutputOnError,
        };

        return &s_previewCallbacks;
    }

    // Camera-input error callback; logs the error code only.
    void OnCameraInputError(const Camera_Input* cameraInput, Camera_ErrorCode errorCode)
    {
        OH_LOG_INFO(LOG_APP, "OnCameraInput errorCode = %{public}d", errorCode);
    }

    // Returns the camera-input callback table passed to
    // OH_CameraInput_RegisterCallback. Function-local static: constructed once
    // and valid for the lifetime of the program.
    CameraInput_Callbacks* GetCameraInputListener(void)
    {
        static CameraInput_Callbacks s_inputCallbacks = {
            .onError = OnCameraInputError,
        };

        return &s_inputCallbacks;
    }

    // Camera-manager status-change callback; log-only, status is not consumed.
    void CameraManagerStatusCallback(Camera_Manager* cameraManager, Camera_StatusInfo* status)
    {
        OH_LOG_INFO(LOG_APP, "CameraManagerStatusCallback is called");
    }

    // Returns the camera-manager callback table passed to
    // OH_CameraManager_RegisterCallback. Function-local static: constructed
    // once and valid for the lifetime of the program.
    CameraManager_Callbacks* GetCameraManagerListener()
    {
        static CameraManager_Callbacks s_managerCallbacks = {
            .onCameraStatus = CameraManagerStatusCallback,
        };

        return &s_managerCallbacks;
    }

    // Opens the camera described by `track`, wires its preview stream into an
    // OES texture via an OH_NativeImage surface, and configures/starts an
    // OpenHarmony capture session. Returns a task resolving to the effective
    // camera dimensions (width/height swapped when the sensor is rotated into
    // a portrait orientation, to match web behavior).
    arcana::task<CameraDevice::CameraDimensions, std::exception_ptr> CameraDevice::OpenAsync(const CameraTrack& track)
    {
        // Lazily bind the graphics device context and remember this thread's
        // affinity the first time a camera is opened.
        if (!m_impl->deviceContext)
        {
            m_impl->deviceContext = &Graphics::DeviceContext::GetFromJavaScript(m_impl->env);
            m_impl->threadAffinity = std::this_thread::get_id();
        }

        // NOTE(review): `track` is captured by reference; this is safe only while
        // the continuation runs synchronously on the inline scheduler — confirm
        // before moving this work to a different scheduler.
        return arcana::make_task(arcana::inline_scheduler, arcana::cancellation::none(), [this, &track]() {
            // Get the phone's current rotation so we can determine if the camera image needs to be rotated based on the sensor's natural orientation
            m_impl->sensorRotationDiff = m_impl->GetCurrentSensorRotationDiff();

            m_impl->cameraDimensions.width = track.Width();
            m_impl->cameraDimensions.height = track.Height();

            // Check if there is an already available context for this thread
            EGLContext currentContext = eglGetCurrentContext();
            if (currentContext == EGL_NO_CONTEXT)
            {
                m_impl->display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
                eglInitialize(m_impl->display, nullptr, nullptr);

                static const EGLint attrs[] = {
                    EGL_RENDERABLE_TYPE, EGL_OPENGL_ES3_BIT_KHR,
                    EGL_BLUE_SIZE, 8,
                    EGL_GREEN_SIZE, 8,
                    EGL_RED_SIZE, 8,
                    EGL_ALPHA_SIZE, 8,
                    EGL_DEPTH_SIZE, 16,
                    EGL_STENCIL_SIZE, 8,
                    EGL_NONE};

                EGLConfig config;
                EGLint numConfig = 0;
                eglChooseConfig(m_impl->display, attrs, &config, 1, &numConfig);

                static const EGLint contextAttribs[] = {
                    EGL_CONTEXT_MAJOR_VERSION_KHR,
                    3,
                    EGL_CONTEXT_MINOR_VERSION_KHR,
                    0,
                    EGL_NONE};

                // Share with bgfx's context so the camera textures are visible to the renderer.
                m_impl->context = eglCreateContext(m_impl->display, config, bgfx::getInternalData()->context, contextAttribs);
                if (eglMakeCurrent(m_impl->display, 0 /*surface*/, 0 /*surface*/, m_impl->context) == EGL_FALSE)
                {
                    OH_LOG_ERROR(LOG_APP, "Unable to create a shared GL context for camera texture.");
                    throw std::runtime_error{"Unable to create a shared GL context for camera texture."};
                }
            }
            else
            {
                // A context is already current on this thread. Record its display so
                // the restoring eglMakeCurrent at the end of this lambda targets a
                // valid display; previously m_impl->display was left as EGL_NO_DISPLAY
                // in this path, which made the restore call fail and throw.
                m_impl->display = eglGetCurrentDisplay();
            }

            m_impl->cameraShaderProgramId = ohos::OpenGLHelpers::CreateShaderProgram(CAMERA_VERT_SHADER, CAMERA_FRAG_SHADER);

            m_impl->cameraOESTextureId = m_impl->GenerateOESTexture();

            // Create the surface and surface texture that will receive the camera preview
            m_impl->InitWithTexture(m_impl->cameraOESTextureId);
            m_impl->SetDefaultBufferSize(track.Width(), track.Height());
            m_impl->GenerateSurfaceId();

            // Create CameraManager object
            Camera_ErrorCode ret = OH_Camera_GetCameraManager(&m_impl->cameraManager);
            if (m_impl->cameraManager == nullptr || ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_Camera_GetCameraManager failed, ret:%{public}d", ret);
            }

            // Monitor camera status changes
            ret = OH_CameraManager_RegisterCallback(m_impl->cameraManager, GetCameraManagerListener());
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_RegisterCallback failed, ret:%{public}d", ret);
            }

            // Get camera list
            ret = OH_CameraManager_GetSupportedCameras(m_impl->cameraManager, &m_impl->cameras, &m_impl->size);
            if (m_impl->cameras == nullptr || m_impl->size == 0 || ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_GetSupportedCameras failed, ret:%{public}d", ret);
            }

            // Create camera input stream
            ret = OH_CameraManager_CreateCameraInput(m_impl->cameraManager, &m_impl->cameras[0], &m_impl->cameraInput);
            if (m_impl->cameraInput == nullptr || ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_CreateCameraInput failed, ret:%{public}d", ret);
            }

            // Monitor cameraInput error messages
            ret = OH_CameraInput_RegisterCallback(m_impl->cameraInput, GetCameraInputListener());
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraInput_RegisterCallback failed, ret:%{public}d", ret);
            }

            // Open the camera
            ret = OH_CameraInput_Open(m_impl->cameraInput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraInput_Open failed, ret:%{public}d", ret);
            }

            // Get camera device index; fall back to the first camera if the id is unknown.
            int index = m_impl->GetCameraIndex();
            if (index < 0)
            {
                index = 0;
                OH_LOG_ERROR(LOG_APP, "GetCameraIndex failed, size:%{public}d, cameraId:%{public}s.", m_impl->size, m_impl->cameraID.c_str());
            }

            // Get the output stream capability supported by the camera device
            ret = OH_CameraManager_GetSupportedCameraOutputCapability(m_impl->cameraManager, &m_impl->cameras[index], &m_impl->cameraOutputCapability);
            if (m_impl->cameraOutputCapability == nullptr || ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_GetSupportedCameraOutputCapability failed, ret:%{public}d", ret);
                // The capability struct is dereferenced unconditionally below; fail the
                // task instead of crashing on a null pointer.
                throw std::runtime_error{"OH_CameraManager_GetSupportedCameraOutputCapability failed."};
            }
            if (m_impl->cameraOutputCapability->previewProfilesSize <= 0)
            {
                OH_LOG_ERROR(LOG_APP, "previewProfilesSize <= 0");
            }
            if (m_impl->cameraOutputCapability->photoProfilesSize <= 0)
            {
                OH_LOG_ERROR(LOG_APP, "photoProfilesSize <= 0");
            }

            // Create preview output stream
            ret = OH_CameraManager_CreatePreviewOutput(m_impl->cameraManager, m_impl->cameraOutputCapability->previewProfiles[0], m_impl->surfaceId.c_str(), &m_impl->previewOutput);
            if (m_impl->previewOutput == nullptr || ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_CreatePreviewOutput failed, ret:%{public}d", ret);
            }

            // Monitor preview output error message
            ret = OH_PreviewOutput_RegisterCallback(m_impl->previewOutput, GetPreviewOutputListener());
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_PreviewOutput_RegisterCallback failed, ret:%{public}d", ret);
            }

            // Create a photo output stream
            ret = OH_CameraManager_CreatePhotoOutputWithoutSurface(m_impl->cameraManager, m_impl->cameraOutputCapability->photoProfiles[0], &m_impl->photoOutput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_CreatePhotoOutputWithoutSurface failed, ret:%{public}d", ret);
            }

            // Monitor single ended camera callback
            // ret = OH_PhotoOutput_RegisterPhotoAvailableCallback(photoOutput, OnPhotoAvailable);

            // Create session
            ret = OH_CameraManager_CreateCaptureSession(m_impl->cameraManager, &m_impl->captureSession);
            if (m_impl->captureSession == nullptr || ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_CreateCaptureSession failed, ret:%{public}d", ret);
            }

            // Monitor session error messages
            ret = OH_CaptureSession_RegisterCallback(m_impl->captureSession, GetCaptureSessionRegister());
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_RegisterCallback failed, ret:%{public}d", ret);
            }

            // Start configuring session
            ret = OH_CaptureSession_BeginConfig(m_impl->captureSession);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_BeginConfig failed, ret:%{public}d", ret);
            }

            // Add camera input stream to the session
            ret = OH_CaptureSession_AddInput(m_impl->captureSession, m_impl->cameraInput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_AddInput failed, ret:%{public}d", ret);
            }

            // Add preview output stream to the session
            ret = OH_CaptureSession_AddPreviewOutput(m_impl->captureSession, m_impl->previewOutput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_AddPreviewOutput failed, ret:%{public}d", ret);
            }

            // Add a photo output stream to the session
            ret = OH_CaptureSession_AddPhotoOutput(m_impl->captureSession, m_impl->photoOutput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_AddPhotoOutput failed, ret:%{public}d", ret);
            }

            // Submit session configuration
            ret = OH_CaptureSession_CommitConfig(m_impl->captureSession);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_CommitConfig failed, ret:%{public}d", ret);
            }

            // Start session
            ret = OH_CaptureSession_Start(m_impl->captureSession);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_Start failed, ret:%{public}d", ret);
            }

            // Restore whatever context was current when this lambda started
            // (EGL_NO_CONTEXT releases the context in the first-open path).
            if (eglMakeCurrent(m_impl->display, 0 /*surface*/, 0 /*surface*/, currentContext) == EGL_FALSE)
            {
                OH_LOG_ERROR(LOG_APP, "Unable to restore GL context for camera texture init.");
                throw std::runtime_error{"Unable to restore GL context for camera texture init."};
            }

            // To match the web implementation if the sensor is rotated into a portrait orientation then the width and height
            // of the video should be swapped
            bool sensorIsPortrait{m_impl->sensorRotationDiff == 90 || m_impl->sensorRotationDiff == 270};
            return !sensorIsPortrait ? CameraDimensions{m_impl->cameraDimensions.width, m_impl->cameraDimensions.height} : CameraDimensions{m_impl->cameraDimensions.height, m_impl->cameraDimensions.width};
        });
    }

    std::vector<CameraDevice> CameraDevice::GetCameraDevices(Napi::Env env)
    {
        // Create CameraManager object
        Camera_Manager* localCameraManager = nullptr;
        Camera_ErrorCode ret = OH_Camera_GetCameraManager(&localCameraManager);
        if (localCameraManager == nullptr || ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_Camera_GetCameraMananger failed, ret:%{public}d", ret);
        }

        // Monitor camera status changes
        ret = OH_CameraManager_RegisterCallback(localCameraManager, GetCameraManagerListener());
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_CameraManager_RegisterCallback failed, ret:%{public}d", ret);
        }

        // Get camera list
        Camera_Device* localCameras = nullptr;
        uint32_t localSize = 0;
        ret = OH_CameraManager_GetSupportedCameras(localCameraManager, &localCameras, &localSize);
        if (localCameras == nullptr || localSize < 0 || ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_CameraManager_GetSupportedCameras failed, ret:%{public}d", ret);
        }

        std::vector<CameraDevice> cameraDevices{};

        for (int i = 0; i < localSize; ++i)
        {
            // Create camera input stream
            Camera_Input* localCameraInput = nullptr;
            ret = OH_CameraManager_CreateCameraInput(localCameraManager, &localCameras[i], &localCameraInput);
            if (localCameraInput == nullptr || ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_CreateCameraInput failed, ret:%{public}d", ret);
            }

            // Monitor cameraInput error messages
            ret == OH_CameraInput_RegisterCallback(localCameraInput, GetCameraInputListener());
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraInput_RegisterCallback failed, ret:%{public}d", ret);
            }

            // Open the camera
            ret = OH_CameraInput_Open(localCameraInput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraInput_Open failed, ret:%{public}d", ret);
            }

            const char* id = localCameras[i].cameraId;
            auto cameraDeviceImpl{std::make_unique<CameraDevice::Impl>(env)};

            // Obtain the output stream capability supported by the camera device
            Camera_OutputCapability* localCameraOutputCapability = nullptr;
            ret = OH_CameraManager_GetSupportedCameraOutputCapability(localCameraManager, &localCameras[i], &localCameraOutputCapability);
            if (localCameraOutputCapability == nullptr || ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_GetSupportedCameraOutputCapability failed, ret:%{public}d", ret);
            }
            if (localCameraOutputCapability->previewProfilesSize <= 0)
            {
                OH_LOG_ERROR(LOG_APP, "previewProfilesSize <= 0");
            }
            if (localCameraOutputCapability->photoProfilesSize <= 0)
            {
                OH_LOG_ERROR(LOG_APP, "photoProfilesSize <= 0");
            }

            for (uint32_t j = 0; j < localCameraOutputCapability->previewProfilesSize; ++j)
            {
                Camera_Format format{localCameraOutputCapability->previewProfiles[j]->format};
                uint32_t width{localCameraOutputCapability->previewProfiles[j]->size.width};
                uint32_t height{localCameraOutputCapability->previewProfiles[j]->size.height};

                if (format != CAMERA_FORMAT_YUV_420_SP)
                {
                    // Ignore the configuration if it is not a preview format
                    continue;
                }

                auto cameraTrackImpl{std::make_unique<CameraTrack::Impl>()};
                cameraTrackImpl->width = width;
                cameraTrackImpl->height = height;
                cameraDeviceImpl->supportedResolutions.push_back(CameraTrack{std::move(cameraTrackImpl)});
            }

            // Camera Position
            auto facing = localCameras[i].cameraPosition;

            // Obtain the sensor orientation attribute of the camera device
            uint32_t orientation{};
            ret = OH_CameraDevice_GetCameraOrientation(&localCameras[i], &orientation);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraDevice_GetCameraOrientation failed, ret:%{public}d", ret);
            }
            int32_t sensorOrientation = static_cast<int32_t>(orientation);

            // // Create preview output stream
            // Camera_PreviewOutput* localPreviewOutput = nullptr;
            // char* localPreviewSurfaceId = previewId;
            // ret = OH_CameraManager_CreatePreviewOutput(localCameraManager, localCameraOutputCapability->previewProfiles[0], localPreviewSurfaceId, &localPreviewOutput);
            // if (previewProfile == nullptr || previewOutput == nullptr || ret != CAMERA_OK)
            // {
            //     OH_LOG_ERROR(LOG_APP, "OH_CameraManager_CreatePreviewOutput failed, ret:%{public}d", ret);
            // }

            // // Monitor preview output error message
            // ret = OH_PreviewOutput_RegisterCallback(localPreviewOutput, GetPreviewOutputListener());
            // if (ret != CAMERA_OK)
            // {
            //     OH_LOG_ERROR(LOG_APP, "OH_PreviewOutput_RegisterCallback failed, ret:%{public}d", ret);
            // }

            // Create a photo output stream
            Camera_PhotoOutput* localPhotoOutput = nullptr;
            ret = OH_CameraManager_CreatePhotoOutputWithoutSurface(localCameraManager, localCameraOutputCapability->photoProfiles[0], &localPhotoOutput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_CreatePhotoOutputWithoutSurface failed, ret:%{public}d", ret);
            }

            // Monitor single ended camera callback
            // ret = OH_PhotoOutput_RegisterPhotoAvailableCallback(photoOutput, OnPhotoAvailable);

            // Create session
            Camera_CaptureSession* localCaptureSession = nullptr;
            ret = OH_CameraManager_CreateCaptureSession(localCameraManager, &localCaptureSession);
            if (localCaptureSession == nullptr || ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_CreateCaptureSession failed, ret:%{public}d", ret);
            }

            // Monitor session error messages
            ret = OH_CaptureSession_RegisterCallback(localCaptureSession, GetCaptureSessionRegister());
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_RegisterCallback failed, ret:%{public}d", ret);
            }

            // Start configuring session
            ret = OH_CaptureSession_BeginConfig(localCaptureSession);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_BeginConfig failed, ret:%{public}d", ret);
            }

            // Add camera input stream to the session
            ret = OH_CaptureSession_AddInput(localCaptureSession, localCameraInput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_AddInput failed, ret:%{public}d", ret);
            }

            // // Add preview output stream to the session
            // ret = OH_CaptureSession_AddPreviewOutput(localCaptureSession, localPreviewOutput);
            // if (ret != CAMERA_OK)
            // {
            //     OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_AddPreviewOutput failed, ret:%{public}d", ret);
            // }

            // Add a photo output stream to the session
            ret = OH_CaptureSession_AddPhotoOutput(localCaptureSession, localPhotoOutput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_AddPhotoOutput failed, ret:%{public}d", ret);
            }

            // Submit session configuration
            // Tail of the per-camera enumeration loop: commit and briefly start the
            // capture session to probe flash/zoom capabilities, tear the session back
            // down, then record a CameraDevice entry with the discovered capabilities.
            ret = OH_CaptureSession_CommitConfig(localCaptureSession);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_CommitConfig failed, ret:%{public}d", ret);
            }

            // Start session (only long enough to query capabilities below)
            ret = OH_CaptureSession_Start(localCaptureSession);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_Start failed, ret:%{public}d", ret);
            }

            // Determine whether the device supports flash
            // NOTE(review): flashMode is never read in this scope — looks removable; confirm.
            Camera_FlashMode flashMode = FLASH_MODE_AUTO;
            bool torchSupported{false};
            ret = OH_CaptureSession_HasFlash(localCaptureSession, &torchSupported);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_HasFlash failed, ret:%{public}d", ret);
            }

            // Get the zoom value of the current device
            float zoomRatio{};
            ret = OH_CaptureSession_GetZoomRatio(localCaptureSession, &zoomRatio);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_GetZoomRatio failed, ret:%{public}d",ret);
            }

            // Get the zoom ratio range supported by the camera
            float minZoomRatio{};
            float maxZoomRatio{};
            ret = OH_CaptureSession_GetZoomRatioRange(localCaptureSession, &minZoomRatio, &maxZoomRatio);
            if (ret != CAMERA_OK)
            {
                // NOTE(review): copy-paste error — message should say OH_CaptureSession_GetZoomRatioRange.
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_GetZoomRatio failed, ret:%{public}d", ret);
            }

            // Stop capturing session
            ret = OH_CaptureSession_Stop(localCaptureSession);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_Stop failed, ret:%{public}d", ret);
            }

            // Release camera input stream
            ret = OH_CameraInput_Close(localCameraInput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraInput_Close failed, ret:%{public}d", ret);
            }

            // NOTE(review): preview-output release is disabled below; confirm whether
            // localPreviewOutput is released elsewhere or leaks during enumeration.
            // // Release preview output stream
            // ret = OH_PreviewOutput_Release(localPreviewOutput);
            // if (ret != CAMERA_OK)
            // {
            //     OH_LOG_ERROR(LOG_APP, "OH_PreviewOutput_Release failed, ret:%{public}d", ret);
            // }

            // Release the photo output stream
            ret = OH_PhotoOutput_Release(localPhotoOutput);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_PhotoOutput_Release failed, ret:%{public}d", ret);
            }

            // Release session
            ret = OH_CaptureSession_Release(localCaptureSession);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_Release failed, ret:%{public}d", ret);
            }

            // Delete supported output capability
            ret = OH_CameraManager_DeleteSupportedCameraOutputCapability(localCameraManager, localCameraOutputCapability);
            if (ret != CAMERA_OK)
            {
                OH_LOG_ERROR(LOG_APP, "OH_CameraManager_DeleteSupportedCameraOutputCapability failed, ret:%{public}d", ret);
            }

            // Update the cameraDevice information (id, sensor orientation, lens facing)
            cameraDeviceImpl->cameraID = id;
            cameraDeviceImpl->sensorRotation = sensorOrientation;
            cameraDeviceImpl->facingUser = facing == CAMERA_POSITION_FRONT;

            // Create the capabilities. FacingMode follows the W3C MediaTrack naming
            // ("user" = front camera, "environment" = rear camera).
            cameraDeviceImpl->capabilities.push_back(std::make_unique<CameraCapabilityTemplate<std::string>>(
                Capability::Feature::FacingMode,
                facing == CAMERA_POSITION_FRONT ? "user" : "environment",
                facing == CAMERA_POSITION_FRONT ? "user" : "environment",
                facing == CAMERA_POSITION_FRONT ? std::vector<std::string>{"user"} : std::vector<std::string>{"environment"}));

            // Torch toggles the flash into always-on/closed mode on the live session.
            // NOTE(review): the setter lambda captures a raw Impl pointer — it relies on
            // the Impl outliving the stored capability; confirm the ownership guarantees.
            cameraDeviceImpl->capabilities.push_back(std::make_unique<CameraCapabilityTemplate<bool>>(
                Capability::Feature::Torch,
                false,
                false,
                torchSupported ? std::vector<bool>{false, true} : std::vector<bool>{false},
                [impl{cameraDeviceImpl.get()}](bool newValue) {
                    Camera_FlashMode torchMode = newValue ? FLASH_MODE_ALWAYS_OPEN : FLASH_MODE_CLOSE;

                    Camera_ErrorCode ret = OH_CaptureSession_SetFlashMode(impl->captureSession, torchMode);
                    if (ret != CAMERA_OK)
                    {
                        OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_SetFlashMode failed, ret:%{public}d", ret);
                    }

                    return true;
                }));

            // Zoom exposes the probed [min, max] ratio range; the setter forwards the
            // requested ratio to the active capture session.
            cameraDeviceImpl->capabilities.push_back(std::make_unique<CameraCapabilityTemplate<double>>(
                Capability::Feature::Zoom,
                zoomRatio,
                1.0, // Set the default target zoom to 1.0 (no zoom)
                std::vector<double>{minZoomRatio, maxZoomRatio},
                [impl{cameraDeviceImpl.get()}](double newValue) {
                    float newZoomRatio{static_cast<float>(newValue)};

                    Camera_ErrorCode ret = OH_CaptureSession_SetZoomRatio(impl->captureSession, newZoomRatio);
                    if (ret != CAMERA_OK)
                    {
                        OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_SetZoomRatio failed, ret:%{public}d", ret);
                    }

                    return true;
                }));

            cameraDevices.push_back(CameraDevice{std::move(cameraDeviceImpl)});
        }

        // Delete supported cameras
        ret = OH_CameraManager_DeleteSupportedCameras(localCameraManager, localCameras, localSize);
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_CameraManager_DeleteSupportedCameras failed, ret:%{public}d", ret);
        }

        // Delete CameraManager instance
        ret = OH_Camera_DeleteCameraManager(localCameraManager);
        if (ret != CAMERA_OK)
        {
            // NOTE(review): copy-paste error — message should say OH_Camera_DeleteCameraManager.
            OH_LOG_ERROR(LOG_APP, "OH_CameraManager_DeleteSupportedCameras failed, ret:%{public}d", ret);
        }

        return cameraDevices;
    }

    // Renders the most recent camera frame into an internal RGBA texture and hands
    // that texture to bgfx (via overrideInternal) so it backs `textureHandle`.
    //
    // Flow: switch to the plugin's shared EGL context, (re)allocate the RGBA
    // texture/FBO when the sensor-rotation delta changed, acquire the newest
    // camera frame, draw a fullscreen quad sampling the OES texture with
    // rotation-corrected UVs into the FBO, restore the caller's EGL context, and
    // schedule the bgfx override on the render thread.
    //
    // Returns the effective camera dimensions, swapped when the rotation delta
    // puts the sensor in a portrait orientation relative to the display.
    CameraDevice::CameraDimensions CameraDevice::UpdateCameraTexture(bgfx::TextureHandle textureHandle)
    {
        EGLContext currentContext = eglGetCurrentContext();
        if (m_impl->context != EGL_NO_CONTEXT)
        {
            // use the newly created shared context
            if (eglMakeCurrent(m_impl->display, 0 /*surface*/, 0 /*surface*/, m_impl->context) == EGL_FALSE)
            {
                OH_LOG_ERROR(LOG_APP, "Unable to make current shared GL context for camera texture.");
                throw std::runtime_error{"Unable to make current shared GL context for camera texture."};
            }
        }

        int currentSensorRotationDiff = m_impl->GetCurrentSensorRotationDiff();

        // The UI Orientation has changed. Update our internal texture
        if (currentSensorRotationDiff != m_impl->sensorRotationDiff)
        {
            m_impl->sensorRotationDiff = currentSensorRotationDiff;
            m_impl->updateTextureDimensions = true;
        }

        // 90/270 degree deltas mean width/height must be swapped relative to the sensor.
        bool sensorIsPortrait{m_impl->sensorRotationDiff == 90 || m_impl->sensorRotationDiff == 270};

        if (m_impl->updateTextureDimensions)
        {
            // NOTE(review): a fresh texture and framebuffer are generated on every
            // orientation change without deleting the previous ids — this leaks GL
            // objects over repeated rotations. Deleting eagerly is unsafe while bgfx
            // may still reference the old texture; needs a deferred-delete scheme.
            glGenTextures(1, &m_impl->cameraRGBATextureId);
            glBindTexture(GL_TEXTURE_2D, m_impl->cameraRGBATextureId);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, !sensorIsPortrait ? m_impl->cameraDimensions.width : m_impl->cameraDimensions.height, !sensorIsPortrait ? m_impl->cameraDimensions.height : m_impl->cameraDimensions.width, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            // NOTE(review): mipmaps are generated once here on an empty texture and
            // never refreshed after draws; with GL_LINEAR min filtering they are
            // unused anyway — confirm whether this call can be dropped.
            glGenerateMipmap(GL_TEXTURE_2D);

            glBindTexture(GL_TEXTURE_2D, 0);

            glGenFramebuffers(1, &m_impl->frameBufferId);
            glBindFramebuffer(GL_FRAMEBUFFER, m_impl->frameBufferId);
            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_impl->cameraRGBATextureId, 0);

            glBindFramebuffer(GL_FRAMEBUFFER, 0);

            m_impl->updateTextureDimensions = false;
        }

        // Latch the newest camera frame into the OES texture.
        m_impl->UpdateTexImage();

        // Blit the OES texture into the RGBA texture through the camera shader.
        glBindFramebuffer(GL_FRAMEBUFFER, m_impl->frameBufferId);
        glViewport(0, 0, !sensorIsPortrait ? m_impl->cameraDimensions.width : m_impl->cameraDimensions.height, !sensorIsPortrait ? m_impl->cameraDimensions.height : m_impl->cameraDimensions.width);
        glUseProgram(m_impl->cameraShaderProgramId);

        auto vertexPositionsUniformLocation{glGetUniformLocation(m_impl->cameraShaderProgramId, "positions")};
        glUniform2fv(vertexPositionsUniformLocation, CAMERA_VERTEX_COUNT, CAMERA_VERTEX_POSITIONS);

        // Select the UV set that counter-rotates the sensor image for the display.
        auto uvsUniformLocation{glGetUniformLocation(m_impl->cameraShaderProgramId, "uvs")};
        glUniform2fv(uvsUniformLocation, CAMERA_UVS_COUNT,
            m_impl->sensorRotationDiff == 90    ? CAMERA_UVS_ROTATION_90
            : m_impl->sensorRotationDiff == 180 ? CAMERA_UVS_ROTATION_180
            : m_impl->sensorRotationDiff == 270 ? CAMERA_UVS_ROTATION_270
                                                : CAMERA_UVS_ROTATION_0);

        // Configure the camera texture
        auto cameraTextureUniformLocation{glGetUniformLocation(m_impl->cameraShaderProgramId, "cameraTexture")};
        glUniform1i(cameraTextureUniformLocation, ohos::OpenGLHelpers::GetTextureUnit(GL_TEXTURE0));
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_EXTERNAL_OES, m_impl->cameraOESTextureId);
        glBindSampler(ohos::OpenGLHelpers::GetTextureUnit(GL_TEXTURE0), 0);

        // Draw the quad
        glDrawArrays(GL_TRIANGLE_STRIP, 0, CAMERA_VERTEX_COUNT);

        glUseProgram(0);
        glBindFramebuffer(GL_FRAMEBUFFER, 0);

        // bind previously bound context
        // NOTE(review): the caller's context is restored with no draw/read surfaces —
        // this assumes the previous context was surfaceless (or tolerates losing its
        // surfaces); confirm against how bgfx binds its context.
        if (eglMakeCurrent(m_impl->display, 0 /*surface*/, 0 /*surface*/, currentContext) == EGL_FALSE)
        {
            OH_LOG_ERROR(LOG_APP, "Unable to make current shared GL context for camera texture.");
            throw std::runtime_error{"Unable to make current shared GL context for camera texture."};
        }

        // Point the bgfx handle at our RGBA texture on the render thread, before the
        // next frame is rendered.
        arcana::make_task(m_impl->deviceContext->BeforeRenderScheduler(), arcana::cancellation::none(), [rgbaTextureId = m_impl->cameraRGBATextureId, textureHandle] {
            bgfx::overrideInternal(textureHandle, rgbaTextureId);
        });

        return !sensorIsPortrait
                   ? CameraDimensions{m_impl->cameraDimensions.width, m_impl->cameraDimensions.height}
                   : CameraDimensions{m_impl->cameraDimensions.height, m_impl->cameraDimensions.width};
    }

    // Photo capture is not implemented in the OpenHarmony camera backend; the
    // failure is logged and then surfaced to the caller as an exception.
    CameraDevice::TakePhotoTask CameraDevice::TakePhotoAsync(PhotoSettings /*photoSettings*/)
    {
        constexpr const char* kNotImplementedMessage{"TakePhoto not implemented for this platform."};
        OH_LOG_ERROR(LOG_APP, "TakePhoto not implemented for this platform.");
        throw std::runtime_error{kNotImplementedMessage};
    }

    // Closes the camera device: stops the capture session, releases the camera
    // input/output streams, frees the camera-manager resources, and destroys the
    // native window/image plus the shared EGL context.
    //
    // Fix: each released handle is now reset (nullptr / EGL_NO_CONTEXT) after its
    // release call. Previously the pointers kept their stale values, so the
    // "already closed" guard below never fired and a second Close() would
    // double-release every native resource.
    void CameraDevice::Close()
    {
        if (m_impl->captureSession == nullptr)
        {
            // This device was either never opened, or has already been closed.
            // No action is required.
            return;
        }

        // Stop capturing session
        Camera_ErrorCode ret = OH_CaptureSession_Stop(m_impl->captureSession);
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_Stop failed, ret:%{public}d", ret);
        }

        // Release camera input stream
        ret = OH_CameraInput_Close(m_impl->cameraInput);
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_CameraInput_Close failed, ret:%{public}d", ret);
        }
        m_impl->cameraInput = nullptr;

        // Release preview output stream
        ret = OH_PreviewOutput_Release(m_impl->previewOutput);
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_PreviewOutput_Release failed, ret:%{public}d", ret);
        }
        m_impl->previewOutput = nullptr;

        // Release the photo output stream
        ret = OH_PhotoOutput_Release(m_impl->photoOutput);
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_PhotoOutput_Release failed, ret:%{public}d", ret);
        }
        m_impl->photoOutput = nullptr;

        // Release session
        ret = OH_CaptureSession_Release(m_impl->captureSession);
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_CaptureSession_Release failed, ret:%{public}d", ret);
        }
        // Clearing the session pointer is what makes the early-return guard at the
        // top of this function effective on subsequent calls.
        m_impl->captureSession = nullptr;

        // Release camera-manager-owned resources
        ret = OH_CameraManager_DeleteSupportedCameras(m_impl->cameraManager, m_impl->cameras, m_impl->size);
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_CameraManager_DeleteSupportedCameras failed, ret:%{public}d", ret);
        }
        m_impl->cameras = nullptr;
        m_impl->size = 0;
        ret = OH_CameraManager_DeleteSupportedCameraOutputCapability(m_impl->cameraManager, m_impl->cameraOutputCapability);
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_CameraManager_DeleteSupportedCameraOutputCapability failed, ret:%{public}d", ret);
        }
        m_impl->cameraOutputCapability = nullptr;
        ret = OH_Camera_DeleteCameraManager(m_impl->cameraManager);
        if (ret != CAMERA_OK)
        {
            OH_LOG_ERROR(LOG_APP, "OH_Camera_DeleteCameraManager failed, ret:%{public}d", ret);
        }
        m_impl->cameraManager = nullptr;

        // Destroy the native window/image backing the camera surface.
        if (m_impl->textureWindow != nullptr)
        {
            OH_NativeWindow_DestroyNativeWindow(m_impl->textureWindow);
            m_impl->textureWindow = nullptr;
        }
        if (m_impl->nativeImage != nullptr)
        {
            OH_NativeImage_Destroy(&m_impl->nativeImage);
            // Defensive reset in case OH_NativeImage_Destroy did not clear it.
            m_impl->nativeImage = nullptr;
        }

        // Tear down the shared EGL context used for the camera texture blit.
        if (m_impl->context != EGL_NO_CONTEXT)
        {
            eglDestroyContext(m_impl->display, m_impl->context);
            m_impl->context = EGL_NO_CONTEXT;
        }
    }
}

#include "../CameraDeviceSharedPImpl.h"