#include "CloudWangPlugin.h"

#include <stdio.h>
#include <fstream>
#include <string>
#include <ctime>
#include <vector>
#include <map>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}

#include "ofxsImageEffect.h"
#include "ofxsInteract.h"
#include "ofxsMultiThread.h"
#include "ofxsProcessing.h"
#include "ofxsLog.h"
#include "ofxDrawSuite.h"
#include "ofxsSupportPrivate.h"
#include "ofxImageEffectExt.h"

#define kPluginName "CloudWang"
#define kPluginGrouping "OpenFX Sample"
#define kPluginDescription "Extract and display the second video stream from a video file"
#define kPluginIdentifier "com.OpenFXSample.CloudWang"
#define kPluginVersionMajor 1
#define kPluginVersionMinor 0

#define kSupportsTiles false
#define kSupportsMultiResolution false
#define kSupportsMultipleClipPARs false

////////////////////////////////////////////////////////////////////////////////
/** @brief The plugin that does our work */
class CloudWangPlugin : public OFX::ImageEffect
{
public:
    explicit CloudWangPlugin(OfxImageEffectHandle p_Handle);
    virtual ~CloudWangPlugin();

    /* Override the render */
    virtual void render(const OFX::RenderArguments& p_Args);

    /* Override is identity */
    virtual bool isIdentity(const OFX::IsIdentityArguments& p_Args, OFX::Clip*& p_IdentityClip, double& p_IdentityTime);

    /* Override getRegionOfDefinition */
    virtual bool getRegionOfDefinition(const OFX::RegionOfDefinitionArguments& p_Args, OfxRectD& p_RoD);

    /* Override changedParam */
    virtual void changedParam(const OFX::InstanceChangedArgs& p_Args, const std::string& p_ParamName);

    /* Override changed clip */
    virtual void changedClip(const OFX::InstanceChangedArgs& p_Args, const std::string& p_ClipName);

    /* Append a timestamped line to the debug log file */
    void writeLog(const std::string& message);

    /* Open the source file and set up an FFmpeg decoder for its second video stream */
    bool initializeFFmpeg();

    /* Release all FFmpeg resources and reset decoder state */
    void cleanupFFmpeg();

    /* Decode the frame at the given host time (in frames) into the frame cache */
    bool decodeFrameAtTime(double time);
    
    /* Copy a previously decoded frame (float RGBA) out of the cache */
    bool getCachedFrame(double time, std::vector<float>& frameData, int& width, int& height);
    
    /* Composite the host stream (left half) and the decoded second stream (right half) */
    void compositeStreams(OFX::Image* srcImage, bool secondStreamAvailable, OFX::Image* dstImage, double time);

private:
    // Does not own the following pointers
    OFX::Clip* m_DstClip;
    OFX::Clip* m_SrcClip;

    // FFmpeg decoder state (owned; released in cleanupFFmpeg)
    AVFormatContext* m_FormatContext;
    AVCodecContext* m_CodecContext;
    AVFrame* m_Frame;           // decoded frame in the codec's native pixel format
    AVFrame* m_FrameRGB;        // same frame converted to packed RGBA8 (backed by m_Buffer)
    SwsContext* m_SwsContext;   // converter: native pixel format -> RGBA8
    int m_VideoStreamIndex;     // index of the second video stream; -1 when not found
    std::string m_SourceFilePath;
    bool m_FFmpegInitialized;
    double m_FrameRate;         // frame rate of the second stream (defaults to 25.0)
    int m_Width;                // native width of the second stream
    int m_Height;               // native height of the second stream
    uint8_t* m_Buffer;          // pixel storage backing m_FrameRGB
    
    // Frame cache: key is the host time truncated to a frame number, value is the
    // frame as float RGBA in [0,1] at the stream's native size.
    std::map<int64_t, std::vector<float>> m_FrameCache;
    std::map<int64_t, int> m_FrameCacheWidth;
    std::map<int64_t, int> m_FrameCacheHeight;
    
    // Frame number of the most recently cached/decoded frame.
    // NOTE(review): written but never read anywhere in this file — candidate for removal.
    int64_t m_CachedTime;
};

// Construct the effect instance: fetch the mandated clips and, if the host
// exposes it, capture the source media file path used later by FFmpeg.
CloudWangPlugin::CloudWangPlugin(OfxImageEffectHandle p_Handle)
    : ImageEffect(p_Handle)
    , m_FormatContext(nullptr)
    , m_CodecContext(nullptr)
    , m_Frame(nullptr)
    , m_FrameRGB(nullptr)
    , m_SwsContext(nullptr)
    , m_VideoStreamIndex(-1)
    , m_FFmpegInitialized(false)
    , m_FrameRate(25.0)
    , m_Width(0)
    , m_Height(0)
    , m_Buffer(nullptr)
    , m_CachedTime(-1)
{
    m_DstClip = fetchClip(kOfxImageEffectOutputClipName);
    m_SrcClip = fetchClip(kOfxImageEffectSimpleSourceClipName);

    // Read the source file path from the host and log it.
    // NOTE(review): kOfxImageEffectPropSrcFilePath comes from ofxImageEffectExt.h,
    // a host-extension header — hosts that do not implement it take the
    // catch/else branches below. Confirm against the target host.
    try {
        std::string srcFilePath = getPropertySet().propGetString(kOfxImageEffectPropSrcFilePath, false);
        if (!srcFilePath.empty()) {
            m_SourceFilePath = srcFilePath;
            writeLog("Source file path: " + srcFilePath);
        } else {
            writeLog("Source file path: (empty)");
        }
    } catch (...) {
        writeLog("Source file path: (not available or error)");
    }
}

// Append one timestamped line to the debug log file.
//
// Fix: the previous implementation used ctime(), which writes into a shared
// static buffer — not thread-safe and deprecated by most toolchains. We now
// format the time with strftime() into a local buffer (same "Www Mmm dd
// hh:mm:ss yyyy" layout ctime produced, minus the trailing newline handling).
//
// TODO(review): the log path is hard-coded to one user's Windows desktop;
// consider an environment variable or a host-provided temp directory.
void CloudWangPlugin::writeLog(const std::string& message)
{
    const std::string logPath = "C:\\Users\\Administrator\\Desktop\\CloudWangPlugin.log";

    std::ofstream logFile(logPath, std::ios::app);  // append mode
    if (!logFile.is_open())
    {
        return;  // best-effort logging: silently drop if the file can't be opened
    }

    const time_t now = time(nullptr);
    char timeBuf[32] = {0};
    const std::tm* localNow = std::localtime(&now);
    if (localNow && std::strftime(timeBuf, sizeof(timeBuf), "%a %b %d %H:%M:%S %Y", localNow) > 0)
    {
        logFile << "[" << timeBuf << "] " << message << std::endl;
    }
    else
    {
        // Time formatting failed — still record the message itself.
        logFile << message << std::endl;
    }
}

// Open m_SourceFilePath with FFmpeg and prepare a decoder for the SECOND video
// stream in the container, plus an RGBA8 conversion pipeline (m_FrameRGB backed
// by m_Buffer, converted through m_SwsContext).
//
// Returns true when the decoder is ready; false (after cleanup) on any failure.
// Idempotent: returns immediately once initialized, and is a no-op while the
// source path is still unknown.
//
// Fixes: the previous version dereferenced null on allocation failure — the
// results of avformat_alloc_context(), avcodec_alloc_context3(),
// av_image_get_buffer_size() and av_malloc() were never checked.
bool CloudWangPlugin::initializeFFmpeg()
{
    if (m_FFmpegInitialized || m_SourceFilePath.empty())
    {
        return m_FFmpegInitialized;
    }

    writeLog("Initializing FFmpeg for file: " + m_SourceFilePath);

    // Open the input container.
    m_FormatContext = avformat_alloc_context();
    if (!m_FormatContext)
    {
        writeLog("Error: Could not allocate format context");
        return false;
    }
    if (avformat_open_input(&m_FormatContext, m_SourceFilePath.c_str(), nullptr, nullptr) < 0)
    {
        writeLog("Error: Could not open file " + m_SourceFilePath);
        cleanupFFmpeg();
        return false;
    }

    // Probe the streams.
    if (avformat_find_stream_info(m_FormatContext, nullptr) < 0)
    {
        writeLog("Error: Could not find stream info");
        cleanupFFmpeg();
        return false;
    }

    // Locate the second video stream in the container.
    int videoStreamCount = 0;
    for (unsigned int i = 0; i < m_FormatContext->nb_streams; i++)
    {
        if (m_FormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoStreamCount++;
            if (videoStreamCount == 2)
            {
                m_VideoStreamIndex = i;
                break;
            }
        }
    }

    if (m_VideoStreamIndex < 0)
    {
        writeLog("Error: Could not find second video stream");
        cleanupFFmpeg();
        return false;
    }

    writeLog("Found second video stream at index: " + std::to_string(m_VideoStreamIndex));

    // Find a decoder for that stream.
    AVCodecParameters* codecpar = m_FormatContext->streams[m_VideoStreamIndex]->codecpar;
    const AVCodec* codec = avcodec_find_decoder(codecpar->codec_id);
    if (!codec)
    {
        writeLog("Error: Codec not found");
        cleanupFFmpeg();
        return false;
    }

    // Create and configure the decoder context.
    m_CodecContext = avcodec_alloc_context3(codec);
    if (!m_CodecContext)
    {
        writeLog("Error: Could not allocate codec context");
        cleanupFFmpeg();
        return false;
    }
    if (avcodec_parameters_to_context(m_CodecContext, codecpar) < 0)
    {
        writeLog("Error: Could not copy codec parameters");
        cleanupFFmpeg();
        return false;
    }

    // Open the decoder.
    if (avcodec_open2(m_CodecContext, codec, nullptr) < 0)
    {
        writeLog("Error: Could not open codec");
        cleanupFFmpeg();
        return false;
    }

    m_Width = m_CodecContext->width;
    m_Height = m_CodecContext->height;

    // Record the stream frame rate (fall back to the 25 fps default when unknown).
    AVRational fps = m_FormatContext->streams[m_VideoStreamIndex]->avg_frame_rate;
    if (fps.num > 0 && fps.den > 0)
    {
        m_FrameRate = (double)fps.num / fps.den;
    }

    writeLog("Video stream info: " + std::to_string(m_Width) + "x" + std::to_string(m_Height) + 
             " @ " + std::to_string(m_FrameRate) + " fps");

    // Allocate the working frames.
    m_Frame = av_frame_alloc();
    m_FrameRGB = av_frame_alloc();
    if (!m_Frame || !m_FrameRGB)
    {
        writeLog("Error: Could not allocate frames");
        cleanupFFmpeg();
        return false;
    }

    // Allocate the RGBA8 conversion buffer and attach it to m_FrameRGB.
    const int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGBA, m_Width, m_Height, 1);
    if (numBytes <= 0)
    {
        writeLog("Error: Invalid image buffer size");
        cleanupFFmpeg();
        return false;
    }
    m_Buffer = (uint8_t*)av_malloc((size_t)numBytes);
    if (!m_Buffer)
    {
        writeLog("Error: Could not allocate RGBA buffer");
        cleanupFFmpeg();
        return false;
    }
    if (av_image_fill_arrays(m_FrameRGB->data, m_FrameRGB->linesize, m_Buffer, AV_PIX_FMT_RGBA, m_Width, m_Height, 1) < 0)
    {
        writeLog("Error: Could not fill image arrays");
        cleanupFFmpeg();
        return false;
    }

    // Conversion context: decoder's native pixel format -> packed RGBA8.
    m_SwsContext = sws_getContext(
        m_Width, m_Height, m_CodecContext->pix_fmt,
        m_Width, m_Height, AV_PIX_FMT_RGBA,
        SWS_BILINEAR, nullptr, nullptr, nullptr);

    if (!m_SwsContext)
    {
        writeLog("Error: Could not create SwsContext");
        cleanupFFmpeg();
        return false;
    }

    m_FFmpegInitialized = true;
    writeLog("FFmpeg initialized successfully");
    return true;
}

// Release every FFmpeg resource acquired by initializeFFmpeg() and reset the
// decoder state so a later call can re-initialize from scratch.
//
// Teardown order mirrors initialization in reverse: converter, pixel buffer,
// frames, codec context, and finally the container. Safe to call at any point
// during a failed initialization — every release is null-guarded, and the
// av_*_free helpers reset their pointer arguments to null.
void CloudWangPlugin::cleanupFFmpeg()
{
    if (m_SwsContext)
    {
        sws_freeContext(m_SwsContext);
        m_SwsContext = nullptr;
    }

    // m_Buffer backs m_FrameRGB's planes; freeing it here means m_FrameRGB must
    // not be used again before re-initialization.
    if (m_Buffer)
    {
        av_free(m_Buffer);
        m_Buffer = nullptr;
    }

    if (m_FrameRGB)
    {
        av_frame_free(&m_FrameRGB);
    }

    if (m_Frame)
    {
        av_frame_free(&m_Frame);
    }

    if (m_CodecContext)
    {
        avcodec_free_context(&m_CodecContext);
    }

    if (m_FormatContext)
    {
        avformat_close_input(&m_FormatContext);
    }

    // Mark the decoder as torn down; the frame cache is intentionally kept.
    m_FFmpegInitialized = false;
    m_VideoStreamIndex = -1;
}

bool CloudWangPlugin::decodeFrameAtTime(double time)
{
    if (!m_FFmpegInitialized)
    {
        if (!initializeFFmpeg())
        {
            return false;
        }
    }

    // 计算目标帧号（time 是 DaVinci 的时间，单位是帧）
    int64_t targetFrame = (int64_t)time;
    
    // 检查缓存
    if (m_FrameCache.find(targetFrame) != m_FrameCache.end())
    {
        // 缓存命中，不需要重新解码
        m_CachedTime = targetFrame;
        return true;
    }

    // 计算时间戳（转换为流的时间基准）
    AVRational timeBase = m_FormatContext->streams[m_VideoStreamIndex]->time_base;
    AVRational frameRate = m_FormatContext->streams[m_VideoStreamIndex]->avg_frame_rate;
    // 计算帧间隔（1/frameRate）
    AVRational frameInterval = { frameRate.den, frameRate.num };
    int64_t timestamp = av_rescale_q(targetFrame, frameInterval, timeBase);

    // 定位到目标帧
    if (av_seek_frame(m_FormatContext, m_VideoStreamIndex, timestamp, AVSEEK_FLAG_BACKWARD) < 0)
    {
        writeLog("Error: Could not seek to frame at time " + std::to_string(time));
        return false;
    }

    // 清空解码器缓冲区
    avcodec_flush_buffers(m_CodecContext);

    AVPacket* packet = av_packet_alloc();
    bool frameFound = false;

    // 读取并解码帧
    while (av_read_frame(m_FormatContext, packet) >= 0)
    {
        if (packet->stream_index == m_VideoStreamIndex)
        {
            // 发送包到解码器
            if (avcodec_send_packet(m_CodecContext, packet) == 0)
            {
                // 接收解码后的帧
                if (avcodec_receive_frame(m_CodecContext, m_Frame) == 0)
                {
                    // 转换格式
                    sws_scale(m_SwsContext,
                              (const uint8_t* const*)m_Frame->data, m_Frame->linesize,
                              0, m_Height,
                              m_FrameRGB->data, m_FrameRGB->linesize);

                    // 将 RGBA 数据复制到缓存（Float RGBA，使用原始尺寸）
                    size_t bufferSize = m_Width * m_Height * 4;
                    std::vector<float> frameData(bufferSize);

                    // 转换 uint8_t [0-255] 到 float [0.0-1.0]
                    for (int y = 0; y < m_Height; y++)
                    {
                        for (int x = 0; x < m_Width; x++)
                        {
                            int srcIdx = (y * m_Width + x) * 4;
                            int dstIdx = (y * m_Width + x) * 4;

                            frameData[dstIdx + 0] = m_FrameRGB->data[0][srcIdx + 0] / 255.0f; // R
                            frameData[dstIdx + 1] = m_FrameRGB->data[0][srcIdx + 1] / 255.0f; // G
                            frameData[dstIdx + 2] = m_FrameRGB->data[0][srcIdx + 2] / 255.0f; // B
                            frameData[dstIdx + 3] = m_FrameRGB->data[0][srcIdx + 3] / 255.0f; // A
                        }
                    }

                    // 存储到缓存
                    m_FrameCache[targetFrame] = std::move(frameData);
                    m_FrameCacheWidth[targetFrame] = m_Width;
                    m_FrameCacheHeight[targetFrame] = m_Height;
                    m_CachedTime = targetFrame;

                    frameFound = true;
                    break;
                }
            }
        }
        av_packet_unref(packet);
    }

    av_packet_free(&packet);
    return frameFound;
}

// Copy the cached frame for the given host time (in frames) into frameData and
// report its native dimensions. Returns false when the frame is not cached.
//
// Fix: the previous version did three separate map lookups and used operator[]
// on the width/height maps, which would silently default-insert 0 if those maps
// ever fell out of sync with the data cache. We now use find() throughout and
// treat a desynchronized cache as a miss.
bool CloudWangPlugin::getCachedFrame(double time, std::vector<float>& frameData, int& width, int& height)
{
    const int64_t targetFrame = (int64_t)time;

    const auto dataIt = m_FrameCache.find(targetFrame);
    if (dataIt == m_FrameCache.end())
    {
        return false;
    }

    const auto widthIt = m_FrameCacheWidth.find(targetFrame);
    const auto heightIt = m_FrameCacheHeight.find(targetFrame);
    if (widthIt == m_FrameCacheWidth.end() || heightIt == m_FrameCacheHeight.end())
    {
        // Dimension maps out of sync with the data cache — treat as a miss
        // rather than reporting a frame with 0x0 dimensions.
        return false;
    }

    frameData = dataIt->second;  // deliberate copy: the cache retains its entry
    width = widthIt->second;
    height = heightIt->second;
    return true;
}

// Render one output frame.
//
// Normal path: fetch the host-supplied source image (first stream), decode the
// second stream at the same time, and hand both to compositeStreams() for the
// side-by-side layout. Fallback paths: if the source image is unavailable, the
// second stream alone is stretched over the whole output; if that also fails,
// the output is filled with solid red so the failure is visible.
//
// NOTE(review): pixel access below assumes rows are contiguous — getRowBytes()
// is never consulted. Confirm the host guarantees unpadded rows.
void CloudWangPlugin::render(const OFX::RenderArguments& p_Args)
{
    // Only Float RGBA output is supported.
    if ((m_DstClip->getPixelDepth() != OFX::eBitDepthFloat) || 
        (m_DstClip->getPixelComponents() != OFX::ePixelComponentRGBA))
    {
        OFX::throwSuiteStatusException(kOfxStatErrUnsupported);
        return;
    }

    // Fetch the output image.
    std::unique_ptr<OFX::Image> dst(m_DstClip->fetchImage(p_Args.time));
    if (!dst)
    {
        OFX::throwSuiteStatusException(kOfxStatErrMemory);
        return;
    }

    // Fetch the source clip image (the first video stream).
    std::unique_ptr<OFX::Image> src(m_SrcClip->fetchImage(p_Args.time));
    if (!src)
    {
        // Source unavailable: output only the second video stream.
        if (decodeFrameAtTime(p_Args.time))
        {
            std::vector<float> frameData;
            int width, height;
            if (getCachedFrame(p_Args.time, frameData, width, height))
            {
                // Copy the second stream into the output.
                const OfxRectI& bounds = dst->getBounds();
                const int dstWidth = bounds.x2 - bounds.x1;
                const int dstHeight = bounds.y2 - bounds.y1;
                float* dstPixels = static_cast<float*>(dst->getPixelData());

                // Nearest-neighbour scale of the second stream to the full output.
                for (int y = 0; y < dstHeight; y++)
                {
                    int srcY = (y * height) / dstHeight;
                    for (int x = 0; x < dstWidth; x++)
                    {
                        int srcX = (x * width) / dstWidth;
                        int srcIdx = (srcY * width + srcX) * 4;
                        int dstIdx = (y * dstWidth + x) * 4;

                        dstPixels[dstIdx + 0] = frameData[srcIdx + 0];
                        dstPixels[dstIdx + 1] = frameData[srcIdx + 1];
                        dstPixels[dstIdx + 2] = frameData[srcIdx + 2];
                        dstPixels[dstIdx + 3] = frameData[srcIdx + 3];
                    }
                }
            }
        }
        else
        {
            // Decoding failed too: fill with solid red as a visible fallback.
            const OfxRectI& bounds = dst->getBounds();
            const int width = bounds.x2 - bounds.x1;
            const int height = bounds.y2 - bounds.y1;
            float* dstPixels = static_cast<float*>(dst->getPixelData());

            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    int idx = (y * width + x) * 4;
                    dstPixels[idx + 0] = 1.0f; // R
                    dstPixels[idx + 1] = 0.0f; // G
                    dstPixels[idx + 2] = 0.0f; // B
                    dstPixels[idx + 3] = 1.0f; // A
                }
            }
        }
        return;
    }

    // Validate the source clip format.
    if ((src->getPixelDepth() != OFX::eBitDepthFloat) || 
        (src->getPixelComponents() != OFX::ePixelComponentRGBA))
    {
        OFX::throwSuiteStatusException(kOfxStatErrUnsupported);
        return;
    }

    // Decode the second stream into the cache (no-op on a cache hit).
    bool secondStreamAvailable = decodeFrameAtTime(p_Args.time);

    // Composite both streams; `time` is used to look the frame up in the cache.
    compositeStreams(src.get(), secondStreamAvailable, dst.get(), p_Args.time);
}

// Composite the two streams side by side into dstImage:
//   left half  — srcImage (the host-supplied first stream), mapped through the
//                original stream size so the picture is shown un-stretched,
//   right half — the cached second-stream frame at `time`, or opaque black
//                when the second stream is unavailable.
//
// Fixes over the previous version: the dst/src dimension debug lines were
// logged twice per call (once at computation, again before the left-half copy),
// and the two byte-identical "fill right half with black" loops are now a
// single local lambda.
//
// NOTE(review): pixel access assumes contiguous rows (getRowBytes is unused).
void CloudWangPlugin::compositeStreams(OFX::Image* srcImage, bool secondStreamAvailable, OFX::Image* dstImage, double time)
{
    if (!srcImage || !dstImage)
    {
        return;
    }

    const OfxRectI& dstBounds = dstImage->getBounds();
    const int dstWidth = dstBounds.x2 - dstBounds.x1;
    const int dstHeight = dstBounds.y2 - dstBounds.y1;

    // Debug: destination size.
    writeLog("compositeStreams: dstWidth=" + std::to_string(dstWidth) + ", dstHeight=" + std::to_string(dstHeight));

    void* dstPixelData = dstImage->getPixelData();
    if (!dstPixelData)
    {
        return;
    }
    float* dstPixels = static_cast<float*>(dstPixelData);

    const OfxRectI& srcBounds = srcImage->getBounds();
    const int srcWidth = srcBounds.x2 - srcBounds.x1;
    const int srcHeight = srcBounds.y2 - srcBounds.y1;

    // Debug: source size.
    writeLog("compositeStreams: srcWidth=" + std::to_string(srcWidth) + ", srcHeight=" + std::to_string(srcHeight));

    void* srcPixelData = srcImage->getPixelData();
    if (!srcPixelData)
    {
        return;
    }
    float* srcPixels = static_cast<float*>(srcPixelData);

    // Split-screen layout: per getRegionOfDefinition the output is 2N x N for an
    // N x N source, so each half is N pixels wide.
    const int halfWidth = dstWidth / 2;
    writeLog("compositeStreams: halfWidth=" + std::to_string(halfWidth));

    // Fill [halfWidth, dstWidth) with opaque black — used whenever the second
    // stream cannot be shown.
    auto fillRightHalfBlack = [&]()
    {
        for (int y = 0; y < dstHeight; y++)
        {
            for (int x = halfWidth; x < dstWidth; x++)
            {
                int dstIdx = (y * dstWidth + x) * 4;
                dstPixels[dstIdx + 0] = 0.0f; // R
                dstPixels[dstIdx + 1] = 0.0f; // G
                dstPixels[dstIdx + 2] = 0.0f; // B
                dstPixels[dstIdx + 3] = 1.0f; // A
            }
        }
    };

    // ---- Left half: the first stream --------------------------------------
    // The host may hand us a source already stretched to the timeline size
    // (e.g. 3840x3840 content on a 7680x3840 timeline). Map each output pixel
    // through the ORIGINAL stream size (m_Width x m_Height from FFmpeg) so the
    // left half shows the un-stretched picture.
    int originalWidth = m_Width;
    int originalHeight = m_Height;

    // If FFmpeg has not probed the file yet, assume the source is unstretched.
    if (originalWidth <= 0 || originalHeight <= 0)
    {
        originalWidth = srcWidth;
        originalHeight = srcHeight;
        writeLog("compositeStreams: FFmpeg not initialized, using src dimensions as original");
    }

    writeLog("compositeStreams: originalWidth=" + std::to_string(originalWidth) + ", originalHeight=" + std::to_string(originalHeight));

    if (originalWidth <= 0 || originalHeight <= 0)
    {
        writeLog("compositeStreams: ERROR - invalid original dimensions, skipping left half");
        return;
    }

    for (int y = 0; y < dstHeight; y++)
    {
        // Output row -> original row -> (possibly stretched) source row, clamped.
        int originalY = (y * originalHeight) / dstHeight;
        if (originalY < 0) originalY = 0;
        if (originalY >= originalHeight) originalY = originalHeight - 1;

        int srcY = (originalY * srcHeight) / originalHeight;
        if (srcY < 0) srcY = 0;
        if (srcY >= srcHeight) srcY = srcHeight - 1;

        for (int x = 0; x < halfWidth; x++)
        {
            // Same two-stage mapping horizontally, within the left half.
            int originalX = (x * originalWidth) / halfWidth;
            if (originalX < 0) originalX = 0;
            if (originalX >= originalWidth) originalX = originalWidth - 1;

            int srcX = (originalX * srcWidth) / originalWidth;
            if (srcX < 0) srcX = 0;
            if (srcX >= srcWidth) srcX = srcWidth - 1;

            int srcIdx = (srcY * srcWidth + srcX) * 4;
            int dstIdx = (y * dstWidth + x) * 4;

            dstPixels[dstIdx + 0] = srcPixels[srcIdx + 0]; // R
            dstPixels[dstIdx + 1] = srcPixels[srcIdx + 1]; // G
            dstPixels[dstIdx + 2] = srcPixels[srcIdx + 2]; // B
            dstPixels[dstIdx + 3] = srcPixels[srcIdx + 3]; // A
        }
    }

    // ---- Right half: the second stream ------------------------------------
    if (!secondStreamAvailable)
    {
        fillRightHalfBlack();
        return;
    }

    std::vector<float> frameData;
    int secondWidth = 0, secondHeight = 0;
    if (!getCachedFrame(time, frameData, secondWidth, secondHeight))
    {
        fillRightHalfBlack();
        return;
    }

    // Degenerate sizes would divide by zero below; nothing sensible to draw.
    if (halfWidth <= 0 || secondWidth <= 0 || secondHeight <= 0)
    {
        return;
    }

    // Nearest-neighbour scale of the cached frame into the right half.
    for (int y = 0; y < dstHeight; y++)
    {
        int srcY = (y * secondHeight) / dstHeight;
        if (srcY < 0) srcY = 0;
        if (srcY >= secondHeight) srcY = secondHeight - 1;

        for (int x = halfWidth; x < dstWidth; x++)
        {
            int srcX = ((x - halfWidth) * secondWidth) / halfWidth;
            if (srcX < 0) srcX = 0;
            if (srcX >= secondWidth) srcX = secondWidth - 1;

            int srcIdx = (srcY * secondWidth + srcX) * 4;
            int dstIdx = (y * dstWidth + x) * 4;

            dstPixels[dstIdx + 0] = frameData[srcIdx + 0]; // R
            dstPixels[dstIdx + 1] = frameData[srcIdx + 1]; // G
            dstPixels[dstIdx + 2] = frameData[srcIdx + 2]; // B
            dstPixels[dstIdx + 3] = frameData[srcIdx + 3]; // A
        }
    }
}

// This effect always synthesizes new pixels (side-by-side composite), so it
// can never be a pass-through of its input.
bool CloudWangPlugin::isIdentity(const OFX::IsIdentityArguments& p_Args, OFX::Clip*& p_IdentityClip, double& p_IdentityTime)
{
    return false;
}

// Report the output region of definition: the source RoD widened to twice its
// width (side-by-side layout), same height. Returns false for a degenerate
// source RoD so the host falls back to its default.
bool CloudWangPlugin::getRegionOfDefinition(const OFX::RegionOfDefinitionArguments& p_Args, OfxRectD& p_RoD)
{
    writeLog("getRegionOfDefinition: called at time=" + std::to_string(p_Args.time));

    // Ask the source clip for its RoD at this time and measure it.
    const OfxRectD srcRoD = m_SrcClip->getRegionOfDefinition(p_Args.time);
    const double srcWidth = srcRoD.x2 - srcRoD.x1;
    const double srcHeight = srcRoD.y2 - srcRoD.y1;

    writeLog("getRegionOfDefinition: srcRoD x1=" + std::to_string(srcRoD.x1) + ", y1=" + std::to_string(srcRoD.y1) + 
             ", x2=" + std::to_string(srcRoD.x2) + ", y2=" + std::to_string(srcRoD.y2));
    writeLog("getRegionOfDefinition: srcWidth=" + std::to_string(srcWidth) + ", srcHeight=" + std::to_string(srcHeight));

    // Guard clause: empty or invalid source extent.
    if (!(srcWidth > 0 && srcHeight > 0))
    {
        writeLog("getRegionOfDefinition: returning false (invalid dimensions)");
        return false;
    }

    // Two pictures side by side: double the width, keep the vertical extent.
    p_RoD.x1 = srcRoD.x1;
    p_RoD.y1 = srcRoD.y1;
    p_RoD.x2 = srcRoD.x1 + srcWidth * 2.0;
    p_RoD.y2 = srcRoD.y2;

    writeLog("getRegionOfDefinition: outputRoD x1=" + std::to_string(p_RoD.x1) + ", y1=" + std::to_string(p_RoD.y1) + 
             ", x2=" + std::to_string(p_RoD.x2) + ", y2=" + std::to_string(p_RoD.y2));
    writeLog("getRegionOfDefinition: outputWidth=" + std::to_string(p_RoD.x2 - p_RoD.x1) + ", outputHeight=" + std::to_string(p_RoD.y2 - p_RoD.y1));
    writeLog("getRegionOfDefinition: returning true");

    return true;
}

void CloudWangPlugin::changedParam(const OFX::InstanceChangedArgs& p_Args, const std::string& p_ParamName)
{
    // No parameters are defined yet — nothing to react to.
}

// React to a change of the source clip: tear the decoder down (it will be
// lazily re-initialized on the next render) and pick up the new file path.
void CloudWangPlugin::changedClip(const OFX::InstanceChangedArgs& p_Args, const std::string& p_ClipName)
{
    // Only the source clip is of interest.
    if (p_ClipName != kOfxImageEffectSimpleSourceClipName)
    {
        return;
    }

    cleanupFFmpeg();

    // Re-read the source file path, if the host exposes the extension property.
    try
    {
        const std::string newPath = getPropertySet().propGetString(kOfxImageEffectPropSrcFilePath, false);
        if (!newPath.empty() && newPath != m_SourceFilePath)
        {
            m_SourceFilePath = newPath;
            writeLog("Source file path changed: " + newPath);
        }
    }
    catch (...)
    {
        // Property unsupported on this host — keep the previous path.
    }
}

// Release all FFmpeg resources when the effect instance is destroyed.
CloudWangPlugin::~CloudWangPlugin()
{
    cleanupFFmpeg();
}

////////////////////////////////////////////////////////////////////////////////

using namespace OFX;

// Register the plugin's identifier and version with the OFX factory helper.
CloudWangPluginFactory::CloudWangPluginFactory()
    : OFX::PluginFactoryHelper<CloudWangPluginFactory>(kPluginIdentifier, kPluginVersionMajor, kPluginVersionMinor)
{
}

// Describe the plugin to the host: labels, supported contexts, bit depths,
// and capability flags.
void CloudWangPluginFactory::describe(OFX::ImageEffectDescriptor& p_Desc)
{
    // Basic labels
    p_Desc.setLabels(kPluginName, kPluginName, kPluginName);
    p_Desc.setPluginGrouping(kPluginGrouping);
    p_Desc.setPluginDescription(kPluginDescription);

    // Add the supported contexts, only filter at the moment
    p_Desc.addSupportedContext(eContextFilter);
    p_Desc.addSupportedContext(eContextGeneral);

    // Add supported pixel depths — the render path only handles Float RGBA.
    p_Desc.addSupportedBitDepth(eBitDepthFloat);

    // Set a few flags (tiles/multi-resolution disabled via the k* constants above).
    p_Desc.setSingleInstance(false);
    p_Desc.setHostFrameThreading(false);
    p_Desc.setSupportsMultiResolution(kSupportsMultiResolution);
    p_Desc.setSupportsTiles(kSupportsTiles);
    p_Desc.setTemporalClipAccess(false);
    p_Desc.setRenderTwiceAlways(false);
    p_Desc.setSupportsMultipleClipPARs(kSupportsMultipleClipPARs);

    // Indicates that the plugin output does not depend on location or neighbours of a given pixel.
    // NOTE(review): the effect actually remaps pixel positions (side-by-side
    // layout with scaling), so this flag looks inaccurate — confirm how the
    // target host interprets it before relying on it.
    p_Desc.setNoSpatialAwareness(true);
}

// Describe the clips for a given context: one mandated source clip and the
// output clip. No parameters are defined.
void CloudWangPluginFactory::describeInContext(OFX::ImageEffectDescriptor& p_Desc, OFX::ContextEnum /*p_Context*/)
{
    // Source clip only in the filter context
    // Create the mandated source clip (RGBA only — matches the render checks).
    ClipDescriptor* srcClip = p_Desc.defineClip(kOfxImageEffectSimpleSourceClipName);
    srcClip->addSupportedComponent(ePixelComponentRGBA);
    srcClip->setTemporalClipAccess(false);
    srcClip->setSupportsTiles(kSupportsTiles);
    srcClip->setIsMask(false);

    // Create the mandated output clip
    ClipDescriptor* dstClip = p_Desc.defineClip(kOfxImageEffectOutputClipName);
    dstClip->addSupportedComponent(ePixelComponentRGBA);
    dstClip->addSupportedComponent(ePixelComponentAlpha);
    dstClip->setSupportsTiles(kSupportsTiles);
}

// Create an effect instance for the host; ownership passes to the OFX support
// library, which destroys it when the host releases the instance.
ImageEffect* CloudWangPluginFactory::createInstance(OfxImageEffectHandle p_Handle, ContextEnum /*p_Context*/)
{
    return new CloudWangPlugin(p_Handle);
}

// OFX entry point: register this plugin's factory with the host.
// The factory is a function-local static so it outlives the registration.
void OFX::Plugin::getPluginIDs(PluginFactoryArray& p_FactoryArray)
{
    static CloudWangPluginFactory cloudWangPlugin;
    p_FactoryArray.push_back(&cloudWangPlugin);
}

