#include "FrameReceiver.h"

#include <opencv2/opencv.hpp>

#include <stdio.h>
#include <fcntl.h>
#include <io.h>
#include <bitset>

#pragma region Init
// Construct a receiver bound to the given camera description.
// All real setup (CUDA context, stdout binary mode) happens in Init().
FrameReceiver::FrameReceiver(CameraInfo info) : m_info(info) { Init(); }

// Tear down the CUDA context and mark the receiver unusable.
FrameReceiver::~FrameReceiver() { Dispose(); }

// One-time setup: reset all state, create a CUDA context on GPU 0, and switch
// stdout to binary mode (frames/packets are streamed over stdout).
// On any failure bReady stays false and StartReceiveFrame() becomes a no-op.
void FrameReceiver::Init()
{
    // Reset all state so a failed init leaves the receiver safely disabled.
    dpFrame = 0;
    dMappedFrame = 0;
    cuContext = NULL;
    bReady = false;
    bFirstFrame = true;

    int iGpu = 0; // always use the first GPU
    try
    {
        ck(cuInit(0));
        int nGpu = 0;
        ck(cuDeviceGetCount(&nGpu));
        if (iGpu < 0 || iGpu >= nGpu)
        {
            std::ostringstream err;
            err << "GPU ordinal out of range. Should be within [" << 0 << ", " << nGpu - 1 << "]" << std::endl;
            throw std::invalid_argument(err.str());
        }

        createCudaContext(&cuContext, iGpu, CU_CTX_SCHED_BLOCKING_SYNC);
        bReady = true;
    }
    catch (const std::exception &ex)
    {
        // Keep bReady == false, but do not swallow the reason silently.
        // Diagnostics must go to stderr: stdout is reserved for the binary
        // frame/packet stream (switched to binary mode just below).
        std::cerr << "FrameReceiver::Init failed: " << ex.what() << std::endl;
    }

    // Frames/packets are written to stdout; on Windows it must be switched to
    // binary mode or CRLF translation corrupts the stream.
    int result = _setmode(_fileno(stdout), _O_BINARY);
    if (result == -1)
        std::wcerr << "can not set mode."; // stderr: stdout is the data channel
}

void FrameReceiver::Dispose()
{
    bReady = false;
    ck(cuCtxDestroy(cuContext));
}
#pragma endregion

#pragma region Public
// Launch the frame-receiving loop on a dedicated worker thread.
// The thread runs threadReceiveFrame() with `this` as context.
void FrameReceiver::Start()
{
    _threadReceiveFrame.setName("ThreadReceiveFrame");
    _threadReceiveFrame.start(&FrameReceiver::threadReceiveFrame, this);
}

void FrameReceiver::threadReceiveFrame(void *pContext)
{
    FrameReceiver *pFrameReceiver = static_cast<FrameReceiver *>(pContext);
    pFrameReceiver->StartReceiveFrame();
}

// Request the receive loop to exit (it polls bReady each iteration) and wait
// up to 5 seconds for the worker thread to finish.
void FrameReceiver::Stop()
{
    bReady = false;
    _threadReceiveFrame.tryJoin(5000);
}
#pragma endregion

#pragma region Decode
// Connect / decode / dispatch loop. Runs on the receive thread until the
// stream ends or Stop() clears bReady.
//
// Pipeline: FFmpegDemuxer -> NvDecoder -> 32-bit color conversion into
// dpFrame, then one of three outputs selected by m_info.eFormat:
//   Map     - remap the frame through the pixel/uv triangle tables into a
//             cv::cuda::GpuMat and show a half-size preview window.
//   RawData - copy m_info.copyHeight rows to host memory and write the raw
//             buffer to stdout (stdout was set to binary mode in Init()).
//   Mpeg1   - re-encode with NVENC (configured as h264 via "-codec h264"
//             below, despite the enum name) and write packets to stdout.
void FrameReceiver::StartReceiveFrame()
{
    if (!bReady)
        return; // CUDA init failed or Stop() was already requested.

    // Bounded, NUL-terminated copy of the URL. (A fixed-size 256-byte memcpy
    // here would read past the end of a shorter std::string buffer.)
    char url[256] = {0};
    size_t urlLen = m_info.url.size();
    if (urlLen > sizeof(url) - 1)
        urlLen = sizeof(url) - 1;
    memcpy(url, m_info.url.data(), urlLen);

    ck(cuCtxSetCurrent(cuContext));
    FFmpegDemuxer demuxer(url);
    if (!demuxer.IsConnected())
        return; // could not open the stream

    NvDecoder dec(cuContext, true, FFmpeg2NvCodecId(demuxer.GetVideoCodec()), true);

    // Round the width up to an even value; 4 bytes per pixel after conversion.
    int Width = (demuxer.GetWidth() + 1) & ~1;
    int nPitch = Width * 4;
    nVideoWidth = Width;
    nVideoHeight = demuxer.GetHeight();

    if (nVideoWidth == 0 || nVideoHeight == 0)
        return; // demuxer reported no usable video geometry

    // Clamp the requested copy height into (0, nVideoHeight].
    if (m_info.copyHeight < 0 || m_info.copyHeight >= nVideoHeight)
        m_info.copyHeight = nVideoHeight;

    // (Re)allocate and zero the device-side frame buffers.
    ck(cuMemFree(dpFrame));
    ck(cuMemAlloc(&dpFrame, nVideoWidth * nVideoHeight * 4));
    ck(cuMemsetD8(dpFrame, 0, nVideoWidth * nVideoHeight * 4));

    ck(cuMemFree(dMappedFrame));
    ck(cuMemAlloc(&dMappedFrame, nVideoWidth * nVideoHeight * 4));
    ck(cuMemsetD8(dMappedFrame, 0, nVideoWidth * nVideoHeight * 4));

    cv::cuda::GpuMat gpuMat(nVideoHeight, nVideoWidth, CV_8UC4);
    dMatFrame = gpuMat;

    // UV remap tables: two triangles cover the whole frame. Every pair of
    // floats is one coordinate, every six floats one triangle; pixel[] and
    // uv[] correspond element-for-element.
    int num = 2;
    float pixel[12] = {0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0};
    float uv[12] = {0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0};
    ck(cudaMalloc(&pixelMap, num * 6 * sizeof(float)));
    ck(cudaMalloc(&uvMap, num * 6 * sizeof(float)));
    ck(cudaMemcpy(pixelMap, pixel, num * 6 * sizeof(float), cudaMemcpyHostToDevice));
    ck(cudaMemcpy(uvMap, uv, num * 6 * sizeof(float), cudaMemcpyHostToDevice));

    // NVENC re-encoder setup (used only by the Mpeg1 path).
    NvEncoderInitParam encodeCLIOptions;
    std::ostringstream oss;
    oss << "-codec h264 ";
    encodeCLIOptions = NvEncoderInitParam(oss.str().c_str());

    std::unique_ptr<NvEncoderCuda> pEnc(new NvEncoderCuda(cuContext, nVideoWidth, nVideoHeight, NV_ENC_BUFFER_FORMAT_NV12));
    NV_ENC_INITIALIZE_PARAMS initializeParams = {NV_ENC_INITIALIZE_PARAMS_VER};
    NV_ENC_CONFIG encodeConfig = {NV_ENC_CONFIG_VER};
    initializeParams.encodeConfig = &encodeConfig;
    pEnc->CreateDefaultEncoderParams(&initializeParams, encodeCLIOptions.GetEncodeGUID(), encodeCLIOptions.GetPresetGUID(), encodeCLIOptions.GetTuningInfo());
    encodeCLIOptions.SetInitParams(&initializeParams, NV_ENC_BUFFER_FORMAT_NV12);
    pEnc->CreateEncoder(&initializeParams);

    CUdeviceptr dFrame;
    int nVideoBytes = 0, nFrameReturned = 0, iMatrix = 0;
    uint8_t *pVideo = NULL;
    uint8_t *pFrame;

    do
    {
        demuxer.Demux(&pVideo, &nVideoBytes);
        nFrameReturned = dec.Decode(pVideo, nVideoBytes);
        if (bFirstFrame && nFrameReturned)
        {
            // Allocate the host staging buffer once per connection, now that
            // the frame geometry is known.
            bFirstFrame = false;
            pHostFrame = new uint8_t[nVideoWidth * nVideoHeight * 4];
        }

        for (int i = 0; i < nFrameReturned; i++)
        {
            pFrame = dec.GetFrame();
            if (!GetDeviceFrameBuffer(&dFrame, &nPitch))
                break; // Stop() requested or buffer not ready
            iMatrix = dec.GetVideoFormatInfo().video_signal_description.matrix_coefficients;

            // Convert the decoded surface to 32-bit color in dFrame.
            if (dec.GetBitDepth() == 8)
            {
                if (dec.GetOutputFormat() == cudaVideoSurfaceFormat_YUV444)
                    YUV444ToColor32<BGRA32>(pFrame, dec.GetWidth(), (uint8_t *)dFrame, nPitch, dec.GetWidth(), dec.GetHeight(), iMatrix);
                else // default assumed NV12
                    Nv12ToColor32<RGBA32>(pFrame, dec.GetWidth(), (uint8_t *)dFrame, nPitch, dec.GetWidth(), dec.GetHeight(), iMatrix);
            }
            else
            {
                if (dec.GetOutputFormat() == cudaVideoSurfaceFormat_YUV444)
                    YUV444P16ToColor32<BGRA32>(pFrame, 2 * dec.GetWidth(), (uint8_t *)dFrame, nPitch, dec.GetWidth(), dec.GetHeight(), iMatrix);
                else // default assumed P016
                    P016ToColor32<BGRA32>(pFrame, 2 * dec.GetWidth(), (uint8_t *)dFrame, nPitch, dec.GetWidth(), dec.GetHeight(), iMatrix);
            }

            if (m_info.eFormat == Map)
            {
                // Remap into the GpuMat and show a half-size debug preview.
                // (An alternative variant that remapped into dMappedFrame and
                // previewed via a host copy was removed here.)
                Color32ToUV((uint8_t *)dpFrame, nVideoWidth, nVideoHeight, dMatFrame, nVideoWidth, nVideoHeight, (float *)pixelMap, (float *)uvMap, num);
                cv::Mat img;
                dMatFrame.download(img);
                cv::resize(img, img, cv::Size(nVideoWidth / 2, nVideoHeight / 2));
                cv::imshow("img", img);
                cv::waitKey(1);
            }
            else if (m_info.eFormat == RawData)
            {
                CUDA_MEMCPY2D m = {0};
                m.srcMemoryType = CU_MEMORYTYPE_DEVICE;
                m.srcDevice = dpFrame;
                m.srcPitch = nVideoWidth * 4;
                m.dstMemoryType = CU_MEMORYTYPE_HOST;
                m.dstHost = pHostFrame;
                m.dstPitch = nVideoWidth * 4;
                m.WidthInBytes = nVideoWidth * 4;
                m.Height = m_info.copyHeight;
                ck(cuMemcpy2DAsync(&m, 0));
                // The copy is asynchronous; wait for it before reading
                // pHostFrame on the host, otherwise fwrite races the DMA.
                ck(cuStreamSynchronize(0));

                // NOTE(review): the full frame is written even though only
                // copyHeight rows were refreshed above; rows beyond that keep
                // their previous contents. Presumably the consumer expects a
                // fixed frame size -- confirm before changing.
                fwrite(pHostFrame, sizeof(unsigned char), nVideoWidth * nVideoHeight * 4, stdout);
                break;
            }
            else if (m_info.eFormat == Mpeg1)
            {
                std::vector<std::vector<uint8_t>> vPacket;
                const NvEncInputFrame *encoderInputFrame = pEnc->GetNextInputFrame();
                NvEncoderCuda::CopyToDeviceFrame(cuContext, (uint8_t *)dFrame, 0, (CUdeviceptr)encoderInputFrame->inputPtr,
                                                 (int)encoderInputFrame->pitch,
                                                 pEnc->GetEncodeWidth(),
                                                 pEnc->GetEncodeHeight(),
                                                 CU_MEMORYTYPE_DEVICE,
                                                 encoderInputFrame->bufferFormat,
                                                 encoderInputFrame->chromaOffsets,
                                                 encoderInputFrame->numChromaPlanes);

                pEnc->EncodeFrame(vPacket);

                for (std::vector<uint8_t> &packet : vPacket)
                    fwrite(reinterpret_cast<char *>(packet.data()), sizeof(char), packet.size(), stdout);
            }
        }
    } while (nVideoBytes && bReady);

    // Release device buffers and null the pointers so a later reconnect's
    // cuMemFree(0) is a harmless no-op.
    ck(cuMemFree(dpFrame));
    dpFrame = 0;
    ck(cuMemFree(dMappedFrame));
    dMappedFrame = 0;
    ck(cudaFree(pixelMap));
    ck(cudaFree(uvMap));

    // Release the host staging buffer and rearm the first-frame logic so a
    // reconnect reallocates a buffer matching the (possibly new) resolution
    // instead of leaking or reusing a stale-size one. Guarded by bFirstFrame
    // so we never delete a pointer this session did not allocate.
    if (!bFirstFrame)
    {
        delete[] pHostFrame;
        pHostFrame = NULL;
        bFirstFrame = true;
    }
}

// Hand out the shared decode target buffer and its pitch.
// Returns false (outputs untouched) when the receiver is stopped or the
// device buffer has not been allocated yet.
bool FrameReceiver::GetDeviceFrameBuffer(CUdeviceptr *pdpFrame, int *pnPitch)
{
    const bool available = bReady && dpFrame != 0;
    if (available)
    {
        *pdpFrame = (CUdeviceptr)dpFrame;
        *pnPitch = nVideoWidth * 4; // 4 bytes per pixel (32-bit color)
    }
    return available;
}
#pragma endregion