#include "appdef.h"
#include "confile.h"
#include <iostream>
#include <QDebug>
IniParser* g_confile = NULL;
extern "C" {
#include "libavutil/avutil.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include <libavutil/timestamp.h>
#include <libavutil/imgutils.h>
}
#include <opencv2/opencv.hpp>
void test(const char* filename);
// Convert a decoded AVFrame to a BGR24 cv::Mat using a caller-owned SwsContext.
// The SwsContext must already be configured to scale frame->width x frame->height
// in the frame's pixel format to BGR24 of the same size (see the
// sws_getCachedContext call sites). The context is NOT freed here; the caller
// owns and reuses it across frames.
static cv::Mat avFrameToCvMat(AVFrame* frame, SwsContext* sws_ctx) {
    cv::Mat bgr(frame->height, frame->width, CV_8UC3);

    // Use the Mat's actual row stride rather than assuming 3 * width:
    // bgr.step is the authoritative bytes-per-row value and stays correct
    // even if OpenCV ever pads rows.
    int stride = static_cast<int>(bgr.step);
    uint8_t* dst_planes[1] = { bgr.data };
    sws_scale(sws_ctx, (const uint8_t* const*)frame->data, frame->linesize, 0,
              frame->height, dst_planes, &stride);

    return bgr;
}
using namespace cv;
int  iCheckMosaicImage(const Mat& srcFrame)
{
    Mat srcFrameGray;

    int l_iTotalCorner = 0;

    cvtColor(srcFrame, srcFrameGray, COLOR_BGR2GRAY);

    cornerHarris(srcFrameGray, srcFrameGray, 2, 3, 0.04);
    threshold(srcFrameGray, srcFrameGray, 0.010, 255, cv::THRESH_BINARY);

    //imshow("srcFrameGray", srcFrameGray);

    int l_iRow = srcFrameGray.rows;
    int l_iCol = srcFrameGray.cols;


    for (int i = 0; i < l_iRow; i++)
    {
        for (int j = 0; j < l_iCol; j++)
        {
            if (srcFrameGray.at<float>(i, j) == 255)//值255为角点
            {
                l_iTotalCorner++;
            }
        }
    }

    return l_iTotalCorner;

}

// Detect corrupted color blocks in a packed 24-bit test-pattern frame.
// The frame is treated as a 20x10 grid of blocks; each block's expected color
// is derived from (frame_id, x, y) and must match the generator's formula.
// Returns true (abnormal) when more than 10% of the blocks deviate from the
// expected color by more than the threshold.
bool ColorBlockDetectBGR(AVFrame* frame, int frame_id) {
    if (!frame || !frame->data[0])
        return false;
    static int block_cols = 20;
    static int block_rows = 10;

    int block_w = frame->width / block_cols;
    int block_h = frame->height / block_rows;
    // Guard against frames smaller than the block grid (block_w/block_h of 0
    // would sample degenerate coordinates); consistent with the guard in
    // WrongColorBlockDetectYUV.
    if (block_w <= 0 || block_h <= 0)
        return false;
    int error_blocks = 0;
    int color_threshold = 30; // allowed per-block color deviation

    for (int y = 0; y < block_rows; ++y) {
        for (int x = 0; x < block_cols; ++x) {
            int cx = x * block_w + block_w / 2;
            int cy = y * block_h + block_h / 2;
            // Packed 24-bit: 3 bytes per pixel, all in plane 0.
            // NOTE(review): the function name says BGR but pixel[0] is read as
            // R — verify the byte order against the actual sws_scale target
            // format (BGR24 would put B first).
            uint8_t* pixel = frame->data[0] + cy * frame->linesize[0] + cx * 3;
            int r = pixel[0];
            int g = pixel[1];
            int b = pixel[2];

            // Expected pattern color for this block (must match the generator).
            int r_exp = (frame_id * 7 + x * 13 + y * 17) % 256;
            int g_exp = (frame_id * 11 + x * 19 + y * 23) % 256;
            int b_exp = (frame_id * 17 + x * 29 + y * 31) % 256;

            int diff = abs(r - r_exp) + abs(g - g_exp) + abs(b - b_exp);
            if (diff > color_threshold) error_blocks++;
        }
    }
    if (error_blocks > block_cols * block_rows * 0.1) {
        qDebug("Frame %d error，errorBlockNum: %d\n", frame_id, error_blocks);
        return true;
    }
    return false;
}

// Detect corrupted color blocks in a planar YUV420-style test-pattern frame.
// The frame is divided into a 20x10 block grid. For each block, the expected
// RGB color is derived from (frame_id, x, y), converted to YUV via the BT.601
// integer approximation, and compared against the sampled block-center pixel.
// Returns true (abnormal) when more than 10% of the blocks deviate beyond the
// threshold.
// NOTE(review): the plane indexing assumes a 4:2:0 planar layout — confirm
// the decoder actually delivers YUV420P (or a compatible planar format).
bool ColorBlockDetectYUV(AVFrame* frame, int frame_id) {
    if (!frame || !frame->data[0] || !frame->data[1] || !frame->data[2])
        return false;
    static int block_cols = 20;
    static int block_rows = 10;

    int block_w = frame->width / block_cols;
    int block_h = frame->height / block_rows;
    // Frames smaller than the grid cannot be sampled sensibly.
    if (block_w <= 0 || block_h <= 0)
        return false;
    int error_blocks = 0;
    int color_threshold = 30; // allowed per-block YUV deviation
    for (int y = 0; y < block_rows; ++y) {
        for (int x = 0; x < block_cols; ++x) {
            int cx = x * block_w + block_w / 2;
            int cy = y * block_h + block_h / 2;

            // Luma is full-resolution; chroma planes are subsampled 2x2.
            int y_index = cy * frame->linesize[0] + cx;
            int u_index = (cy / 2) * frame->linesize[1] + (cx / 2);
            int v_index = (cy / 2) * frame->linesize[2] + (cx / 2);

            uint8_t Y = frame->data[0][y_index];
            uint8_t U = frame->data[1][u_index];
            uint8_t V = frame->data[2][v_index];

            // Expected RGB for this block (must match the pattern generator).
            int r_exp = (frame_id * 7 + x * 13 + y * 17) % 256;
            int g_exp = (frame_id * 11 + x * 19 + y * 23) % 256;
            int b_exp = (frame_id * 17 + x * 29 + y * 31) % 256;

            // RGB -> YUV, BT.601 limited-range integer approximation.
            int Y_exp = (66 * r_exp + 129 * g_exp + 25 * b_exp + 128) >> 8; Y_exp += 16;
            int U_exp = (-38 * r_exp - 74 * g_exp + 112 * b_exp + 128) >> 8; U_exp += 128;
            int V_exp = (112 * r_exp - 94 * g_exp - 18 * b_exp + 128) >> 8; V_exp += 128;

            Y_exp = std::min(255, std::max(0, Y_exp));
            U_exp = std::min(255, std::max(0, U_exp));
            V_exp = std::min(255, std::max(0, V_exp));

            int diff = abs(Y - Y_exp) + abs(U - U_exp) + abs(V - V_exp);
            if (diff > color_threshold) error_blocks++;
        }
    }
    if (error_blocks > block_cols * block_rows * 0.1) {
        qDebug("Frame %d 异常，异常色块数: %d\n", frame_id, error_blocks);
        return true;
    }
    return false;
}

// Disabled legacy variant of ColorBlockDetectYUV: instead of taking a
// frame_id, it derives the base color A from the top-left block and predicts
// the remaining blocks from (A, x, y). Superseded by ColorBlockDetectYUVRaw
// below; kept for reference only.
#if 0
inline bool ColorBlockDetectYUV(AVFrame* frame) {
    if (!frame)
        return false;
    static int block_cols = 20;
    static int block_rows = 10;

    int block_w = frame->width / block_cols;
    int block_h = frame->height / block_rows;
    int error_blocks = 0;
    int color_threshold = 30; // allowed per-block color deviation

    // 1. Sample the center pixel of the top-left block to derive A_r, A_g, A_b.
    int ax = 0, ay = 0;
    int a_cx = ax * block_w + block_w / 2;
    int a_cy = ay * block_h + block_h / 2;

    int y_index = a_cy * frame->linesize[0] + a_cx;
    int u_index = (a_cy / 2) * frame->linesize[1] + (a_cx / 2);
    int v_index = (a_cy / 2) * frame->linesize[2] + (a_cx / 2);

    uint8_t Y = frame->data[0][y_index];
    uint8_t U = frame->data[1][u_index];
    uint8_t V = frame->data[2][v_index];

    // Convert YUV to RGB directly to obtain A_r, A_g, A_b.
    int C = Y - 16;
    int D = U - 128;
    int E = V - 128;
    int A_r = std::min(255, std::max(0, (298 * C + 409 * E + 128) >> 8));
    int A_g = std::min(255, std::max(0, (298 * C - 100 * D - 208 * E + 128) >> 8));
    int A_b = std::min(255, std::max(0, (298 * C + 516 * D + 128) >> 8));

    // 2. Check every block.
    for (int y = 0; y < block_rows; ++y) {
        for (int x = 0; x < block_cols; ++x) {
            int cx = x * block_w + block_w / 2;
            int cy = y * block_h + block_h / 2;

            int y_index = cy * frame->linesize[0] + cx;
            int u_index = (cy / 2) * frame->linesize[1] + (cx / 2);
            int v_index = (cy / 2) * frame->linesize[2] + (cx / 2);

            uint8_t Y = frame->data[0][y_index];
            uint8_t U = frame->data[1][u_index];
            uint8_t V = frame->data[2][v_index];

            int r_exp = (A_r + x * 13 + y * 17 + x * y * 7) % 256;
            int g_exp = (A_g + x * 19 + y * 23 + (x ^ y) * 11) % 256;
            int b_exp = (A_b + x * 29 + y * 31 + (x * x + y * y) * 3) % 256;

            int Y_exp = (66 * r_exp + 129 * g_exp + 25 * b_exp + 128) >> 8; Y_exp += 16;
            int U_exp = (-38 * r_exp - 74 * g_exp + 112 * b_exp + 128) >> 8; U_exp += 128;
            int V_exp = (112 * r_exp - 94 * g_exp - 18 * b_exp + 128) >> 8; V_exp += 128;

            Y_exp = std::min(255, std::max(0, Y_exp));
            U_exp = std::min(255, std::max(0, U_exp));
            V_exp = std::min(255, std::max(0, V_exp));

            int diff = abs(Y - Y_exp) + abs(U - U_exp) + abs(V - V_exp);
            if (diff > color_threshold) error_blocks++;
        }
    }
    if (error_blocks > block_cols * block_rows * 0.1) {
        return true; // abnormal
    }
    return false; // normal
}
#endif
#pragma once
#include <stdint.h>
#include <algorithm>
#include <libavutil/frame.h>
#include <opencv2/opencv.hpp>
// Frame-id-free color-block check for a planar YUV 4:2:0 test-pattern frame.
// The base color A is recovered from the top-left block's center pixel, then
// every block in the 20x10 grid is predicted from (A, x, y) and compared
// against 5 sampled points (center + four quarter-corners). A block is
// abnormal as soon as one sample deviates beyond the threshold; the frame is
// abnormal when more than error_ratio of the blocks are abnormal.
// (NOTE(review): the stray "#pragma once" + includes just above this function
// suggest it was pasted in from a header — worth cleaning up.)
inline bool ColorBlockDetectYUVRaw(AVFrame* frame) {
    if (!frame || !frame->data[0] || !frame->data[1] || !frame->data[2])
        return false;
    static int block_cols = 20;
    static int block_rows = 10;

    int block_w = frame->width / block_cols;
    int block_h = frame->height / block_rows;
    // Frames smaller than the grid cannot be sampled sensibly.
    if (block_w <= 0 || block_h <= 0)
        return false;
    int error_blocks = 0;
    int color_threshold = 30;
    double error_ratio = 0.1;

    // 1. Sample the center pixel of the top-left block to derive A_r, A_g, A_b.
    int ax = 0, ay = 0;
    int a_cx = ax * block_w + block_w / 2;
    int a_cy = ay * block_h + block_h / 2;

    int y_index = a_cy * frame->linesize[0] + a_cx;
    int u_index = (a_cy / 2) * frame->linesize[1] + (a_cx / 2);
    int v_index = (a_cy / 2) * frame->linesize[2] + (a_cx / 2);

    uint8_t Y = frame->data[0][y_index];
    uint8_t U = frame->data[1][u_index];
    uint8_t V = frame->data[2][v_index];

    // YUV -> RGB (BT.601 limited-range integer approximation) gives A_r, A_g, A_b.
    int C = Y - 16;
    int D = U - 128;
    int E = V - 128;
    int A_r = std::min(255, std::max(0, (298 * C + 409 * E + 128) >> 8));
    int A_g = std::min(255, std::max(0, (298 * C - 100 * D - 208 * E + 128) >> 8));
    int A_b = std::min(255, std::max(0, (298 * C + 516 * D + 128) >> 8));

    // Sample offsets within a block (center + four quarter-corners).
    // Depends only on block_w/block_h, so compute it once instead of
    // rebuilding the array on every block iteration.
    const int sample_offsets[5][2] = {
        {block_w / 2, block_h / 2},
        {block_w / 4, block_h / 4},
        {3 * block_w / 4, block_h / 4},
        {block_w / 4, 3 * block_h / 4},
        {3 * block_w / 4, 3 * block_h / 4}
    };

    // 2. Check every block.
    for (int y = 0; y < block_rows; ++y) {
        for (int x = 0; x < block_cols; ++x) {
            int abnormal = 0;
            for (int s = 0; s < 5; ++s) {
                int cx = x * block_w + sample_offsets[s][0];
                int cy = y * block_h + sample_offsets[s][1];
                int y_index = cy * frame->linesize[0] + cx;
                int u_index = (cy / 2) * frame->linesize[1] + (cx / 2);
                int v_index = (cy / 2) * frame->linesize[2] + (cx / 2);

                uint8_t Y = frame->data[0][y_index];
                uint8_t U = frame->data[1][u_index];
                uint8_t V = frame->data[2][v_index];

                // Expected RGB predicted from the base color and block position.
                int r_exp = (A_r + x * 13 + y * 17 + x * y * 7) % 256;
                int g_exp = (A_g + x * 19 + y * 23 + (x ^ y) * 11) % 256;
                int b_exp = (A_b + x * 29 + y * 31 + (x * x + y * y) * 3) % 256;

                // RGB -> YUV for comparison in the frame's native space.
                int Y_exp = (66 * r_exp + 129 * g_exp + 25 * b_exp + 128) >> 8; Y_exp += 16;
                int U_exp = (-38 * r_exp - 74 * g_exp + 112 * b_exp + 128) >> 8; U_exp += 128;
                int V_exp = (112 * r_exp - 94 * g_exp - 18 * b_exp + 128) >> 8; V_exp += 128;

                Y_exp = std::min(255, std::max(0, Y_exp));
                U_exp = std::min(255, std::max(0, U_exp));
                V_exp = std::min(255, std::max(0, V_exp));

                int diff = abs(Y - Y_exp) + abs(U - U_exp) + abs(V - V_exp);
                if (diff > color_threshold) {
                    abnormal = 1;
                    break; // one deviating sample marks the whole block abnormal
                }
            }
            if (abnormal) error_blocks++;
        }
    }
    if (error_blocks > block_cols * block_rows * error_ratio) {
        return true; // abnormal
    }
    return false; // normal
}

// WARNING: marked "wrong" by the original author — kept for reference but
// known to misbehave; do not rely on its verdicts without re-validation.
// Variant detector: recovers the base color A by averaging 5 sample points of
// the top-left block, then checks every block of the 20x10 grid with a 3x3
// sample lattice, a weighted YUV difference, and an edge-relaxed threshold.
// Returns true when more than error_ratio percent of the blocks are abnormal.
inline bool WrongColorBlockDetectYUV(AVFrame* frame) {
    if (!frame || frame->format != AV_PIX_FMT_YUV420P) return false;

    // Tunable parameters.
    static const int block_cols = 20;
    static const int block_rows = 10;
    static const int color_threshold = 12;       // lowered base threshold
    static const int error_ratio = 5;            // allow 5% abnormal blocks

    // Block dimensions; bail out on frames smaller than the grid.
    int block_w = frame->width / block_cols;
    int block_h = frame->height / block_rows;
    if (block_w <= 0 || block_h <= 0) return false;

    // 1. Improved base-color extraction: average of the top-left block's
    //    center and four quarter-corner samples.
    int a_r = 0, a_g = 0, a_b = 0;
    int sample_positions[5][2] = {
        {block_w / 2, block_h / 2}, {block_w / 4, block_h / 4},
        {3 * block_w / 4, block_h / 4}, {block_w / 4, 3 * block_h / 4},
        {3 * block_w / 4, 3 * block_h / 4}
    };

    for (int i = 0; i < 5; ++i) {
        int cx = sample_positions[i][0];
        int cy = sample_positions[i][1];
        int y_idx = cy * frame->linesize[0] + cx;
        int u_idx = (cy / 2) * frame->linesize[1] + (cx / 2);
        int v_idx = (cy / 2) * frame->linesize[2] + (cx / 2);

        uint8_t Y = frame->data[0][y_idx];
        uint8_t U = frame->data[1][u_idx];
        uint8_t V = frame->data[2][v_idx];

        // YUV -> RGB (BT.601 integer approximation).
        int C = Y - 16;
        int D = U - 128;
        int E = V - 128;
        a_r += std::min(255, std::max(0, (298 * C + 409 * E + 128) >> 8));
        a_g += std::min(255, std::max(0, (298 * C - 100 * D - 208 * E + 128) >> 8));
        a_b += std::min(255, std::max(0, (298 * C + 516 * D + 128) >> 8));
    }
    a_r /= 5; a_g /= 5; a_b /= 5;

    // 2. Per-block check with improved sampling and threshold strategy.
    int error_blocks = 0;
    const int total_blocks = block_cols * block_rows;

    for (int y = 0; y < block_rows; ++y) {
        for (int x = 0; x < block_cols; ++x) {
            // Expected color (original pattern formula).
            int r_exp = (a_r + x * 13 + y * 17 + x * y * 7) % 256;
            int g_exp = (a_g + x * 19 + y * 23 + (x ^ y) * 11) % 256;
            int b_exp = (a_b + x * 29 + y * 31 + (x * x + y * y) * 3) % 256;

            // RGB -> YUV (exact integer conversion).
            int Y_exp = std::min(255, std::max(0, (66 * r_exp + 129 * g_exp + 25 * b_exp + 128) >> 8) + 16);
            int U_exp = std::min(255, std::max(0, (-38 * r_exp - 74 * g_exp + 112 * b_exp + 128) >> 8) + 128);
            int V_exp = std::min(255, std::max(0, (112 * r_exp - 94 * g_exp - 18 * b_exp + 128) >> 8) + 128);

            // Adaptive threshold: edge blocks get a slightly higher allowance.
            int adj_threshold = color_threshold + ((x == 0 || x == block_cols - 1 || y == 0 || y == block_rows - 1) ? 3 : 0);

            // 3x3 sample lattice per block for better coverage.
            int abnormal_points = 0;
            for (int sy = 0; sy < 3; ++sy) {
                for (int sx = 0; sx < 3; ++sx) {
                    int cx = x * block_w + (sx + 1) * block_w / 4;
                    int cy = y * block_h + (sy + 1) * block_h / 4;
                    int y_idx = cy * frame->linesize[0] + cx;
                    int u_idx = (cy / 2) * frame->linesize[1] + (cx / 2);
                    int v_idx = (cy / 2) * frame->linesize[2] + (cx / 2);

                    uint8_t Y = frame->data[0][y_idx];
                    uint8_t U = frame->data[1][u_idx];
                    uint8_t V = frame->data[2][v_idx];

                    // Weighted YUV difference (luma weighted double).
                    int diff = abs(Y - Y_exp) * 2 + abs(U - U_exp) + abs(V - V_exp);
                    if (diff > adj_threshold * 3) {
                        abnormal_points++;
                        if (abnormal_points >= 2) break; // 2 abnormal points mark the block abnormal
                    }
                }
                if (abnormal_points >= 2) break;
            }

            if (abnormal_points >= 2) {
                error_blocks++;
                // Early exit once the abnormal ratio is already exceeded.
                if (error_blocks > total_blocks * error_ratio / 100) {
                    return true; // abnormal
                }
            }
        }
    }

    return error_blocks > total_blocks * error_ratio / 100; // ratio verdict
}

// Active frame-id-free color-block detector used by test(). Identical in
// structure to ColorBlockDetectYUVRaw but with stricter tuning
// (threshold 15, abnormal ratio 5%). The base color A is recovered from the
// top-left block's center pixel, each block of the 20x10 grid is predicted
// from (A, x, y) and compared at 5 sample points; a single deviating sample
// marks the block abnormal. Returns true when more than error_ratio of the
// blocks are abnormal.
inline bool ColorBlockDetectYUV(AVFrame* frame) {
    if (!frame || !frame->data[0] || !frame->data[1] || !frame->data[2])
        return false;
    static int block_cols = 20;
    static int block_rows = 10;

    int block_w = frame->width / block_cols;
    int block_h = frame->height / block_rows;
    // Frames smaller than the grid cannot be sampled sensibly.
    if (block_w <= 0 || block_h <= 0)
        return false;
    int error_blocks = 0;
    int color_threshold = 15; // stricter than the Raw variant
    double error_ratio = 0.05; // stricter than the Raw variant

    // 1. Sample the center pixel of the top-left block to derive A_r, A_g, A_b.
    int ax = 0, ay = 0;
    int a_cx = ax * block_w + block_w / 2;
    int a_cy = ay * block_h + block_h / 2;

    int y_index = a_cy * frame->linesize[0] + a_cx;
    int u_index = (a_cy / 2) * frame->linesize[1] + (a_cx / 2);
    int v_index = (a_cy / 2) * frame->linesize[2] + (a_cx / 2);

    uint8_t Y = frame->data[0][y_index];
    uint8_t U = frame->data[1][u_index];
    uint8_t V = frame->data[2][v_index];

    // YUV -> RGB (BT.601 limited-range integer approximation) gives A_r, A_g, A_b.
    int C = Y - 16;
    int D = U - 128;
    int E = V - 128;
    int A_r = std::min(255, std::max(0, (298 * C + 409 * E + 128) >> 8));
    int A_g = std::min(255, std::max(0, (298 * C - 100 * D - 208 * E + 128) >> 8));
    int A_b = std::min(255, std::max(0, (298 * C + 516 * D + 128) >> 8));

    // Sample offsets within a block (center + four quarter-corners); they
    // depend only on block_w/block_h, so hoist them out of the block loops.
    const int sample_offsets[5][2] = {
        {block_w / 2, block_h / 2},
        {block_w / 4, block_h / 4},
        {3 * block_w / 4, block_h / 4},
        {block_w / 4, 3 * block_h / 4},
        {3 * block_w / 4, 3 * block_h / 4}
    };

    // 2. Check every block.
    for (int y = 0; y < block_rows; ++y) {
        for (int x = 0; x < block_cols; ++x) {
            int abnormal = 0;
            for (int s = 0; s < 5; ++s) {
                int cx = x * block_w + sample_offsets[s][0];
                int cy = y * block_h + sample_offsets[s][1];
                int y_index = cy * frame->linesize[0] + cx;
                int u_index = (cy / 2) * frame->linesize[1] + (cx / 2);
                int v_index = (cy / 2) * frame->linesize[2] + (cx / 2);

                uint8_t Y = frame->data[0][y_index];
                uint8_t U = frame->data[1][u_index];
                uint8_t V = frame->data[2][v_index];

                // Expected RGB predicted from the base color and block position.
                int r_exp = (A_r + x * 13 + y * 17 + x * y * 7) % 256;
                int g_exp = (A_g + x * 19 + y * 23 + (x ^ y) * 11) % 256;
                int b_exp = (A_b + x * 29 + y * 31 + (x * x + y * y) * 3) % 256;

                // RGB -> YUV for comparison in the frame's native space.
                int Y_exp = (66 * r_exp + 129 * g_exp + 25 * b_exp + 128) >> 8; Y_exp += 16;
                int U_exp = (-38 * r_exp - 74 * g_exp + 112 * b_exp + 128) >> 8; U_exp += 128;
                int V_exp = (112 * r_exp - 94 * g_exp - 18 * b_exp + 128) >> 8; V_exp += 128;

                Y_exp = std::min(255, std::max(0, Y_exp));
                U_exp = std::min(255, std::max(0, U_exp));
                V_exp = std::min(255, std::max(0, V_exp));

                int diff = abs(Y - Y_exp) + abs(U - U_exp) + abs(V - V_exp);
                if (diff > color_threshold) {
                    abnormal = 1;
                    break; // one deviating sample marks the whole block abnormal
                }
            }
            if (abnormal) error_blocks++;
        }
    }
    if (error_blocks > block_cols * block_rows * error_ratio) {
        return true; // abnormal
    }
    return false; // normal
}

// Entry point: run the mosaic/color-block detection pass over one sample clip.
// NOTE(review): the input path is a hard-coded developer-machine path; other
// candidate clips from earlier runs were removed as dead commented-out code.
int main() {
    const char* input_path = "C://Users//lu//Downloads//vlc_record_6sStartWrong.mp4";
    test(input_path);
    return 0;
}
#define ENCODE_MODE 0
// Decode the given file with FFmpeg, run ColorBlockDetectYUV on every video
// frame, and display flagged (mosaic) frames via an OpenCV window. When
// ENCODE_MODE is enabled, frames that pass an additional Harris-corner check
// are also re-encoded into a new MP4 with rewritten, gap-free timestamps.
// @param filename  path to the input media file.
void test(const char* filename) {
    int ret = 0;
    int err;
    int output_pts = 0; // monotonically increasing pts, so frames following a drop still splice seamlessly onto the earlier ones
    // Open the input file.
    // Allocate and initialize an empty AVFormatContext for the demux/mux work below.
    AVFormatContext* fmt_ctx = avformat_alloc_context();
    if (!fmt_ctx)
    {
        qDebug() << "fmt_ctx error code:" << AVERROR(ENOMEM);
        return;
    }

	// A parallel OpenCV capture of the same file, used later only to fetch
	// and display the flagged frame as a Mat.
	VideoCapture cap(filename);
	if (cap.isOpened()) {
		qDebug() << "Open video file successfully";
	}
	else {
		qDebug() << "Failed to open video file";
		return;
	}
    // Open the file.
    // NOTE(review): every early `return` below leaks fmt_ctx/avctx/cap state;
    // consider a single cleanup path (goto end / RAII wrappers).
    if ((err = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0)
    {
        printf("can not open file %d \n", err);
        return;
    }


    // Allocate and initialize an AVCodecContext holding the codec state and parameters.
    AVCodecContext* avctx = avcodec_alloc_context3(NULL);

    // Copy the stream's AVCodecParameters into the AVCodecContext. This
    // transfers the basic codec configuration (codec type, bitrate, frame
    // rate, ...) so the decoder context can be opened with the stream's settings.
    // NOTE(review): stream 0 is assumed to be the video stream — confirm for all inputs.
    ret = avcodec_parameters_to_context(avctx, fmt_ctx->streams[0]->codecpar);

    if (ret < 0)
    {
        qDebug() << "avcodec_parameters_to_context error code:" << ret;
        return;
    }

    // Find the decoder.
    AVCodec* codec = avcodec_find_decoder(avctx->codec_id);

    // Open the decoder.
    if ((ret = avcodec_open2(avctx, codec, NULL)) < 0)
    {
        qDebug() << "avcodec_open2 error code:" << ret;
        return;
    }
#if ENCODE_MODE
    // Open the output container.
    char filename_out[] = "juren-30s-52.mp4";
    AVFormatContext* fmt_ctx_out = NULL;
    
    // Allocate an output format context matching the output file name.
    err = avformat_alloc_output_context2(&fmt_ctx_out, NULL, NULL, filename_out);

    if (!fmt_ctx_out)
    {
        qDebug() << "error code:" << AVERROR(ENOMEM);
        return;
    }

    // Add one stream to the output container.
    AVStream* st = avformat_new_stream(fmt_ctx_out, NULL);
    //st->time_base = fmt_ctx->streams[0]->time_base;
#endif
    // Allocate frame and packet resources.
    AVCodecContext* enc_ctx = NULL;
    AVPacket* pkt = av_packet_alloc();
    AVFrame* frame = av_frame_alloc();
    AVPacket* pkt_out = av_packet_alloc();
    uint8_t* data[4];
    int linesize[4];
    int frame_num = 0;
    int read_end = 0;
	SwsContext* sws_ctx = NULL;
    
    int skip_frame_count = 0;
    int now_frame_count = 0;

    // avctx->framerate is still unknown at this point (whether that is
    // format-specific or some other reason), so the per-frame pts increment
    // has to be computed later, once the first frame is available.
    int frameIncPts = -1;
    //int frameIncPts = 1;
    while (true)
    {
        if (1 == read_end)
        {
            break;
        }

        ret = av_read_frame(fmt_ctx, pkt);

        // Skip audio packets; only video is processed.
        // NOTE(review): this assumes the audio stream is index 1, and it runs
        // before `ret` is checked — verify pkt is valid when av_read_frame fails.
        if (1 == pkt->stream_index)
        {
            av_packet_unref(pkt);
            continue;
        }

        // Reached the end of the file.
        if (AVERROR_EOF == ret)
        {
            // The whole file has been read; pkt's data and size should be null here.
            avcodec_send_packet(avctx, NULL);
        }
        else
        {
            if (0 != ret)
            {
                qDebug() << "av_read_frame error code:" << ret;
                return;
            }
            else
            {
            retry:
                if (avcodec_send_packet(avctx, pkt) == /*AVERROR(EAGAIN)*/ -11)
                {
                    qDebug() << "Receive_frame and send_packet both returned EAGAIN, which is an API violation";
                    // Could sleep ~0.1s before retrying; EAGAIN here usually
                    // points to an ffmpeg internal API bug.
                    // NOTE(review): as written this is a busy retry loop with no backoff.
                    goto retry;
                }

                // Release the encoded data held by pkt.
                av_packet_unref(pkt);
            }
        }

        // Keep reading frames from the decoder until it has no more data.

        while (true)
        {
            // Read one AVFrame.
            ret = avcodec_receive_frame(avctx, frame);
            if (/*AVERROR(EAGAIN)*/ -11 == ret)
            {
                // EAGAIN means the decoder needs more AVPackets; break out of
                // the inner loop so it can be fed more input.
                break;
            }
            else if (AVERROR_EOF == ret)
            {
                /* AVERROR_EOF means an AVPacket with null data and size was already
                 * sent to the decoder. Sending a null AVPacket tells the decoder to
                 * flush all of its buffered frames. This is normally done only after
                 * the input file has been fully read, or when an existing decoder is
                 * about to be reused for a different video stream.
                 *
                 * */
#if ENCODE_MODE
                 /* Send a null AVFrame to the encoder so it flushes its remaining packets. */
                ret = avcodec_send_frame(enc_ctx, NULL);
                for (;;)
                {
                    ret = avcodec_receive_packet(enc_ctx, pkt_out);
                    // EAGAIN should be impossible here; bail out if it happens.
                    if (ret == /*AVERROR(EAGAIN)*/-11) {
                        printf("avcodec_receive_packet error code %d \n", ret);
                        return;
                    }
                    if (AVERROR_EOF == ret) {
                        break;
                    }
                    // Got an encoded AVPacket: log some info, then write it to the file.
                    printf("pkt_out size : %d \n", pkt_out->size);
                    // Set the AVPacket's stream_index so the muxer knows which stream it belongs to.
                    pkt_out->stream_index = st->index;
                    // Rescale the AVPacket's timestamps to the output stream's time base.
                    //pkt_out->pts = av_rescale_q_rnd(pkt_out->pts, fmt_ctx->streams[0]->time_base, st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                    //pkt_out->dts = av_rescale_q_rnd(pkt_out->dts, fmt_ctx->streams[0]->time_base, st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                    //pkt_out->duration = av_rescale_q_rnd(pkt_out->duration, fmt_ctx->streams[0]->time_base, st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));


                    ret = av_interleaved_write_frame(fmt_ctx_out, pkt_out);
                    if (ret < 0) {
                        printf("av_interleaved_write_frame faile %d \n", ret);
                        return;
                    }
                    av_packet_unref(pkt_out);
                }
                av_write_trailer(fmt_ctx_out);
#endif
                // Break out of the inner loop: the file is fully decoded.
                read_end = 1;
                break;
            }
            else if (ret >= 0)
            {                
#if ENCODE_MODE
                // The encoder can only be initialized after the first frame is decoded.
                sws_ctx = sws_getCachedContext(sws_ctx, frame->width, frame->height, (AVPixelFormat)frame->format,
                    frame->width, frame->height, AV_PIX_FMT_BGR24,
                    SWS_BICUBIC, nullptr, nullptr, nullptr);
                cv::Mat bgr = avFrameToCvMat(frame, sws_ctx);
                if (iCheckMosaicImage(bgr) >= 10) {
                    ++skip_frame_count;
                    continue; // drop frames whose corner count is 10 or more
                }

                if (NULL == enc_ctx)
                {
                    // Open the encoder and configure its parameters.
                    //AVCodec* encode = avcodec_find_encoder(AV_CODEC_ID_H264);
                    AVCodec* encode = avcodec_find_encoder(avctx->codec_id);
                    enc_ctx = avcodec_alloc_context3(encode);
                    enc_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
                    enc_ctx->bit_rate = avctx->bit_rate;
                    // TODO: after encoding, framerate keeps getting reset to the
                    // inverse of time_base; still under investigation.
                    //if (av_q2d(avctx->framerate) > 60) {    // force 30 fps when above 60 fps
                    //    avctx->framerate = { 30,1 };    
                    //}
                    // Only fixed-framerate streams need the encoder framerate set,
                    // so it is left unset here.
                    //enc_ctx->framerate = avctx->framerate;
                    
                    // The two assignments below have no real effect.
                    st->avg_frame_rate = avctx->framerate;
                    st->r_frame_rate = avctx->framerate;

                    //enc_ctx->gop_size = 30;
                    //enc_ctx->max_b_frames = 0;                   
                    enc_ctx->profile = FF_PROFILE_H264_HIGH;
                    
                    /*
                     * These parameters are also available from the container, and the
                     * encoder could have been opened from the container info at the
                     * start. They are taken from the AVFrame here because the container
                     * info is not necessarily final: a decoded AVFrame may pass through
                     * filters that change it — although this program uses no filters.
                     */
                     // The encoder's time base must come from the AVFrame, since the AVFrame is the input.
					st->time_base = fmt_ctx->streams[0]->time_base;    // the output stream's time_base should match the source stream
                    enc_ctx->time_base = av_inv_q(avctx->framerate);  // must be set from the source frame rate

                    enc_ctx->width = fmt_ctx->streams[0]->codecpar->width;
                    enc_ctx->height = fmt_ctx->streams[0]->codecpar->height;
                    enc_ctx->sample_aspect_ratio = st->sample_aspect_ratio = frame->sample_aspect_ratio;
                    enc_ctx->pix_fmt = static_cast<AVPixelFormat>(frame->format);
                    enc_ctx->color_range = frame->color_range;
                    enc_ctx->color_primaries = frame->color_primaries;
                    enc_ctx->color_trc = frame->color_trc;
                    enc_ctx->colorspace = frame->colorspace;
                    enc_ctx->chroma_sample_location = frame->chroma_location;

                    ret = avcodec_parameters_from_context(st->codecpar, enc_ctx);
                    if (ret < 0)
                    {
                        qDebug() << "avcodec_parameters_from_context codec faile:" << ret;
                        return;
                    }

					av_opt_set(enc_ctx->priv_data, "preset", "veryslow", 0); // encoder preset — NOTE(review): value is "veryslow" though the original note said ultrafast
					av_opt_set(enc_ctx->priv_data, "tune", "zerolatency", 0); // tune the encoder for zero latency
					av_opt_set(enc_ctx->priv_data, "crf", "23", 0); // CRF value; 23 is a common default, lower means higher quality
					//av_opt_set(enc_ctx->priv_data, "profile", "high", 0); // set the encoder profile to high   

					//av_opt_set(enc_ctx->priv_data, "level", "4.0", 0); // set the encoder level to 4.0
					//av_opt_set(enc_ctx->priv_data, "preset", "medium", 0); // set the encoder preset to medium
					//av_opt_set(enc_ctx->priv_data, "tune", "zerolatency", 0); // tune the encoder for zero latency
					//av_opt_set(enc_ctx->priv_data, "crf", "23", 0); // CRF value; 23 is a common default, lower means higher quality
					//av_opt_set(enc_ctx->priv_data, "x264opts", "keyint=30:min-keyint=30:no-scenecut", 0); // raw x264 options

                    if ((ret = avcodec_open2(enc_ctx, encode, NULL)) < 0) {
                        qDebug() << "avcodec_open2 codec faile:" << ret;
                        return;
                    }
                    if ((ret = avio_open2(&fmt_ctx_out->pb, filename_out, AVIO_FLAG_WRITE, &fmt_ctx_out->interrupt_callback, NULL)) < 0)
                    {
                        qDebug() << "avio_open2 codec faile:" << ret;
                        return;
                    }

                    // The file header must be written first.
                    AVDictionary* options = nullptr;
                    av_dict_set(&options, "movflags", "faststart", 0);  // move the moov atom to the front of the file; fixes OpenCV VideoCapture's "moov atom not found" when opening the output
                    ret = avformat_write_header(fmt_ctx_out, &options);
                    //av_write_trailer
                    if (ret < 0)
                    {
                        qDebug() << "avformat_write_header codec faile:" << ret;
                        return;
                    }
                }
                // Compute the per-frame pts increment once the frame rate is known.
                if (frameIncPts == -1)                     
                    frameIncPts = av_rescale_q(1, av_inv_q(avctx->framerate), st->time_base);
                    //frameIncPts = (st->time_base.den / st->time_base.num) / (avctx->framerate.num / avctx->framerate.den);

                frame->pts = output_pts;
                frame->pkt_dts = output_pts;
                output_pts += frameIncPts;
                // Send the AVFrame to the encoder, then keep reading AVPackets.
                ret = avcodec_send_frame(enc_ctx, frame);
                if (ret < 0)
                {
                    qDebug() << "avcodec_send_frame fail:" << ret;
                    return;
                }
                for (;;)
                {
                    ret = avcodec_receive_packet(enc_ctx, pkt_out);
                    if (ret == -11/*AVERROR(EAGAIN) */ )
                    {
                        //output_pts++;
                        break;
                    }
                    if (ret < 0)
                    {
                        qDebug() << "avcodec_receive_packet fail:" << ret;
                        return;
                    }
                    // Got an encoded AVPacket: log some info, then write it to the file.
                    qDebug() << "pkt_out size:" << pkt_out->size;
                    
                    // Recompute the packet timestamps.
                    //av_packet_rescale_ts(pkt_out, enc_ctx->time_base, st->time_base);
                    // Ensure DTS <= PTS.
                    
                    // Set the AVPacket's stream_index so the muxer knows which stream it belongs to.
                    pkt_out->stream_index = st->index;
                    // Rescale the AVPacket's timestamps to the output stream's time base.
                    //pkt_out->pts = av_rescale_q_rnd(pkt_out->pts, fmt_ctx->streams[0]->time_base, st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                    //pkt_out->dts = av_rescale_q_rnd(pkt_out->dts, fmt_ctx->streams[0]->time_base, st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                    // TODO: duration is always 0; still unresolved.
                    //pkt_out->duration = av_rescale_q_rnd(pkt_out->duration, fmt_ctx->streams[0]->time_base, st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                    ret = av_interleaved_write_frame(fmt_ctx_out, pkt_out);
                    if (ret < 0) {
                        qDebug() << "av_interleaved_write_frame fail:" << ret;
                        return;
                    }
                    av_packet_unref(pkt_out);
                }
#endif
                //auto R = (now_frame_count * 7 + x * 13 + y * 17) % 256;
                //auto G = (now_frame_count * 11 + x * 19 + y * 23) % 256;
                //auto B = (now_frame_count * 17 + x * 29 + y * 31) % 256;
                //sws_ctx = sws_getCachedContext(sws_ctx, frame->width, frame->height, (AVPixelFormat)frame->format,
                //    frame->width, frame->height, AV_PIX_FMT_BGR24,
                //    SWS_BICUBIC, nullptr, nullptr, nullptr);
                //sws_scale(sws_ctx, (const uint8_t* const*)frame->data, frame->linesize, 0, frame->height, &bgr.data,
                //    &stride);
                //ColorBlockDetectYUV(frame, now_frame_count);
				//if (ColorBlockDetectYUV_AVFrame(frame, 20, 10, 30, 0.1)) {
                // Run the single-argument mosaic detector on the decoded frame;
                // on a hit, fetch the same frame through OpenCV and display it
                // annotated with the frame index.
                bool isMosaic = false;
                if((isMosaic = ColorBlockDetectYUV(frame))){
					qDebug() << "frame" << now_frame_count << "got mosaic";
					cap.set(CAP_PROP_POS_FRAMES, now_frame_count); // seek the OpenCV capture to the current frame
                    cv::Mat mat;
                    cap.read(mat);
                    std::stringstream ss;
                    ss << "FrameCount: " << now_frame_count << " isMosaic: "<< isMosaic;
                    putText(mat, ss.str(), Point(20, 60), FONT_HERSHEY_SIMPLEX, 0.7, Scalar(0, 255, 0), 2);
					imshow("马赛克", mat);
                    //while (waitKey(30) != 27);
                    waitKey(0); // block until a key is pressed
                    /*char key = waitKey(1000);
                    if (key == 27)
                        continue;*/
                }
                else {
					qDebug() << "frame" << now_frame_count << "no mosaic";
                }
                /*if (ColorBlockDetectYUV(frame)) {
                    qDebug() << "frame" << now_frame_count << "got mosaic";
                }*/
                ++now_frame_count;
            }
            else
            {
                qDebug() << "other fail \n";
                return;
            }
        }
    }
	qDebug() << "skip_frame_count: " << skip_frame_count;
	if (sws_ctx) sws_freeContext(sws_ctx);
    av_frame_free(&frame);
    av_packet_free(&pkt);
    av_packet_free(&pkt_out);

    // Close the encoder and decoder.
    // NOTE(review): enc_ctx is NULL when ENCODE_MODE is 0 — confirm the FFmpeg
    // version in use tolerates avcodec_close(NULL). Also avcodec_free_context
    // is never called and avformat_open_input is never paired with
    // avformat_close_input, so avctx and demuxer state leak — verify and fix.
    avcodec_close(avctx);
    avcodec_close(enc_ctx);

    // Free the container context.
    avformat_free_context(fmt_ctx);
    
    // avio_closep must be called, otherwise buffered data may never be flushed
    // and the output file ends up 0 KB.
#if ENCODE_MODE
    avio_closep(&fmt_ctx_out->pb);
    avformat_free_context(fmt_ctx_out);
#endif
}
