//
//  main.m
//  FFmpegTest_DCT_iOS
//
//  Created by wangjiangang on 15/9/16.
//  Copyright (c) 2015年 times. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "AppDelegate.h"

#include <stdio.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
}
#endif
#include "watermark.h"
#include <iostream>
// 16 bipolar Gold-code spreading sequences of 127 chips each (values are
// strictly +1/-1), one row per watermark bit. The embedder/detector indexes
// chip (n % 127) of each row for frame n, and the detectors precompute the
// per-row sums into dbSum1[] before the decode loop starts.
short goldx[][127] =
{
    {1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1
        , 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1
        , 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1
        , 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1
        , 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1
        , -1, -1, -1, -1, -1, 1, -1},
    {-1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1
        , 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1
        , -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1
        , 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1
        , -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1
        , 1, -1, -1, -1, 1, -1, -1},
    {-1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1
        , 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1
        , -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1
        , -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1
        , -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1
        , 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1},
    {-1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1
        , -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1
        , 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1
        , 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1
        , -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1
        , 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1},
    {-1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1
        , -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1
        , 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1
        , 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1
        , 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1
        , -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1},
    {-1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1
        , -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1
        , 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1
        , -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1
        , -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1
        , -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1},
    {-1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1
        , -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1
        , -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1
        , 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1
        , 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1
        , -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1},
    {1, 1,  1,  1, -1,  1, -1, -1,  1,  1,  1,  1, -1, -1,  1,  1, -1,  1, -1, -1, -1, -1, -1
        , -1, -1,  1, -1,  1, -1, -1,  1, -1, -1, -1,  1, -1, -1, -1, -1,  1,  1,  1, -1, -1, -1,  1
        , -1,  1,  1,  1, -1,  1, -1,  1, -1, -1, -1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1,  1, -1
        , -1,  1,  1, -1, -1,  1,  1,  1, -1,  1,  1, -1,  1,  1, -1, -1, -1,  1,  1, -1, -1, -1, -1
        , -1,  1,  1, -1,  1,  1,  1,  1,  1, -1,  1,  1,  1, -1, -1,  1, -1,  1, -1,  1, -1,  1,  1
        , -1, -1,  1, -1, -1,  1, -1,  1,  1, -1,  1, -1},
    {1, -1,  1, -1, -1,  1, -1, -1, -1,  1, -1, -1, -1, -1,  1,  1,  1, -1, -1, -1,  1, -1,  1
        ,  1,  1, -1,  1, -1,  1, -1, -1, -1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1,  1, -1, -1,  1
        ,  1, -1, -1,  1,  1,  1, -1,  1,  1, -1,  1,  1, -1, -1, -1,  1,  1, -1, -1, -1, -1, -1,  1
        ,  1, -1,  1,  1,  1,  1,  1, -1,  1,  1,  1, -1, -1,  1, -1,  1, -1,  1, -1,  1,  1, -1, -1
        ,  1, -1, -1,  1, -1,  1,  1, -1,  1, -1,  1,  1,  1,  1, -1,  1, -1, -1,  1,  1,  1,  1, -1
        , -1,  1,  1, -1,  1, -1, -1, -1, -1, -1, -1, -1},
    {-1,  1, -1,  1, -1, -1,  1, -1, -1, -1,  1, -1, -1, -1, -1,  1,  1,  1, -1, -1, -1,  1, -1
        ,  1,  1,  1, -1,  1, -1,  1, -1, -1, -1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1,  1, -1, -1
        ,  1,  1, -1, -1,  1,  1,  1, -1,  1,  1, -1,  1,  1, -1, -1, -1,  1,  1, -1, -1, -1, -1, -1
        ,  1,  1, -1,  1,  1,  1,  1,  1, -1,  1,  1,  1, -1, -1,  1, -1,  1, -1,  1, -1,  1,  1, -1
        , -1,  1, -1, -1,  1, -1,  1,  1, -1,  1, -1,  1,  1,  1,  1, -1,  1, -1, -1,  1,  1,  1,  1
        , -1, -1,  1,  1, -1,  1, -1, -1, -1, -1, -1, -1},
    {1, -1, -1, -1, -1,  1,  1,  1, -1, -1, -1,  1, -1,  1,  1,  1, -1,  1, -1,  1, -1, -1, -1
        ,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1,  1, -1, -1,  1,  1, -1, -1,  1,  1,  1, -1,  1,  1
        , -1,  1,  1, -1, -1, -1,  1,  1, -1, -1, -1, -1, -1,  1,  1, -1,  1,  1,  1,  1,  1, -1,  1
        ,  1,  1, -1, -1,  1, -1,  1, -1,  1, -1,  1,  1, -1, -1,  1, -1, -1,  1, -1,  1,  1, -1,  1
        , -1,  1,  1,  1,  1, -1,  1, -1, -1,  1,  1,  1,  1, -1, -1,  1,  1, -1,  1, -1, -1, -1, -1
        , -1, -1, -1,  1, -1,  1, -1, -1,  1, -1, -1, -1},
    {-1, -1, -1,  1,  1,  1, -1, -1, -1,  1, -1,  1,  1,  1, -1,  1, -1,  1, -1, -1, -1,  1,  1
        ,  1,  1,  1,  1, -1, -1, -1, -1,  1, -1, -1,  1,  1, -1, -1,  1,  1,  1, -1,  1,  1, -1,  1
        ,  1, -1, -1, -1,  1,  1, -1, -1, -1, -1, -1,  1,  1, -1,  1,  1,  1,  1,  1, -1,  1,  1,  1
        , -1, -1,  1, -1,  1, -1,  1, -1,  1,  1, -1, -1,  1, -1, -1,  1, -1,  1,  1, -1,  1, -1,  1
        ,  1,  1,  1, -1,  1, -1, -1,  1,  1,  1,  1, -1, -1,  1,  1, -1,  1, -1, -1, -1, -1, -1, -1
        , -1,  1, -1,  1, -1, -1,  1, -1, -1, -1,  1, -1},
    {-1, -1,  1, -1, -1, -1,  1, -1, -1, -1, -1,  1,  1,  1, -1, -1, -1,  1, -1,  1,  1,  1, -1
        ,  1, -1,  1, -1, -1, -1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1,  1, -1, -1,  1,  1, -1, -1
        ,  1,  1,  1, -1,  1,  1, -1,  1,  1, -1, -1, -1,  1,  1, -1, -1, -1, -1, -1,  1,  1, -1,  1
        ,  1,  1,  1,  1, -1,  1,  1,  1, -1, -1,  1, -1,  1, -1,  1, -1,  1,  1, -1, -1,  1, -1, -1
        ,  1, -1,  1,  1, -1,  1, -1,  1,  1,  1,  1, -1,  1, -1, -1,  1,  1,  1,  1, -1, -1,  1,  1
        , -1,  1, -1, -1, -1, -1, -1, -1, -1,  1, -1,  1},
    {1,  1,  1, -1,  1, -1, -1,  1,  1,  1,  1, -1, -1,  1,  1, -1,  1, -1, -1, -1, -1, -1, -1
        , -1,  1, -1,  1, -1, -1,  1, -1, -1, -1,  1, -1, -1, -1, -1,  1,  1,  1, -1, -1, -1,  1, -1
        ,  1,  1,  1, -1,  1, -1,  1, -1, -1, -1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1,  1, -1, -1
        ,  1,  1, -1, -1,  1,  1,  1, -1,  1,  1, -1,  1,  1, -1, -1, -1,  1,  1, -1, -1, -1, -1, -1
        ,  1,  1, -1,  1,  1,  1,  1,  1, -1,  1,  1,  1, -1, -1,  1, -1,  1, -1,  1, -1,  1,  1, -1
        , -1,  1, -1, -1,  1, -1,  1,  1, -1,  1, -1,  1},
    {1, -1, -1, -1,  1, -1, -1, -1, -1,  1,  1,  1, -1, -1, -1,  1, -1,  1,  1,  1, -1,  1, -1
        ,  1, -1, -1, -1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1,  1, -1, -1,  1,  1, -1, -1,  1,  1
        ,  1, -1,  1,  1, -1,  1,  1, -1, -1, -1,  1,  1, -1, -1, -1, -1, -1,  1,  1, -1,  1,  1,  1
        ,  1,  1, -1,  1,  1,  1, -1, -1,  1, -1,  1, -1,  1, -1,  1,  1, -1, -1,  1, -1, -1,  1, -1
        ,  1,  1, -1,  1, -1,  1,  1,  1,  1, -1,  1, -1, -1,  1,  1,  1,  1, -1, -1,  1,  1, -1,  1
        , -1, -1, -1, -1, -1, -1, -1,  1, -1,  1, -1, -1},
    {-1, -1, -1, -1,  1,  1,  1, -1, -1, -1,  1, -1,  1,  1,  1, -1,  1, -1,  1, -1, -1, -1,  1
        ,  1,  1,  1,  1,  1, -1, -1, -1, -1,  1, -1, -1,  1,  1, -1, -1,  1,  1,  1, -1,  1,  1, -1
        ,  1,  1, -1, -1, -1,  1,  1, -1, -1, -1, -1, -1,  1,  1, -1,  1,  1,  1,  1,  1, -1,  1,  1
        ,  1, -1, -1,  1, -1,  1, -1,  1, -1,  1,  1, -1, -1,  1, -1, -1,  1, -1,  1,  1, -1,  1, -1
        ,  1,  1,  1,  1, -1,  1, -1, -1,  1,  1,  1,  1, -1, -1,  1,  1, -1,  1, -1, -1, -1, -1, -1
        , -1, -1,  1, -1,  1, -1, -1,  1, -1, -1, -1,  1}
};
//void avStreamFPSTimeBase(AVStream *st, CGFloat defaultTimeBase, CGFloat *pFPS, CGFloat *pTimeBase)
//{
//    CGFloat fps, timebase;
//    
//    if (st->time_base.den && st->time_base.num)
//        timebase = av_q2d(st->time_base);
//    else if(st->codec->time_base.den && st->codec->time_base.num)
//        timebase = av_q2d(st->codec->time_base);
//    else
//        timebase = defaultTimeBase;
//    
//    if (st->codec->ticks_per_frame != 1) {
//        printf("WARNING: st.codec.ticks_per_frame=%d", st->codec->ticks_per_frame);
//        //timebase *= st->codec->ticks_per_frame;
//    }


// Accumulates watermark-detection statistics for one decoded frame and, once a
// full 127-frame Gold-sequence period has been observed, decodes and prints the
// 16-bit watermark payload.
//
// Parameters:
//   u      - U (chroma) plane of the decoded frame (width/2 x height/2 bytes
//            for YUV420 — assumed, per the /2 factors below).
//   pFrame - decoded frame; only width/height are read here.
//   n      - running frame counter (by reference); incremented once per call.
//   dbVal  - per-bit correlation accumulators; normalized, thresholded and
//            reset every 127 frames.
//   dbSum  - per-bit mean accumulators; reset together with dbVal.
//   dbSum1 - per-bit sums of the Gold sequences, precomputed by the caller
//            and left untouched here.
void detectWaterMark(unsigned char* u, AVFrame *pFrame,int &n,double dbVal[16],double dbSum[16],double dbSum1[16]){
    // Correlate this frame's U plane against chip (n % 127) of each sequence.
    UMeanWMDetect(u, pFrame->width/2, pFrame->height/2, n%127, dbVal, dbSum);

    n++;
    if((n % 127) == 0)
    {
        // One full sequence period observed: normalize, de-mean and threshold
        // each of the 16 bit correlations.
        std::string strDetect;

        int nVal = 0;       // decoded 16-bit payload
        int nDetect = 0;    // number of bits detected with confidence
        for(int i=15; i>=0; i--)
        {
            dbSum[i] /= 127.0;
            dbVal[i] -= dbSum[i]*dbSum1[i];   // remove the mean component
            dbVal[i] /= (double)(pFrame->width/16)*(double)(pFrame->height/16)*127.0;

            printf("dbVal[%d]:%f\n",i,dbVal[i]);
            if(dbVal[i] >= 2.5)
            {
                strDetect += "1";
                nVal |= 0x0001 << i;
                nDetect ++;
            }
            else if(dbVal[i] <= -2.5)
            {
                strDetect += "0";
                nDetect ++;
            }
            else
                strDetect += "?";   // correlation too weak to decide either way
        }

        std::string strText;
        char str[52];
        // Frame range covered by this detection period.
        snprintf(str, sizeof(str), "%d~%d帧",n-127, n-1);
        strText=str;
        if(nDetect == 16)
        {
            strText += "检测到完整抗几何变换水印信息:";
            strText += strDetect;
            // BUGFIX: the original did `strText += nVal;`, which appends the
            // int as a single (usually unprintable) character instead of the
            // decoded value. Format it as decimal text.
            char valBuf[16];
            snprintf(valBuf, sizeof(valBuf), "(%d)", nVal);
            strText += valBuf;
        }
        else if(nDetect <= 2)
        {
            strText += "没有检测到抗几何变换水印信息.\r\n";
        }
        else if(nDetect < 8)
        {
            strText += "检测到小部分抗几何变换水印信息:";
            strText += strDetect;
            strText += ".\r\n";
        }
        else
        {
            strText += "检测到大部分抗几何变换水印信息:";
            strText += strDetect;
            strText += ".\r\n";
        }
        std::cout<<"检测结果:\n"<<strText<<std::endl;

        // Restart accumulation for the next 127-frame period (dbSum1 is a
        // constant and deliberately not cleared).
        memset(dbVal, 0, sizeof(double)*16);
        memset(dbSum, 0, sizeof(double)*16);
    }
}
// Drains frames still buffered inside the encoder of stream `stream_index`
// and muxes them into `fmt_ctx`. Returns 0 on success (or when the codec
// buffers nothing), a negative AVERROR code on failure.
int flush_encoder(AVFormatContext *fmt_ctx,unsigned int stream_index){
    int ret;
    int got_frame;
    AVPacket enc_pkt;
    // Codecs without CODEC_CAP_DELAY never buffer frames; nothing to flush.
    if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities &
          CODEC_CAP_DELAY))
        return 0;
    while (1) {
        av_init_packet(&enc_pkt);
        enc_pkt.data = NULL;   // let the encoder allocate the payload
        enc_pkt.size = 0;
        // Passing a NULL frame asks the encoder to emit its buffered output.
        ret = avcodec_encode_video2 (fmt_ctx->streams[stream_index]->codec, &enc_pkt,
                                     NULL, &got_frame);
        // BUGFIX: removed the original's `av_frame_free(NULL)` — a no-op.
        if (ret < 0)
            break;
        if (!got_frame){
            // Encoder fully drained.
            av_free_packet(&enc_pkt);
            ret=0;
            break;
        }
        printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n",enc_pkt.size);
        /* mux encoded frame */
        ret = av_write_frame(fmt_ctx, &enc_pkt);
        // BUGFIX: av_write_frame() does not take ownership of the packet;
        // release it here so the encoder-allocated buffer is not leaked once
        // per flushed frame.
        av_free_packet(&enc_pkt);
        if (ret < 0)
            break;
    }
    return ret;
}
int main(int argc, char * argv[]) {
    AVFormatContext* fmtContext;
    AVCodecContext  *pCodecContext;
    
    AVCodecContext      *_audioCodecCtx;
    AVFrame             *_audioFrame;
    AVCodec         *pCodec;
    AVCodec         *pAudioCodec;
    struct SwsContext *img_convert_ctx, *audio_swrContext;
    AVFrame* pFrame;
    AVPacket        packet;
    int             frameFinished;
    
    int             numBytes;
    uint8_t         *buffer;
    
    
    AVFormatContext* fmtOutContext;
    AVCodecContext *outCodecContext,*outAudioCodecContext;
    AVCodec *outCodec,*outAudioCodec;
    AVOutputFormat* outFmt;
    AVStream *stream,*auStream;
    double video_pts,audio_pts;
    float _audioTimeBase;
    uint8_t *video_outbuf,*audio_outbuf;
    uint8_t* picture_buf;
    int16_t *samples;
    AVFrame* picture;
    int size;
    int ret;
    int video_outbuf_size,audio_outbuf_size;
    int             i, videoStream,audioStream;
    
    const char* filename = "/Users/wangjiangang/DCT/videotest/pre/360.mp4";
    
    const char* outFilename = "/Users/wangjiangang/DCT/videotest/after/testxx_w.mp4";
    
    
    av_register_all();
    ///in
    
    fmtContext = avformat_alloc_context();
    if (!fmtContext)
        return -1;
    
    if (avformat_open_input(&fmtContext, filename, NULL, NULL) < 0) {
        if (fmtContext)
            avformat_free_context(fmtContext);
        return -1;
    }
    if (avformat_find_stream_info(fmtContext, NULL) < 0) {
        return -1;
    }
    av_dump_format(fmtContext, 0, filename, 0);
    
    // Find the first video stream
    // 遍历文件的流，找到第一个视频流，并记录流的编码信息
    videoStream=-1;
    for(i=0; i<fmtContext->nb_streams; i++)
    {
        if(fmtContext->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
        {
            videoStream=i;
            break;
        }
    }
    if(videoStream==-1)
        return -1; // Didn't find a video stream
    audioStream=-1;
    for(i=0; i<fmtContext->nb_streams; i++)
    {
        if(fmtContext->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO)
        {
            audioStream=i;
            break;
        }
    }
    // Get a pointer to the codec context for the video stream
    // 得到视频流编码的上下文指针
    pCodecContext=fmtContext->streams[videoStream]->codec;
    _audioCodecCtx=fmtContext->streams[audioStream]->codec;
    // 在库里面查找支持该格式的解码器
    pCodec=avcodec_find_decoder(pCodecContext->codec_id);
    if(pCodec==NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    pAudioCodec=avcodec_find_decoder(_audioCodecCtx->codec_id);
    if(pAudioCodec==NULL)
    {
        fprintf(stderr, "Unsupported codec(pAudioCodec)!\n");
        return -1; // Codec not found
    }
    if ( avcodec_open2(pCodecContext, pCodec, NULL) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
        return -1;
    }
    if ( avcodec_open2(_audioCodecCtx, pAudioCodec, NULL) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
        return -1;
    }
    // 分配一个帧指针，指向解码后的原始帧
    pFrame=av_frame_alloc();
    _audioFrame=av_frame_alloc();
    if (!_audioFrame) {
        av_log(NULL, AV_LOG_ERROR, "AllocateFrame audio error\n");
        return -1;
    }
    
    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, pCodecContext->width, pCodecContext->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));  // buffer = new uint8_t[numBytes];
    
    ///out
    
    outFmt = av_guess_format(NULL, outFilename, NULL);
    fmtOutContext = avformat_alloc_context();
    fmtOutContext->oformat = outFmt;
    snprintf(fmtOutContext->filename, sizeof(fmtOutContext->filename), "%s", outFilename);
    
    stream = NULL;
    if (outFmt->video_codec != AV_CODEC_ID_NONE)
    {
        stream = avformat_new_stream(fmtOutContext, 0);
        outCodecContext = stream->codec;
        outCodecContext->codec_id = outFmt->video_codec;
        outCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
        outCodecContext->bit_rate = 400000;
        outCodecContext->width = pCodecContext->width;
        outCodecContext->height = pCodecContext->height;
        outCodecContext->time_base.num = 1;
        outCodecContext->time_base.den = 25;
        outCodecContext->gop_size = 12;
        outCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
        if (outCodecContext->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        {
            outCodecContext->max_b_frames = 2;
        }
        if (outCodecContext->codec_id == AV_CODEC_ID_MPEG1VIDEO)
        {
            outCodecContext->mb_decision = 2;
        }
        if (!strcmp(fmtOutContext->oformat->name, "mp4"))
        {
            outCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }
    }
    if (outFmt->audio_codec != AV_CODEC_ID_NONE)
    {
        AVCodec* codec=avcodec_find_encoder(fmtOutContext->audio_codec_id);
        auStream =avformat_new_stream(fmtOutContext, codec);
        auStream->id=fmtOutContext->nb_streams-1;
        outAudioCodecContext=auStream->codec;
        outAudioCodecContext->codec_id = outFmt->audio_codec;
//        outAudioCodecContext->codec_type = AVMEDIA_TYPE_AUDIO; 

        outAudioCodec=avcodec_find_encoder(outAudioCodecContext->codec_id);
        
        
//        outAudioCodec->sample_fmt  = codec->sample_fmts ?
//        codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
//        outAudioCodec->bit_rate    = 64000;
//        outAudioCodec->sample_rate = 44100;
        
        outAudioCodecContext->channels        = av_get_channel_layout_nb_channels(outAudioCodecContext->channel_layout);
        auStream->time_base = (AVRational){ 1, outAudioCodecContext->sample_rate };
        
        if (fmtOutContext->flags & AVFMT_GLOBALHEADER)
            outAudioCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(fmtOutContext, 0, outFilename, 1);
    if (!stream)
    {
        av_log(NULL, AV_LOG_ERROR, "av_dump_format if (!stream)\n");
        return -1;
    }
    
    outCodec = avcodec_find_encoder(outCodecContext->codec_id);
    if (!outCodec)
    {
        return 0;
    }
    if (avcodec_open2(outCodecContext, outCodec,NULL) < 0)
    {
        return 0;
    }
//    if (avcodec_open2(outAudioCodecContext, outAudioCodec, NULL)<0) {
//        av_log(NULL, AV_LOG_ERROR, "audio open error\n");
//        return 0;
//    }
    printf("xxx\n");
    if (!(fmtOutContext->oformat->flags & AVFMT_RAWPICTURE))
    {
        video_outbuf_size = 200000;
        video_outbuf = (uint8_t*)av_malloc(video_outbuf_size);
        
        audio_outbuf_size=200000;
        audio_outbuf= (uint8_t*)av_malloc(audio_outbuf_size);
    }
    
    
    if (!(outFmt->flags & AVFMT_NOFILE))
    {
        if (avio_open(&fmtOutContext->pb, outFilename,AVIO_FLAG_WRITE) < 0)//URL_WRONLY
        {
            return 0;
        }
    }
    avformat_write_header(fmtOutContext,NULL);
    
    //write mp4
    
    float _fps=0;
    float _videoTimeBase=0;
    AVStream *st = fmtContext->streams[videoStream];
//    avStreamFPSTimeBase(st, 0.04, &_fps, &_videoTimeBase);
    
    AVStream *stAudio = fmtContext->streams[audioStream];
//    avStreamFPSTimeBase(stAudio, 0.025, 0, &_audioTimeBase);
    
    i=0;
     const char* yuvFilename = "/Users/wangjiangang/DCT/testxx_yuv.yuv";
    FILE *fp = fopen(yuvFilename, "wb");
    int n=0;
        double dbVal[16];
        double dbSum[16];
    double dbSum1[16];
    memset(dbVal, 0, sizeof(double)*16);
    memset(dbSum, 0, sizeof(double)*16);
    memset(dbSum1, 0, sizeof(double)*16);
    for(int i=0; i<16; i++)
    {
        for(int j=0; j<127; j++)
            dbSum1[i] += (double)goldx[i][j];
    }

    while(av_read_frame(fmtContext, &packet)>=0) // 读取一个帧
    {
//        if (stream)
//        {
//            video_pts = (double)(stream->pts.val * stream->time_base.num / stream->time_base.den);
//        }
//        else
//        {
//            video_pts = 0.0;
//        }
        if (!stream/* || video_pts >= 5.0*/)
        {
            break;
        }
        float minDuration=0.1;
        float decodedDuration=0;

        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream)
        {
            int pktSize = packet.size;
            while (pktSize > 0) {
                int len=avcodec_decode_video2(pCodecContext, pFrame, &frameFinished, &packet);
                if (len<0) {
                    av_log(NULL, AV_LOG_ERROR, "avcodec_decode_video2 fail\n");
                    break;
                }
                // Did we get a video frame?
                if(frameFinished)
                {
                    //关键帧
                    if (pFrame->key_frame == 1){
                        printf("关键帧 pFrame->data:%d\n",sizeof(pFrame->data[0]));
                    }
                        unsigned char* u=(unsigned char*)pFrame->data[1];
                        UMeanWatermark(u, pFrame->width/2, pFrame->height/2, 888, n%127);
                        pFrame->data[1]=(uint8_t *)u;
                        n++;
//                        detectWaterMark(u,pFrame,n,dbVal, dbSum,dbSum1);

                        if (fp) {
//                            fwrite(pFrame->data[0], pFrame->width*pFrame->height, 1, fp );
//                            fwrite(pFrame->data[1], pFrame->width*pFrame->height/4, 1, fp );
//                            fwrite(pFrame->data[2], pFrame->width*pFrame->height/4, 1, fp );
                        }
                    const int64_t frameDuration = av_frame_get_pkt_duration(pFrame);
                    int64_t duration=0;
                    if (frameDuration) {
                        
                        duration = frameDuration * _videoTimeBase;
                        duration += pFrame->repeat_pict * _videoTimeBase * 0.5;
                        
                    } else {
                        duration = 1.0 / _fps;
                    }
                    
                    if (duration) {
                        decodedDuration += duration;
                        //                        if (decodedDuration > minDuration)
                        //                            finished = YES;
                    }
                    
                    
//                    printf("frameFinished:%d\n",len);
                    if (fmtOutContext->oformat->flags & AVFMT_RAWPICTURE)
                    {
//                        av_log(NULL, AV_LOG_ERROR, "fmtOutContext->oformat->flags & AVFMT_RAWPICTURE\n");
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.flags |= AV_PKT_FLAG_KEY;
                        pkt.stream_index = stream->index;
                        pkt.data = (uint8_t*)pFrame;
                        pkt.size = sizeof(AVPicture);
                        ret = av_write_frame(fmtOutContext, &pkt);
                    }
                    else
                    {
                        AVPacket pkt;
//                        av_log(NULL, AV_LOG_ERROR, "fmtOutContext->oformat->flags & AVFMT_RAWPICTURE else\n");
//                        int got_packet_ptr;
//                        ret=avcodec_encode_video2(outCodecContext, &pkt, pFrame, &got_packet_ptr);
//                        if (out_size==0) {
//                            ret = av_write_frame(fmtOutContext, &pkt);
//                        }
//                        if (ret < 0)
//                            continue;
//                        if (got_packet_ptr==0)
//                            continue;
                        /* prepare packet for muxing */
//                        pkt.stream_index = stream->index;
//                        pkt.pts = av_rescale_q(outCodecContext->coded_frame->pts, outCodecContext->time_base, stream->time_base);

//                        pkt.dts =av_rescale_q_rnd(pkt.dts,
//                                                      fmtOutContext->streams[stream->index]->codec->time_base,
//                                                      fmtOutContext->streams[stream->index]->time_base,
//                                                      (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
//                        pkt.pts =av_rescale_q_rnd(pkt.pts,
//                                                      fmtOutContext->streams[stream->index]->codec->time_base,
//                                                      fmtOutContext->streams[stream->index]->time_base,
//                                                      (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
//                        av_log(NULL,AV_LOG_DEBUG, "Muxing frame\n");  
//                        /* mux encoded frame */  
//                        ret =av_interleaved_write_frame(fmtOutContext, &pkt);
                        
                        int out_size = avcodec_encode_video(outCodecContext, video_outbuf, video_outbuf_size, pFrame);
                        if (out_size > 0)
                        {
                            av_init_packet(&pkt);
                            pkt.pts = av_rescale_q(outCodecContext->coded_frame->pts, outCodecContext->time_base, stream->time_base);
                            if (outCodecContext->coded_frame->key_frame)
                            {
                                pkt.flags |= AV_PKT_FLAG_KEY;
                            }
                            pkt.stream_index = stream->index;
                            pkt.data = video_outbuf;
                            pkt.size = out_size;
                            ret = av_write_frame(fmtOutContext, &pkt);
                        }
                    }
                    
                    if (0 == len)
                        break;
                    
                    pktSize -= len;
                }
            }
        }
        else if (packet.stream_index==audioStream){
//            av_log(NULL, AV_LOG_ERROR, "packet.stream_index==audioStream\n");
//            int pktSize = packet.size;
////            av_interleaved_write_frame(fmtOutContext, packet);
////            uint8_t * pktdata=packet.data;
////            av_packet_rescale_ts(&packet, outAudioCodecContext->time_base, auStream->time_base);
////            packet.stream_index = auStream->index;
////            av_write_frame(fmtOutContext, &packet);
//            
//            while (pktSize > 0) {
//                
//                int gotframe = 0;
//                
//                int len = avcodec_decode_audio4(_audioCodecCtx,
//                                                _audioFrame,
//                                                &gotframe,
//                                                &packet);
////                int len = avcodec_decode_audio3(_audioCodecCtx, audio_outbuf, &gotframe, pktdata, pktSize);
//                
//                if (len < 0) {
//                    av_log(NULL, AV_LOG_ERROR, "decode audio error, skip packet");
//                    break;
//                }
//
//                if (gotframe) {
// 
//                    AVPacket pkt;
//                    av_init_packet(&pkt);
//                    len=avcodec_encode_audio2(auStream->codec, &pkt, _audioFrame, &gotframe);
////                    pkt.size = avcodec_encode_audio(outAudioCodecContext, audio_outbuf, audio_outbuf_size, samples);
//                    pkt.pts = av_rescale_q(outAudioCodecContext->coded_frame->pts, outAudioCodecContext->time_base, auStream->time_base);
//                    pkt.flags |= AV_PKT_FLAG_KEY;
//                    pkt.stream_index = auStream->index;
////                    pkt.data = audio_outbuf;
//                    if (av_write_frame(fmtOutContext, &pkt) != 0)
//                    {
//                        break;
//                    }
//                }
//                
//                if (0 == len)
//                    break;
//                
//                pktSize -= len;
//            }
//
        }
        
        // Free the packet that was allocated by av_read_frame
        // 释放读取的帧内存
        av_free_packet(&packet);
    }
    ret = flush_encoder(fmtOutContext,0);
    if (ret < 0) {
        printf("Flushing encoder failed\n");
        return -1;
    }
    if (fp) {
        fclose(fp);
    }
    if (stream)
    {
        avcodec_close(stream->codec);
        av_free(picture);
        av_free(video_outbuf);
    }
    if (auStream) {
        avcodec_close(auStream->codec);
//        av_free(picture);
        av_free(audio_outbuf);
    }
    av_write_trailer(fmtOutContext);
    printf("av_write_trailer\n");
    for (int i=0; i<fmtOutContext->nb_streams; i++)
    {
        av_freep(&fmtOutContext->streams[i]->codec);
        av_freep(&fmtOutContext->streams[i]);
    }
    if (!(outFmt->flags & AVFMT_NOFILE))
    {
        avio_close(fmtOutContext->pb);
    }
    
    // Free the RGB image
    av_free(buffer);
    av_free(picture);
    
    // Free the YUV frame
    av_free(pFrame);
    
    // Close the codec
    avcodec_close(pCodecContext);
    
    // Close the video file
    avformat_free_context(fmtContext);
    
    
//    @autoreleasepool {
//        return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class]));
//    }
    return 0;
}
// Opens the watermarked MP4 produced by main(), decodes every video frame and
// runs the watermark detector over its U (chroma) plane; detectWaterMark()
// prints the decoded 16-bit payload once per 127-frame period.
// Returns 0 on success, -1 on error.
int main_(int argc, char * argv[]) {
    AVFormatContext* fmtContext;
    AVCodecContext  *pCodecContext;
    AVCodec         *pCodec;
    AVFrame* pFrame;
    AVPacket        packet;
    int             frameFinished;

    int       numBytes;
    uint8_t    *buffer;     // RGB-sized scratch buffer (allocated, never filled)

    int   i, videoStream;

    const char* filename = "/Users/wangjiangang/DCT/testxx_w.mp4";

    av_register_all();

    /* ---------- open input ---------- */

    fmtContext = avformat_alloc_context();
    if (!fmtContext)
        return -1;

    if (avformat_open_input(&fmtContext, filename, NULL, NULL) < 0) {
        if (fmtContext)
            avformat_free_context(fmtContext);
        return -1;
    }
    if (avformat_find_stream_info(fmtContext, NULL) < 0) {
        return -1;
    }
    av_dump_format(fmtContext, 0, filename, 0);

    // Walk the container's streams and remember the first video stream.
    videoStream=-1;
    for(i=0; i<fmtContext->nb_streams; i++)
    {
        if(fmtContext->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
        {
            videoStream=i;
            break;
        }
    }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Codec context for the video stream, then find and open its decoder.
    pCodecContext=fmtContext->streams[videoStream]->codec;
    pCodec=avcodec_find_decoder(pCodecContext->codec_id);
    if(pCodec==NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    if ( avcodec_open2(pCodecContext, pCodec, NULL) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
        return -1;
    }

    // Frame that receives the decoded output.
    pFrame=av_frame_alloc();

    // Determine required buffer size and allocate buffer.
    numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, pCodecContext->width, pCodecContext->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    // Detector state: per-bit accumulators plus the precomputed sums of the
    // 16 Gold sequences (consumed by detectWaterMark).
    i=0;
    int n=0;
    double dbVal[16];
    double dbSum[16];
    double dbSum1[16];
    memset(dbVal, 0, sizeof(double)*16);
    memset(dbSum, 0, sizeof(double)*16);
    memset(dbSum1, 0, sizeof(double)*16);
    for(int i=0; i<16; i++)
    {
        for(int j=0; j<127; j++)
            dbSum1[i] += (double)goldx[i][j];
    }

    while(av_read_frame(fmtContext, &packet)>=0) // read one packet
    {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream)
        {
            int pktSize = packet.size;
            while (pktSize > 0) {
                int len=avcodec_decode_video2(pCodecContext, pFrame, &frameFinished, &packet);
                if (len<0) {
                    av_log(NULL, AV_LOG_ERROR, "avcodec_decode_video2 fail\n");
                    break;
                }
                // Did we get a complete video frame?
                if(frameFinished)
                {
                    // Correlate the U plane against the watermark sequences.
                    unsigned char* u=(unsigned char*)pFrame->data[1];
                    detectWaterMark(u,pFrame,n,dbVal, dbSum,dbSum1);
                }
                // BUGFIX: these were inside the frameFinished block, so a
                // packet that decoded without producing a frame looped forever.
                if (0 == len)
                    break;
                pktSize -= len;
            }
        }
        // Free the packet that was allocated by av_read_frame.
        av_free_packet(&packet);
    }
    // BUGFIX: the original tested an uninitialized `ret` here ("Flushing
    // encoder failed"); the check was meaningless and has been removed.

    // Free the RGB scratch buffer.
    av_free(buffer);

    // Free the YUV frame.
    av_free(pFrame);

    // Close the codec.
    avcodec_close(pCodecContext);

    // Close the video file.
    avformat_free_context(fmtContext);
    return 0;
}