#include "rkmedia_data_process.h"
#include "ffmpeg_video_queue.h"
#include "ffmpeg_audio_queue.h"
#include "rkmedia_ffmpeg_config.h"
#include "rkmedia_data_process.h"
#include "rkmedia_container.h"
#include <sys/time.h>

extern VIDEO_QUEUE *video_queue;
extern AUDIO_QUEUE *audio_queue;
extern RKMEDIA_FFMPEG_CONFIG *ffmpeg_config;

/*
 * Convert a struct timeval to microseconds, returned as a double.
 * Multiplies in floating point so a 32-bit time_t cannot overflow in the
 * intermediate (tv_sec * 1000000) product.
 * NOTE: the leading double underscore is a reserved identifier; the name is
 * kept because existing callers in this file use it.
 */
double __get_us(struct timeval t) { return (double)t.tv_sec * 1000000.0 + (double)t.tv_usec; }

/*
 * Copy one encoded video frame from the global video_queue into the
 * caller-supplied AVPacket.
 *
 * Returns pkt on success; NULL when the queue is unavailable/empty or the
 * packet buffer cannot be (re)allocated.  Ownership of the dequeued
 * video_data_packet_t stays here: it is freed on every path.
 */
AVPacket *get_ffmpeg_video_avpacket(AVPacket *pkt)
{
    if (video_queue == NULL)
    {
        printf("video_queue==NULL\n");
        return NULL;
    }

    // Dequeue one encoded frame from the video queue.
    video_data_packet_t *video_data_packet = video_queue->getVideoPacketQueue();
    if (video_data_packet == NULL)
    {
        printf("getVideoPacketQueue fail \n");
        return NULL;
    }

    /*
     * av_buffer_realloc: reallocates pkt->buf to the requested size,
     * creating a new AVBufferRef when pkt->buf is NULL, returning 0
     * immediately when the size already matches, and copying the data
     * into a fresh buffer when the existing one is shared/not writable.
     */
    int ret = av_buffer_realloc(&pkt->buf, video_data_packet->video_frame_size + 70);
    if (ret < 0)
    {
        printf("av_buffer_realloc fail \n");
        free(video_data_packet); // BUGFIX: was leaked on this error path
        return NULL;
    }

    pkt->size = video_data_packet->video_frame_size;                                        // frame length -> AVPacket size
    memcpy(pkt->buf->data, video_data_packet->buffer, video_data_packet->video_frame_size); // frame bytes -> AVPacket data
    pkt->data = pkt->buf->data;                                                             // point pkt->data at the refcounted buffer
    pkt->flags |= AV_PKT_FLAG_KEY;                                                          // every packet is flagged as a key frame

    free(video_data_packet);
    video_data_packet = NULL;

    return pkt;
}

/*
 * Copy one encoded audio frame from the global audio_queue into the
 * caller-supplied AVPacket.
 *
 * Returns pkt on success; NULL when the queue is unavailable/empty or the
 * packet buffer cannot be (re)allocated.  The dequeued audio_data_packet_t
 * is freed on every path.
 */
AVPacket *get_ffmpeg_audio_avpacket(AVPacket *pkt)
{
    if (audio_queue == NULL)
    {
        printf("audio_queue==NULL\n");
        return NULL;
    }

    // Dequeue one encoded frame from the audio queue.
    audio_data_packet_t *audio_data_packet = audio_queue->getAudioPacketQueue();
    if (audio_data_packet == NULL)
    {
        printf("getAudioPacketQueue fail \n");
        return NULL;
    }

    /* av_buffer_realloc: see get_ffmpeg_video_avpacket for semantics. */
    int ret = av_buffer_realloc(&pkt->buf, audio_data_packet->audio_frame_size + 70);
    if (ret < 0)
    {
        printf("av_buffer_realloc fail \n");
        free(audio_data_packet); // BUGFIX: was leaked on this error path
        return NULL;
    }

    pkt->size = audio_data_packet->audio_frame_size;                                        // frame length -> AVPacket size
    memcpy(pkt->buf->data, audio_data_packet->buffer, audio_data_packet->audio_frame_size); // frame bytes -> AVPacket data
    pkt->data = pkt->buf->data;                                                             // point pkt->data at the refcounted buffer

    free(audio_data_packet);
    audio_data_packet = NULL;

    return pkt;
}

/*
 * Rescale a packet's timestamps from the encoder time base to the stream
 * time base, tag it with the stream index, and interleave-write it into the
 * output context.  Returns av_interleaved_write_frame()'s result: 0 on
 * success, a negative AVERROR code otherwise.
 */
int write_ffmpeg_avpacket(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /* Move pts/dts/duration from the codec time base to the stream time base. */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    int rc = av_interleaved_write_frame(fmt_ctx, pkt);
    if (rc != 0)
    {
        printf("av_interleaved_write_frame fail:%d\n", rc);
    }
    return rc;
}

/*
 * Pull one encoded video frame, stamp it with the running video PTS and
 * write it into the muxer.  Returns 0 on success, -1 on failure.
 */
int deal_video_avpacket(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c = ost->enc;

    // Fill ost->packet from the RV1126 video queue.
    AVPacket *video_packet = get_ffmpeg_video_avpacket(ost->packet);
    if (video_packet == NULL)
    {
        /* BUGFIX: the original fell through and handed a NULL packet to
         * write_ffmpeg_avpacket(), which dereferences it. */
        printf("get_ffmpeg_video_avpacket fail\n");
        return -1;
    }

    video_packet->pts = ost->next_timestamp;
    ost->next_timestamp += 1; // video PTS advances one tick per frame

    // Mux the packet into the output stream.
    ret = write_ffmpeg_avpacket(oc, &c->time_base, ost->stream, video_packet);
    if (ret != 0)
    {
        printf("write video avpacket error\n");
        return -1;
    }

    return 0;
}

/*
 * Pull one encoded audio frame, stamp it with the running sample count
 * (1024 samples per AAC frame) and write it into the muxer.
 * Returns 0 on success, -1 on failure.
 */
int deal_audio_avpacket(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c = ost->enc;

    // Fill ost->packet from the RV1126 audio queue.
    AVPacket *audio_packet = get_ffmpeg_audio_avpacket(ost->packet);
    if (audio_packet == NULL)
    {
        /* BUGFIX: the original fell through and handed a NULL packet to
         * write_ffmpeg_avpacket(), which dereferences it. */
        printf("get_ffmpeg_audio_avpacket fail\n");
        return -1;
    }

    audio_packet->pts = ost->samples_count;
    ost->samples_count += 1024;               // one AAC frame = 1024 samples
    ost->next_timestamp = ost->samples_count; // audio PTS tracks total samples

    // Mux the packet into the output stream.
    ret = write_ffmpeg_avpacket(oc, &c->time_base, ost->stream, audio_packet);
    if (ret != 0)
    {
        printf(" write audio avpacket error");
        return -1;
    }

    return 0;
}

/*
 * Draw a 5-pixel rectangle border in the given color into an NV12 frame.
 *
 * pic            NV12 buffer of size pic_w * pic_h * 3 / 2 (pic_w assumed even)
 * pic_w, pic_h   frame dimensions in pixels
 * rect_*         rectangle position and size in pixels
 * R, G, B        border color (0-255 each)
 *
 * Always returns 0.  Pixels falling outside the frame are skipped.
 */
int nv12_border(char *pic, int pic_w, int pic_h, int rect_x, int rect_y,
                int rect_w, int rect_h, int R, int G, int B)
{
  /* Border thickness in pixels. */
  const int border = 5;
  /* BT.601 RGB -> YUV conversion.
   * BUGFIX: the G coefficient of U was +0.3313 in the original; the
   * correct BT.601 value is -0.3313. */
  int Y, U, V;
  Y = 0.299 * R + 0.587 * G + 0.114 * B;
  U = -0.1687 * R - 0.3313 * G + 0.5 * B + 128;
  V = 0.5 * R - 0.4187 * G - 0.0813 * B + 128;

  const int luma_size = pic_w * pic_h;
  const int frame_size = luma_size + luma_size / 2;

  int j, k;
  for (j = rect_y; j < rect_y + rect_h; j++)
  {
    for (k = rect_x; k < rect_x + rect_w; k++)
    {
      /* BUGFIX: skip pixels outside the image; the original wrote out of
       * bounds whenever the rectangle crossed the frame edge. */
      if (j < 0 || j >= pic_h || k < 0 || k >= pic_w)
      {
        continue;
      }
      if (k < (rect_x + border) || k > (rect_x + rect_w - border) ||
          j < (rect_y + border) || j > (rect_y + rect_h - border))
      {
        /* Y plane index, then the interleaved UV (chroma) plane index:
         * for even pic_w this reduces to (j/2)*pic_w + (k & ~1) past the
         * luma plane, i.e. standard NV12 layout. */
        int y_index = j * pic_w + k;
        int u_index =
            (y_index / 2 - pic_w / 2 * ((j + 1) / 2)) * 2 + luma_size;
        int v_index = u_index + 1;
        pic[y_index] = Y;
        if (u_index >= luma_size && v_index < frame_size)
        {
          pic[u_index] = U;
          pic[v_index] = V;
        }
      }
    }
  }

  return 0;
}

void *camera_recognize_thread(void *args) 
{
    pthread_detach(pthread_self());//将线程状态改为unjoinable状态，确保资源的释放

    open_db();
    map<string, rockx_face_feature_t> database_face_map = FaceFeature();
    map<string, rockx_face_feature_t>::iterator database_iter;

    rockx_ret_t ret;
    rockx_handle_t face_det_handle;
    rockx_handle_t face_5landmarks_handle;
    rockx_handle_t face_recognize_handle;
    char *rockx_data = "/demo/src/rockx_data";
    rockx_config_t *config = rockx_create_config();
    rockx_add_config(config, ROCKX_CONFIG_DATA_PATH, rockx_data);

    ret = rockx_create(&face_det_handle, ROCKX_MODULE_FACE_DETECTION, config,sizeof(rockx_config_t));
    if (ret != ROCKX_RET_SUCCESS) 
    {
        printf("ERROR: init rockx module ROCKX_MODULE_FACE_DETECTION error %d\n", ret);
    }

    ret = rockx_create(&face_5landmarks_handle,ROCKX_MODULE_FACE_LANDMARK_5, config, sizeof(rockx_config_t));
    if (ret != ROCKX_RET_SUCCESS) 
    {
        printf("ERROR: init rockx module ROCKX_MODULE_FACE_LANDMARK_5 error %d\n",ret);
    }

    ret = rockx_create(&face_recognize_handle, ROCKX_MODULE_FACE_RECOGNIZE,config, sizeof(rockx_config_t));
    if (ret != ROCKX_RET_SUCCESS) 
    {
        printf("ERROR: init rockx module ROCKX_MODULE_FACE_RECOGNIZE error %d\n", ret);
        return NULL;
    }
    int frame_count = 0;
    int interval = 5;
    DISPLAY_FACE_DATA display_face;
    memset(&display_face, 0, sizeof(display_face));
    struct timeval start_time, stop_time;
    while (1) 
    {
        MEDIA_BUFFER src_mb = NULL;

        src_mb = RK_MPI_SYS_GetMediaBuffer(RK_ID_RGA, 0, -1);
        if (!src_mb) 
        {
            printf("ERROR: RK_MPI_SYS_GetMediaBuffer get null buffer!\n");
            break;
        }

        rockx_image_t input_image;
        memset(&input_image, 0, sizeof(rockx_image_t));
        rkMB_IMAGE_INFO ImageInfo={0};
        RK_MPI_MB_GetImageInfo(src_mb,&ImageInfo);
        input_image.width=ImageInfo.u32Width;   
        input_image.height=ImageInfo.u32Height;
        input_image.pixel_format = ROCKX_PIXEL_FORMAT_YUV420SP_NV12;
        // input_image.pixel_format = ROCKX_PIXEL_FORMAT_RGB888;
        input_image.size = RK_MPI_MB_GetSize(src_mb);
        input_image.data = (uint8_t *)RK_MPI_MB_GetPtr(src_mb);

        frame_count += 1;
        // frame_count = 1;
        if(interval <= frame_count)
        {
            gettimeofday(&start_time, NULL);

            frame_count = 0;
            memset(&display_face, 0, sizeof(display_face));
        
            rockx_object_array_t face_array;
            memset(&face_array, 0, sizeof(face_array));
            ret =rockx_face_detect(face_det_handle, &input_image, &face_array, nullptr);
            if (ret != ROCKX_RET_SUCCESS) 
            {
                printf("ERROR: rockx_face_detect error %d\n", ret);
            }

            for (int i = 0; i < face_array.count; i++) 
            {
                int is_false_face;
                ret = rockx_face_filter(face_5landmarks_handle, &input_image,&face_array.object[i].box, &is_false_face);
                if (ret != ROCKX_RET_SUCCESS) 
                {
                    printf("ERROR: rockx_face_filter error %d\n", ret);
                }
                if (is_false_face)
                {
                    continue;
                }
                
                int left = face_array.object[i].box.left;
                int top = face_array.object[i].box.top;
                int right = face_array.object[i].box.right;
                int bottom = face_array.object[i].box.bottom;
                float score = face_array.object[i].score;

                int w = face_array.object[i].box.right - face_array.object[i].box.left;
                int h = face_array.object[i].box.bottom - face_array.object[i].box.top;

                if (left < 0)
                {
                    left = 0;
                }
                if (top < 0)
                { 
                    top = 0;
                }
                
                while ((uint32_t)(left + w) >= input_image.width) 
                {
                    w -= 16;
                }
                while ((uint32_t)(top + h) >= input_image.height) 
                {
                    h -= 16;
                }
                
                rockx_object_t max_face,cur_face;
                memset(&max_face, 0, sizeof(rockx_object_t));
                memset(&cur_face, 0, sizeof(rockx_object_t));
                cur_face = face_array.object[i];
                int cur_face_box_area = (cur_face.box.right - cur_face.box.left) *(cur_face.box.bottom - cur_face.box.top);
                int max_face_box_area = (max_face.box.right - max_face.box.left) *(max_face.box.bottom - max_face.box.top);
                if (cur_face_box_area > max_face_box_area) 
                {
                    max_face = cur_face;
                }

                rockx_image_t align_out_img;
                memset(&align_out_img, 0, sizeof(rockx_image_t));
                ret = rockx_face_align(face_5landmarks_handle, &input_image,&(max_face.box), nullptr, &align_out_img);
                
                if (ret != ROCKX_RET_SUCCESS) 
                {
                    printf("rockx_face_align failed\n");
                }
                
                // Face Recognition
                rockx_face_feature_t out_feature;
                memset(&out_feature, 0, sizeof(rockx_face_feature_t));
                bool is_recognize = false;
                rockx_face_recognize(face_recognize_handle, &align_out_img, &out_feature);
                
                rockx_image_release(&align_out_img);
                float similarity;
                for (database_iter = database_face_map.begin();database_iter != database_face_map.end(); database_iter++) 
                {
                    ret = rockx_face_feature_similarity(&database_iter->second,&out_feature, &similarity);
                    printf("simple_value = %lf\n", similarity);
                
                    if (similarity <= 1.0) 
                    {
                        is_recognize = true;
                        break;
                    } 
                    else 
                    {
                        is_recognize = false;
                        continue;
                    }
                }
                
                string person = "unknown";
                if (is_recognize == true) 
                {
                    person = database_iter->first;
                } 

                int index = display_face.count;
                display_face.face[index].left = left;
                display_face.face[index].right = right;
                display_face.face[index].top = top;
                display_face.face[index].bottom = bottom;
                display_face.face[index].similarity = similarity;
                strcpy(display_face.face[index].person, person.c_str());
                display_face.count = index+1;
            }

            gettimeofday(&stop_time, NULL);
            printf("once run use %f ms\n",(__get_us(stop_time) - __get_us(start_time)) / 1000);
        }
        {
            // cv::Mat show_img = cv::Mat(input_image.height, input_image.width, CV_8UC3,RK_MPI_MB_GetPtr(src_mb));
            cv::Mat show_img = cv::Mat(input_image.height, input_image.width, CV_8UC1,RK_MPI_MB_GetPtr(src_mb));
 
            for (int i = 0; i < display_face.count; i++)
            // for (int i = 0; i < 0; i++)
            {
                int left = display_face.face[i].left;
                int right = display_face.face[i].right;
                int top = display_face.face[i].top;
                int bottom = display_face.face[i].bottom;
                float similarity =  display_face.face[i].similarity;
                string person = display_face.face[i].person;

                nv12_border((char *)RK_MPI_MB_GetPtr(src_mb), input_image.width, input_image.height, left, top, right-left, bottom-top, 255, 0, 255);
                cv::rectangle(show_img,cv::Point(left, top),cv::Point(right, bottom),cv::Scalar(0,0,255),3,8,0);
                
                string similar = std::to_string(similarity);
                
                cv::putText(show_img, person, cv::Point(left+5, top+40), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(255, 255, 0), 3);
                cv::putText(show_img, "ED: "+similar, cv::Point(left+5, top+100), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(255, 255, 0), 3);
            
                printf("This Predict Name = %s\n", person.c_str());
            }
            show_img.release();
        }
         
        RK_MPI_SYS_SendMediaBuffer(RK_ID_VENC, 0, src_mb);
        RK_MPI_MB_ReleaseBuffer(src_mb);
        src_mb = NULL;
    }

    database_face_map.erase(database_iter);
    database_face_map.erase(database_face_map.begin(), database_face_map.end());

    rockx_destroy(face_det_handle);
    rockx_destroy(face_5landmarks_handle);
    rockx_destroy(face_recognize_handle);

    RV1126_VI_CONTAINTER vi_container;
    get_vi_container(0, &vi_container);
    MPP_CHN_S vi_channel;
    vi_channel.enModId = RK_ID_VI;  //VI模块ID
    vi_channel.s32ChnId = vi_container.vi_id;//VI通道ID

    RV1126_RGA_CONTAINER rga_container;
    get_rga_container(0, &rga_container);
    MPP_CHN_S rga_channel;
    rga_channel.enModId = RK_ID_RGA;
    rga_channel.s32ChnId = rga_container.rga_id;

    int intRet;
    intRet = RK_MPI_SYS_UnBind(&vi_channel, &rga_channel);
    if (intRet != 0)
    {
        printf("VI UnBind failed \n");
    }
    else
    {
        printf("Vi UnBind success\n");
    }

    // destroy vi
    intRet = RK_MPI_VI_DisableChn(0, 0);
    if (intRet)
    {
        printf("DisableChn VI error! ret=%d\n", ret);
        return 0;
    }
    intRet = RK_MPI_RGA_DestroyChn(rga_container.rga_id);
    if (intRet)
    {
        printf("Destroy RGA error! ret=%d\n", ret);
        return 0;
    }
 
    return NULL;
}


/*
 * VENC drain thread.
 *
 * Pulls encoded video buffers from VENC channel venc_arg.vencId, copies
 * each one into a freshly allocated video_data_packet_t and pushes it into
 * the global video_queue.  Destroys VENC channel 0 on exit.
 */
void *camera_venc_thread(void *args)
{
    pthread_detach(pthread_self());
    MEDIA_BUFFER mb = NULL;

    VENC_PROC_PARAM venc_arg = *(VENC_PROC_PARAM *)args;
    free(args); // args ownership transferred to this thread

    printf("video_venc_thread...\n");

    FILE *save_file = NULL;
    const char *pOutPath = "camera_venc_thread.h264";
    save_file = fopen(pOutPath, "w");
    if (!save_file)
      printf("ERROR: Open %s failed!\n", pOutPath);

    while (1)
    {
        // Block until the encoder produces a buffer.
        mb = RK_MPI_SYS_GetMediaBuffer(RK_ID_VENC, venc_arg.vencId, -1);
        if (!mb)
        {
            printf("get venc media buffer error\n");
            break;
        }
        uint8_t *mbPtr = (uint8_t *)RK_MPI_MB_GetPtr(mb);
        size_t mbSize = RK_MPI_MB_GetSize(mb);

        video_data_packet_t *video_data_packet = (video_data_packet_t *)malloc(sizeof(video_data_packet_t));
        if (video_data_packet == NULL) // BUGFIX: malloc result was unchecked
        {
            printf("malloc video_data_packet fail\n");
            RK_MPI_MB_ReleaseBuffer(mb);
            mb = NULL;
            continue;
        }

        // NOTE(review): assumes video_data_packet->buffer can hold mbSize
        // bytes — TODO confirm the buffer capacity against max VENC frame.
        memcpy(video_data_packet->buffer, mbPtr, mbSize);
        video_data_packet->video_frame_size = mbSize;

        // Hand the packet to the muxer thread via the video queue
        // (the queue takes ownership and frees it).
        if (video_queue != NULL)
        {
            video_queue->putVideoPacketQueue(video_data_packet);
        }
        else // BUGFIX: guarded against a NULL queue pointer
        {
            free(video_data_packet);
        }

        RK_MPI_MB_ReleaseBuffer(mb);
        mb = NULL;
    }

    if (save_file)
        fclose(save_file);

    int ret;
    ret = RK_MPI_VENC_DestroyChn(0);
    if (ret)
    {
        printf("Destroy Venc error! ret=%d\n", ret);
        return 0;
    }

    return NULL;
}

/* ADTS "profile" field value used by GetHeader (AAC-LC is object type 2,
 * encoded in ADTS as object-type-minus-1 = 1).
 * NOTE(review): the name says MP3 but this file builds AAC/ADTS headers —
 * presumably a misnomer; confirm before renaming. */
#define MP3_PROFILE_LOW 1

/* Maps a PCM sample rate to its ADTS sampling-frequency index. */
typedef struct FreqIdx_ {
  RK_S32 u32SmpRate; /* sample rate in Hz */
  RK_U8 u8FreqIdx;   /* ADTS sampling-frequency index (0-12) */
} FreqIdx;

/* Sample-rate -> ADTS frequency index lookup table. */
FreqIdx FreqIdxTbl[13] = {{96000, 0}, {88200, 1}, {64000, 2},  {48000, 3},
                          {44100, 4}, {32000, 5}, {24000, 6},  {22050, 7},
                          {16000, 8}, {12000, 9}, {11025, 10}, {8000, 11},
                          {7350, 12}};

/*
 * Build a 7-byte ADTS frame header for an AAC-LC payload.
 *
 * pu8Hdr     out: destination for the 7 header bytes
 * u32SmpRate sample rate in Hz (falls back to index 0 when the rate is
 *            not found in FreqIdxTbl)
 * u8Channel  channel count
 * u32DataLen payload length in bytes (total frame length adds 7)
 */
static void GetHeader(RK_U8 *pu8Hdr, RK_S32 u32SmpRate, RK_U8 u8Channel,
                      RK_U32 u32DataLen) {
  /* Look up the ADTS sampling-frequency index for this sample rate. */
  RK_U8 u8FreqIdx = 0;
  for (int tbl = 0; tbl < 13; tbl++) {
    const FreqIdx *entry = &FreqIdxTbl[tbl];
    if (entry->u32SmpRate == u32SmpRate) {
      u8FreqIdx = entry->u8FreqIdx;
      break;
    }
  }

  const RK_U32 u32PacketLen = u32DataLen + 7; /* frame length incl. header */
  pu8Hdr[0] = 0xFF;                           /* syncword (with next byte) */
  pu8Hdr[1] = 0xF1;                           /* MPEG-4, no CRC */
  pu8Hdr[2] = ((MP3_PROFILE_LOW) << 6) + (u8FreqIdx << 2) + (u8Channel >> 2);
  pu8Hdr[3] = (((u8Channel & 3) << 6) + (u32PacketLen >> 11));
  pu8Hdr[4] = ((u32PacketLen & 0x7FF) >> 3);
  pu8Hdr[5] = (((u32PacketLen & 7) << 5) + 0x1F);
  pu8Hdr[6] = 0xFC;
}

/*
 * Build a 7-byte ADTS header for an AAC frame of aac_length bytes,
 * deriving the sampling-frequency index and channel configuration from
 * the encoder context.
 */
static void get_adts_header(AVCodecContext *ctx, char *adts_header, int aac_length)
{
    /* ADTS frequency index equals the position in this table. */
    static const int sample_rates[13] = {96000, 88200, 64000, 48000, 44100,
                                         32000, 24000, 22050, 16000, 12000,
                                         11025, 8000,  7350};
    uint8_t freq_idx = 4; /* default 44100 Hz, as in the original switch */
    for (int i = 0; i < 13; i++) {
        if (ctx->sample_rate == sample_rates[i]) {
            freq_idx = (uint8_t)i;
            break;
        }
    }

    uint8_t chanCfg = ctx->channels;
    uint32_t frame_length = aac_length + 7; /* payload + 7-byte header */
    adts_header[0] = 0xFF;                  /* syncword (with next byte) */
    adts_header[1] = 0xF1;                  /* MPEG-4, no CRC */
    adts_header[2] = ((ctx->profile) << 6) + (freq_idx << 2) + (chanCfg >> 2);
    adts_header[3] = (((chanCfg & 3) << 6) + (frame_length >> 11));
    adts_header[4] = ((frame_length & 0x7FF) >> 3);
    adts_header[5] = (((frame_length & 7) << 5) + 0x1F);
    adts_header[6] = 0xFC;
}

/*
 * Drain every packet the AAC encoder can produce for `frame`, prepend a
 * 7-byte ADTS header when the encoder runs in global-header mode, and push
 * each finished frame into the global audio_queue (which takes ownership).
 *
 * Returns 0 on success or normal drain, a negative AVERROR on failure.
 * `out` is kept for interface compatibility; direct file writing is
 * currently disabled.
 */
int encode(AVCodecContext *codec_ctx, AVFrame *frame, AVPacket *pkt, FILE *out)
{
    int ret = 0;
    (void)out; // file output is disabled; data goes to audio_queue instead

    // Submit one raw audio frame to the encoder.
    ret = avcodec_send_frame(codec_ctx, frame);
    if (ret < 0)
    {
        printf("avcodec_send_frame failed\n");
        return ret;
    }

    while (1)
    {
        // Fetch the next encoded packet, if any.
        ret = avcodec_receive_packet(codec_ctx, pkt);
        /* BUGFIX: the original tested AVERROR(AVERROR_EOF), which negates
         * the already-negative AVERROR_EOF and can never match, so EOF fell
         * through to the error return below. */
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        if (ret < 0)
            return ret;

        int data_size = 0;
        audio_data_packet_t *audio_data_packet = (audio_data_packet_t *)malloc(sizeof(audio_data_packet_t));
        if (audio_data_packet == NULL) // BUGFIX: malloc result was unchecked
        {
            av_packet_unref(pkt);
            return AVERROR(ENOMEM);
        }

        /* AV_CODEC_FLAG_GLOBAL_HEADER means the encoder does not emit
         * in-band headers, so prepend the 7-byte ADTS header ourselves. */
        if (codec_ctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
        {
            char header[7] = {0};
            get_adts_header(codec_ctx, header, pkt->size);
            data_size = 7;
            memcpy(audio_data_packet->buffer, header, data_size);
        }

        // NOTE(review): assumes audio_data_packet->buffer can hold
        // 7 + pkt->size bytes — TODO confirm the buffer capacity.
        memcpy(audio_data_packet->buffer + data_size, pkt->data, pkt->size);
        data_size += pkt->size;
        audio_data_packet->audio_frame_size = data_size;
        audio_queue->putAudioPacketQueue(audio_data_packet);

        /* BUGFIX: release the packet's data reference every iteration to
         * avoid leaking encoder buffers. */
        av_packet_unref(pkt);
    }

    return 0;
}

void *audio_aenc_thread(void *args)
{
    pthread_detach(pthread_self());
    MEDIA_BUFFER mb = NULL;

    AENC_PROC_PARAM aenc_arg = *(AENC_PROC_PARAM *)args;
    free(args);

    FILE *save_file = NULL;
    const char *pOutPath = "audio_aenc_thread.aac";
    save_file = fopen(pOutPath, "w");
    if (!save_file)
      printf("ERROR: Open %s failed!\n", pOutPath);
    RK_U8 header[7];
    RK_S32 u32SampleRate = 16000;
    RK_U32 u32ChnCnt = 2;

    //软编码pcm转为acc
    // 查找与指定编解码器ID匹配的已注册编码器
    AVCodec* codec=avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (codec==NULL)
    {
        printf("Can't not find any encoder");
        return 0;
    }
    else
    {
        printf("Success find encoder \n");
    }

    // 分配一个AVCodecContext(编解码器上下文)
    AVCodecContext* codec_ctx=avcodec_alloc_context3(codec);
     if (!codec_ctx)
    {
        printf("Can't not allocate context3\n");
        return 0;
    }
    else
    {
        printf("Success allocate context3 \n");
    }

    // 设置编码器参数
    codec_ctx->bit_rate=153600;                     // 平均比特率
    codec_ctx->channel_layout=AV_CH_LAYOUT_STEREO;  // 音频通道布局
    codec_ctx->channels=av_get_channel_layout_nb_channels(codec_ctx->channel_layout);// 通道数量
    codec_ctx->sample_fmt=AV_SAMPLE_FMT_FLTP;       // 音频样本格式
    codec_ctx->sample_rate=16000;                   // 音频的采样率
    codec_ctx->profile=FF_PROFILE_AAC_LOW;          // aac级别
    codec_ctx->flags|=AV_CODEC_FLAG_GLOBAL_HEADER;  // 每帧数据需要头部 
    
    // 打开编码器
    avcodec_open2(codec_ctx,codec,NULL);
    
    // 申请帧数据结构体
    AVPacket* pkt=av_packet_alloc();
    
    // 申请原始数据结构体
    AVFrame* frame=av_frame_alloc();
    
    // 初始化原始数据结构体
    frame->channels=codec_ctx->channels;    // 原始音频数据通道数
    frame->format=codec_ctx->sample_fmt;    // 原始音频数据样本格式
    frame->nb_samples=codec_ctx->frame_size;// 原始音频数据采样个数
    av_frame_get_buffer(frame,0);           // 分配frame的buff
    int pts=0;

    while (1)
    {
        // 从指定通道中获取AENC数据
        mb = RK_MPI_SYS_GetMediaBuffer(RK_ID_AI, 0, -1);
        if (!mb)
        {
            printf("get aenc buffer error\n");
            break;
        }

        unsigned char *pcm_buffer = (unsigned char *)malloc(RK_MPI_MB_GetSize(mb));
        // 把AENC视频缓冲区数据传输到audio_data_packet的buffer中
        memcpy(pcm_buffer, RK_MPI_MB_GetPtr(mb), RK_MPI_MB_GetSize(mb));
 
        // 确保帧数据结构体是可写的
        // 如果帧已经是可写的，则什么也不做；如果不是，则分配新的缓冲区并复制数据
        int ret = av_frame_make_writable(frame);
        if(ret != 0){
        printf("av_frame_make_writable failed, ret = %d\n", ret);
        }

        // 填充音频帧（AVFrame 结构体）中的数据指针数组和行大小
        int need_size=av_samples_fill_arrays(
            frame->data,            // 要填充每个通道指针的数组
            frame->linesize,        // 计算出的行大小
            pcm_buffer, // PCM数据样本的缓冲区的指针
            frame->channels,        // 通道数
            frame->nb_samples,      // 单个通道中的样本数
            codec_ctx->sample_fmt,  // 样本格式
            0                       // 缓冲区大小对齐（0 = 默认，1 = 不对齐）
        ); 

        frame->pts=pts;
        pts+=1;

        // 将aac裸流数据添加头部信息并写入输出文件
        encode(codec_ctx,frame,pkt,save_file);       

        if(pcm_buffer)
        {
            free(pcm_buffer);
        }
        // 释放AENC资源
        RK_MPI_MB_ReleaseBuffer(mb);
    }

    MPP_CHN_S ai_channel;
    MPP_CHN_S aenc_channel;

    ai_channel.enModId = RK_ID_AI;
    ai_channel.s32ChnId = 0;

    aenc_channel.enModId = RK_ID_AENC;
    aenc_channel.s32ChnId = aenc_arg.aencId;

    int ret;
    ret = RK_MPI_AI_DisableChn(0);
    if (ret)
    {
        printf("Disable AENC CHN\n");
        return 0;
    }

    return NULL;
}


// 音视频合成推流线程
void *push_server_thread(void *args)
{
    pthread_detach(pthread_self());

    RKMEDIA_FFMPEG_CONFIG ffmpeg_config = *(RKMEDIA_FFMPEG_CONFIG *)args;
    free(args);
    // AVOutputFormat *fmt = NULL;
    int ret;

    printf("start push stream ...\n");

    while (1)
    {
     /*
     我们以转换到同一时基下的时间戳为例，假设上一时刻音、视频帧的保存时间戳都是0。
     当前任意保存一种视频帧，例如保存视频的时间戳为video_t1。接着比较时间戳，发现音频时间戳为0 < video_t1，保存一帧音频，时间戳为audio_t1。
     继续比较时间戳，发现audio_t1 < video_t1，选择保存一帧音频，时间戳为audio_t2。
     再一次比较时间戳video_t1 < audio_t2，选择保存一帧视频，时间戳为video_t2。
     int av_compare_ts(int64_t ts_a, AVRational_tb_b,int64_t ts_b, AVRational tb_b)
     {
         int64_t a = tb_a.num * (int64_t)tb_b.den;
         int64_t b = tb_b.num * (int64_t)tb_a.den;
         if ((FFABS64U(ts_a)|a|FFABS64U(ts_b)|b) <= INT_MAX)
             return (ts_a*a > ts_b*b) - (ts_a*a < ts_b*b);
         if (av_rescale_rnd(ts_a, a, b, AV_ROUND_DOWN) < ts_b)
             return -1;
          if (av_rescale_rnd(ts_b, b, a, AV_ROUND_DOWN) < ts_a)
             return -1;
         return 0;
     }
     */
    // printf("video timestamp:%d \n",ffmpeg_config.video_stream.next_timestamp);
    // printf("time_base %d %d\n",ffmpeg_config.video_stream.enc->time_base.den,ffmpeg_config.video_stream.enc->time_base.num);
    // printf("audio timestamp:%d \n",ffmpeg_config.audio_stream.next_timestamp);
    // printf("time_base %d %d\n",ffmpeg_config.audio_stream.enc->time_base.den,ffmpeg_config.audio_stream.enc->time_base.num);

    // AVRational video_timebase =  (AVRational){1, 30};
    // AVRational audio_timebase =  (AVRational){1, 16000};

    // printf("time_base %d %d\n",video_timebase.den,video_timebase.num);

    ret = av_compare_ts(ffmpeg_config.video_stream.next_timestamp,
                        ffmpeg_config.video_stream.enc->time_base,
                        ffmpeg_config.audio_stream.next_timestamp,
                        ffmpeg_config.audio_stream.enc->time_base);

        if (ret <= 0)
        {
            // printf("deal_video_avpacket \n");
            ret = deal_video_avpacket(ffmpeg_config.oc, &ffmpeg_config.video_stream); // 处理FFMPEG视频数据
            if (ret == -1)
            {
                printf("deal_video_avpacket error\n");
                break;
            }
        }
        else
        {
            // printf("deal_audio_avpacket \n");
            ret = deal_audio_avpacket(ffmpeg_config.oc, &ffmpeg_config.audio_stream); // 处理FFMPEG音频数据
            if (ret == -1)
            {
                printf("deal_audio_avpacket error\n");
                break;
            }
        }
    }

    av_write_trailer(ffmpeg_config.oc);                         // 写入AVFormatContext的尾巴
    free_stream(ffmpeg_config.oc, &ffmpeg_config.video_stream); // 释放VIDEO_STREAM的资源
    free_stream(ffmpeg_config.oc, &ffmpeg_config.audio_stream); // 释放AUDIO_STREAM的资源
    avio_closep(&ffmpeg_config.oc->pb);                         // 释放AVIO资源
    avformat_free_context(ffmpeg_config.oc);                    // 释放AVFormatContext资源
    return NULL;
}

