/*
 * RTP output format
 * Copyright (c) 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "mpegts.h"
#include "internal.h"
#include "mux.h"
#include "libavutil/mathematics.h"
#include "libavutil/random_seed.h"
#include "libavutil/opt.h"
#include <time.h>

#include "rtp.h"

#include "rtpenc_jtt1078.h"
#include "rtpenc.h"

#define OFFSET(x) offsetof(RTPJTTMuxContext, x)
/* Muxer private options, mapped onto RTPJTTMuxContext fields via av_opt.
 * The OFFSET macro was previously defined but unused; use it consistently. */
static const AVOption options[] = {
    { "payload_type",  "Specify RTP payload type",            OFFSET(payload_type),  AV_OPT_TYPE_INT,    { .i64 = -1 },   -1,      127,     AV_OPT_FLAG_ENCODING_PARAM },
    { "ssrc",          "Stream identifier",                   OFFSET(ssrc),          AV_OPT_TYPE_INT,    { .i64 = 0 },    INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "cname",         "CNAME to include in RTCP SR packets", OFFSET(cname),         AV_OPT_TYPE_STRING, { .str = NULL }, 0,       0,       AV_OPT_FLAG_ENCODING_PARAM },
    { "seq",           "Starting sequence number",            OFFSET(seq),           AV_OPT_TYPE_INT,    { .i64 = -1 },   -1,      65535,   AV_OPT_FLAG_ENCODING_PARAM },
    { "logic_channel", "logic channel number",                OFFSET(logic_channel), AV_OPT_TYPE_INT,    { .i64 = 1 },    1,       37,      AV_OPT_FLAG_ENCODING_PARAM },
    { "sim",           "sim card number",                     OFFSET(sim),           AV_OPT_TYPE_STRING, { .str = NULL }, 0,       0,       AV_OPT_FLAG_ENCODING_PARAM },

    { NULL },
};

/* AVClass exposing the private options table above to the av_opt API. */
static const AVClass rtp_jtt1078_muxer_class = {
    .class_name = "jtt1078/RTP muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Frame-timing state used to fill the JT/T 1078 timestamp and interval
 * fields in ff_rtp_send_data_jtt1078().
 * NOTE(review): these are non-static globals with external linkage, shared
 * by every muxer instance in the process — two concurrent JTT1078 muxers
 * would corrupt each other's timing. Consider moving this state into
 * RTPJTTMuxContext and making these `static` — confirm no other TU uses them. */
RTPJTTTimeRecord jtt_time_record;
uint8_t audio_frist_frame = 1;   /* ("first" misspelled) 1 until the first audio frame is sent */
uint8_t audio_timestamp = 40;    /* fixed per-audio-frame timestamp increment, presumably ms — TODO confirm */


#define RTCP_SR_SIZE 28

static int is_supported(enum AVCodecID id)
{
    switch(id) {
    case AV_CODEC_ID_DIRAC:
    case AV_CODEC_ID_H261:
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_H264:
    case AV_CODEC_ID_HEVC:
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
    case AV_CODEC_ID_MPEG4:
    case AV_CODEC_ID_AAC:
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
    case AV_CODEC_ID_PCM_ALAW:
    case AV_CODEC_ID_PCM_MULAW:
    case AV_CODEC_ID_PCM_S8:
    case AV_CODEC_ID_PCM_S16BE:
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S24BE:
    case AV_CODEC_ID_PCM_U16BE:
    case AV_CODEC_ID_PCM_U16LE:
    case AV_CODEC_ID_PCM_U8:
    case AV_CODEC_ID_MPEG2TS:
    case AV_CODEC_ID_AMR_NB:
    case AV_CODEC_ID_AMR_WB:
    case AV_CODEC_ID_VORBIS:
    case AV_CODEC_ID_THEORA:
    case AV_CODEC_ID_VP8:
    case AV_CODEC_ID_VP9:
    case AV_CODEC_ID_ADPCM_G722:
    case AV_CODEC_ID_ADPCM_G726:
    case AV_CODEC_ID_ADPCM_G726LE:
    case AV_CODEC_ID_ADPCM_MS:
    case AV_CODEC_ID_ILBC:
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_SPEEX:
    case AV_CODEC_ID_OPUS:
    case AV_CODEC_ID_RAWVIDEO:
    case AV_CODEC_ID_BITPACKED:
        return 1;
    default:
        return 0;
    }
}

/**
 * Initialize the JTT1078/RTP muxer: validate options, allocate one packet
 * buffer per stream and set up per-codec packetization parameters.
 *
 * Fixes over the previous revision:
 *  - unsupported codec returned a bare -1 instead of an AVERROR code;
 *  - a bad "sim" option length returned AVERROR(ENOMEM) instead of EINVAL;
 *  - the fail path freed only the current stream's buffer, leaking buffers
 *    allocated for earlier streams;
 *  - the payload size was forced to 950 even when the allocated buffer
 *    (s1->packet_size bytes) was smaller — now clamped;
 *  - per-context state (timestamps, ssrc, seq, packet size) was redundantly
 *    re-initialized on every loop iteration;
 *  - av_log() was sometimes handed the private context instead of s1.
 */
static int rtp_jtt1078_write_header(AVFormatContext *s1)
{
    RTPJTTMuxContext *s = s1->priv_data;
    int n, ret = AVERROR(EINVAL);
    int i = 0;
    int jtt_payload_size;
    AVStream *st;

    /* ---- context-wide initialization (once, not per stream) ---- */
    s->base_timestamp = av_get_random_seed();
    s->timestamp = s->base_timestamp;
    s->cur_timestamp = 0;
    if (!s->ssrc)
        s->ssrc = av_get_random_seed();
    s->first_packet = 1;
    s->first_rtcp_ntp_time = ff_ntp_time();
    if (s1->start_time_realtime != 0 && s1->start_time_realtime != AV_NOPTS_VALUE)
        /* Round the NTP time to whole milliseconds. */
        s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 +
                                 NTP_OFFSET_US;

    // Pick a random sequence start number, but in the lower end of the
    // available range, so that any wraparound doesn't happen immediately.
    // (Immediate wraparound would be an issue for SRTP.)
    if (s->seq < 0) {
        if (s1->flags & AVFMT_FLAG_BITEXACT)
            s->seq = 0;
        else
            s->seq = av_get_random_seed() & 0x0fff;
    } else
        s->seq &= 0xffff; // Use the given parameter, wrapped to the right interval

    if (s1->packet_size) {
        if (s1->pb->max_packet_size)
            s1->packet_size = FFMIN(s1->packet_size, s1->pb->max_packet_size);
    } else
        s1->packet_size = s1->pb->max_packet_size;
    if (s1->packet_size <= 12) {
        av_log(s1, AV_LOG_ERROR, "Max packet size %u too low\n", s1->packet_size);
        return AVERROR(EIO);
    }
    /* JT/T 1078 payload limit; clamp so a payload can never exceed the
     * allocated per-stream buffer. */
    jtt_payload_size = FFMIN(950, (int)s1->packet_size - 12);

    if (s->sim && strlen(s->sim) != 11) {
        av_log(s1, AV_LOG_ERROR, "sim len error(should be 11 chars)\n");
        return AVERROR(EINVAL);
    }

    /* ---- per-stream setup ---- */
    for (i = 0; i < s1->nb_streams; i++) {
        st = s1->streams[i];
        if (!is_supported(st->codecpar->codec_id)) {
            av_log(s1, AV_LOG_ERROR, "Unsupported codec %s\n",
                   avcodec_get_name(st->codecpar->codec_id));
            ret = AVERROR(EINVAL);
            goto fail;
        }

        av_log(s1, AV_LOG_DEBUG, "rtp_jtt1078_write_header codec_id = %d\n",
               st->codecpar->codec_id);

        if (s->payload_type[i] < 0) {
            /* Re-validate non-dynamic payload types */
            if (st->id < RTP_PT_PRIVATE)
                st->id = ff_rtp_get_payload_type(s1, st->codecpar, -1);

            s->payload_type[i] = st->id;
        } else {
            /* private option takes priority */
            st->id = s->payload_type[i];
        }

        s->buf[i] = av_malloc(s1->packet_size);
        if (!s->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        s->max_payload_size[i] = jtt_payload_size;

        /* Default time bases: audio runs at the sample rate, video at 90 kHz. */
        if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
            avpriv_set_pts_info(st, 32, 1, st->codecpar->sample_rate);
        else
            avpriv_set_pts_info(st, 32, 1, 90000);

        s->buf_ptr[i] = s->buf[i];

        switch (st->codecpar->codec_id) {
        case AV_CODEC_ID_MP2:
        case AV_CODEC_ID_MP3:
            s->buf_ptr[i] = s->buf[i] + 4; /* leave room for the 4-byte MPEG audio header */
            s->max_payload_size[i] = jtt_payload_size;
            s->payload_type[i] = 25; /* JT/T 1078 Table 12 */
            avpriv_set_pts_info(st, 32, 1, 90000);
            break;
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            break;
        case AV_CODEC_ID_MPEG2TS:
            /* Send whole TS packets only. */
            n = s->max_payload_size[i] / TS_PACKET_SIZE;
            if (n < 1)
                n = 1;
            s->max_payload_size[i] = n * TS_PACKET_SIZE;
            break;
        case AV_CODEC_ID_DIRAC:
            if (s1->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
                av_log(s1, AV_LOG_ERROR,
                       "Packetizing VC-2 is experimental and does not use all values "
                       "of the specification "
                       "(even though most receivers may handle it just fine). "
                       "Please set -strict experimental in order to enable it.\n");
                ret = AVERROR_EXPERIMENTAL;
                goto fail;
            }
            break;
        case AV_CODEC_ID_H261:
            if (s1->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
                av_log(s1, AV_LOG_ERROR,
                       "Packetizing H.261 is experimental and produces incorrect "
                       "packetization for cases where GOBs don't fit into packets "
                       "(even though most receivers may handle it just fine). "
                       "Please set -f_strict experimental in order to enable it.\n");
                ret = AVERROR_EXPERIMENTAL;
                goto fail;
            }
            break;
        case AV_CODEC_ID_H264:
            /* check for H.264 MP4 syntax */
            if (st->codecpar->extradata_size > 4 && st->codecpar->extradata[0] == 1) {
                s->nal_length_size = (st->codecpar->extradata[4] & 0x03) + 1;
            }
            s->payload_type[i] = 98; /* JT/T 1078 Table 12 */
            break;
        case AV_CODEC_ID_HEVC:
            /* Only check for the standardized hvcC version of extradata, keeping
             * things simple and similar to the avcC/H.264 case above, instead
             * of trying to handle the pre-standardization versions (as in
             * libavcodec/hevc.c). */
            if (st->codecpar->extradata_size > 21 && st->codecpar->extradata[0] == 1) {
                s->nal_length_size = (st->codecpar->extradata[21] & 0x03) + 1;
            }
            break;
        case AV_CODEC_ID_VP9:
            if (s1->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
                av_log(s1, AV_LOG_ERROR,
                       "Packetizing VP9 is experimental and its specification is "
                       "still in draft state. "
                       "Please set -strict experimental in order to enable it.\n");
                ret = AVERROR_EXPERIMENTAL;
                goto fail;
            }
            break;
        case AV_CODEC_ID_VORBIS:
        case AV_CODEC_ID_THEORA:
            s->max_frames_per_packet[i] = 15;
            break;
        case AV_CODEC_ID_ADPCM_G722:
            /* Due to a historical error, the clock rate for G722 in RTP is
             * 8000, even if the sample rate is 16000. See RFC 3551. */
            s->max_frames_per_packet[i] = 50;
            s->max_payload_size[i] = jtt_payload_size;
            s->payload_type[i] = 2; /* JT/T 1078 Table 12 */
            avpriv_set_pts_info(st, 32, 1, 8000);
            break;
        case AV_CODEC_ID_OPUS:
            if (st->codecpar->ch_layout.nb_channels > 2) {
                av_log(s1, AV_LOG_ERROR, "Multistream opus not supported in RTP\n");
                goto fail;
            }
            /* The opus RTP RFC says that all opus streams should use 48000 Hz
             * as clock rate, since all opus sample rates can be expressed in
             * this clock rate, and sample rate changes on the fly are supported. */
            avpriv_set_pts_info(st, 32, 1, 48000);
            break;
        case AV_CODEC_ID_ILBC:
            if (st->codecpar->block_align != 38 && st->codecpar->block_align != 50) {
                av_log(s1, AV_LOG_ERROR, "Incorrect iLBC block size specified\n");
                goto fail;
            }
            s->max_frames_per_packet[i] = s->max_payload_size[i] / st->codecpar->block_align;
            break;
        case AV_CODEC_ID_AMR_NB:
        case AV_CODEC_ID_AMR_WB:
            s->max_frames_per_packet[i] = 50;
            if (st->codecpar->codec_id == AV_CODEC_ID_AMR_NB)
                n = 31;
            else
                n = 61;
            /* max_header_toc_size + the largest AMR payload must fit */
            if (1 + s->max_frames_per_packet[i] + n > s->max_payload_size[i]) {
                av_log(s1, AV_LOG_ERROR, "RTP max payload size too small for AMR\n");
                goto fail;
            }
            if (st->codecpar->ch_layout.nb_channels != 1) {
                av_log(s1, AV_LOG_ERROR, "Only mono is supported\n");
                goto fail;
            }
            break;
        case AV_CODEC_ID_AAC:
            s->max_frames_per_packet[i] = 50;
            s->max_payload_size[i] = jtt_payload_size;
            s->payload_type[i] = 19; /* JT/T 1078 Table 12 */
            break;
        case AV_CODEC_ID_ADPCM_MS:
            s->max_payload_size[i] = jtt_payload_size;
            s->payload_type[i] = 26; /* JT/T 1078 Table 12 */
            break;
        case AV_CODEC_ID_ADPCM_G726:
        case AV_CODEC_ID_ADPCM_G726LE:
            s->max_payload_size[i] = jtt_payload_size;
            s->payload_type[i] = 8; /* JT/T 1078 Table 12 */
            break;
        case AV_CODEC_ID_PCM_ALAW:
        case AV_CODEC_ID_PCM_MULAW:
            s->max_frames_per_packet[i] = 50;
            s->max_payload_size[i] = jtt_payload_size;
            s->payload_type[i] = 6; /* JT/T 1078 Table 12 */
            break;
        default:
            s->max_frames_per_packet[i] = 50;
            s->max_payload_size[i] = jtt_payload_size;
            s->payload_type[i] = 6; /* JT/T 1078 Table 12 */
            break;
        }
    }
    return 0;

fail:
    /* Free every buffer allocated so far, not just the current stream's. */
    while (i >= 0)
        av_freep(&s->buf[i--]);
    return ret;
}

/* Send an RTCP sender report packet.
 * NOTE(review): the bare "return;" on the first line disables this function
 * entirely — everything below it is dead code kept for reference (the
 * declarations after the return rely on C99 mixed declarations). Confirm
 * whether RTCP SR/SDES/BYE is intentionally unsupported for JTT1078, and
 * either delete the body or remove the early return. */
static void rtcp_send_sr(AVFormatContext *s1, int64_t ntp_time, int bye)
{
    return ;
    RTPJTTMuxContext *s = s1->priv_data;
    uint32_t rtp_ts;

    av_log(s1, AV_LOG_TRACE, "RTCP: %02x %"PRIx64" %"PRIx32"\n", s->payload_type[0], ntp_time, s->timestamp);

    s->last_rtcp_ntp_time = ntp_time;
    /* Map the NTP delta since the first SR into the first stream's time base. */
    rtp_ts = av_rescale_q(ntp_time - s->first_rtcp_ntp_time, (AVRational){1, 1000000},
                          s1->streams[0]->time_base) + s->base_timestamp;
    avio_w8(s1->pb, RTP_VERSION << 6);
    avio_w8(s1->pb, RTCP_SR);
    avio_wb16(s1->pb, 6); /* length in words - 1 */
    avio_wb32(s1->pb, s->ssrc);
    avio_wb32(s1->pb, ntp_time / 1000000); /* NTP timestamp, integer seconds */
    avio_wb32(s1->pb, ((ntp_time % 1000000) << 32) / 1000000); /* NTP fractional part */
    avio_wb32(s1->pb, rtp_ts);
    avio_wb32(s1->pb, s->packet_count);
    avio_wb32(s1->pb, s->octet_count);

    if (s->cname) {
        int len = FFMIN(strlen(s->cname), 255);
        avio_w8(s1->pb, (RTP_VERSION << 6) + 1);
        avio_w8(s1->pb, RTCP_SDES);
        avio_wb16(s1->pb, (7 + len + 3) / 4); /* length in words - 1 */

        avio_wb32(s1->pb, s->ssrc);
        avio_w8(s1->pb, 0x01); /* CNAME */
        avio_w8(s1->pb, len);
        avio_write(s1->pb, s->cname, len);
        avio_w8(s1->pb, 0); /* END */
        for (len = (7 + len) % 4; len % 4; len++)
            avio_w8(s1->pb, 0); /* pad the SDES chunk to a 32-bit boundary */
    }

    if (bye) {
        avio_w8(s1->pb, (RTP_VERSION << 6) | 1);
        avio_w8(s1->pb, RTCP_BYE);
        avio_wb16(s1->pb, 1); /* length in words - 1 */
        avio_wb32(s1->pb, s->ssrc);
    }

    avio_flush(s1->pb);
}

/**
 * Fragment a raw (video) frame into JT/T 1078 packets.
 *
 * The fragmentation flag follows JT/T 1078 Table 14:
 *   0 = atomic (whole frame), 1 = first fragment,
 *   2 = last fragment,        3 = intermediate fragment.
 *
 * Bug fix: the previous revision marked a final fragment that exactly filled
 * max_payload_size as 3 (intermediate) instead of 2 (last), so receivers
 * could wait forever for the end of such a frame.
 */
static void rtp_send_raw_jtt1078(AVFormatContext *s1,
                         const uint8_t *buf1, int size, enum AVPictureType pict_type)
{
    RTPJTTMuxContext *s = s1->priv_data;
    int len, max_packet_size;
    int first = 1;
    uint8_t split = 0;

    max_packet_size = s->max_payload_size[0];
    s->timestamp = s->cur_timestamp;

    while (size > 0) {
        len = FFMIN(max_packet_size, size);

        if (first && len == size)
            split = 0;              /* whole frame fits in one packet */
        else if (first)
            split = 1;              /* first fragment */
        else if (len == size)
            split = 2;              /* last fragment (also when it fills the packet exactly) */
        else
            split = 3;              /* intermediate fragment */
        first = 0;

        s->timestamp = s->cur_timestamp;
        /* marker bit set on the packet that completes the frame */
        ff_rtp_send_data_jtt1078(s1, buf1, len, (len == size), pict_type, split, VEDIO);

        buf1 += len;
        size -= len;
    }
}

/* Send an integer number of samples: fill the stream's RTP buffer, compute
 * the timestamp and emit JT/T 1078 packets.
 *
 * Bug fix: as in rtp_send_raw_jtt1078(), a final fragment that exactly
 * filled max_packet_size was flagged 3 (intermediate) instead of 2 (last);
 * see JT/T 1078 Table 14 (0 atomic, 1 first, 2 last, 3 intermediate). */
static int jttrtp_send_samples(AVFormatContext *s1,
                            const uint8_t *buf1, int size, int sample_size_bits)
{
    RTPJTTMuxContext *s = s1->priv_data;
    int len, max_packet_size, n;
    /* Calculate the number of bytes to get samples aligned on a byte border */
    int aligned_samples_size = sample_size_bits/av_gcd(sample_size_bits, 8);
    int i, audio_index = 0;
    int first = 1;
    uint8_t split = 0;

    /* Find the index of the (first) audio stream. */
    for (i = 0; i < s1->nb_streams; i++) {
        if (s1->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_index = i;
            break;
        }
    }

    max_packet_size = (s->max_payload_size[audio_index] / aligned_samples_size) * aligned_samples_size;
    /* Not needed, but who knows. Don't check if samples aren't an even number of bytes. */
    if ((sample_size_bits % 8) == 0 && ((8 * size) % sample_size_bits) != 0)
        return AVERROR(EINVAL);
    n = 0;

    while (size > 0) {
        s->buf_ptr[audio_index] = s->buf[audio_index];
        len = FFMIN(max_packet_size, size);

        if (first && len == size)
            split = 0;              /* atomic packet */
        else if (first)
            split = 1;              /* first fragment */
        else if (len == size)
            split = 2;              /* last fragment (also on exact fill) */
        else
            split = 3;              /* intermediate fragment */
        first = 0;

        /* copy data */
        memcpy(s->buf_ptr[audio_index], buf1, len);
        s->buf_ptr[audio_index] += len;
        buf1 += len;
        size -= len;
        /* n counts bytes already sent; advance the timestamp by the samples they carried */
        s->timestamp = s->cur_timestamp + n * 8 / sample_size_bits;

        /* NOTE(review): pict_type 4 is passed for audio packets; the value is
         * unused for AUDIO in ff_rtp_send_data_jtt1078 — confirm intent. */
        ff_rtp_send_data_jtt1078(s1, s->buf[audio_index],
                                 s->buf_ptr[audio_index] - s->buf[audio_index],
                                 (split == 0 || split == 2), 4, split, AUDIO);
        n += (s->buf_ptr[audio_index] - s->buf[audio_index]);
    }
    return 0;
}

/* Convert one ASCII decimal digit to its 4-bit BCD value. */
static uint8_t str_to_bcd(char ch)
{
    return (uint8_t)((ch - '0') & 0x0F);
}

/* Pack an ASCII digit string into BCD nibbles.
 * Digit 0 goes into the low nibble of bcd[0]; each subsequent odd-indexed
 * digit fills the high nibble of the NEXT byte, so an 11-digit SIM yields a
 * 6-byte BCD with an implicit leading zero nibble (JT/T 1078 BCD[6] layout).
 * The caller provides a zeroed buffer of ceil((sz + 1) / 2) bytes. */
static void simbcd(uint8_t* bcd, const char* buffer, size_t sz)
{
    size_t idx;

    for (idx = 0; idx < sz; idx++) {
        uint8_t nibble = str_to_bcd(buffer[idx]);

        if (idx & 1)
            bcd[idx / 2 + 1] |= (uint8_t)(nibble << 4);
        else
            bcd[idx / 2] |= nibble;
    }
}

/* Send one JT/T 1078 packet. The sequence number is incremented; the caller
 * must update the timestamp itself.
 *
 * Fixes: the per-packet trace was logged at AV_LOG_ERROR and printed a
 * uint64_t with "%ld" (wrong conversion specifier, UB on ILP32 targets);
 * it now uses AV_LOG_TRACE and PRIu64. */
void ff_rtp_send_data_jtt1078(AVFormatContext *s1, const uint8_t *buf1, int len, int m, enum AVPictureType pict_type, uint8_t split, eDataType etype)
{
    RTPJTTMuxContext *s = s1->priv_data;

    uint32_t jtt1078_head = (0x30 << 24) | (0x31 << 16) | (0x63 << 8) | 0x64; /* "01cd" frame-header magic */
    uint8_t jtt1078_rtp_head = (RTP_VERSION << 6) | 0x01;
    uint8_t bcd[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    uint8_t logic_channel = s->logic_channel;
    uint8_t datatype = 0;
    uint16_t Last_I_Frame_Interval = 0;
    uint16_t Last_Frame_Interval = 0;
    uint16_t data_size = len;
    uint64_t relative_timestamp = 0;
    struct timespec ts;

    /* An atomic packet or the first fragment starts a new frame, so record
     * the frame times here. */
    if (split == 0 || split == 1) {
        clock_gettime(CLOCK_REALTIME, &ts);

        /* remember the previous frame's time, then stamp the current one (ms) */
        jtt_time_record.last_frame_time = jtt_time_record.cur_frame_time;
        jtt_time_record.cur_frame_time = ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;

        if (etype == AUDIO) {
            /* audio advances by a fixed per-frame increment, not wall clock */
            jtt_time_record.audio_frame_time += audio_timestamp;
            if (audio_frist_frame == 1) {
                audio_frist_frame = 0;
                jtt_time_record.audio_init_time = jtt_time_record.audio_frame_time;
            }
        }

        /* track I-frame times for the Last-I-Frame-Interval field */
        if (pict_type == AV_PICTURE_TYPE_I) {
            jtt_time_record.last_I_frame_time = jtt_time_record.cur_I_frame_time;
            jtt_time_record.cur_I_frame_time = jtt_time_record.cur_frame_time;
        }

        /* sequence number 0 means this is the very first frame */
        if (s->seq == 0) {
            jtt_time_record.init_time = jtt_time_record.cur_frame_time;
            jtt_time_record.last_frame_time = jtt_time_record.cur_frame_time;
            jtt_time_record.last_I_frame_time = jtt_time_record.cur_I_frame_time;
        }
    }

    Last_I_Frame_Interval = jtt_time_record.cur_frame_time - jtt_time_record.last_I_frame_time;
    Last_Frame_Interval = jtt_time_record.cur_frame_time - jtt_time_record.last_frame_time;
    if (etype == AUDIO)
        relative_timestamp = jtt_time_record.audio_frame_time - jtt_time_record.audio_init_time;
    else
        relative_timestamp = jtt_time_record.cur_frame_time - jtt_time_record.init_time;

    av_log(s1, AV_LOG_TRACE,
           "I_frame_interval = %d  Last_Frame_Interval = %d  relative_timestamp = %"PRIu64"\n",
           Last_I_Frame_Interval, Last_Frame_Interval, relative_timestamp);

    /* ---- build the JT/T 1078 header ---- */
    avio_wb32(s1->pb, jtt1078_head);
    avio_w8(s1->pb, jtt1078_rtp_head);

    /* 1 bit: complete-frame-boundary marker; 7 bits: payload type.
     * NOTE(review): payload_type is indexed by the data-type enum here but by
     * stream index in write_header — confirm this mapping is intended. */
    avio_w8(s1->pb, (s->payload_type[etype] & 0x7f) | ((m & 0x01) << 7));

    avio_wb16(s1->pb, s->seq);

    /* SIM number as BCD[6]; left as zeros when no sim option was given.
     * (The Guangdong provincial variant uses a 10-digit SIM instead.) */
    if (s->sim) {
        int sim_len = strlen(s->sim);
        simbcd(bcd, s->sim, sim_len);
    }
    avio_write(s1->pb, bcd, 6);
    avio_w8(s1->pb, logic_channel);

    switch (pict_type) {
    case AV_PICTURE_TYPE_I:
        datatype = 0;
        break;
    case AV_PICTURE_TYPE_P:
        datatype = 1;
        break;
    case AV_PICTURE_TYPE_B:
        datatype = 2;
        break;
    default:
        break;
    }
    /* data type (high 4 bits) and fragmentation flag (low 4 bits) */
    if (etype == VEDIO)
        avio_w8(s1->pb, (datatype << 4) | split);
    else
        avio_w8(s1->pb, 0x30 | split); /* 0x3x = audio frame */
    /* timestamp */
    avio_wb64(s1->pb, relative_timestamp);
    /* frame intervals exist for video packets only */
    if (etype == VEDIO) {
        avio_wb16(s1->pb, Last_I_Frame_Interval);
        avio_wb16(s1->pb, Last_Frame_Interval);
    }
    /* payload length */
    avio_wb16(s1->pb, data_size);

    avio_write(s1->pb, buf1, len);
    avio_flush(s1->pb);

    s->seq = (s->seq + 1) & 0xffff;
    s->octet_count += len;
    s->packet_count++;
}

/**
 * Write one packet: dispatch to the appropriate packetizer by codec.
 *
 * Fixes over the previous revision:
 *  - side_data[4] was dereferenced for H.264 and the default case without a
 *    NULL check, crashing on packets lacking quality-stats side data;
 *  - 16-bit PCM fell through to the 24-bit jttrtp_send_samples() call,
 *    mis-aligning samples on packet boundaries.
 */
static int rtp_jtt1078_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
    RTPJTTMuxContext *s = s1->priv_data;
    AVStream *st = s1->streams[pkt->stream_index];
    int rtcp_bytes;
    int size = pkt->size;
    enum AVPictureType pict_type = AV_PICTURE_TYPE_NONE;

    uint8_t *side_data;
    size_t side_data_size;

    /* Picture type is carried in byte 4 of the quality-stats side data. */
    side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, &side_data_size);
    if (side_data && side_data_size > 4)
        pict_type = side_data[4];

    /* Emit an RTCP SR on the first packet, then after enough payload and at
     * most every 5 seconds (note rtcp_send_sr is currently a no-op). */
    rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
        RTCP_TX_RATIO_DEN;
    if ((s->first_packet || ((rtcp_bytes >= RTCP_SR_SIZE) &&
                            (ff_ntp_time() - s->last_rtcp_ntp_time > 5000000))) &&
        !(s->flags & FF_RTP_FLAG_SKIP_RTCP)) {
        rtcp_send_sr(s1, ff_ntp_time(), 0);
        s->last_octet_count = s->octet_count;
        s->first_packet = 0;
    }
    s->cur_timestamp = s->base_timestamp + pkt->pts;

    switch (st->codecpar->codec_id) {
    case AV_CODEC_ID_PCM_MULAW:
    case AV_CODEC_ID_PCM_ALAW:
    case AV_CODEC_ID_PCM_U8:
    case AV_CODEC_ID_PCM_S8:
        return jttrtp_send_samples(s1, pkt->data, size, 8 * st->codecpar->ch_layout.nb_channels);
    case AV_CODEC_ID_PCM_U16BE:
    case AV_CODEC_ID_PCM_U16LE:
    case AV_CODEC_ID_PCM_S16BE:
    case AV_CODEC_ID_PCM_S16LE:
        /* previously fell through to the 24-bit call below */
        return jttrtp_send_samples(s1, pkt->data, size, 16 * st->codecpar->ch_layout.nb_channels);
    case AV_CODEC_ID_PCM_S24BE:
        return jttrtp_send_samples(s1, pkt->data, size, 24 * st->codecpar->ch_layout.nb_channels);
    case AV_CODEC_ID_ADPCM_G722:
        /* The actual sample size is half a byte per sample, but since the
         * stream clock rate is 8000 Hz while the sample rate is 16000 Hz,
         * the correct parameter for send_samples_bits is 8 bits per stream
         * clock. */
        return jttrtp_send_samples(s1, pkt->data, size, 8 * st->codecpar->ch_layout.nb_channels);
    case AV_CODEC_ID_ADPCM_G726:
    case AV_CODEC_ID_ADPCM_G726LE:
        /* fallthrough */
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
        /* NOTE(review): G.726 and MP2/MP3 packets are silently dropped here
         * (no packetizer is called) — confirm whether this is intentional. */
        break;
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        ff_rtp_send_mpegvideo(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_AAC:
        if (s->flags & FF_RTP_FLAG_MP4A_LATM)
            ff_rtp_send_latm(s1, pkt->data, size);
        else
            ff_rtp_send_aac(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_AMR_NB:
    case AV_CODEC_ID_AMR_WB:
        ff_rtp_send_amr(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_MPEG2TS:
        /* NOTE(review): raw TS packets are silently dropped (no packetizer). */
        break;
    case AV_CODEC_ID_DIRAC:
        ff_rtp_send_vc2hq(s1, pkt->data, size, st->codecpar->field_order != AV_FIELD_PROGRESSIVE ? 1 : 0);
        break;
    case AV_CODEC_ID_H264:
        rtp_send_raw_jtt1078(s1, pkt->data, size, pict_type);
        break;
    case AV_CODEC_ID_H261:
        ff_rtp_send_h261(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_H263:
        if (s->flags & FF_RTP_FLAG_RFC2190) {
            size_t mb_info_size;
            const uint8_t *mb_info =
                av_packet_get_side_data(pkt, AV_PKT_DATA_H263_MB_INFO,
                                        &mb_info_size);
            ff_rtp_send_h263_rfc2190(s1, pkt->data, size, mb_info, mb_info_size);
            break;
        }
        /* Fallthrough */
    case AV_CODEC_ID_H263P:
        ff_rtp_send_h263(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_HEVC:
        ff_rtp_send_h264_hevc(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_VORBIS:
    case AV_CODEC_ID_THEORA:
        ff_rtp_send_xiph(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_VP8:
        ff_rtp_send_vp8(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_VP9:
        ff_rtp_send_vp9(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_ILBC:
        /* NOTE(review): iLBC packets are silently dropped (no packetizer). */
        break;
    case AV_CODEC_ID_MJPEG:
        ff_rtp_send_jpeg(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_BITPACKED:
    case AV_CODEC_ID_RAWVIDEO: {
        int interlaced = st->codecpar->field_order != AV_FIELD_PROGRESSIVE;

        ff_rtp_send_raw_rfc4175(s1, pkt->data, size, interlaced, 0);
        if (interlaced)
            ff_rtp_send_raw_rfc4175(s1, pkt->data, size, interlaced, 1);
        break;
        }
    case AV_CODEC_ID_OPUS:
        if (size > s->max_payload_size[0]) {
            av_log(s1, AV_LOG_ERROR,
                   "Packet size %d too large for max RTP payload size %d\n",
                   size, s->max_payload_size[0]);
            return AVERROR(EINVAL);
        }
        /* Intentional fallthrough */
    case AV_CODEC_ID_ADPCM_MS:
        return jttrtp_send_samples(s1, pkt->data, size, 8 * st->codecpar->ch_layout.nb_channels);
    default:
        /* better than nothing: send the codec raw data */
        rtp_send_raw_jtt1078(s1, pkt->data, size, pict_type);
        break;
    }
    return 0;
}

/**
 * Finalize the muxer: optionally send an RTCP BYE and release the per-stream
 * packet buffers.
 *
 * Bug fix: the previous revision called av_freep(&s->buf), which releases
 * only buf[0] of the pointer array and leaked the buffers of every other
 * stream allocated in write_header().
 */
static int rtp_jtt1078_write_trailer(AVFormatContext *s1)
{
    RTPJTTMuxContext *s = s1->priv_data;
    int i;

    /* If the caller closes and recreates ->pb, this might actually
     * be NULL here even if it was successfully allocated at the start. */
    if (s1->pb && (s->flags & FF_RTP_FLAG_SEND_BYE))
        rtcp_send_sr(s1, ff_ntp_time(), 1);

    for (i = 0; i < s1->nb_streams; i++)
        av_freep(&s->buf[i]);

    return 0;
}

/* Muxer registration: JT/T 1078-2016 over an RTP-style packet stream. */
const FFOutputFormat ff_rtp_jtt1078_muxer = {
    .p.name            = "rtp_jtt1078",
    .p.long_name       = NULL_IF_CONFIG_SMALL("jtt1078-2016,RTP output, Copyright(Daqian.Peng)"),
    .priv_data_size    = sizeof(RTPJTTMuxContext),
    .p.audio_codec     = AV_CODEC_ID_PCM_MULAW, /* default audio codec */
    .p.video_codec     = AV_CODEC_ID_MPEG4,     /* default video codec */
    .write_header      = rtp_jtt1078_write_header,
    .write_packet      = rtp_jtt1078_write_packet,
    .write_trailer     = rtp_jtt1078_write_trailer,
    .p.priv_class      = &rtp_jtt1078_muxer_class,
    .p.flags           = AVFMT_TS_NONSTRICT,
};
