Column schema (one row per function):
- id: int32, values 0 to 27.3k
- func: string, length 26 to 142k characters
- target: bool, 2 classes
- project: string, 2 distinct values
- commit_id: string, length fixed at 40 characters (commit hashes)
- func_clean: string, length 26 to 131k characters
- vul_lines: dict
- normalized_func: string, length 24 to 132k characters
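Each row pairs a C function (func), a cleaned copy (func_clean), and an identifier-normalized copy (normalized_func) with a boolean target label, the source project, the originating commit_id, and the vulnerable lines recorded in vul_lines. The snippet below is a minimal sketch for inspecting rows with this schema; the file name functions.jsonl and the JSON Lines storage format are assumptions made for illustration, not something stated by the source.

```python
# Sketch: load rows with the schema above and run basic sanity checks.
# Assumption: rows are stored locally as JSON Lines, one object per row
# with the fields id, func, target, project, commit_id, func_clean,
# vul_lines, normalized_func.
import json

with open("functions.jsonl", "r", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh]

print(len(rows), "rows")
print("projects:", sorted({r["project"] for r in rows}))   # schema says 2 distinct values
print("targets:", sorted({r["target"] for r in rows}))     # schema says 2 classes
assert all(len(r["commit_id"]) == 40 for r in rows)        # fixed-length commit hashes

# Rows flagged as vulnerable should carry the offending lines in vul_lines;
# rows with target == false (like the two examples below) have empty lists.
vulnerable = [r for r in rows if r["target"]]
for r in vulnerable[:3]:
    print(r["id"], r["project"], r["commit_id"], r["vul_lines"]["line_no"])
```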
Row 0
id: 0
func:
static av_cold int vdadec_init(AVCodecContext *avctx) { VDADecoderContext *ctx = avctx->priv_data; struct vda_context *vda_ctx = &ctx->vda_ctx; OSStatus status; int ret; ctx->h264_initialized = 0; /* init pix_fmts of codec */ if (!ff_h264_vda_decoder.pix_fmts) { if (kCFCoreFoundationVersionNumber < kCFCoreFoundationVersionNumber10_7) ff_h264_vda_decoder.pix_fmts = vda_pixfmts_prior_10_7; else ff_h264_vda_decoder.pix_fmts = vda_pixfmts; } /* init vda */ memset(vda_ctx, 0, sizeof(struct vda_context)); vda_ctx->width = avctx->width; vda_ctx->height = avctx->height; vda_ctx->format = 'avc1'; vda_ctx->use_sync_decoding = 1; vda_ctx->use_ref_buffer = 1; ctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts); switch (ctx->pix_fmt) { case AV_PIX_FMT_UYVY422: vda_ctx->cv_pix_fmt_type = '2vuy'; break; case AV_PIX_FMT_YUYV422: vda_ctx->cv_pix_fmt_type = 'yuvs'; break; case AV_PIX_FMT_NV12: vda_ctx->cv_pix_fmt_type = '420v'; break; case AV_PIX_FMT_YUV420P: vda_ctx->cv_pix_fmt_type = 'y420'; break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n", avctx->pix_fmt); goto failed; } status = ff_vda_create_decoder(vda_ctx, avctx->extradata, avctx->extradata_size); if (status != kVDADecoderNoErr) { av_log(avctx, AV_LOG_ERROR, "Failed to init VDA decoder: %d.\n", status); goto failed; } avctx->hwaccel_context = vda_ctx; /* changes callback functions */ avctx->get_format = get_format; avctx->get_buffer2 = get_buffer2; #if FF_API_GET_BUFFER // force the old get_buffer to be empty avctx->get_buffer = NULL; #endif /* init H.264 decoder */ ret = ff_h264_decoder.init(avctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 decoder.\n"); goto failed; } ctx->h264_initialized = 1; return 0; failed: vdadec_close(avctx); return -1; }
target: false
project: FFmpeg
commit_id: 973b1a6b9070e2bf17d17568cbaf4043ce931f51
func_clean:
static av_cold int vdadec_init(AVCodecContext *avctx) { VDADecoderContext *ctx = avctx->priv_data; struct vda_context *vda_ctx = &ctx->vda_ctx; OSStatus status; int ret; ctx->h264_initialized = 0; if (!ff_h264_vda_decoder.pix_fmts) { if (kCFCoreFoundationVersionNumber < kCFCoreFoundationVersionNumber10_7) ff_h264_vda_decoder.pix_fmts = vda_pixfmts_prior_10_7; else ff_h264_vda_decoder.pix_fmts = vda_pixfmts; } memset(vda_ctx, 0, sizeof(struct vda_context)); vda_ctx->width = avctx->width; vda_ctx->height = avctx->height; vda_ctx->format = 'avc1'; vda_ctx->use_sync_decoding = 1; vda_ctx->use_ref_buffer = 1; ctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts); switch (ctx->pix_fmt) { case AV_PIX_FMT_UYVY422: vda_ctx->cv_pix_fmt_type = '2vuy'; break; case AV_PIX_FMT_YUYV422: vda_ctx->cv_pix_fmt_type = 'yuvs'; break; case AV_PIX_FMT_NV12: vda_ctx->cv_pix_fmt_type = '420v'; break; case AV_PIX_FMT_YUV420P: vda_ctx->cv_pix_fmt_type = 'y420'; break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n", avctx->pix_fmt); goto failed; } status = ff_vda_create_decoder(vda_ctx, avctx->extradata, avctx->extradata_size); if (status != kVDADecoderNoErr) { av_log(avctx, AV_LOG_ERROR, "Failed to init VDA decoder: %d.\n", status); goto failed; } avctx->hwaccel_context = vda_ctx; avctx->get_format = get_format; avctx->get_buffer2 = get_buffer2; #if FF_API_GET_BUFFER avctx->get_buffer = NULL; #endif ret = ff_h264_decoder.init(avctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 decoder.\n"); goto failed; } ctx->h264_initialized = 1; return 0; failed: vdadec_close(avctx); return -1; }
{ "code": [], "line_no": [] }
static av_cold int FUNC_0(AVCodecContext *avctx) { VDADecoderContext *ctx = avctx->priv_data; struct vda_context *VAR_0 = &ctx->VAR_0; OSStatus status; int VAR_1; ctx->h264_initialized = 0; if (!ff_h264_vda_decoder.pix_fmts) { if (kCFCoreFoundationVersionNumber < kCFCoreFoundationVersionNumber10_7) ff_h264_vda_decoder.pix_fmts = vda_pixfmts_prior_10_7; else ff_h264_vda_decoder.pix_fmts = vda_pixfmts; } memset(VAR_0, 0, sizeof(struct vda_context)); VAR_0->width = avctx->width; VAR_0->height = avctx->height; VAR_0->format = 'avc1'; VAR_0->use_sync_decoding = 1; VAR_0->use_ref_buffer = 1; ctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts); switch (ctx->pix_fmt) { case AV_PIX_FMT_UYVY422: VAR_0->cv_pix_fmt_type = '2vuy'; break; case AV_PIX_FMT_YUYV422: VAR_0->cv_pix_fmt_type = 'yuvs'; break; case AV_PIX_FMT_NV12: VAR_0->cv_pix_fmt_type = '420v'; break; case AV_PIX_FMT_YUV420P: VAR_0->cv_pix_fmt_type = 'y420'; break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n", avctx->pix_fmt); goto failed; } status = ff_vda_create_decoder(VAR_0, avctx->extradata, avctx->extradata_size); if (status != kVDADecoderNoErr) { av_log(avctx, AV_LOG_ERROR, "Failed to init VDA decoder: %d.\n", status); goto failed; } avctx->hwaccel_context = VAR_0; avctx->get_format = get_format; avctx->get_buffer2 = get_buffer2; #if FF_API_GET_BUFFER avctx->get_buffer = NULL; #endif VAR_1 = ff_h264_decoder.init(avctx); if (VAR_1 < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 decoder.\n"); goto failed; } ctx->h264_initialized = 1; return 0; failed: vdadec_close(avctx); return -1; }
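Row 0 shows the relationship between func and normalized_func: the function name is rewritten to FUNC_0 and selected local identifiers to VAR_0, VAR_1, ... while library names, types, and struct members are kept. The sketch below reproduces that rewriting for a toy snippet. It is only illustrative: the identifier lists are supplied by hand, the helper name normalize is made up for the example, and a real pipeline would presumably use a C parser to decide which identifiers are locals.

```python
# Sketch: whole-word renaming of chosen identifiers, mimicking the
# func -> normalized_func mapping seen in row 0 (vdadec_init -> FUNC_0,
# vda_ctx -> VAR_0, ret -> VAR_1). Identifier discovery is assumed done.
import re

def normalize(source: str, func_names: list[str], var_names: list[str]) -> str:
    mapping = {name: f"FUNC_{i}" for i, name in enumerate(func_names)}
    mapping.update({name: f"VAR_{i}" for i, name in enumerate(var_names)})
    # Match whole identifiers only, so "ret" does not rewrite "return".
    pattern = re.compile(r"\b(" + "|".join(map(re.escape, mapping)) + r")\b")
    return pattern.sub(lambda m: mapping[m.group(1)], source)

snippet = ("static av_cold int vdadec_init(AVCodecContext *avctx) "
           "{ int ret; ret = 0; return ret; }")
print(normalize(snippet, ["vdadec_init"], ["vda_ctx", "ret"]))
# -> static av_cold int FUNC_0(AVCodecContext *avctx) { int VAR_1; VAR_1 = 0; return VAR_1; }
```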
Row 1
id: 1
func:
static int transcode(AVFormatContext **output_files, int nb_output_files, InputFile *input_files, int nb_input_files, StreamMap *stream_maps, int nb_stream_maps) { int ret = 0, i, j, k, n, nb_ostreams = 0, step; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; OutputStream *ost, **ost_table = NULL; InputStream *ist; char error[1024]; int key; int want_sdp = 1; uint8_t no_packet[MAX_FILES]={0}; int no_packet_count=0; int nb_frame_threshold[AVMEDIA_TYPE_NB]={0}; int nb_streams[AVMEDIA_TYPE_NB]={0}; if (rate_emu) for (i = 0; i < nb_input_streams; i++) input_streams[i].start = av_gettime(); /* output stream init */ nb_ostreams = 0; for(i=0;i<nb_output_files;i++) { os = output_files[i]; if (!os->nb_streams && !(os->oformat->flags & AVFMT_NOSTREAMS)) { av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Output file #%d does not contain any stream\n", i); ret = AVERROR(EINVAL); goto fail; } nb_ostreams += os->nb_streams; } if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) { fprintf(stderr, "Number of stream maps must match number of output streams\n"); ret = AVERROR(EINVAL); goto fail; } /* Sanity check the mapping args -- do the input files & streams exist? */ for(i=0;i<nb_stream_maps;i++) { int fi = stream_maps[i].file_index; int si = stream_maps[i].stream_index; if (fi < 0 || fi > nb_input_files - 1 || si < 0 || si > input_files[fi].ctx->nb_streams - 1) { fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si); ret = AVERROR(EINVAL); goto fail; } fi = stream_maps[i].sync_file_index; si = stream_maps[i].sync_stream_index; if (fi < 0 || fi > nb_input_files - 1 || si < 0 || si > input_files[fi].ctx->nb_streams - 1) { fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si); ret = AVERROR(EINVAL); goto fail; } } ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams); if (!ost_table) goto fail; for(k=0;k<nb_output_files;k++) { os = output_files[k]; for(i=0;i<os->nb_streams;i++,n++) { nb_streams[os->streams[i]->codec->codec_type]++; } } for(step=1<<30; step; step>>=1){ int found_streams[AVMEDIA_TYPE_NB]={0}; for(j=0; j<AVMEDIA_TYPE_NB; j++) nb_frame_threshold[j] += step; for(j=0; j<nb_input_streams; j++) { int skip=0; ist = &input_streams[j]; if(opt_programid){ int pi,si; AVFormatContext *f= input_files[ ist->file_index ].ctx; skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(si=0; si<p->nb_stream_indexes; si++){ if(f->streams[ p->stream_index[si] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && nb_frame_threshold[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames){ found_streams[ist->st->codec->codec_type]++; } } for(j=0; j<AVMEDIA_TYPE_NB; j++) if(found_streams[j] < nb_streams[j]) nb_frame_threshold[j] -= step; } n = 0; for(k=0;k<nb_output_files;k++) { os = output_files[k]; for(i=0;i<os->nb_streams;i++,n++) { int found; ost = ost_table[n] = output_streams_for_file[k][i]; if (nb_stream_maps > 0) { ost->source_index = input_files[stream_maps[n].file_index].ist_index + stream_maps[n].stream_index; /* Sanity check that the stream types match */ if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) { int i= ost->file_index; av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n", stream_maps[n].file_index, stream_maps[n].stream_index, ost->file_index, ost->index); ffmpeg_exit(1); } } else { /* get corresponding input 
stream index : we select the first one with the right type */ found = 0; for (j = 0; j < nb_input_streams; j++) { int skip=0; ist = &input_streams[j]; if(opt_programid){ int pi,si; AVFormatContext *f = input_files[ist->file_index].ctx; skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(si=0; si<p->nb_stream_indexes; si++){ if(f->streams[ p->stream_index[si] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && ist->st->codec->codec_type == ost->st->codec->codec_type && nb_frame_threshold[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames) { ost->source_index = j; found = 1; break; } } if (!found) { if(! opt_programid) { /* try again and reuse existing stream */ for (j = 0; j < nb_input_streams; j++) { ist = &input_streams[j]; if ( ist->st->codec->codec_type == ost->st->codec->codec_type && ist->st->discard != AVDISCARD_ALL) { ost->source_index = j; found = 1; } } } if (!found) { int i= ost->file_index; av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n", ost->file_index, ost->index); ffmpeg_exit(1); } } } ist = &input_streams[ost->source_index]; ist->discard = 0; ost->sync_ist = (nb_stream_maps > 0) ? &input_streams[input_files[stream_maps[n].sync_file_index].ist_index + stream_maps[n].sync_stream_index] : ist; } } /* for each output stream, we compute the right encoding parameters */ for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; os = output_files[ost->file_index]; ist = &input_streams[ost->source_index]; codec = ost->st->codec; icodec = ist->st->codec; if (metadata_streams_autocopy) av_dict_copy(&ost->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); ost->st->disposition = ist->st->disposition; codec->bits_per_raw_sample= icodec->bits_per_raw_sample; codec->chroma_sample_location = icodec->chroma_sample_location; if (ost->st->stream_copy) { uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; if (extra_size > INT_MAX) goto fail; /* if stream_copy is selected, no need to decode or encode */ codec->codec_id = icodec->codec_id; codec->codec_type = icodec->codec_type; if(!codec->codec_tag){ if( !os->oformat->codec_tag || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0) codec->codec_tag = icodec->codec_tag; } codec->bit_rate = icodec->bit_rate; codec->rc_max_rate = icodec->rc_max_rate; codec->rc_buffer_size = icodec->rc_buffer_size; codec->extradata= av_mallocz(extra_size); if (!codec->extradata) goto fail; memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); codec->extradata_size= icodec->extradata_size; if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; av_reduce(&codec->time_base.num, &codec->time_base.den, codec->time_base.num, codec->time_base.den, INT_MAX); }else codec->time_base = ist->st->time_base; switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(audio_volume != 256) { fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\n"); ffmpeg_exit(1); } codec->channel_layout = icodec->channel_layout; codec->sample_rate = icodec->sample_rate; codec->channels = icodec->channels; codec->frame_size = icodec->frame_size; codec->audio_service_type = 
icodec->audio_service_type; codec->block_align= icodec->block_align; if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3) codec->block_align= 0; if(codec->codec_id == CODEC_ID_AC3) codec->block_align= 0; break; case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = icodec->pix_fmt; codec->width = icodec->width; codec->height = icodec->height; codec->has_b_frames = icodec->has_b_frames; if (!codec->sample_aspect_ratio.num) { codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio : ist->st->codec->sample_aspect_ratio.num ? ist->st->codec->sample_aspect_ratio : (AVRational){0, 1}; } break; case AVMEDIA_TYPE_SUBTITLE: codec->width = icodec->width; codec->height = icodec->height; break; case AVMEDIA_TYPE_DATA: break; default: abort(); } } else { if (!ost->enc) ost->enc = avcodec_find_encoder(ost->st->codec->codec_id); switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ost->fifo= av_fifo_alloc(1024); if(!ost->fifo) goto fail; ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE); if (!codec->sample_rate) { codec->sample_rate = icodec->sample_rate; if (icodec->lowres) codec->sample_rate >>= icodec->lowres; } choose_sample_rate(ost->st, ost->enc); codec->time_base = (AVRational){1, codec->sample_rate}; if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) codec->sample_fmt = icodec->sample_fmt; choose_sample_fmt(ost->st, ost->enc); if (!codec->channels) { codec->channels = icodec->channels; codec->channel_layout = icodec->channel_layout; } if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) codec->channel_layout = 0; ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1; icodec->request_channels = codec->channels; ist->decoding_needed = 1; ost->encoding_needed = 1; ost->resample_sample_fmt = icodec->sample_fmt; ost->resample_sample_rate = icodec->sample_rate; ost->resample_channels = icodec->channels; break; case AVMEDIA_TYPE_VIDEO: if (codec->pix_fmt == PIX_FMT_NONE) codec->pix_fmt = icodec->pix_fmt; choose_pixel_fmt(ost->st, ost->enc); if (ost->st->codec->pix_fmt == PIX_FMT_NONE) { fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n"); ffmpeg_exit(1); } ost->video_resample = codec->width != icodec->width || codec->height != icodec->height || codec->pix_fmt != icodec->pix_fmt; if (ost->video_resample) { codec->bits_per_raw_sample= frame_bits_per_raw_sample; } if (!codec->width || !codec->height) { codec->width = icodec->width; codec->height = icodec->height; } ost->resample_height = icodec->height; ost->resample_width = icodec->width; ost->resample_pix_fmt= icodec->pix_fmt; ost->encoding_needed = 1; ist->decoding_needed = 1; if (!ost->frame_rate.num) ost->frame_rate = ist->st->r_frame_rate.num ? 
ist->st->r_frame_rate : (AVRational){25,1}; if (ost->enc && ost->enc->supported_framerates && !force_fps) { int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); ost->frame_rate = ost->enc->supported_framerates[idx]; } codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num}; if( av_q2d(codec->time_base) < 0.001 && video_sync_method && (video_sync_method==1 || (video_sync_method<0 && !(os->oformat->flags & AVFMT_VARIABLE_FPS)))){ av_log(os, AV_LOG_WARNING, "Frame rate very high for a muxer not effciciently supporting it.\n" "Please consider specifiying a lower framerate, a different muxer or -vsync 2\n"); } #if CONFIG_AVFILTER if (configure_video_filters(ist, ost)) { fprintf(stderr, "Error opening filters!\n"); exit(1); } #endif break; case AVMEDIA_TYPE_SUBTITLE: ost->encoding_needed = 1; ist->decoding_needed = 1; break; default: abort(); break; } /* two pass mode */ if (ost->encoding_needed && codec->codec_id != CODEC_ID_H264 && (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char logfilename[1024]; FILE *f; snprintf(logfilename, sizeof(logfilename), "%s-%d.log", pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX, i); if (codec->flags & CODEC_FLAG_PASS1) { f = fopen(logfilename, "wb"); if (!f) { fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno)); ffmpeg_exit(1); } ost->logfile = f; } else { char *logbuffer; size_t logbuffer_size; if (read_file(logfilename, &logbuffer, &logbuffer_size) < 0) { fprintf(stderr, "Error reading log file '%s' for pass-2 encoding\n", logfilename); ffmpeg_exit(1); } codec->stats_in = logbuffer; } } } if(codec->codec_type == AVMEDIA_TYPE_VIDEO){ /* maximum video buffer size is 6-bytes per pixel, plus DPX header size */ int size= codec->width * codec->height; bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 1664); } } if (!bit_buffer) bit_buffer = av_malloc(bit_buffer_size); if (!bit_buffer) { fprintf(stderr, "Cannot allocate %d bytes output buffer\n", bit_buffer_size); ret = AVERROR(ENOMEM); goto fail; } /* open each encoder */ for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost->encoding_needed) { AVCodec *codec = ost->enc; AVCodecContext *dec = input_streams[ost->source_index].st->codec; if (!codec) { snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d", ost->st->codec->codec_id, ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } if (dec->subtitle_header) { ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size); if (!ost->st->codec->subtitle_header) { ret = AVERROR(ENOMEM); goto dump_format; } memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); ost->st->codec->subtitle_header_size = dec->subtitle_header_size; } if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) { snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height", ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ost->st->codec, 1); assert_avoptions(ost->opts); if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000) av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." 
"It takes bits/s as argument, not kbits/s\n"); extra_size += ost->st->codec->extradata_size; } } /* open each decoder */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { AVCodec *codec = ist->dec; if (!codec) codec = avcodec_find_decoder(ist->st->codec->codec_id); if (!codec) { snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d", ist->st->codec->codec_id, ist->file_index, ist->st->index); ret = AVERROR(EINVAL); goto dump_format; } if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d", ist->file_index, ist->st->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ist->st->codec, 0); assert_avoptions(ost->opts); //if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD; } } /* init pts */ for (i = 0; i < nb_input_streams; i++) { AVStream *st; ist = &input_streams[i]; st= ist->st; ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0; ist->next_pts = AV_NOPTS_VALUE; ist->is_start = 1; } /* set meta data information from input file if required */ for (i=0;i<nb_meta_data_maps;i++) { AVFormatContext *files[2]; AVDictionary **meta[2]; int j; #define METADATA_CHECK_INDEX(index, nb_elems, desc)\ if ((index) < 0 || (index) >= (nb_elems)) {\ snprintf(error, sizeof(error), "Invalid %s index %d while processing metadata maps\n",\ (desc), (index));\ ret = AVERROR(EINVAL);\ goto dump_format;\ } int out_file_index = meta_data_maps[i][0].file; int in_file_index = meta_data_maps[i][1].file; if (in_file_index < 0 || out_file_index < 0) continue; METADATA_CHECK_INDEX(out_file_index, nb_output_files, "output file") METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file") files[0] = output_files[out_file_index]; files[1] = input_files[in_file_index].ctx; for (j = 0; j < 2; j++) { MetadataMap *map = &meta_data_maps[i][j]; switch (map->type) { case 'g': meta[j] = &files[j]->metadata; break; case 's': METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, "stream") meta[j] = &files[j]->streams[map->index]->metadata; break; case 'c': METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, "chapter") meta[j] = &files[j]->chapters[map->index]->metadata; break; case 'p': METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, "program") meta[j] = &files[j]->programs[map->index]->metadata; break; } } av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); } /* copy global metadata by default */ if (metadata_global_autocopy) { for (i = 0; i < nb_output_files; i++) av_dict_copy(&output_files[i]->metadata, input_files[0].ctx->metadata, AV_DICT_DONT_OVERWRITE); } /* copy chapters according to chapter maps */ for (i = 0; i < nb_chapter_maps; i++) { int infile = chapter_maps[i].in_file; int outfile = chapter_maps[i].out_file; if (infile < 0 || outfile < 0) continue; if (infile >= nb_input_files) { snprintf(error, sizeof(error), "Invalid input file index %d in chapter mapping.\n", infile); ret = AVERROR(EINVAL); goto dump_format; } if (outfile >= nb_output_files) { snprintf(error, sizeof(error), "Invalid output file index %d in chapter mapping.\n",outfile); ret = AVERROR(EINVAL); goto dump_format; } copy_chapters(infile, outfile); } /* copy chapters from the first input file that has them*/ if (!nb_chapter_maps) for (i = 0; i < nb_input_files; i++) { if (!input_files[i].ctx->nb_chapters) continue; for (j = 0; j < 
nb_output_files; j++) if ((ret = copy_chapters(i, j)) < 0) goto dump_format; break; } /* open files and write file headers */ for(i=0;i<nb_output_files;i++) { os = output_files[i]; if (avformat_write_header(os, &output_opts[i]) < 0) { snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i); ret = AVERROR(EINVAL); goto dump_format; } assert_avoptions(output_opts[i]); if (strcmp(output_files[i]->oformat->name, "rtp")) { want_sdp = 0; } } dump_format: /* dump the file output parameters - cannot be done before in case of stream copy */ for(i=0;i<nb_output_files;i++) { av_dump_format(output_files[i], i, output_files[i]->filename, 1); } /* dump the stream mapping */ if (verbose >= 0) { fprintf(stderr, "Stream mapping:\n"); for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; fprintf(stderr, " Stream #%d.%d -> #%d.%d", input_streams[ost->source_index].file_index, input_streams[ost->source_index].st->index, ost->file_index, ost->index); if (ost->sync_ist != &input_streams[ost->source_index]) fprintf(stderr, " [sync #%d.%d]", ost->sync_ist->file_index, ost->sync_ist->st->index); fprintf(stderr, "\n"); } } if (ret) { fprintf(stderr, "%s\n", error); goto fail; } if (want_sdp) { print_sdp(output_files, nb_output_files); } if (!using_stdin) { if(verbose >= 0) fprintf(stderr, "Press [q] to stop, [?] for help\n"); avio_set_interrupt_cb(decode_interrupt_cb); } term_init(); timer_start = av_gettime(); for(; received_sigterm == 0;) { int file_index, ist_index; AVPacket pkt; double ipts_min; double opts_min; redo: ipts_min= 1e100; opts_min= 1e100; /* if 'q' pressed, exits */ if (!using_stdin) { if (q_pressed) break; /* read_key() returns 0 on EOF */ key = read_key(); if (key == 'q') break; if (key == '+') verbose++; if (key == '-') verbose--; if (key == 's') qp_hist ^= 1; if (key == 'h'){ if (do_hex_dump){ do_hex_dump = do_pkt_dump = 0; } else if(do_pkt_dump){ do_hex_dump = 1; } else do_pkt_dump = 1; av_log_set_level(AV_LOG_DEBUG); } if (key == 'd' || key == 'D'){ int debug=0; if(key == 'D') { debug = input_streams[0].st->codec->debug<<1; if(!debug) debug = 1; while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash debug += debug; }else scanf("%d", &debug); for(i=0;i<nb_input_streams;i++) { input_streams[i].st->codec->debug = debug; } for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; ost->st->codec->debug = debug; } if(debug) av_log_set_level(AV_LOG_DEBUG); fprintf(stderr,"debug=%d\n", debug); } if (key == '?'){ fprintf(stderr, "key function\n" "? 
show this help\n" "+ increase verbosity\n" "- decrease verbosity\n" "D cycle through available debug modes\n" "h dump packets/hex press to cycle through the 3 states\n" "q quit\n" "s Show QP histogram\n" ); } } /* select the stream that we must read now by looking at the smallest output pts */ file_index = -1; for(i=0;i<nb_ostreams;i++) { double ipts, opts; ost = ost_table[i]; os = output_files[ost->file_index]; ist = &input_streams[ost->source_index]; if(ist->is_past_recording_time || no_packet[ist->file_index]) continue; opts = ost->st->pts.val * av_q2d(ost->st->time_base); ipts = (double)ist->pts; if (!input_files[ist->file_index].eof_reached){ if(ipts < ipts_min) { ipts_min = ipts; if(input_sync ) file_index = ist->file_index; } if(opts < opts_min) { opts_min = opts; if(!input_sync) file_index = ist->file_index; } } if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){ file_index= -1; break; } } /* if none, if is finished */ if (file_index < 0) { if(no_packet_count){ no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); usleep(10000); continue; } break; } /* finish if limit size exhausted */ if (limit_filesize != 0 && limit_filesize <= avio_tell(output_files[0]->pb)) break; /* read a frame from it and output it in the fifo */ is = input_files[file_index].ctx; ret= av_read_frame(is, &pkt); if(ret == AVERROR(EAGAIN)){ no_packet[file_index]=1; no_packet_count++; continue; } if (ret < 0) { input_files[file_index].eof_reached = 1; if (opt_shortest) break; else continue; } no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); if (do_pkt_dump) { av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump, is->streams[pkt.stream_index]); } /* the following test is needed in case new streams appear dynamically in stream : we ignore them */ if (pkt.stream_index >= input_files[file_index].ctx->nb_streams) goto discard_packet; ist_index = input_files[file_index].ist_index + pkt.stream_index; ist = &input_streams[ist_index]; if (ist->discard) goto discard_packet; if (pkt.dts != AV_NOPTS_VALUE) pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (ist->ts_scale) { if(pkt.pts != AV_NOPTS_VALUE) pkt.pts *= ist->ts_scale; if(pkt.dts != AV_NOPTS_VALUE) pkt.dts *= ist->ts_scale; } // fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type); if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT)) { int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); int64_t delta= pkt_dts - ist->next_pts; if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){ input_files[ist->file_index].ts_offset -= delta; if (verbose > 2) fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files[ist->file_index].ts_offset); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); if(pkt.pts != AV_NOPTS_VALUE) pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } /* finish if recording time exhausted */ if (recording_time != INT64_MAX && (pkt.pts != AV_NOPTS_VALUE ? 
av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) : av_compare_ts(ist->pts, AV_TIME_BASE_Q, recording_time + start_time, (AVRational){1, 1000000}) )>= 0) { ist->is_past_recording_time = 1; goto discard_packet; } //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size); if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) { if (verbose >= 0) fprintf(stderr, "Error while decoding stream #%d.%d\n", ist->file_index, ist->st->index); if (exit_on_error) ffmpeg_exit(1); av_free_packet(&pkt); goto redo; } discard_packet: av_free_packet(&pkt); /* dump report by using the output first video and audio streams */ print_report(output_files, ost_table, nb_ostreams, 0); } /* at the end of stream, we must flush the decoder buffers */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { output_packet(ist, i, ost_table, nb_ostreams, NULL); } } term_exit(); /* write the trailer if needed and close file */ for(i=0;i<nb_output_files;i++) { os = output_files[i]; av_write_trailer(os); } /* dump report by using the first video and audio streams */ print_report(output_files, ost_table, nb_ostreams, 1); /* close each encoder */ for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost->encoding_needed) { av_freep(&ost->st->codec->stats_in); avcodec_close(ost->st->codec); } #if CONFIG_AVFILTER avfilter_graph_free(&ost->graph); #endif } /* close each decoder */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { avcodec_close(ist->st->codec); } } /* finished ! */ ret = 0; fail: av_freep(&bit_buffer); if (ost_table) { for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost) { if (ost->st->stream_copy) av_freep(&ost->st->codec->extradata); if (ost->logfile) { fclose(ost->logfile); ost->logfile = NULL; } av_fifo_free(ost->fifo); /* works even if fifo is not initialized but set to zero */ av_freep(&ost->st->codec->subtitle_header); av_free(ost->resample_frame.data[0]); av_free(ost->forced_kf_pts); if (ost->video_resample) sws_freeContext(ost->img_resample_ctx); if (ost->resample) audio_resample_close(ost->resample); if (ost->reformat_ctx) av_audio_convert_free(ost->reformat_ctx); av_dict_free(&ost->opts); av_free(ost); } } av_free(ost_table); } return ret; }
target: false
project: FFmpeg
commit_id: 321b2a9ded0468670b7678b7c098886930ae16b2
func_clean:
static int transcode(AVFormatContext **output_files, int nb_output_files, InputFile *input_files, int nb_input_files, StreamMap *stream_maps, int nb_stream_maps) { int ret = 0, i, j, k, n, nb_ostreams = 0, step; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; OutputStream *ost, **ost_table = NULL; InputStream *ist; char error[1024]; int key; int want_sdp = 1; uint8_t no_packet[MAX_FILES]={0}; int no_packet_count=0; int nb_frame_threshold[AVMEDIA_TYPE_NB]={0}; int nb_streams[AVMEDIA_TYPE_NB]={0}; if (rate_emu) for (i = 0; i < nb_input_streams; i++) input_streams[i].start = av_gettime(); nb_ostreams = 0; for(i=0;i<nb_output_files;i++) { os = output_files[i]; if (!os->nb_streams && !(os->oformat->flags & AVFMT_NOSTREAMS)) { av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Output file #%d does not contain any stream\n", i); ret = AVERROR(EINVAL); goto fail; } nb_ostreams += os->nb_streams; } if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) { fprintf(stderr, "Number of stream maps must match number of output streams\n"); ret = AVERROR(EINVAL); goto fail; } for(i=0;i<nb_stream_maps;i++) { int fi = stream_maps[i].file_index; int si = stream_maps[i].stream_index; if (fi < 0 || fi > nb_input_files - 1 || si < 0 || si > input_files[fi].ctx->nb_streams - 1) { fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si); ret = AVERROR(EINVAL); goto fail; } fi = stream_maps[i].sync_file_index; si = stream_maps[i].sync_stream_index; if (fi < 0 || fi > nb_input_files - 1 || si < 0 || si > input_files[fi].ctx->nb_streams - 1) { fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si); ret = AVERROR(EINVAL); goto fail; } } ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams); if (!ost_table) goto fail; for(k=0;k<nb_output_files;k++) { os = output_files[k]; for(i=0;i<os->nb_streams;i++,n++) { nb_streams[os->streams[i]->codec->codec_type]++; } } for(step=1<<30; step; step>>=1){ int found_streams[AVMEDIA_TYPE_NB]={0}; for(j=0; j<AVMEDIA_TYPE_NB; j++) nb_frame_threshold[j] += step; for(j=0; j<nb_input_streams; j++) { int skip=0; ist = &input_streams[j]; if(opt_programid){ int pi,si; AVFormatContext *f= input_files[ ist->file_index ].ctx; skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(si=0; si<p->nb_stream_indexes; si++){ if(f->streams[ p->stream_index[si] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && nb_frame_threshold[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames){ found_streams[ist->st->codec->codec_type]++; } } for(j=0; j<AVMEDIA_TYPE_NB; j++) if(found_streams[j] < nb_streams[j]) nb_frame_threshold[j] -= step; } n = 0; for(k=0;k<nb_output_files;k++) { os = output_files[k]; for(i=0;i<os->nb_streams;i++,n++) { int found; ost = ost_table[n] = output_streams_for_file[k][i]; if (nb_stream_maps > 0) { ost->source_index = input_files[stream_maps[n].file_index].ist_index + stream_maps[n].stream_index; if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) { int i= ost->file_index; av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n", stream_maps[n].file_index, stream_maps[n].stream_index, ost->file_index, ost->index); ffmpeg_exit(1); } } else { found = 0; for (j = 0; j < nb_input_streams; j++) { int skip=0; ist = &input_streams[j]; if(opt_programid){ int pi,si; AVFormatContext *f = input_files[ist->file_index].ctx; 
skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(si=0; si<p->nb_stream_indexes; si++){ if(f->streams[ p->stream_index[si] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && ist->st->codec->codec_type == ost->st->codec->codec_type && nb_frame_threshold[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames) { ost->source_index = j; found = 1; break; } } if (!found) { if(! opt_programid) { for (j = 0; j < nb_input_streams; j++) { ist = &input_streams[j]; if ( ist->st->codec->codec_type == ost->st->codec->codec_type && ist->st->discard != AVDISCARD_ALL) { ost->source_index = j; found = 1; } } } if (!found) { int i= ost->file_index; av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n", ost->file_index, ost->index); ffmpeg_exit(1); } } } ist = &input_streams[ost->source_index]; ist->discard = 0; ost->sync_ist = (nb_stream_maps > 0) ? &input_streams[input_files[stream_maps[n].sync_file_index].ist_index + stream_maps[n].sync_stream_index] : ist; } } for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; os = output_files[ost->file_index]; ist = &input_streams[ost->source_index]; codec = ost->st->codec; icodec = ist->st->codec; if (metadata_streams_autocopy) av_dict_copy(&ost->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); ost->st->disposition = ist->st->disposition; codec->bits_per_raw_sample= icodec->bits_per_raw_sample; codec->chroma_sample_location = icodec->chroma_sample_location; if (ost->st->stream_copy) { uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; if (extra_size > INT_MAX) goto fail; codec->codec_id = icodec->codec_id; codec->codec_type = icodec->codec_type; if(!codec->codec_tag){ if( !os->oformat->codec_tag || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0) codec->codec_tag = icodec->codec_tag; } codec->bit_rate = icodec->bit_rate; codec->rc_max_rate = icodec->rc_max_rate; codec->rc_buffer_size = icodec->rc_buffer_size; codec->extradata= av_mallocz(extra_size); if (!codec->extradata) goto fail; memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); codec->extradata_size= icodec->extradata_size; if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; av_reduce(&codec->time_base.num, &codec->time_base.den, codec->time_base.num, codec->time_base.den, INT_MAX); }else codec->time_base = ist->st->time_base; switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(audio_volume != 256) { fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\n"); ffmpeg_exit(1); } codec->channel_layout = icodec->channel_layout; codec->sample_rate = icodec->sample_rate; codec->channels = icodec->channels; codec->frame_size = icodec->frame_size; codec->audio_service_type = icodec->audio_service_type; codec->block_align= icodec->block_align; if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3) codec->block_align= 0; if(codec->codec_id == CODEC_ID_AC3) codec->block_align= 0; break; case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = icodec->pix_fmt; codec->width = icodec->width; codec->height = icodec->height; codec->has_b_frames = icodec->has_b_frames; if 
(!codec->sample_aspect_ratio.num) { codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio : ist->st->codec->sample_aspect_ratio.num ? ist->st->codec->sample_aspect_ratio : (AVRational){0, 1}; } break; case AVMEDIA_TYPE_SUBTITLE: codec->width = icodec->width; codec->height = icodec->height; break; case AVMEDIA_TYPE_DATA: break; default: abort(); } } else { if (!ost->enc) ost->enc = avcodec_find_encoder(ost->st->codec->codec_id); switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ost->fifo= av_fifo_alloc(1024); if(!ost->fifo) goto fail; ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE); if (!codec->sample_rate) { codec->sample_rate = icodec->sample_rate; if (icodec->lowres) codec->sample_rate >>= icodec->lowres; } choose_sample_rate(ost->st, ost->enc); codec->time_base = (AVRational){1, codec->sample_rate}; if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) codec->sample_fmt = icodec->sample_fmt; choose_sample_fmt(ost->st, ost->enc); if (!codec->channels) { codec->channels = icodec->channels; codec->channel_layout = icodec->channel_layout; } if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) codec->channel_layout = 0; ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1; icodec->request_channels = codec->channels; ist->decoding_needed = 1; ost->encoding_needed = 1; ost->resample_sample_fmt = icodec->sample_fmt; ost->resample_sample_rate = icodec->sample_rate; ost->resample_channels = icodec->channels; break; case AVMEDIA_TYPE_VIDEO: if (codec->pix_fmt == PIX_FMT_NONE) codec->pix_fmt = icodec->pix_fmt; choose_pixel_fmt(ost->st, ost->enc); if (ost->st->codec->pix_fmt == PIX_FMT_NONE) { fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n"); ffmpeg_exit(1); } ost->video_resample = codec->width != icodec->width || codec->height != icodec->height || codec->pix_fmt != icodec->pix_fmt; if (ost->video_resample) { codec->bits_per_raw_sample= frame_bits_per_raw_sample; } if (!codec->width || !codec->height) { codec->width = icodec->width; codec->height = icodec->height; } ost->resample_height = icodec->height; ost->resample_width = icodec->width; ost->resample_pix_fmt= icodec->pix_fmt; ost->encoding_needed = 1; ist->decoding_needed = 1; if (!ost->frame_rate.num) ost->frame_rate = ist->st->r_frame_rate.num ? 
ist->st->r_frame_rate : (AVRational){25,1}; if (ost->enc && ost->enc->supported_framerates && !force_fps) { int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); ost->frame_rate = ost->enc->supported_framerates[idx]; } codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num}; if( av_q2d(codec->time_base) < 0.001 && video_sync_method && (video_sync_method==1 || (video_sync_method<0 && !(os->oformat->flags & AVFMT_VARIABLE_FPS)))){ av_log(os, AV_LOG_WARNING, "Frame rate very high for a muxer not effciciently supporting it.\n" "Please consider specifiying a lower framerate, a different muxer or -vsync 2\n"); } #if CONFIG_AVFILTER if (configure_video_filters(ist, ost)) { fprintf(stderr, "Error opening filters!\n"); exit(1); } #endif break; case AVMEDIA_TYPE_SUBTITLE: ost->encoding_needed = 1; ist->decoding_needed = 1; break; default: abort(); break; } if (ost->encoding_needed && codec->codec_id != CODEC_ID_H264 && (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char logfilename[1024]; FILE *f; snprintf(logfilename, sizeof(logfilename), "%s-%d.log", pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX, i); if (codec->flags & CODEC_FLAG_PASS1) { f = fopen(logfilename, "wb"); if (!f) { fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno)); ffmpeg_exit(1); } ost->logfile = f; } else { char *logbuffer; size_t logbuffer_size; if (read_file(logfilename, &logbuffer, &logbuffer_size) < 0) { fprintf(stderr, "Error reading log file '%s' for pass-2 encoding\n", logfilename); ffmpeg_exit(1); } codec->stats_in = logbuffer; } } } if(codec->codec_type == AVMEDIA_TYPE_VIDEO){ int size= codec->width * codec->height; bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 1664); } } if (!bit_buffer) bit_buffer = av_malloc(bit_buffer_size); if (!bit_buffer) { fprintf(stderr, "Cannot allocate %d bytes output buffer\n", bit_buffer_size); ret = AVERROR(ENOMEM); goto fail; } for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost->encoding_needed) { AVCodec *codec = ost->enc; AVCodecContext *dec = input_streams[ost->source_index].st->codec; if (!codec) { snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d", ost->st->codec->codec_id, ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } if (dec->subtitle_header) { ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size); if (!ost->st->codec->subtitle_header) { ret = AVERROR(ENOMEM); goto dump_format; } memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); ost->st->codec->subtitle_header_size = dec->subtitle_header_size; } if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) { snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height", ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ost->st->codec, 1); assert_avoptions(ost->opts); if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000) av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." 
"It takes bits/s as argument, not kbits/s\n"); extra_size += ost->st->codec->extradata_size; } } for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { AVCodec *codec = ist->dec; if (!codec) codec = avcodec_find_decoder(ist->st->codec->codec_id); if (!codec) { snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d", ist->st->codec->codec_id, ist->file_index, ist->st->index); ret = AVERROR(EINVAL); goto dump_format; } if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d", ist->file_index, ist->st->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ist->st->codec, 0); assert_avoptions(ost->opts); } } for (i = 0; i < nb_input_streams; i++) { AVStream *st; ist = &input_streams[i]; st= ist->st; ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0; ist->next_pts = AV_NOPTS_VALUE; ist->is_start = 1; } for (i=0;i<nb_meta_data_maps;i++) { AVFormatContext *files[2]; AVDictionary **meta[2]; int j; #define METADATA_CHECK_INDEX(index, nb_elems, desc)\ if ((index) < 0 || (index) >= (nb_elems)) {\ snprintf(error, sizeof(error), "Invalid %s index %d while processing metadata maps\n",\ (desc), (index));\ ret = AVERROR(EINVAL);\ goto dump_format;\ } int out_file_index = meta_data_maps[i][0].file; int in_file_index = meta_data_maps[i][1].file; if (in_file_index < 0 || out_file_index < 0) continue; METADATA_CHECK_INDEX(out_file_index, nb_output_files, "output file") METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file") files[0] = output_files[out_file_index]; files[1] = input_files[in_file_index].ctx; for (j = 0; j < 2; j++) { MetadataMap *map = &meta_data_maps[i][j]; switch (map->type) { case 'g': meta[j] = &files[j]->metadata; break; case 's': METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, "stream") meta[j] = &files[j]->streams[map->index]->metadata; break; case 'c': METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, "chapter") meta[j] = &files[j]->chapters[map->index]->metadata; break; case 'p': METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, "program") meta[j] = &files[j]->programs[map->index]->metadata; break; } } av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); } if (metadata_global_autocopy) { for (i = 0; i < nb_output_files; i++) av_dict_copy(&output_files[i]->metadata, input_files[0].ctx->metadata, AV_DICT_DONT_OVERWRITE); } for (i = 0; i < nb_chapter_maps; i++) { int infile = chapter_maps[i].in_file; int outfile = chapter_maps[i].out_file; if (infile < 0 || outfile < 0) continue; if (infile >= nb_input_files) { snprintf(error, sizeof(error), "Invalid input file index %d in chapter mapping.\n", infile); ret = AVERROR(EINVAL); goto dump_format; } if (outfile >= nb_output_files) { snprintf(error, sizeof(error), "Invalid output file index %d in chapter mapping.\n",outfile); ret = AVERROR(EINVAL); goto dump_format; } copy_chapters(infile, outfile); } if (!nb_chapter_maps) for (i = 0; i < nb_input_files; i++) { if (!input_files[i].ctx->nb_chapters) continue; for (j = 0; j < nb_output_files; j++) if ((ret = copy_chapters(i, j)) < 0) goto dump_format; break; } for(i=0;i<nb_output_files;i++) { os = output_files[i]; if (avformat_write_header(os, &output_opts[i]) < 0) { snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i); ret = AVERROR(EINVAL); goto dump_format; } 
assert_avoptions(output_opts[i]); if (strcmp(output_files[i]->oformat->name, "rtp")) { want_sdp = 0; } } dump_format: for(i=0;i<nb_output_files;i++) { av_dump_format(output_files[i], i, output_files[i]->filename, 1); } if (verbose >= 0) { fprintf(stderr, "Stream mapping:\n"); for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; fprintf(stderr, " Stream #%d.%d -> #%d.%d", input_streams[ost->source_index].file_index, input_streams[ost->source_index].st->index, ost->file_index, ost->index); if (ost->sync_ist != &input_streams[ost->source_index]) fprintf(stderr, " [sync #%d.%d]", ost->sync_ist->file_index, ost->sync_ist->st->index); fprintf(stderr, "\n"); } } if (ret) { fprintf(stderr, "%s\n", error); goto fail; } if (want_sdp) { print_sdp(output_files, nb_output_files); } if (!using_stdin) { if(verbose >= 0) fprintf(stderr, "Press [q] to stop, [?] for help\n"); avio_set_interrupt_cb(decode_interrupt_cb); } term_init(); timer_start = av_gettime(); for(; received_sigterm == 0;) { int file_index, ist_index; AVPacket pkt; double ipts_min; double opts_min; redo: ipts_min= 1e100; opts_min= 1e100; if (!using_stdin) { if (q_pressed) break; key = read_key(); if (key == 'q') break; if (key == '+') verbose++; if (key == '-') verbose--; if (key == 's') qp_hist ^= 1; if (key == 'h'){ if (do_hex_dump){ do_hex_dump = do_pkt_dump = 0; } else if(do_pkt_dump){ do_hex_dump = 1; } else do_pkt_dump = 1; av_log_set_level(AV_LOG_DEBUG); } if (key == 'd' || key == 'D'){ int debug=0; if(key == 'D') { debug = input_streams[0].st->codec->debug<<1; if(!debug) debug = 1; while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) debug += debug; }else scanf("%d", &debug); for(i=0;i<nb_input_streams;i++) { input_streams[i].st->codec->debug = debug; } for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; ost->st->codec->debug = debug; } if(debug) av_log_set_level(AV_LOG_DEBUG); fprintf(stderr,"debug=%d\n", debug); } if (key == '?'){ fprintf(stderr, "key function\n" "? 
show this help\n" "+ increase verbosity\n" "- decrease verbosity\n" "D cycle through available debug modes\n" "h dump packets/hex press to cycle through the 3 states\n" "q quit\n" "s Show QP histogram\n" ); } } file_index = -1; for(i=0;i<nb_ostreams;i++) { double ipts, opts; ost = ost_table[i]; os = output_files[ost->file_index]; ist = &input_streams[ost->source_index]; if(ist->is_past_recording_time || no_packet[ist->file_index]) continue; opts = ost->st->pts.val * av_q2d(ost->st->time_base); ipts = (double)ist->pts; if (!input_files[ist->file_index].eof_reached){ if(ipts < ipts_min) { ipts_min = ipts; if(input_sync ) file_index = ist->file_index; } if(opts < opts_min) { opts_min = opts; if(!input_sync) file_index = ist->file_index; } } if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){ file_index= -1; break; } } if (file_index < 0) { if(no_packet_count){ no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); usleep(10000); continue; } break; } if (limit_filesize != 0 && limit_filesize <= avio_tell(output_files[0]->pb)) break; is = input_files[file_index].ctx; ret= av_read_frame(is, &pkt); if(ret == AVERROR(EAGAIN)){ no_packet[file_index]=1; no_packet_count++; continue; } if (ret < 0) { input_files[file_index].eof_reached = 1; if (opt_shortest) break; else continue; } no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); if (do_pkt_dump) { av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump, is->streams[pkt.stream_index]); } if (pkt.stream_index >= input_files[file_index].ctx->nb_streams) goto discard_packet; ist_index = input_files[file_index].ist_index + pkt.stream_index; ist = &input_streams[ist_index]; if (ist->discard) goto discard_packet; if (pkt.dts != AV_NOPTS_VALUE) pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (ist->ts_scale) { if(pkt.pts != AV_NOPTS_VALUE) pkt.pts *= ist->ts_scale; if(pkt.dts != AV_NOPTS_VALUE) pkt.dts *= ist->ts_scale; } if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT)) { int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); int64_t delta= pkt_dts - ist->next_pts; if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){ input_files[ist->file_index].ts_offset -= delta; if (verbose > 2) fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files[ist->file_index].ts_offset); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); if(pkt.pts != AV_NOPTS_VALUE) pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } if (recording_time != INT64_MAX && (pkt.pts != AV_NOPTS_VALUE ? 
av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) : av_compare_ts(ist->pts, AV_TIME_BASE_Q, recording_time + start_time, (AVRational){1, 1000000}) )>= 0) { ist->is_past_recording_time = 1; goto discard_packet; } if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) { if (verbose >= 0) fprintf(stderr, "Error while decoding stream #%d.%d\n", ist->file_index, ist->st->index); if (exit_on_error) ffmpeg_exit(1); av_free_packet(&pkt); goto redo; } discard_packet: av_free_packet(&pkt); print_report(output_files, ost_table, nb_ostreams, 0); } for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { output_packet(ist, i, ost_table, nb_ostreams, NULL); } } term_exit(); for(i=0;i<nb_output_files;i++) { os = output_files[i]; av_write_trailer(os); } print_report(output_files, ost_table, nb_ostreams, 1); for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost->encoding_needed) { av_freep(&ost->st->codec->stats_in); avcodec_close(ost->st->codec); } #if CONFIG_AVFILTER avfilter_graph_free(&ost->graph); #endif } for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { avcodec_close(ist->st->codec); } } ret = 0; fail: av_freep(&bit_buffer); if (ost_table) { for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost) { if (ost->st->stream_copy) av_freep(&ost->st->codec->extradata); if (ost->logfile) { fclose(ost->logfile); ost->logfile = NULL; } av_fifo_free(ost->fifo); av_freep(&ost->st->codec->subtitle_header); av_free(ost->resample_frame.data[0]); av_free(ost->forced_kf_pts); if (ost->video_resample) sws_freeContext(ost->img_resample_ctx); if (ost->resample) audio_resample_close(ost->resample); if (ost->reformat_ctx) av_audio_convert_free(ost->reformat_ctx); av_dict_free(&ost->opts); av_free(ost); } } av_free(ost_table); } return ret; }
{ "code": [], "line_no": [] }
static int FUNC_0(AVFormatContext **VAR_0, int VAR_1, InputFile *VAR_2, int VAR_3, StreamMap *VAR_4, int VAR_5) { int VAR_6 = 0, VAR_7, VAR_8, VAR_9, VAR_10, VAR_11 = 0, VAR_12; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; OutputStream *ost, **ost_table = NULL; InputStream *ist; char VAR_13[1024]; int VAR_14; int VAR_15 = 1; uint8_t no_packet[MAX_FILES]={0}; int VAR_16=0; int VAR_17[AVMEDIA_TYPE_NB]={0}; int VAR_18[AVMEDIA_TYPE_NB]={0}; if (rate_emu) for (VAR_7 = 0; VAR_7 < nb_input_streams; VAR_7++) input_streams[VAR_7].start = av_gettime(); VAR_11 = 0; for(VAR_7=0;VAR_7<VAR_1;VAR_7++) { os = VAR_0[VAR_7]; if (!os->VAR_18 && !(os->oformat->flags & AVFMT_NOSTREAMS)) { av_dump_format(VAR_0[VAR_7], VAR_7, VAR_0[VAR_7]->filename, 1); fprintf(stderr, "Output file #%d does not contain any stream\VAR_10", VAR_7); VAR_6 = AVERROR(EINVAL); goto fail; } VAR_11 += os->VAR_18; } if (VAR_5 > 0 && VAR_5 != VAR_11) { fprintf(stderr, "Number of stream maps must match number of output streams\VAR_10"); VAR_6 = AVERROR(EINVAL); goto fail; } for(VAR_7=0;VAR_7<VAR_5;VAR_7++) { int VAR_19 = VAR_4[VAR_7].file_index; int VAR_20 = VAR_4[VAR_7].stream_index; if (VAR_19 < 0 || VAR_19 > VAR_3 - 1 || VAR_20 < 0 || VAR_20 > VAR_2[VAR_19].ctx->VAR_18 - 1) { fprintf(stderr,"Could not find input stream #%d.%d\VAR_10", VAR_19, VAR_20); VAR_6 = AVERROR(EINVAL); goto fail; } VAR_19 = VAR_4[VAR_7].sync_file_index; VAR_20 = VAR_4[VAR_7].sync_stream_index; if (VAR_19 < 0 || VAR_19 > VAR_3 - 1 || VAR_20 < 0 || VAR_20 > VAR_2[VAR_19].ctx->VAR_18 - 1) { fprintf(stderr,"Could not find sync stream #%d.%d\VAR_10", VAR_19, VAR_20); VAR_6 = AVERROR(EINVAL); goto fail; } } ost_table = av_mallocz(sizeof(OutputStream *) * VAR_11); if (!ost_table) goto fail; for(VAR_9=0;VAR_9<VAR_1;VAR_9++) { os = VAR_0[VAR_9]; for(VAR_7=0;VAR_7<os->VAR_18;VAR_7++,VAR_10++) { VAR_18[os->streams[VAR_7]->codec->codec_type]++; } } for(VAR_12=1<<30; VAR_12; VAR_12>>=1){ int VAR_21[AVMEDIA_TYPE_NB]={0}; for(VAR_8=0; VAR_8<AVMEDIA_TYPE_NB; VAR_8++) VAR_17[VAR_8] += VAR_12; for(VAR_8=0; VAR_8<nb_input_streams; VAR_8++) { int skip=0; ist = &input_streams[VAR_8]; if(opt_programid){ int pi,VAR_20; AVFormatContext *f= VAR_2[ ist->file_index ].ctx; skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(VAR_20=0; VAR_20<p->nb_stream_indexes; VAR_20++){ if(f->streams[ p->stream_index[VAR_20] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && VAR_17[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames){ VAR_21[ist->st->codec->codec_type]++; } } for(VAR_8=0; VAR_8<AVMEDIA_TYPE_NB; VAR_8++) if(VAR_21[VAR_8] < VAR_18[VAR_8]) VAR_17[VAR_8] -= VAR_12; } VAR_10 = 0; for(VAR_9=0;VAR_9<VAR_1;VAR_9++) { os = VAR_0[VAR_9]; for(VAR_7=0;VAR_7<os->VAR_18;VAR_7++,VAR_10++) { int found; ost = ost_table[VAR_10] = output_streams_for_file[VAR_9][VAR_7]; if (VAR_5 > 0) { ost->source_index = VAR_2[VAR_4[VAR_10].file_index].ist_index + VAR_4[VAR_10].stream_index; if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) { int VAR_7= ost->file_index; av_dump_format(VAR_0[VAR_7], VAR_7, VAR_0[VAR_7]->filename, 1); fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\VAR_10", VAR_4[VAR_10].file_index, VAR_4[VAR_10].stream_index, ost->file_index, ost->index); ffmpeg_exit(1); } } else { found = 0; for (VAR_8 = 0; VAR_8 < nb_input_streams; VAR_8++) { int skip=0; ist = &input_streams[VAR_8]; if(opt_programid){ int pi,VAR_20; 
AVFormatContext *f = VAR_2[ist->file_index].ctx; skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(VAR_20=0; VAR_20<p->nb_stream_indexes; VAR_20++){ if(f->streams[ p->stream_index[VAR_20] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && ist->st->codec->codec_type == ost->st->codec->codec_type && VAR_17[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames) { ost->source_index = VAR_8; found = 1; break; } } if (!found) { if(! opt_programid) { for (VAR_8 = 0; VAR_8 < nb_input_streams; VAR_8++) { ist = &input_streams[VAR_8]; if ( ist->st->codec->codec_type == ost->st->codec->codec_type && ist->st->discard != AVDISCARD_ALL) { ost->source_index = VAR_8; found = 1; } } } if (!found) { int VAR_7= ost->file_index; av_dump_format(VAR_0[VAR_7], VAR_7, VAR_0[VAR_7]->filename, 1); fprintf(stderr, "Could not find input stream matching output stream #%d.%d\VAR_10", ost->file_index, ost->index); ffmpeg_exit(1); } } } ist = &input_streams[ost->source_index]; ist->discard = 0; ost->sync_ist = (VAR_5 > 0) ? &input_streams[VAR_2[VAR_4[VAR_10].sync_file_index].ist_index + VAR_4[VAR_10].sync_stream_index] : ist; } } for(VAR_7=0;VAR_7<VAR_11;VAR_7++) { ost = ost_table[VAR_7]; os = VAR_0[ost->file_index]; ist = &input_streams[ost->source_index]; codec = ost->st->codec; icodec = ist->st->codec; if (metadata_streams_autocopy) av_dict_copy(&ost->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); ost->st->disposition = ist->st->disposition; codec->bits_per_raw_sample= icodec->bits_per_raw_sample; codec->chroma_sample_location = icodec->chroma_sample_location; if (ost->st->stream_copy) { uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; if (extra_size > INT_MAX) goto fail; codec->codec_id = icodec->codec_id; codec->codec_type = icodec->codec_type; if(!codec->codec_tag){ if( !os->oformat->codec_tag || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0) codec->codec_tag = icodec->codec_tag; } codec->bit_rate = icodec->bit_rate; codec->rc_max_rate = icodec->rc_max_rate; codec->rc_buffer_size = icodec->rc_buffer_size; codec->extradata= av_mallocz(extra_size); if (!codec->extradata) goto fail; memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); codec->extradata_size= icodec->extradata_size; if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; av_reduce(&codec->time_base.num, &codec->time_base.den, codec->time_base.num, codec->time_base.den, INT_MAX); }else codec->time_base = ist->st->time_base; switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(audio_volume != 256) { fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\VAR_10"); ffmpeg_exit(1); } codec->channel_layout = icodec->channel_layout; codec->sample_rate = icodec->sample_rate; codec->channels = icodec->channels; codec->frame_size = icodec->frame_size; codec->audio_service_type = icodec->audio_service_type; codec->block_align= icodec->block_align; if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3) codec->block_align= 0; if(codec->codec_id == CODEC_ID_AC3) codec->block_align= 0; break; case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = icodec->pix_fmt; codec->width = icodec->width; codec->height = 
icodec->height; codec->has_b_frames = icodec->has_b_frames; if (!codec->sample_aspect_ratio.num) { codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio : ist->st->codec->sample_aspect_ratio.num ? ist->st->codec->sample_aspect_ratio : (AVRational){0, 1}; } break; case AVMEDIA_TYPE_SUBTITLE: codec->width = icodec->width; codec->height = icodec->height; break; case AVMEDIA_TYPE_DATA: break; default: abort(); } } else { if (!ost->enc) ost->enc = avcodec_find_encoder(ost->st->codec->codec_id); switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ost->fifo= av_fifo_alloc(1024); if(!ost->fifo) goto fail; ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE); if (!codec->sample_rate) { codec->sample_rate = icodec->sample_rate; if (icodec->lowres) codec->sample_rate >>= icodec->lowres; } choose_sample_rate(ost->st, ost->enc); codec->time_base = (AVRational){1, codec->sample_rate}; if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) codec->sample_fmt = icodec->sample_fmt; choose_sample_fmt(ost->st, ost->enc); if (!codec->channels) { codec->channels = icodec->channels; codec->channel_layout = icodec->channel_layout; } if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) codec->channel_layout = 0; ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1; icodec->request_channels = codec->channels; ist->decoding_needed = 1; ost->encoding_needed = 1; ost->resample_sample_fmt = icodec->sample_fmt; ost->resample_sample_rate = icodec->sample_rate; ost->resample_channels = icodec->channels; break; case AVMEDIA_TYPE_VIDEO: if (codec->pix_fmt == PIX_FMT_NONE) codec->pix_fmt = icodec->pix_fmt; choose_pixel_fmt(ost->st, ost->enc); if (ost->st->codec->pix_fmt == PIX_FMT_NONE) { fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\VAR_10"); ffmpeg_exit(1); } ost->video_resample = codec->width != icodec->width || codec->height != icodec->height || codec->pix_fmt != icodec->pix_fmt; if (ost->video_resample) { codec->bits_per_raw_sample= frame_bits_per_raw_sample; } if (!codec->width || !codec->height) { codec->width = icodec->width; codec->height = icodec->height; } ost->resample_height = icodec->height; ost->resample_width = icodec->width; ost->resample_pix_fmt= icodec->pix_fmt; ost->encoding_needed = 1; ist->decoding_needed = 1; if (!ost->frame_rate.num) ost->frame_rate = ist->st->r_frame_rate.num ? 
ist->st->r_frame_rate : (AVRational){25,1}; if (ost->enc && ost->enc->supported_framerates && !force_fps) { int VAR_22 = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); ost->frame_rate = ost->enc->supported_framerates[VAR_22]; } codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num}; if( av_q2d(codec->time_base) < 0.001 && video_sync_method && (video_sync_method==1 || (video_sync_method<0 && !(os->oformat->flags & AVFMT_VARIABLE_FPS)))){ av_log(os, AV_LOG_WARNING, "Frame rate very high for a muxer not effciciently supporting it.\VAR_10" "Please consider specifiying a lower framerate, a different muxer or -vsync 2\VAR_10"); } #if CONFIG_AVFILTER if (configure_video_filters(ist, ost)) { fprintf(stderr, "Error opening filters!\VAR_10"); exit(1); } #endif break; case AVMEDIA_TYPE_SUBTITLE: ost->encoding_needed = 1; ist->decoding_needed = 1; break; default: abort(); break; } if (ost->encoding_needed && codec->codec_id != CODEC_ID_H264 && (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char VAR_23[1024]; FILE *f; snprintf(VAR_23, sizeof(VAR_23), "%s-%d.log", pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX, VAR_7); if (codec->flags & CODEC_FLAG_PASS1) { f = fopen(VAR_23, "wb"); if (!f) { fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\VAR_10", VAR_23, strerror(errno)); ffmpeg_exit(1); } ost->logfile = f; } else { char *VAR_24; size_t logbuffer_size; if (read_file(VAR_23, &VAR_24, &logbuffer_size) < 0) { fprintf(stderr, "Error reading log file '%s' for pass-2 encoding\VAR_10", VAR_23); ffmpeg_exit(1); } codec->stats_in = VAR_24; } } } if(codec->codec_type == AVMEDIA_TYPE_VIDEO){ int VAR_25= codec->width * codec->height; bit_buffer_size= FFMAX(bit_buffer_size, 6*VAR_25 + 1664); } } if (!bit_buffer) bit_buffer = av_malloc(bit_buffer_size); if (!bit_buffer) { fprintf(stderr, "Cannot allocate %d bytes output buffer\VAR_10", bit_buffer_size); VAR_6 = AVERROR(ENOMEM); goto fail; } for(VAR_7=0;VAR_7<VAR_11;VAR_7++) { ost = ost_table[VAR_7]; if (ost->encoding_needed) { AVCodec *codec = ost->enc; AVCodecContext *dec = input_streams[ost->source_index].st->codec; if (!codec) { snprintf(VAR_13, sizeof(VAR_13), "Encoder (codec id %d) not found for output stream #%d.%d", ost->st->codec->codec_id, ost->file_index, ost->index); VAR_6 = AVERROR(EINVAL); goto dump_format; } if (dec->subtitle_header) { ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size); if (!ost->st->codec->subtitle_header) { VAR_6 = AVERROR(ENOMEM); goto dump_format; } memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); ost->st->codec->subtitle_header_size = dec->subtitle_header_size; } if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) { snprintf(VAR_13, sizeof(VAR_13), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height", ost->file_index, ost->index); VAR_6 = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ost->st->codec, 1); assert_avoptions(ost->opts); if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000) av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." 
"It takes bits/s as argument, not kbits/s\VAR_10"); extra_size += ost->st->codec->extradata_size; } } for (VAR_7 = 0; VAR_7 < nb_input_streams; VAR_7++) { ist = &input_streams[VAR_7]; if (ist->decoding_needed) { AVCodec *codec = ist->dec; if (!codec) codec = avcodec_find_decoder(ist->st->codec->codec_id); if (!codec) { snprintf(VAR_13, sizeof(VAR_13), "Decoder (codec id %d) not found for input stream #%d.%d", ist->st->codec->codec_id, ist->file_index, ist->st->index); VAR_6 = AVERROR(EINVAL); goto dump_format; } if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { snprintf(VAR_13, sizeof(VAR_13), "Error while opening decoder for input stream #%d.%d", ist->file_index, ist->st->index); VAR_6 = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ist->st->codec, 0); assert_avoptions(ost->opts); } } for (VAR_7 = 0; VAR_7 < nb_input_streams; VAR_7++) { AVStream *st; ist = &input_streams[VAR_7]; st= ist->st; ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0; ist->next_pts = AV_NOPTS_VALUE; ist->is_start = 1; } for (VAR_7=0;VAR_7<nb_meta_data_maps;VAR_7++) { AVFormatContext *files[2]; AVDictionary **meta[2]; int VAR_8; #define METADATA_CHECK_INDEX(index, nb_elems, desc)\ if ((index) < 0 || (index) >= (nb_elems)) {\ snprintf(VAR_13, sizeof(VAR_13), "Invalid %s index %d while processing metadata maps\VAR_10",\ (desc), (index));\ VAR_6 = AVERROR(EINVAL);\ goto dump_format;\ } int out_file_index = meta_data_maps[VAR_7][0].file; int in_file_index = meta_data_maps[VAR_7][1].file; if (in_file_index < 0 || out_file_index < 0) continue; METADATA_CHECK_INDEX(out_file_index, VAR_1, "output file") METADATA_CHECK_INDEX(in_file_index, VAR_3, "input file") files[0] = VAR_0[out_file_index]; files[1] = VAR_2[in_file_index].ctx; for (VAR_8 = 0; VAR_8 < 2; VAR_8++) { MetadataMap *map = &meta_data_maps[VAR_7][VAR_8]; switch (map->type) { case 'g': meta[VAR_8] = &files[VAR_8]->metadata; break; case 's': METADATA_CHECK_INDEX(map->index, files[VAR_8]->VAR_18, "stream") meta[VAR_8] = &files[VAR_8]->streams[map->index]->metadata; break; case 'c': METADATA_CHECK_INDEX(map->index, files[VAR_8]->nb_chapters, "chapter") meta[VAR_8] = &files[VAR_8]->chapters[map->index]->metadata; break; case 'p': METADATA_CHECK_INDEX(map->index, files[VAR_8]->nb_programs, "program") meta[VAR_8] = &files[VAR_8]->programs[map->index]->metadata; break; } } av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); } if (metadata_global_autocopy) { for (VAR_7 = 0; VAR_7 < VAR_1; VAR_7++) av_dict_copy(&VAR_0[VAR_7]->metadata, VAR_2[0].ctx->metadata, AV_DICT_DONT_OVERWRITE); } for (VAR_7 = 0; VAR_7 < nb_chapter_maps; VAR_7++) { int infile = chapter_maps[VAR_7].in_file; int outfile = chapter_maps[VAR_7].out_file; if (infile < 0 || outfile < 0) continue; if (infile >= VAR_3) { snprintf(VAR_13, sizeof(VAR_13), "Invalid input file index %d in chapter mapping.\VAR_10", infile); VAR_6 = AVERROR(EINVAL); goto dump_format; } if (outfile >= VAR_1) { snprintf(VAR_13, sizeof(VAR_13), "Invalid output file index %d in chapter mapping.\VAR_10",outfile); VAR_6 = AVERROR(EINVAL); goto dump_format; } copy_chapters(infile, outfile); } if (!nb_chapter_maps) for (VAR_7 = 0; VAR_7 < VAR_3; VAR_7++) { if (!VAR_2[VAR_7].ctx->nb_chapters) continue; for (VAR_8 = 0; VAR_8 < VAR_1; VAR_8++) if ((VAR_6 = copy_chapters(VAR_7, VAR_8)) < 0) goto dump_format; break; } for(VAR_7=0;VAR_7<VAR_1;VAR_7++) { os = VAR_0[VAR_7]; if (avformat_write_header(os, &output_opts[VAR_7]) < 0) { snprintf(VAR_13, 
sizeof(VAR_13), "Could not write header for output file #%d (incorrect codec parameters ?)", VAR_7); VAR_6 = AVERROR(EINVAL); goto dump_format; } assert_avoptions(output_opts[VAR_7]); if (strcmp(VAR_0[VAR_7]->oformat->name, "rtp")) { VAR_15 = 0; } } dump_format: for(VAR_7=0;VAR_7<VAR_1;VAR_7++) { av_dump_format(VAR_0[VAR_7], VAR_7, VAR_0[VAR_7]->filename, 1); } if (verbose >= 0) { fprintf(stderr, "Stream mapping:\VAR_10"); for(VAR_7=0;VAR_7<VAR_11;VAR_7++) { ost = ost_table[VAR_7]; fprintf(stderr, " Stream #%d.%d -> #%d.%d", input_streams[ost->source_index].file_index, input_streams[ost->source_index].st->index, ost->file_index, ost->index); if (ost->sync_ist != &input_streams[ost->source_index]) fprintf(stderr, " [sync #%d.%d]", ost->sync_ist->file_index, ost->sync_ist->st->index); fprintf(stderr, "\VAR_10"); } } if (VAR_6) { fprintf(stderr, "%s\VAR_10", VAR_13); goto fail; } if (VAR_15) { print_sdp(VAR_0, VAR_1); } if (!using_stdin) { if(verbose >= 0) fprintf(stderr, "Press [q] to stop, [?] for help\VAR_10"); avio_set_interrupt_cb(decode_interrupt_cb); } term_init(); timer_start = av_gettime(); for(; received_sigterm == 0;) { int file_index, ist_index; AVPacket pkt; double ipts_min; double opts_min; redo: ipts_min= 1e100; opts_min= 1e100; if (!using_stdin) { if (q_pressed) break; VAR_14 = read_key(); if (VAR_14 == 'q') break; if (VAR_14 == '+') verbose++; if (VAR_14 == '-') verbose--; if (VAR_14 == 's') qp_hist ^= 1; if (VAR_14 == 'h'){ if (do_hex_dump){ do_hex_dump = do_pkt_dump = 0; } else if(do_pkt_dump){ do_hex_dump = 1; } else do_pkt_dump = 1; av_log_set_level(AV_LOG_DEBUG); } if (VAR_14 == 'd' || VAR_14 == 'D'){ int debug=0; if(VAR_14 == 'D') { debug = input_streams[0].st->codec->debug<<1; if(!debug) debug = 1; while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) debug += debug; }else scanf("%d", &debug); for(VAR_7=0;VAR_7<nb_input_streams;VAR_7++) { input_streams[VAR_7].st->codec->debug = debug; } for(VAR_7=0;VAR_7<VAR_11;VAR_7++) { ost = ost_table[VAR_7]; ost->st->codec->debug = debug; } if(debug) av_log_set_level(AV_LOG_DEBUG); fprintf(stderr,"debug=%d\VAR_10", debug); } if (VAR_14 == '?'){ fprintf(stderr, "VAR_14 function\VAR_10" "? 
show this help\VAR_10" "+ increase verbosity\VAR_10" "- decrease verbosity\VAR_10" "D cycle through available debug modes\VAR_10" "h dump packets/hex press to cycle through the 3 states\VAR_10" "q quit\VAR_10" "s Show QP histogram\VAR_10" ); } } file_index = -1; for(VAR_7=0;VAR_7<VAR_11;VAR_7++) { double ipts, opts; ost = ost_table[VAR_7]; os = VAR_0[ost->file_index]; ist = &input_streams[ost->source_index]; if(ist->is_past_recording_time || no_packet[ist->file_index]) continue; opts = ost->st->pts.val * av_q2d(ost->st->time_base); ipts = (double)ist->pts; if (!VAR_2[ist->file_index].eof_reached){ if(ipts < ipts_min) { ipts_min = ipts; if(input_sync ) file_index = ist->file_index; } if(opts < opts_min) { opts_min = opts; if(!input_sync) file_index = ist->file_index; } } if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){ file_index= -1; break; } } if (file_index < 0) { if(VAR_16){ VAR_16=0; memset(no_packet, 0, sizeof(no_packet)); usleep(10000); continue; } break; } if (limit_filesize != 0 && limit_filesize <= avio_tell(VAR_0[0]->pb)) break; is = VAR_2[file_index].ctx; VAR_6= av_read_frame(is, &pkt); if(VAR_6 == AVERROR(EAGAIN)){ no_packet[file_index]=1; VAR_16++; continue; } if (VAR_6 < 0) { VAR_2[file_index].eof_reached = 1; if (opt_shortest) break; else continue; } VAR_16=0; memset(no_packet, 0, sizeof(no_packet)); if (do_pkt_dump) { av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump, is->streams[pkt.stream_index]); } if (pkt.stream_index >= VAR_2[file_index].ctx->VAR_18) goto discard_packet; ist_index = VAR_2[file_index].ist_index + pkt.stream_index; ist = &input_streams[ist_index]; if (ist->discard) goto discard_packet; if (pkt.dts != AV_NOPTS_VALUE) pkt.dts += av_rescale_q(VAR_2[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts += av_rescale_q(VAR_2[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (ist->ts_scale) { if(pkt.pts != AV_NOPTS_VALUE) pkt.pts *= ist->ts_scale; if(pkt.dts != AV_NOPTS_VALUE) pkt.dts *= ist->ts_scale; } if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT)) { int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); int64_t delta= pkt_dts - ist->next_pts; if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){ VAR_2[ist->file_index].ts_offset -= delta; if (verbose > 2) fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\VAR_10", delta, VAR_2[ist->file_index].ts_offset); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); if(pkt.pts != AV_NOPTS_VALUE) pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } if (recording_time != INT64_MAX && (pkt.pts != AV_NOPTS_VALUE ? 
av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) : av_compare_ts(ist->pts, AV_TIME_BASE_Q, recording_time + start_time, (AVRational){1, 1000000}) )>= 0) { ist->is_past_recording_time = 1; goto discard_packet; } if (output_packet(ist, ist_index, ost_table, VAR_11, &pkt) < 0) { if (verbose >= 0) fprintf(stderr, "Error while decoding stream #%d.%d\VAR_10", ist->file_index, ist->st->index); if (exit_on_error) ffmpeg_exit(1); av_free_packet(&pkt); goto redo; } discard_packet: av_free_packet(&pkt); print_report(VAR_0, ost_table, VAR_11, 0); } for (VAR_7 = 0; VAR_7 < nb_input_streams; VAR_7++) { ist = &input_streams[VAR_7]; if (ist->decoding_needed) { output_packet(ist, VAR_7, ost_table, VAR_11, NULL); } } term_exit(); for(VAR_7=0;VAR_7<VAR_1;VAR_7++) { os = VAR_0[VAR_7]; av_write_trailer(os); } print_report(VAR_0, ost_table, VAR_11, 1); for(VAR_7=0;VAR_7<VAR_11;VAR_7++) { ost = ost_table[VAR_7]; if (ost->encoding_needed) { av_freep(&ost->st->codec->stats_in); avcodec_close(ost->st->codec); } #if CONFIG_AVFILTER avfilter_graph_free(&ost->graph); #endif } for (VAR_7 = 0; VAR_7 < nb_input_streams; VAR_7++) { ist = &input_streams[VAR_7]; if (ist->decoding_needed) { avcodec_close(ist->st->codec); } } VAR_6 = 0; fail: av_freep(&bit_buffer); if (ost_table) { for(VAR_7=0;VAR_7<VAR_11;VAR_7++) { ost = ost_table[VAR_7]; if (ost) { if (ost->st->stream_copy) av_freep(&ost->st->codec->extradata); if (ost->logfile) { fclose(ost->logfile); ost->logfile = NULL; } av_fifo_free(ost->fifo); av_freep(&ost->st->codec->subtitle_header); av_free(ost->resample_frame.data[0]); av_free(ost->forced_kf_pts); if (ost->video_resample) sws_freeContext(ost->img_resample_ctx); if (ost->resample) audio_resample_close(ost->resample); if (ost->reformat_ctx) av_audio_convert_free(ost->reformat_ctx); av_dict_free(&ost->opts); av_free(ost); } } av_free(ost_table); } return VAR_6; }
2
static void v4l2_free_buffer(void *opaque, uint8_t *unused) { V4L2Buffer* avbuf = opaque; V4L2m2mContext *s = buf_to_m2mctx(avbuf); if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) { atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel); if (s->reinit) { if (!atomic_load(&s->refcount)) sem_post(&s->refsync); } else if (avbuf->context->streamon) ff_v4l2_buffer_enqueue(avbuf); av_buffer_unref(&avbuf->context_ref); } }
false
FFmpeg
5d5de3eba4c7890c2e8077f5b4ae569671d11cf8
static void v4l2_free_buffer(void *opaque, uint8_t *unused) { V4L2Buffer* avbuf = opaque; V4L2m2mContext *s = buf_to_m2mctx(avbuf); if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) { atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel); if (s->reinit) { if (!atomic_load(&s->refcount)) sem_post(&s->refsync); } else if (avbuf->context->streamon) ff_v4l2_buffer_enqueue(avbuf); av_buffer_unref(&avbuf->context_ref); } }
{ "code": [], "line_no": [] }
static void FUNC_0(void *VAR_0, uint8_t *VAR_1) { V4L2Buffer* avbuf = VAR_0; V4L2m2mContext *s = buf_to_m2mctx(avbuf); if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) { atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel); if (s->reinit) { if (!atomic_load(&s->refcount)) sem_post(&s->refsync); } else if (avbuf->context->streamon) ff_v4l2_buffer_enqueue(avbuf); av_buffer_unref(&avbuf->context_ref); } }
4
int av_opencl_buffer_write(cl_mem dst_cl_buf, uint8_t *src_buf, size_t buf_size) { cl_int status; void *mapped = clEnqueueMapBuffer(gpu_env.command_queue, dst_cl_buf, CL_TRUE,CL_MAP_WRITE, 0, sizeof(uint8_t) * buf_size, 0, NULL, NULL, &status); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, "Could not map OpenCL buffer: %s\n", opencl_errstr(status)); return AVERROR_EXTERNAL; } memcpy(mapped, src_buf, buf_size); status = clEnqueueUnmapMemObject(gpu_env.command_queue, dst_cl_buf, mapped, 0, NULL, NULL); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, "Could not unmap OpenCL buffer: %s\n", opencl_errstr(status)); return AVERROR_EXTERNAL; } return 0; }
false
FFmpeg
57d77b3963ce1023eaf5ada8cba58b9379405cc8
int av_opencl_buffer_write(cl_mem dst_cl_buf, uint8_t *src_buf, size_t buf_size) { cl_int status; void *mapped = clEnqueueMapBuffer(gpu_env.command_queue, dst_cl_buf, CL_TRUE,CL_MAP_WRITE, 0, sizeof(uint8_t) * buf_size, 0, NULL, NULL, &status); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, "Could not map OpenCL buffer: %s\n", opencl_errstr(status)); return AVERROR_EXTERNAL; } memcpy(mapped, src_buf, buf_size); status = clEnqueueUnmapMemObject(gpu_env.command_queue, dst_cl_buf, mapped, 0, NULL, NULL); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, "Could not unmap OpenCL buffer: %s\n", opencl_errstr(status)); return AVERROR_EXTERNAL; } return 0; }
{ "code": [], "line_no": [] }
int FUNC_0(cl_mem VAR_0, uint8_t *VAR_1, size_t VAR_2) { cl_int status; void *VAR_3 = clEnqueueMapBuffer(gpu_env.command_queue, VAR_0, CL_TRUE,CL_MAP_WRITE, 0, sizeof(uint8_t) * VAR_2, 0, NULL, NULL, &status); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, "Could not map OpenCL buffer: %s\n", opencl_errstr(status)); return AVERROR_EXTERNAL; } memcpy(VAR_3, VAR_1, VAR_2); status = clEnqueueUnmapMemObject(gpu_env.command_queue, VAR_0, VAR_3, 0, NULL, NULL); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, "Could not unmap OpenCL buffer: %s\n", opencl_errstr(status)); return AVERROR_EXTERNAL; } return 0; }
5
static int r3d_read_rdvo(AVFormatContext *s, Atom *atom) { R3DContext *r3d = s->priv_data; AVStream *st = s->streams[0]; int i; r3d->video_offsets_count = (atom->size - 8) / 4; r3d->video_offsets = av_malloc(atom->size); if (!r3d->video_offsets) return AVERROR(ENOMEM); for (i = 0; i < r3d->video_offsets_count; i++) { r3d->video_offsets[i] = avio_rb32(s->pb); if (!r3d->video_offsets[i]) { r3d->video_offsets_count = i; break; } av_dlog(s, "video offset %d: %#x\n", i, r3d->video_offsets[i]); } if (st->r_frame_rate.num) st->duration = av_rescale_q(r3d->video_offsets_count, (AVRational){st->r_frame_rate.den, st->r_frame_rate.num}, st->time_base); av_dlog(s, "duration %"PRId64"\n", st->duration); return 0; }
true
FFmpeg
aba232cfa9b193604ed98f3fa505378d006b1b3b
static int r3d_read_rdvo(AVFormatContext *s, Atom *atom) { R3DContext *r3d = s->priv_data; AVStream *st = s->streams[0]; int i; r3d->video_offsets_count = (atom->size - 8) / 4; r3d->video_offsets = av_malloc(atom->size); if (!r3d->video_offsets) return AVERROR(ENOMEM); for (i = 0; i < r3d->video_offsets_count; i++) { r3d->video_offsets[i] = avio_rb32(s->pb); if (!r3d->video_offsets[i]) { r3d->video_offsets_count = i; break; } av_dlog(s, "video offset %d: %#x\n", i, r3d->video_offsets[i]); } if (st->r_frame_rate.num) st->duration = av_rescale_q(r3d->video_offsets_count, (AVRational){st->r_frame_rate.den, st->r_frame_rate.num}, st->time_base); av_dlog(s, "duration %"PRId64"\n", st->duration); return 0; }
{ "code": [ " if (st->r_frame_rate.num)", " (AVRational){st->r_frame_rate.den,", " st->r_frame_rate.num},", " if (st->r_frame_rate.num)" ], "line_no": [ 41, 45, 47, 41 ] }
static int FUNC_0(AVFormatContext *VAR_0, Atom *VAR_1) { R3DContext *r3d = VAR_0->priv_data; AVStream *st = VAR_0->streams[0]; int VAR_2; r3d->video_offsets_count = (VAR_1->size - 8) / 4; r3d->video_offsets = av_malloc(VAR_1->size); if (!r3d->video_offsets) return AVERROR(ENOMEM); for (VAR_2 = 0; VAR_2 < r3d->video_offsets_count; VAR_2++) { r3d->video_offsets[VAR_2] = avio_rb32(VAR_0->pb); if (!r3d->video_offsets[VAR_2]) { r3d->video_offsets_count = VAR_2; break; } av_dlog(VAR_0, "video offset %d: %#x\n", VAR_2, r3d->video_offsets[VAR_2]); } if (st->r_frame_rate.num) st->duration = av_rescale_q(r3d->video_offsets_count, (AVRational){st->r_frame_rate.den, st->r_frame_rate.num}, st->time_base); av_dlog(VAR_0, "duration %"PRId64"\n", st->duration); return 0; }
6
static int dds_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { DDSContext *ctx = avctx->priv_data; GetByteContext *gbc = &ctx->gbc; AVFrame *frame = data; int mipmap; int ret; ff_texturedsp_init(&ctx->texdsp); bytestream2_init(gbc, avpkt->data, avpkt->size); if (bytestream2_get_bytes_left(gbc) < 128) { av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", bytestream2_get_bytes_left(gbc)); return AVERROR_INVALIDDATA; } if (bytestream2_get_le32(gbc) != MKTAG('D', 'D', 'S', ' ') || bytestream2_get_le32(gbc) != 124) { // header size av_log(avctx, AV_LOG_ERROR, "Invalid DDS header.\n"); return AVERROR_INVALIDDATA; } bytestream2_skip(gbc, 4); // flags avctx->height = bytestream2_get_le32(gbc); avctx->width = bytestream2_get_le32(gbc); ret = av_image_check_size(avctx->width, avctx->height, 0, avctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n", avctx->width, avctx->height); return ret; } /* Since codec is based on 4x4 blocks, size is aligned to 4. */ avctx->coded_width = FFALIGN(avctx->width, TEXTURE_BLOCK_W); avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H); bytestream2_skip(gbc, 4); // pitch bytestream2_skip(gbc, 4); // depth mipmap = bytestream2_get_le32(gbc); if (mipmap != 0) av_log(avctx, AV_LOG_VERBOSE, "Found %d mipmaps (ignored).\n", mipmap); /* Extract pixel format information, considering additional elements * in reserved1 and reserved2. */ ret = parse_pixel_format(avctx); if (ret < 0) return ret; ret = ff_get_buffer(avctx, frame, 0); if (ret < 0) return ret; if (ctx->compressed) { int size = (avctx->coded_height / TEXTURE_BLOCK_H) * (avctx->coded_width / TEXTURE_BLOCK_W) * ctx->tex_ratio; ctx->slice_count = av_clip(avctx->thread_count, 1, avctx->coded_height / TEXTURE_BLOCK_H); if (bytestream2_get_bytes_left(gbc) < size) { av_log(avctx, AV_LOG_ERROR, "Compressed Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), size); return AVERROR_INVALIDDATA; } /* Use the decompress function on the texture, one block per thread. */ ctx->tex_data = gbc->buffer; avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count); } else if (!ctx->paletted && ctx->bpp == 4 && avctx->pix_fmt == AV_PIX_FMT_PAL8) { uint8_t *dst = frame->data[0]; int x, y, i; /* Use the first 64 bytes as palette, then copy the rest. */ bytestream2_get_buffer(gbc, frame->data[1], 16 * 4); for (i = 0; i < 16; i++) { AV_WN32(frame->data[1] + i*4, (frame->data[1][2+i*4]<<0)+ (frame->data[1][1+i*4]<<8)+ (frame->data[1][0+i*4]<<16)+ (frame->data[1][3+i*4]<<24) ); } frame->palette_has_changed = 1; if (bytestream2_get_bytes_left(gbc) < frame->height * frame->width / 2) { av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), frame->height * frame->width / 2); return AVERROR_INVALIDDATA; } for (y = 0; y < frame->height; y++) { for (x = 0; x < frame->width; x += 2) { uint8_t val = bytestream2_get_byte(gbc); dst[x ] = val & 0xF; dst[x + 1] = val >> 4; } dst += frame->linesize[0]; } } else { int linesize = av_image_get_linesize(avctx->pix_fmt, frame->width, 0); if (ctx->paletted) { int i; /* Use the first 1024 bytes as palette, then copy the rest. 
*/ bytestream2_get_buffer(gbc, frame->data[1], 256 * 4); for (i = 0; i < 256; i++) AV_WN32(frame->data[1] + i*4, (frame->data[1][2+i*4]<<0)+ (frame->data[1][1+i*4]<<8)+ (frame->data[1][0+i*4]<<16)+ (frame->data[1][3+i*4]<<24) ); frame->palette_has_changed = 1; } if (bytestream2_get_bytes_left(gbc) < frame->height * linesize) { av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), frame->height * linesize); return AVERROR_INVALIDDATA; } av_image_copy_plane(frame->data[0], frame->linesize[0], gbc->buffer, linesize, linesize, frame->height); } /* Run any post processing here if needed. */ if (ctx->postproc != DDS_NONE) run_postproc(avctx, frame); /* Frame is ready to be output. */ frame->pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1; *got_frame = 1; return avpkt->size; }
true
FFmpeg
afb4632cc30e83287338690c785ebac180436a59
static int dds_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { DDSContext *ctx = avctx->priv_data; GetByteContext *gbc = &ctx->gbc; AVFrame *frame = data; int mipmap; int ret; ff_texturedsp_init(&ctx->texdsp); bytestream2_init(gbc, avpkt->data, avpkt->size); if (bytestream2_get_bytes_left(gbc) < 128) { av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", bytestream2_get_bytes_left(gbc)); return AVERROR_INVALIDDATA; } if (bytestream2_get_le32(gbc) != MKTAG('D', 'D', 'S', ' ') || bytestream2_get_le32(gbc) != 124) { av_log(avctx, AV_LOG_ERROR, "Invalid DDS header.\n"); return AVERROR_INVALIDDATA; } bytestream2_skip(gbc, 4); avctx->height = bytestream2_get_le32(gbc); avctx->width = bytestream2_get_le32(gbc); ret = av_image_check_size(avctx->width, avctx->height, 0, avctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n", avctx->width, avctx->height); return ret; } avctx->coded_width = FFALIGN(avctx->width, TEXTURE_BLOCK_W); avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H); bytestream2_skip(gbc, 4); bytestream2_skip(gbc, 4); mipmap = bytestream2_get_le32(gbc); if (mipmap != 0) av_log(avctx, AV_LOG_VERBOSE, "Found %d mipmaps (ignored).\n", mipmap); ret = parse_pixel_format(avctx); if (ret < 0) return ret; ret = ff_get_buffer(avctx, frame, 0); if (ret < 0) return ret; if (ctx->compressed) { int size = (avctx->coded_height / TEXTURE_BLOCK_H) * (avctx->coded_width / TEXTURE_BLOCK_W) * ctx->tex_ratio; ctx->slice_count = av_clip(avctx->thread_count, 1, avctx->coded_height / TEXTURE_BLOCK_H); if (bytestream2_get_bytes_left(gbc) < size) { av_log(avctx, AV_LOG_ERROR, "Compressed Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), size); return AVERROR_INVALIDDATA; } ctx->tex_data = gbc->buffer; avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count); } else if (!ctx->paletted && ctx->bpp == 4 && avctx->pix_fmt == AV_PIX_FMT_PAL8) { uint8_t *dst = frame->data[0]; int x, y, i; bytestream2_get_buffer(gbc, frame->data[1], 16 * 4); for (i = 0; i < 16; i++) { AV_WN32(frame->data[1] + i*4, (frame->data[1][2+i*4]<<0)+ (frame->data[1][1+i*4]<<8)+ (frame->data[1][0+i*4]<<16)+ (frame->data[1][3+i*4]<<24) ); } frame->palette_has_changed = 1; if (bytestream2_get_bytes_left(gbc) < frame->height * frame->width / 2) { av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), frame->height * frame->width / 2); return AVERROR_INVALIDDATA; } for (y = 0; y < frame->height; y++) { for (x = 0; x < frame->width; x += 2) { uint8_t val = bytestream2_get_byte(gbc); dst[x ] = val & 0xF; dst[x + 1] = val >> 4; } dst += frame->linesize[0]; } } else { int linesize = av_image_get_linesize(avctx->pix_fmt, frame->width, 0); if (ctx->paletted) { int i; bytestream2_get_buffer(gbc, frame->data[1], 256 * 4); for (i = 0; i < 256; i++) AV_WN32(frame->data[1] + i*4, (frame->data[1][2+i*4]<<0)+ (frame->data[1][1+i*4]<<8)+ (frame->data[1][0+i*4]<<16)+ (frame->data[1][3+i*4]<<24) ); frame->palette_has_changed = 1; } if (bytestream2_get_bytes_left(gbc) < frame->height * linesize) { av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), frame->height * linesize); return AVERROR_INVALIDDATA; } av_image_copy_plane(frame->data[0], frame->linesize[0], gbc->buffer, linesize, linesize, frame->height); } if (ctx->postproc != DDS_NONE) run_postproc(avctx, frame); frame->pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1; *got_frame = 1; return avpkt->size; }
{ "code": [ " (frame->data[1][3+i*4]<<24)" ], "line_no": [ 227 ] }
static int FUNC_0(AVCodecContext *VAR_0, void *VAR_1, int *VAR_2, AVPacket *VAR_3) { DDSContext *ctx = VAR_0->priv_data; GetByteContext *gbc = &ctx->gbc; AVFrame *frame = VAR_1; int VAR_4; int VAR_5; ff_texturedsp_init(&ctx->texdsp); bytestream2_init(gbc, VAR_3->VAR_1, VAR_3->VAR_6); if (bytestream2_get_bytes_left(gbc) < 128) { av_log(VAR_0, AV_LOG_ERROR, "Frame is too small (%d).\n", bytestream2_get_bytes_left(gbc)); return AVERROR_INVALIDDATA; } if (bytestream2_get_le32(gbc) != MKTAG('D', 'D', 'S', ' ') || bytestream2_get_le32(gbc) != 124) { av_log(VAR_0, AV_LOG_ERROR, "Invalid DDS header.\n"); return AVERROR_INVALIDDATA; } bytestream2_skip(gbc, 4); VAR_0->height = bytestream2_get_le32(gbc); VAR_0->width = bytestream2_get_le32(gbc); VAR_5 = av_image_check_size(VAR_0->width, VAR_0->height, 0, VAR_0); if (VAR_5 < 0) { av_log(VAR_0, AV_LOG_ERROR, "Invalid image VAR_6 %dx%d.\n", VAR_0->width, VAR_0->height); return VAR_5; } VAR_0->coded_width = FFALIGN(VAR_0->width, TEXTURE_BLOCK_W); VAR_0->coded_height = FFALIGN(VAR_0->height, TEXTURE_BLOCK_H); bytestream2_skip(gbc, 4); bytestream2_skip(gbc, 4); VAR_4 = bytestream2_get_le32(gbc); if (VAR_4 != 0) av_log(VAR_0, AV_LOG_VERBOSE, "Found %d mipmaps (ignored).\n", VAR_4); VAR_5 = parse_pixel_format(VAR_0); if (VAR_5 < 0) return VAR_5; VAR_5 = ff_get_buffer(VAR_0, frame, 0); if (VAR_5 < 0) return VAR_5; if (ctx->compressed) { int VAR_6 = (VAR_0->coded_height / TEXTURE_BLOCK_H) * (VAR_0->coded_width / TEXTURE_BLOCK_W) * ctx->tex_ratio; ctx->slice_count = av_clip(VAR_0->thread_count, 1, VAR_0->coded_height / TEXTURE_BLOCK_H); if (bytestream2_get_bytes_left(gbc) < VAR_6) { av_log(VAR_0, AV_LOG_ERROR, "Compressed Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), VAR_6); return AVERROR_INVALIDDATA; } ctx->tex_data = gbc->buffer; VAR_0->execute2(VAR_0, decompress_texture_thread, frame, NULL, ctx->slice_count); } else if (!ctx->paletted && ctx->bpp == 4 && VAR_0->pix_fmt == AV_PIX_FMT_PAL8) { uint8_t *dst = frame->VAR_1[0]; int VAR_7, VAR_8, VAR_11; bytestream2_get_buffer(gbc, frame->VAR_1[1], 16 * 4); for (VAR_11 = 0; VAR_11 < 16; VAR_11++) { AV_WN32(frame->VAR_1[1] + VAR_11*4, (frame->VAR_1[1][2+VAR_11*4]<<0)+ (frame->VAR_1[1][1+VAR_11*4]<<8)+ (frame->VAR_1[1][0+VAR_11*4]<<16)+ (frame->VAR_1[1][3+VAR_11*4]<<24) ); } frame->palette_has_changed = 1; if (bytestream2_get_bytes_left(gbc) < frame->height * frame->width / 2) { av_log(VAR_0, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), frame->height * frame->width / 2); return AVERROR_INVALIDDATA; } for (VAR_8 = 0; VAR_8 < frame->height; VAR_8++) { for (VAR_7 = 0; VAR_7 < frame->width; VAR_7 += 2) { uint8_t val = bytestream2_get_byte(gbc); dst[VAR_7 ] = val & 0xF; dst[VAR_7 + 1] = val >> 4; } dst += frame->VAR_10[0]; } } else { int VAR_10 = av_image_get_linesize(VAR_0->pix_fmt, frame->width, 0); if (ctx->paletted) { int VAR_11; bytestream2_get_buffer(gbc, frame->VAR_1[1], 256 * 4); for (VAR_11 = 0; VAR_11 < 256; VAR_11++) AV_WN32(frame->VAR_1[1] + VAR_11*4, (frame->VAR_1[1][2+VAR_11*4]<<0)+ (frame->VAR_1[1][1+VAR_11*4]<<8)+ (frame->VAR_1[1][0+VAR_11*4]<<16)+ (frame->VAR_1[1][3+VAR_11*4]<<24) ); frame->palette_has_changed = 1; } if (bytestream2_get_bytes_left(gbc) < frame->height * VAR_10) { av_log(VAR_0, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), frame->height * VAR_10); return AVERROR_INVALIDDATA; } av_image_copy_plane(frame->VAR_1[0], frame->VAR_10[0], gbc->buffer, VAR_10, VAR_10, frame->height); } if 
(ctx->postproc != DDS_NONE) run_postproc(VAR_0, frame); frame->pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1; *VAR_2 = 1; return VAR_3->VAR_6; }
7
static void check_lowpass_line(int depth){ LOCAL_ALIGNED_32(uint8_t, src, [SRC_SIZE]); LOCAL_ALIGNED_32(uint8_t, dst_ref, [WIDTH_PADDED]); LOCAL_ALIGNED_32(uint8_t, dst_new, [WIDTH_PADDED]); int w = WIDTH; int mref = WIDTH_PADDED * -1; int pref = WIDTH_PADDED; int i, depth_byte; InterlaceContext s; declare_func(void, uint8_t *dstp, ptrdiff_t linesize, const uint8_t *srcp, ptrdiff_t mref, ptrdiff_t pref, int clip_max); s.lowpass = 1; s.lowpass = VLPF_LIN; depth_byte = depth >> 3; w /= depth_byte; memset(src, 0, SRC_SIZE); memset(dst_ref, 0, WIDTH_PADDED); memset(dst_new, 0, WIDTH_PADDED); randomize_buffers(src, SRC_SIZE); ff_interlace_init(&s, depth); if (check_func(s.lowpass_line, "lowpass_line_%d", depth)) { for (i = 0; i < 32; i++) { /* simulate crop */ call_ref(dst_ref, w, src + WIDTH_PADDED, mref - i*depth_byte, pref, 0); call_new(dst_new, w, src + WIDTH_PADDED, mref - i*depth_byte, pref, 0); if (memcmp(dst_ref, dst_new, WIDTH - i)) fail(); } bench_new(dst_new, w, src + WIDTH_PADDED, mref, pref, 0); } }
true
FFmpeg
da032427786d9db4ab21014998cb1245083d6c85
static void check_lowpass_line(int depth){ LOCAL_ALIGNED_32(uint8_t, src, [SRC_SIZE]); LOCAL_ALIGNED_32(uint8_t, dst_ref, [WIDTH_PADDED]); LOCAL_ALIGNED_32(uint8_t, dst_new, [WIDTH_PADDED]); int w = WIDTH; int mref = WIDTH_PADDED * -1; int pref = WIDTH_PADDED; int i, depth_byte; InterlaceContext s; declare_func(void, uint8_t *dstp, ptrdiff_t linesize, const uint8_t *srcp, ptrdiff_t mref, ptrdiff_t pref, int clip_max); s.lowpass = 1; s.lowpass = VLPF_LIN; depth_byte = depth >> 3; w /= depth_byte; memset(src, 0, SRC_SIZE); memset(dst_ref, 0, WIDTH_PADDED); memset(dst_new, 0, WIDTH_PADDED); randomize_buffers(src, SRC_SIZE); ff_interlace_init(&s, depth); if (check_func(s.lowpass_line, "lowpass_line_%d", depth)) { for (i = 0; i < 32; i++) { call_ref(dst_ref, w, src + WIDTH_PADDED, mref - i*depth_byte, pref, 0); call_new(dst_new, w, src + WIDTH_PADDED, mref - i*depth_byte, pref, 0); if (memcmp(dst_ref, dst_new, WIDTH - i)) fail(); } bench_new(dst_new, w, src + WIDTH_PADDED, mref, pref, 0); } }
{ "code": [ "static void check_lowpass_line(int depth){", " LOCAL_ALIGNED_32(uint8_t, src, [SRC_SIZE]);", " LOCAL_ALIGNED_32(uint8_t, dst_ref, [WIDTH_PADDED]);", " LOCAL_ALIGNED_32(uint8_t, dst_new, [WIDTH_PADDED]);", " int w = WIDTH;", " int mref = WIDTH_PADDED * -1;", " int pref = WIDTH_PADDED;", " int i, depth_byte;", " InterlaceContext s;", " declare_func(void, uint8_t *dstp, ptrdiff_t linesize, const uint8_t *srcp,", " ptrdiff_t mref, ptrdiff_t pref, int clip_max);", " s.lowpass = 1;", " s.lowpass = VLPF_LIN;", " depth_byte = depth >> 3;", " w /= depth_byte;", " memset(src, 0, SRC_SIZE);", " memset(dst_ref, 0, WIDTH_PADDED);", " memset(dst_new, 0, WIDTH_PADDED);", " randomize_buffers(src, SRC_SIZE);", " ff_interlace_init(&s, depth);", " if (check_func(s.lowpass_line, \"lowpass_line_%d\", depth)) {", " call_ref(dst_ref, w, src + WIDTH_PADDED, mref - i*depth_byte, pref, 0);", " call_new(dst_new, w, src + WIDTH_PADDED, mref - i*depth_byte, pref, 0);", " if (memcmp(dst_ref, dst_new, WIDTH - i))", " fail();", " bench_new(dst_new, w, src + WIDTH_PADDED, mref, pref, 0);" ], "line_no": [ 1, 3, 5, 7, 9, 11, 13, 15, 17, 21, 23, 27, 29, 31, 33, 37, 39, 41, 43, 47, 51, 55, 57, 59, 61, 65 ] }
static void FUNC_0(int VAR_0){ LOCAL_ALIGNED_32(uint8_t, src, [SRC_SIZE]); LOCAL_ALIGNED_32(uint8_t, dst_ref, [WIDTH_PADDED]); LOCAL_ALIGNED_32(uint8_t, dst_new, [WIDTH_PADDED]); int VAR_1 = WIDTH; int VAR_2 = WIDTH_PADDED * -1; int VAR_3 = WIDTH_PADDED; int VAR_4, VAR_5; InterlaceContext s; declare_func(void, uint8_t *dstp, ptrdiff_t linesize, const uint8_t *srcp, ptrdiff_t VAR_2, ptrdiff_t VAR_3, int clip_max); s.lowpass = 1; s.lowpass = VLPF_LIN; VAR_5 = VAR_0 >> 3; VAR_1 /= VAR_5; memset(src, 0, SRC_SIZE); memset(dst_ref, 0, WIDTH_PADDED); memset(dst_new, 0, WIDTH_PADDED); randomize_buffers(src, SRC_SIZE); ff_interlace_init(&s, VAR_0); if (check_func(s.lowpass_line, "lowpass_line_%d", VAR_0)) { for (VAR_4 = 0; VAR_4 < 32; VAR_4++) { call_ref(dst_ref, VAR_1, src + WIDTH_PADDED, VAR_2 - VAR_4*VAR_5, VAR_3, 0); call_new(dst_new, VAR_1, src + WIDTH_PADDED, VAR_2 - VAR_4*VAR_5, VAR_3, 0); if (memcmp(dst_ref, dst_new, WIDTH - VAR_4)) fail(); } bench_new(dst_new, VAR_1, src + WIDTH_PADDED, VAR_2, VAR_3, 0); } }
9
static void test_init(TestData *d) { QPCIBus *bus; QTestState *qs; char *s; s = g_strdup_printf("-machine q35 %s %s", d->noreboot ? "" : "-global ICH9-LPC.noreboot=false", !d->args ? "" : d->args); qs = qtest_start(s); qtest_irq_intercept_in(qs, "ioapic"); g_free(s); bus = qpci_init_pc(NULL); d->dev = qpci_device_find(bus, QPCI_DEVFN(0x1f, 0x00)); g_assert(d->dev != NULL); qpci_device_enable(d->dev); /* set ACPI PM I/O space base address */ qpci_config_writel(d->dev, ICH9_LPC_PMBASE, PM_IO_BASE_ADDR | 0x1); /* enable ACPI I/O */ qpci_config_writeb(d->dev, ICH9_LPC_ACPI_CTRL, 0x80); /* set Root Complex BAR */ qpci_config_writel(d->dev, ICH9_LPC_RCBA, RCBA_BASE_ADDR | 0x1); d->tco_io_base = qpci_legacy_iomap(d->dev, PM_IO_BASE_ADDR + 0x60); }
true
qemu
b4ba67d9a702507793c2724e56f98e9b0f7be02b
static void test_init(TestData *d) { QPCIBus *bus; QTestState *qs; char *s; s = g_strdup_printf("-machine q35 %s %s", d->noreboot ? "" : "-global ICH9-LPC.noreboot=false", !d->args ? "" : d->args); qs = qtest_start(s); qtest_irq_intercept_in(qs, "ioapic"); g_free(s); bus = qpci_init_pc(NULL); d->dev = qpci_device_find(bus, QPCI_DEVFN(0x1f, 0x00)); g_assert(d->dev != NULL); qpci_device_enable(d->dev); qpci_config_writel(d->dev, ICH9_LPC_PMBASE, PM_IO_BASE_ADDR | 0x1); qpci_config_writeb(d->dev, ICH9_LPC_ACPI_CTRL, 0x80); qpci_config_writel(d->dev, ICH9_LPC_RCBA, RCBA_BASE_ADDR | 0x1); d->tco_io_base = qpci_legacy_iomap(d->dev, PM_IO_BASE_ADDR + 0x60); }
{ "code": [ " d->tco_io_base = qpci_legacy_iomap(d->dev, PM_IO_BASE_ADDR + 0x60);" ], "line_no": [ 53 ] }
static void FUNC_0(TestData *VAR_0) { QPCIBus *bus; QTestState *qs; char *VAR_1; VAR_1 = g_strdup_printf("-machine q35 %VAR_1 %VAR_1", VAR_0->noreboot ? "" : "-global ICH9-LPC.noreboot=false", !VAR_0->args ? "" : VAR_0->args); qs = qtest_start(VAR_1); qtest_irq_intercept_in(qs, "ioapic"); g_free(VAR_1); bus = qpci_init_pc(NULL); VAR_0->dev = qpci_device_find(bus, QPCI_DEVFN(0x1f, 0x00)); g_assert(VAR_0->dev != NULL); qpci_device_enable(VAR_0->dev); qpci_config_writel(VAR_0->dev, ICH9_LPC_PMBASE, PM_IO_BASE_ADDR | 0x1); qpci_config_writeb(VAR_0->dev, ICH9_LPC_ACPI_CTRL, 0x80); qpci_config_writel(VAR_0->dev, ICH9_LPC_RCBA, RCBA_BASE_ADDR | 0x1); VAR_0->tco_io_base = qpci_legacy_iomap(VAR_0->dev, PM_IO_BASE_ADDR + 0x60); }
11
void assert_avoptions(AVDictionary *m) { AVDictionaryEntry *t; if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key); exit(1); } }
true
FFmpeg
636ced8e1dc8248a1353b416240b93d70ad03edb
void assert_avoptions(AVDictionary *m) { AVDictionaryEntry *t; if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key); exit(1); } }
{ "code": [ " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);", " exit(1);" ], "line_no": [ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11 ] }
void FUNC_0(AVDictionary *VAR_0) { AVDictionaryEntry *t; if ((t = av_dict_get(VAR_0, "", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key); exit(1); } }
13
static void nbd_refresh_filename(BlockDriverState *bs, QDict *options) { BDRVNBDState *s = bs->opaque; QDict *opts = qdict_new(); QObject *saddr_qdict; Visitor *ov; const char *host = NULL, *port = NULL, *path = NULL; if (s->saddr->type == SOCKET_ADDRESS_KIND_INET) { const InetSocketAddress *inet = s->saddr->u.inet.data; if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) { host = inet->host; port = inet->port; } } else if (s->saddr->type == SOCKET_ADDRESS_KIND_UNIX) { path = s->saddr->u.q_unix.data->path; } qdict_put(opts, "driver", qstring_from_str("nbd")); if (path && s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd+unix:///%s?socket=%s", s->export, path); } else if (path && !s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd+unix://?socket=%s", path); } else if (host && s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd://%s:%s/%s", host, port, s->export); } else if (host && !s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd://%s:%s", host, port); } ov = qobject_output_visitor_new(&saddr_qdict); visit_type_SocketAddress(ov, NULL, &s->saddr, &error_abort); visit_complete(ov, &saddr_qdict); assert(qobject_type(saddr_qdict) == QTYPE_QDICT); qdict_put_obj(opts, "server", saddr_qdict); if (s->export) { qdict_put(opts, "export", qstring_from_str(s->export)); } if (s->tlscredsid) { qdict_put(opts, "tls-creds", qstring_from_str(s->tlscredsid)); } qdict_flatten(opts); bs->full_open_options = opts; }
true
qemu
a1d4e38a8b01a6699355c31867d524f8d4cd480e
static void nbd_refresh_filename(BlockDriverState *bs, QDict *options) { BDRVNBDState *s = bs->opaque; QDict *opts = qdict_new(); QObject *saddr_qdict; Visitor *ov; const char *host = NULL, *port = NULL, *path = NULL; if (s->saddr->type == SOCKET_ADDRESS_KIND_INET) { const InetSocketAddress *inet = s->saddr->u.inet.data; if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) { host = inet->host; port = inet->port; } } else if (s->saddr->type == SOCKET_ADDRESS_KIND_UNIX) { path = s->saddr->u.q_unix.data->path; } qdict_put(opts, "driver", qstring_from_str("nbd")); if (path && s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd+unix: } else if (path && !s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd+unix: } else if (host && s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd: } else if (host && !s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd: } ov = qobject_output_visitor_new(&saddr_qdict); visit_type_SocketAddress(ov, NULL, &s->saddr, &error_abort); visit_complete(ov, &saddr_qdict); assert(qobject_type(saddr_qdict) == QTYPE_QDICT); qdict_put_obj(opts, "server", saddr_qdict); if (s->export) { qdict_put(opts, "export", qstring_from_str(s->export)); } if (s->tlscredsid) { qdict_put(opts, "tls-creds", qstring_from_str(s->tlscredsid)); } qdict_flatten(opts); bs->full_open_options = opts; }
{ "code": [], "line_no": [] }
static void FUNC_0(BlockDriverState *VAR_0, QDict *VAR_1) { BDRVNBDState *s = VAR_0->opaque; QDict *opts = qdict_new(); QObject *saddr_qdict; Visitor *ov; const char *VAR_2 = NULL, *VAR_3 = NULL, *VAR_4 = NULL; if (s->saddr->type == SOCKET_ADDRESS_KIND_INET) { const InetSocketAddress *VAR_5 = s->saddr->u.VAR_5.data; if (!VAR_5->has_ipv4 && !VAR_5->has_ipv6 && !VAR_5->has_to) { VAR_2 = VAR_5->VAR_2; VAR_3 = VAR_5->VAR_3; } } else if (s->saddr->type == SOCKET_ADDRESS_KIND_UNIX) { VAR_4 = s->saddr->u.q_unix.data->VAR_4; } qdict_put(opts, "driver", qstring_from_str("nbd")); if (VAR_4 && s->export) { snprintf(VAR_0->exact_filename, sizeof(VAR_0->exact_filename), "nbd+unix: } else if (VAR_4 && !s->export) { snprintf(VAR_0->exact_filename, sizeof(VAR_0->exact_filename), "nbd+unix: } else if (VAR_2 && s->export) { snprintf(VAR_0->exact_filename, sizeof(VAR_0->exact_filename), "nbd: } else if (VAR_2 && !s->export) { snprintf(VAR_0->exact_filename, sizeof(VAR_0->exact_filename), "nbd: } ov = qobject_output_visitor_new(&saddr_qdict); visit_type_SocketAddress(ov, NULL, &s->saddr, &error_abort); visit_complete(ov, &saddr_qdict); assert(qobject_type(saddr_qdict) == QTYPE_QDICT); qdict_put_obj(opts, "server", saddr_qdict); if (s->export) { qdict_put(opts, "export", qstring_from_str(s->export)); } if (s->tlscredsid) { qdict_put(opts, "tls-creds", qstring_from_str(s->tlscredsid)); } qdict_flatten(opts); VAR_0->full_open_options = opts; }
14
int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan) { const char *ifname; ifname = qemu_opt_get(opts, "ifname"); if (!ifname) { error_report("tap: no interface name"); return -1; } if (tap_win32_init(vlan, "tap", name, ifname) == -1) { return -1; } return 0; }
true
qemu
6687b79d636cd60ed9adb1177d0d946b58fa7717
int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan) { const char *ifname; ifname = qemu_opt_get(opts, "ifname"); if (!ifname) { error_report("tap: no interface name"); return -1; } if (tap_win32_init(vlan, "tap", name, ifname) == -1) { return -1; } return 0; }
{ "code": [ " return -1;", "int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan)", "int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan)" ], "line_no": [ 17, 1, 1 ] }
int FUNC_0(QemuOpts *VAR_0, const char *VAR_1, VLANState *VAR_2) { const char *VAR_3; VAR_3 = qemu_opt_get(VAR_0, "VAR_3"); if (!VAR_3) { error_report("tap: no interface VAR_1"); return -1; } if (tap_win32_init(VAR_2, "tap", VAR_1, VAR_3) == -1) { return -1; } return 0; }

Dataset Card for "devign_with_vul_lines"

Each row pairs a C function taken from the FFmpeg or QEMU source tree with a boolean target vulnerability label, the id of the commit the function comes from, a cleaned copy of the function with comments stripped (func_clean), a vul_lines record listing the vulnerable source lines and their line numbers (parallel "code" and "line_no" lists), and a normalized_func in which function and variable identifiers are replaced by FUNC_n / VAR_n placeholders, as shown in the preview rows above.
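
The rows above can be consumed with the Hugging Face datasets library. The sketch below is a minimal, hedged example: the hub namespace and the split name are assumptions, since only the dataset name "devign_with_vul_lines" appears on this page, and the field names follow the column layout shown in the preview.

from datasets import load_dataset

# Assumption: replace the namespace with the repository that actually hosts the dataset.
REPO_ID = "your-namespace/devign_with_vul_lines"

ds = load_dataset(REPO_ID)   # the available splits (e.g. "train") are an assumption
row = ds["train"][0]

print(row["project"], row["commit_id"], row["target"])
print(row["vul_lines"]["line_no"])  # line numbers of the flagged vulnerable lines, if any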
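
Where a row carries a non-empty vul_lines record, its "code" and "line_no" lists are parallel, so the flagged lines can be inspected directly. A small sketch, assuming the column layout shown in the preview rows, that cross-checks each flagged line against the cleaned function text:

def show_vulnerable_lines(row):
    # vul_lines is a dict with parallel "code" and "line_no" lists (see the rows above).
    for no, text in zip(row["vul_lines"]["line_no"], row["vul_lines"]["code"]):
        found = text.strip() in row["func_clean"]
        print(f"line {no}: {text.strip()!r} (present in func_clean: {found})")

show_vulnerable_lines(row)  # prints nothing for rows whose vul_lines lists are empty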
