Dataset columns:

  cwe_id       stringclasses    8 values
  func         stringlengths    40 to 61.2k characters
  label        int64            0 to 1
  cve_id       stringlengths    13 to 16 characters
  id           int64            0 to 3.29k
  text_label   stringclasses    2 values
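For orientation, here is a minimal sketch of how rows with this schema could be grouped by CVE. It assumes the table has been exported to a local JSON Lines file; the filename cve_functions.jsonl and the JSONL format are assumptions for illustration, not part of the dataset.

    import json
    from collections import defaultdict

    def load_rows(path="cve_functions.jsonl"):
        # Yield one dict per row; keys match the columns listed above.
        # The path is a hypothetical local export of this table.
        with open(path, encoding="utf-8") as fh:
            for line in fh:
                yield json.loads(line)

    # Rows sharing a cve_id hold paired versions of a function, one per
    # label value, so group them for side-by-side inspection or diffing.
    by_cve = defaultdict(list)
    for row in load_rows():
        by_cve[row["cve_id"]].append(row)

    for cve_id, rows in sorted(by_cve.items()):
        labels = ", ".join(r["text_label"] for r in rows)
        print(f"{cve_id}: {len(rows)} rows ({labels})")

Each func value is one complete C/C++ function stored as a single string, so a row can be written out to a source file or passed straight to a tokenizer.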
CWE-119
void ACSequentialScan::DecodeBlock(LONG *block, LONG &prevdc,LONG &prevdiff, UBYTE small,UBYTE large,UBYTE kx,UBYTE dc,UBYTE ac) { // DC coding if (m_ucScanStart == 0 && m_bResidual == false) { LONG diff; struct QMContextSet::DCContextZeroSet &cz = m_Context[dc].Classify(prevdiff,small,large); // Check whether the difference is nonzero. if (m_Coder.Get(cz.S0)) { LONG sz; bool sign = m_Coder.Get(cz.SS); // sign coding, is true for negative. // // // Positive and negative are encoded in different contexts. // Decode the magnitude cathegory. if (m_Coder.Get((sign)?(cz.SN):(cz.SP))) { int i = 0; LONG m = 2; while(m_Coder.Get(m_Context[dc].DCMagnitude.X[i])) { m <<= 1; i++; if (m == 0) JPG_THROW(MALFORMED_STREAM,"ACSequentialScan::DecodeBlock", "QMDecoder is out of sync"); } // // Get the MSB to decode. m >>= 1; sz = m; // // Refinement coding of remaining bits. while((m >>= 1)) { if (m_Coder.Get(m_Context[dc].DCMagnitude.M[i])) { sz |= m; } } } else { sz = 0; } // // Done, finally, include the sign and the offset. if (sign) { diff = -sz - 1; } else { diff = sz + 1; } } else { // Difference is zero. diff = 0; } prevdiff = diff; if (m_bDifferential) { prevdc = diff; } else { prevdc += diff; } block[0] = prevdc << m_ucLowBit; // point transformation } if (m_ucScanStop) { // AC coding. No block skipping used here. int k = (m_ucScanStart)?(m_ucScanStart):((m_bResidual)?0:1); // // EOB decoding. while(k <= m_ucScanStop && !m_Coder.Get(m_Context[ac].ACZero[k-1].SE)) { LONG sz; bool sign; // // Not yet EOB. Run coding in S0: Skip over zeros. while(!m_Coder.Get(m_Context[ac].ACZero[k-1].S0)) { k++; if (k > m_ucScanStop) JPG_THROW(MALFORMED_STREAM,"ACSequentialScan::DecodeBlock", "QMDecoder is out of sync"); } // // Now decode the sign of the coefficient. // This happens in the uniform context. sign = m_Coder.Get(m_Context[ac].Uniform); // // Decode the magnitude. if (m_Coder.Get(m_Context[ac].ACZero[k-1].SP)) { // X1 coding, identical to SN and SP. if (m_Coder.Get(m_Context[ac].ACZero[k-1].SP)) { int i = 0; LONG m = 4; struct QMContextSet::ACContextMagnitudeSet &acm = (k > kx)?(m_Context[ac].ACMagnitudeHigh):(m_Context[ac].ACMagnitudeLow); while(m_Coder.Get(acm.X[i])) { m <<= 1; i++; if (m == 0) JPG_THROW(MALFORMED_STREAM,"ACSequentialScan::DecodeBlock", "QMDecoder is out of sync"); } // // Get the MSB to decode m >>= 1; sz = m; // // Proceed to refinement. while((m >>= 1)) { if (m_Coder.Get(acm.M[i])) { sz |= m; } } } else { sz = 1; } } else { sz = 0; } // // Done. Finally, include sign and offset. sz++; if (sign) sz = -sz; block[DCT::ScanOrder[k]] = sz << m_ucLowBit; // // Proceed to the next block. k++; } } }
0
CVE-2022-31620
1,414
benign
CWE-119
void ACSequentialScan::DecodeBlock(LONG *block, LONG &prevdc,LONG &prevdiff, UBYTE small,UBYTE large,UBYTE kx,UBYTE dc,UBYTE ac) { // DC coding if (m_ucScanStart == 0 && m_bResidual == false) { LONG diff; struct QMContextSet::DCContextZeroSet &cz = m_Context[dc].Classify(prevdiff,small,large); // Check whether the difference is nonzero. if (m_Coder.Get(cz.S0)) { LONG sz; bool sign = m_Coder.Get(cz.SS); // sign coding, is true for negative. // // // Positive and negative are encoded in different contexts. // Decode the magnitude cathegory. if (m_Coder.Get((sign)?(cz.SN):(cz.SP))) { int i = 0; LONG m = 2; while(m_Coder.Get(m_Context[dc].DCMagnitude.X[i])) { m <<= 1; if(++i >= QMContextSet::DCContextMagnitudeSet::MagnitudeContexts) JPG_THROW(MALFORMED_STREAM,"ACSequentialScan::DecodeBlock", "QMDecoder is out of sync"); } // // Get the MSB to decode. m >>= 1; sz = m; // // Refinement coding of remaining bits. while((m >>= 1)) { if (m_Coder.Get(m_Context[dc].DCMagnitude.M[i])) { sz |= m; } } } else { sz = 0; } // // Done, finally, include the sign and the offset. if (sign) { diff = -sz - 1; } else { diff = sz + 1; } } else { // Difference is zero. diff = 0; } prevdiff = diff; if (m_bDifferential) { prevdc = diff; } else { prevdc += diff; } block[0] = prevdc << m_ucLowBit; // point transformation } if (m_ucScanStop) { // AC coding. No block skipping used here. int k = (m_ucScanStart)?(m_ucScanStart):((m_bResidual)?0:1); // // EOB decoding. while(k <= m_ucScanStop && !m_Coder.Get(m_Context[ac].ACZero[k-1].SE)) { LONG sz; bool sign; // // Not yet EOB. Run coding in S0: Skip over zeros. while(!m_Coder.Get(m_Context[ac].ACZero[k-1].S0)) { k++; if (k > m_ucScanStop) JPG_THROW(MALFORMED_STREAM,"ACSequentialScan::DecodeBlock", "QMDecoder is out of sync"); } // // Now decode the sign of the coefficient. // This happens in the uniform context. sign = m_Coder.Get(m_Context[ac].Uniform); // // Decode the magnitude. if (m_Coder.Get(m_Context[ac].ACZero[k-1].SP)) { // X1 coding, identical to SN and SP. if (m_Coder.Get(m_Context[ac].ACZero[k-1].SP)) { int i = 0; LONG m = 4; struct QMContextSet::ACContextMagnitudeSet &acm = (k > kx)?(m_Context[ac].ACMagnitudeHigh):(m_Context[ac].ACMagnitudeLow); while(m_Coder.Get(acm.X[i])) { m <<= 1; if(++i >= QMContextSet::ACContextMagnitudeSet::MagnitudeContexts) JPG_THROW(MALFORMED_STREAM,"ACSequentialScan::DecodeBlock", "QMDecoder is out of sync"); } // // Get the MSB to decode m >>= 1; sz = m; // // Proceed to refinement. while((m >>= 1)) { if (m_Coder.Get(acm.M[i])) { sz |= m; } } } else { sz = 1; } } else { sz = 0; } // // Done. Finally, include sign and offset. sz++; if (sign) sz = -sz; block[DCT::ScanOrder[k]] = sz << m_ucLowBit; // // Proceed to the next block. k++; } } }
1
CVE-2022-31620
1,414
vulnerable
CWE-125
TfLiteRegistration OkOpRegistration() { TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; // Set output size to the input size in OkOp::Prepare(). Code exists to have // a framework in Prepare. The input and output tensors are not used. reg.prepare = [](TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* in_tensor = GetInput(context, node, 0); TfLiteTensor* out_tensor = GetOutput(context, node, 0); TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims); return context->ResizeTensor(context, out_tensor, new_size); }; reg.invoke = [](TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; }; return reg; }
0
CVE-2020-15211
300
benign
CWE-125
TfLiteRegistration OkOpRegistration() { TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; // Set output size to the input size in OkOp::Prepare(). Code exists to have // a framework in Prepare. The input and output tensors are not used. reg.prepare = [](TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* in_tensor; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &in_tensor)); TfLiteTensor* out_tensor; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor)); TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims); return context->ResizeTensor(context, out_tensor, new_size); }; reg.invoke = [](TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; }; return reg; }
1
CVE-2020-15211
300
vulnerable
CWE-787
de265_error seq_parameter_set::read(error_queue* errqueue, bitreader* br) { int vlc; video_parameter_set_id = get_bits(br,4); sps_max_sub_layers = get_bits(br,3) +1; if (sps_max_sub_layers>7) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } sps_temporal_id_nesting_flag = get_bits(br,1); profile_tier_level_.read(br, sps_max_sub_layers); READ_VLC(seq_parameter_set_id, uvlc); if (seq_parameter_set_id >= DE265_MAX_SPS_SETS) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } // --- decode chroma type --- READ_VLC(chroma_format_idc, uvlc); if (chroma_format_idc == 3) { separate_colour_plane_flag = get_bits(br,1); } else { separate_colour_plane_flag = 0; } if (chroma_format_idc<0 || chroma_format_idc>3) { errqueue->add_warning(DE265_WARNING_INVALID_CHROMA_FORMAT, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } // --- picture size --- READ_VLC(pic_width_in_luma_samples, uvlc); READ_VLC(pic_height_in_luma_samples, uvlc); if (pic_width_in_luma_samples == 0 || pic_height_in_luma_samples == 0) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (pic_width_in_luma_samples > MAX_PICTURE_WIDTH || pic_height_in_luma_samples> MAX_PICTURE_HEIGHT) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } conformance_window_flag = get_bits(br,1); if (conformance_window_flag) { READ_VLC(conf_win_left_offset, uvlc); READ_VLC(conf_win_right_offset, uvlc); READ_VLC(conf_win_top_offset, uvlc); READ_VLC(conf_win_bottom_offset,uvlc); } else { conf_win_left_offset = 0; conf_win_right_offset = 0; conf_win_top_offset = 0; conf_win_bottom_offset= 0; } READ_VLC_OFFSET(bit_depth_luma, uvlc, 8); READ_VLC_OFFSET(bit_depth_chroma,uvlc, 8); if (bit_depth_luma > 16 || bit_depth_chroma > 16) { errqueue->add_warning(DE265_WARNING_SPS_HEADER_INVALID, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } READ_VLC_OFFSET(log2_max_pic_order_cnt_lsb, uvlc, 4); if (log2_max_pic_order_cnt_lsb<4 || log2_max_pic_order_cnt_lsb>16) { errqueue->add_warning(DE265_WARNING_SPS_HEADER_INVALID, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } MaxPicOrderCntLsb = 1<<(log2_max_pic_order_cnt_lsb); // --- sub_layer_ordering_info --- sps_sub_layer_ordering_info_present_flag = get_bits(br,1); int firstLayer = (sps_sub_layer_ordering_info_present_flag ? 0 : sps_max_sub_layers-1 ); for (int i=firstLayer ; i <= sps_max_sub_layers-1; i++ ) { // sps_max_dec_pic_buffering[i] vlc=get_uvlc(br); if (vlc == UVLC_ERROR || vlc+1 > MAX_NUM_REF_PICS) { errqueue->add_warning(DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } sps_max_dec_pic_buffering[i] = vlc+1; // sps_max_num_reorder_pics[i] READ_VLC(sps_max_num_reorder_pics[i], uvlc); // sps_max_latency_increase[i] READ_VLC(sps_max_latency_increase_plus1[i], uvlc); SpsMaxLatencyPictures[i] = (sps_max_num_reorder_pics[i] + sps_max_latency_increase_plus1[i]-1); } // copy info to all layers if only specified once if (sps_sub_layer_ordering_info_present_flag) { int ref = sps_max_sub_layers-1; assert(ref<7); for (int i=0 ; i < sps_max_sub_layers-1; i++ ) { sps_max_dec_pic_buffering[i] = sps_max_dec_pic_buffering[ref]; sps_max_num_reorder_pics[i] = sps_max_num_reorder_pics[ref]; sps_max_latency_increase_plus1[i] = sps_max_latency_increase_plus1[ref]; } } READ_VLC_OFFSET(log2_min_luma_coding_block_size, uvlc, 3); READ_VLC (log2_diff_max_min_luma_coding_block_size, uvlc); READ_VLC_OFFSET(log2_min_transform_block_size, uvlc, 2); READ_VLC(log2_diff_max_min_transform_block_size, uvlc); READ_VLC(max_transform_hierarchy_depth_inter, uvlc); READ_VLC(max_transform_hierarchy_depth_intra, uvlc); if (log2_min_luma_coding_block_size > 6) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (log2_min_luma_coding_block_size + log2_diff_max_min_luma_coding_block_size > 6) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (log2_min_transform_block_size > 5) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (log2_min_transform_block_size + log2_diff_max_min_transform_block_size > 5) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } scaling_list_enable_flag = get_bits(br,1); if (scaling_list_enable_flag) { sps_scaling_list_data_present_flag = get_bits(br,1); if (sps_scaling_list_data_present_flag) { de265_error err; if ((err=read_scaling_list(br,this, &scaling_list, false)) != DE265_OK) { return err; } } else { set_default_scaling_lists(&scaling_list); } } amp_enabled_flag = get_bits(br,1); sample_adaptive_offset_enabled_flag = get_bits(br,1); pcm_enabled_flag = get_bits(br,1); if (pcm_enabled_flag) { pcm_sample_bit_depth_luma = get_bits(br,4)+1; pcm_sample_bit_depth_chroma = get_bits(br,4)+1; READ_VLC_OFFSET(log2_min_pcm_luma_coding_block_size, uvlc, 3); READ_VLC(log2_diff_max_min_pcm_luma_coding_block_size, uvlc); pcm_loop_filter_disable_flag = get_bits(br,1); if (pcm_sample_bit_depth_luma > bit_depth_luma) { errqueue->add_warning(DE265_WARNING_PCM_BITDEPTH_TOO_LARGE, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (pcm_sample_bit_depth_chroma > bit_depth_chroma) { errqueue->add_warning(DE265_WARNING_PCM_BITDEPTH_TOO_LARGE, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } } else { pcm_sample_bit_depth_luma = 0; pcm_sample_bit_depth_chroma = 0; log2_min_pcm_luma_coding_block_size = 0; log2_diff_max_min_pcm_luma_coding_block_size = 0; pcm_loop_filter_disable_flag = 0; } int num_short_term_ref_pic_sets; READ_VLC(num_short_term_ref_pic_sets, uvlc); if (num_short_term_ref_pic_sets < 0 || num_short_term_ref_pic_sets > 64) { errqueue->add_warning(DE265_WARNING_NUMBER_OF_SHORT_TERM_REF_PIC_SETS_OUT_OF_RANGE, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } // --- allocate reference pic set --- // we do not allocate the ref-pic-set for the slice header here, but in the slice header itself ref_pic_sets.resize(num_short_term_ref_pic_sets); for (int i = 0; i < num_short_term_ref_pic_sets; i++) { bool success = read_short_term_ref_pic_set(errqueue,this,br, &ref_pic_sets[i], i, ref_pic_sets, false); if (!success) { return DE265_WARNING_SPS_HEADER_INVALID; } // dump_short_term_ref_pic_set(&(*ref_pic_sets)[i], fh); } long_term_ref_pics_present_flag = get_bits(br,1); if (long_term_ref_pics_present_flag) { READ_VLC(num_long_term_ref_pics_sps, uvlc); if (num_long_term_ref_pics_sps > MAX_NUM_LT_REF_PICS_SPS) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } for (int i = 0; i < num_long_term_ref_pics_sps; i++ ) { lt_ref_pic_poc_lsb_sps[i] = get_bits(br, log2_max_pic_order_cnt_lsb); used_by_curr_pic_lt_sps_flag[i] = get_bits(br,1); } } else { num_long_term_ref_pics_sps = 0; // NOTE: missing definition in standard ! } sps_temporal_mvp_enabled_flag = get_bits(br,1); strong_intra_smoothing_enable_flag = get_bits(br,1); vui_parameters_present_flag = get_bits(br,1); if (vui_parameters_present_flag) { vui.read(errqueue, br, this); } sps_extension_present_flag = get_bits(br,1); if (sps_extension_present_flag) { sps_range_extension_flag = get_bits(br,1); sps_multilayer_extension_flag = get_bits(br,1); sps_extension_6bits = get_bits(br,6); } else { sps_range_extension_flag = 0; } if (sps_range_extension_flag) { de265_error err = range_extension.read(errqueue, br); if (err != DE265_OK) { return err; } } /* sps_extension_flag = get_bits(br,1); if (sps_extension_flag) { assert(false); } */ de265_error err = compute_derived_values(); if (err != DE265_OK) { return err; } sps_read = true; return DE265_OK; }
0
CVE-2022-1253
150
benign
CWE-787
de265_error seq_parameter_set::read(error_queue* errqueue, bitreader* br) { int vlc; video_parameter_set_id = get_bits(br,4); sps_max_sub_layers = get_bits(br,3) +1; if (sps_max_sub_layers>7) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } sps_temporal_id_nesting_flag = get_bits(br,1); profile_tier_level_.read(br, sps_max_sub_layers); READ_VLC(seq_parameter_set_id, uvlc); if (seq_parameter_set_id >= DE265_MAX_SPS_SETS) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } // --- decode chroma type --- READ_VLC(chroma_format_idc, uvlc); if (chroma_format_idc == 3) { separate_colour_plane_flag = get_bits(br,1); } else { separate_colour_plane_flag = 0; } if (chroma_format_idc<0 || chroma_format_idc>3) { errqueue->add_warning(DE265_WARNING_INVALID_CHROMA_FORMAT, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } // --- picture size --- READ_VLC(pic_width_in_luma_samples, uvlc); READ_VLC(pic_height_in_luma_samples, uvlc); if (pic_width_in_luma_samples == 0 || pic_height_in_luma_samples == 0) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (pic_width_in_luma_samples > MAX_PICTURE_WIDTH || pic_height_in_luma_samples> MAX_PICTURE_HEIGHT) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } conformance_window_flag = get_bits(br,1); if (conformance_window_flag) { READ_VLC(conf_win_left_offset, uvlc); READ_VLC(conf_win_right_offset, uvlc); READ_VLC(conf_win_top_offset, uvlc); READ_VLC(conf_win_bottom_offset,uvlc); } else { conf_win_left_offset = 0; conf_win_right_offset = 0; conf_win_top_offset = 0; conf_win_bottom_offset= 0; } READ_VLC_OFFSET(bit_depth_luma, uvlc, 8); READ_VLC_OFFSET(bit_depth_chroma,uvlc, 8); if (bit_depth_luma > 16 || bit_depth_chroma > 16) { errqueue->add_warning(DE265_WARNING_SPS_HEADER_INVALID, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } READ_VLC_OFFSET(log2_max_pic_order_cnt_lsb, uvlc, 4); if (log2_max_pic_order_cnt_lsb<4 || log2_max_pic_order_cnt_lsb>16) { errqueue->add_warning(DE265_WARNING_SPS_HEADER_INVALID, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } MaxPicOrderCntLsb = 1<<(log2_max_pic_order_cnt_lsb); // --- sub_layer_ordering_info --- sps_sub_layer_ordering_info_present_flag = get_bits(br,1); int firstLayer = (sps_sub_layer_ordering_info_present_flag ? 0 : sps_max_sub_layers-1 ); for (int i=firstLayer ; i <= sps_max_sub_layers-1; i++ ) { // sps_max_dec_pic_buffering[i] vlc=get_uvlc(br); if (vlc == UVLC_ERROR || vlc+1 > MAX_NUM_REF_PICS) { errqueue->add_warning(DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } sps_max_dec_pic_buffering[i] = vlc+1; // sps_max_num_reorder_pics[i] READ_VLC(sps_max_num_reorder_pics[i], uvlc); // sps_max_latency_increase[i] READ_VLC(sps_max_latency_increase_plus1[i], uvlc); SpsMaxLatencyPictures[i] = (sps_max_num_reorder_pics[i] + sps_max_latency_increase_plus1[i]-1); } // copy info to all layers if only specified once if (sps_sub_layer_ordering_info_present_flag) { int ref = sps_max_sub_layers-1; assert(ref<7); for (int i=0 ; i < sps_max_sub_layers-1; i++ ) { sps_max_dec_pic_buffering[i] = sps_max_dec_pic_buffering[ref]; sps_max_num_reorder_pics[i] = sps_max_num_reorder_pics[ref]; sps_max_latency_increase_plus1[i] = sps_max_latency_increase_plus1[ref]; } } READ_VLC_OFFSET(log2_min_luma_coding_block_size, uvlc, 3); READ_VLC (log2_diff_max_min_luma_coding_block_size, uvlc); READ_VLC_OFFSET(log2_min_transform_block_size, uvlc, 2); READ_VLC(log2_diff_max_min_transform_block_size, uvlc); READ_VLC(max_transform_hierarchy_depth_inter, uvlc); READ_VLC(max_transform_hierarchy_depth_intra, uvlc); if (log2_min_luma_coding_block_size > 6) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (log2_min_luma_coding_block_size + log2_diff_max_min_luma_coding_block_size > 6) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (log2_min_transform_block_size > 5) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (log2_min_transform_block_size + log2_diff_max_min_transform_block_size > 5) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } scaling_list_enable_flag = get_bits(br,1); if (scaling_list_enable_flag) { sps_scaling_list_data_present_flag = get_bits(br,1); if (sps_scaling_list_data_present_flag) { de265_error err; if ((err=read_scaling_list(br,this, &scaling_list, false)) != DE265_OK) { return err; } } else { set_default_scaling_lists(&scaling_list); } } amp_enabled_flag = get_bits(br,1); sample_adaptive_offset_enabled_flag = get_bits(br,1); pcm_enabled_flag = get_bits(br,1); if (pcm_enabled_flag) { pcm_sample_bit_depth_luma = get_bits(br,4)+1; pcm_sample_bit_depth_chroma = get_bits(br,4)+1; READ_VLC_OFFSET(log2_min_pcm_luma_coding_block_size, uvlc, 3); READ_VLC(log2_diff_max_min_pcm_luma_coding_block_size, uvlc); pcm_loop_filter_disable_flag = get_bits(br,1); if (pcm_sample_bit_depth_luma > bit_depth_luma) { errqueue->add_warning(DE265_WARNING_PCM_BITDEPTH_TOO_LARGE, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } if (pcm_sample_bit_depth_chroma > bit_depth_chroma) { errqueue->add_warning(DE265_WARNING_PCM_BITDEPTH_TOO_LARGE, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } } else { pcm_sample_bit_depth_luma = 0; pcm_sample_bit_depth_chroma = 0; log2_min_pcm_luma_coding_block_size = 0; log2_diff_max_min_pcm_luma_coding_block_size = 0; pcm_loop_filter_disable_flag = 0; } int num_short_term_ref_pic_sets; READ_VLC(num_short_term_ref_pic_sets, uvlc); if (num_short_term_ref_pic_sets < 0 || num_short_term_ref_pic_sets > 64) { errqueue->add_warning(DE265_WARNING_NUMBER_OF_SHORT_TERM_REF_PIC_SETS_OUT_OF_RANGE, false); return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } // --- allocate reference pic set --- // we do not allocate the ref-pic-set for the slice header here, but in the slice header itself ref_pic_sets.resize(num_short_term_ref_pic_sets); for (int i = 0; i < num_short_term_ref_pic_sets; i++) { bool success = read_short_term_ref_pic_set(errqueue,this,br, &ref_pic_sets[i], i, ref_pic_sets, false); if (!success) { return DE265_WARNING_SPS_HEADER_INVALID; } // dump_short_term_ref_pic_set(&(*ref_pic_sets)[i], fh); } long_term_ref_pics_present_flag = get_bits(br,1); if (long_term_ref_pics_present_flag) { READ_VLC(num_long_term_ref_pics_sps, uvlc); if (num_long_term_ref_pics_sps > MAX_NUM_LT_REF_PICS_SPS) { return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE; } for (int i = 0; i < num_long_term_ref_pics_sps; i++ ) { lt_ref_pic_poc_lsb_sps[i] = get_bits(br, log2_max_pic_order_cnt_lsb); used_by_curr_pic_lt_sps_flag[i] = get_bits(br,1); } } else { num_long_term_ref_pics_sps = 0; // NOTE: missing definition in standard ! } sps_temporal_mvp_enabled_flag = get_bits(br,1); strong_intra_smoothing_enable_flag = get_bits(br,1); vui_parameters_present_flag = get_bits(br,1); if (vui_parameters_present_flag) { de265_error err = vui.read(errqueue, br, this); if (err) { return err; } } sps_extension_present_flag = get_bits(br,1); if (sps_extension_present_flag) { sps_range_extension_flag = get_bits(br,1); sps_multilayer_extension_flag = get_bits(br,1); sps_extension_6bits = get_bits(br,6); } else { sps_range_extension_flag = 0; } if (sps_range_extension_flag) { de265_error err = range_extension.read(errqueue, br); if (err != DE265_OK) { return err; } } /* sps_extension_flag = get_bits(br,1); if (sps_extension_flag) { assert(false); } */ de265_error err = compute_derived_values(); if (err != DE265_OK) { return err; } sps_read = true; return DE265_OK; }
1
CVE-2022-1253
150
vulnerable
CWE-125
static int xar_get_numeric_from_xml_element(xmlTextReaderPtr reader, long * value) { const xmlChar * numstr; if (xmlTextReaderRead(reader) == 1 && xmlTextReaderNodeType(reader) == XML_READER_TYPE_TEXT) { numstr = xmlTextReaderConstValue(reader); if (numstr) { *value = atol((const char *)numstr); if (*value < 0) { cli_dbgmsg("cli_scanxar: XML element value %li\n", *value); return CL_EFORMAT; } return CL_SUCCESS; } } cli_dbgmsg("cli_scanxar: No text for XML element\n"); return CL_EFORMAT; }
0
CVE-2018-1000085
1,324
benign
CWE-125
static int xar_get_numeric_from_xml_element(xmlTextReaderPtr reader, size_t * value) { const xmlChar * numstr; ssize_t numval; if (xmlTextReaderRead(reader) == 1 && xmlTextReaderNodeType(reader) == XML_READER_TYPE_TEXT) { numstr = xmlTextReaderConstValue(reader); if (numstr) { numval = atol((const char *)numstr); if (numval < 0) { cli_dbgmsg("cli_scanxar: XML element value %li\n", *value); return CL_EFORMAT; } *value = numval; return CL_SUCCESS; } } cli_dbgmsg("cli_scanxar: No text for XML element\n"); return CL_EFORMAT; }
1
CVE-2018-1000085
1,324
vulnerable
CWE-125
mp_join_print(netdissect_options *ndo, const u_char *opt, u_int opt_len, u_char flags) { const struct mp_join *mpj = (const struct mp_join *) opt; if (!(opt_len == 12 && flags & TH_SYN) && !(opt_len == 16 && (flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) && !(opt_len == 24 && flags & TH_ACK)) return 0; if (opt_len != 24) { if (mpj->sub_b & MP_JOIN_B) ND_PRINT((ndo, " backup")); ND_PRINT((ndo, " id %u", mpj->addr_id)); } switch (opt_len) { case 12: /* SYN */ ND_PRINT((ndo, " token 0x%x" " nonce 0x%x", EXTRACT_32BITS(mpj->u.syn.token), EXTRACT_32BITS(mpj->u.syn.nonce))); break; case 16: /* SYN/ACK */ ND_PRINT((ndo, " hmac 0x%" PRIx64 " nonce 0x%x", EXTRACT_64BITS(mpj->u.synack.mac), EXTRACT_32BITS(mpj->u.synack.nonce))); break; case 24: {/* ACK */ size_t i; ND_PRINT((ndo, " hmac 0x")); for (i = 0; i < sizeof(mpj->u.ack.mac); ++i) ND_PRINT((ndo, "%02x", mpj->u.ack.mac[i])); } default: break; } return 1; }
0
CVE-2017-13040
2,281
benign
CWE-125
mp_join_print(netdissect_options *ndo, const u_char *opt, u_int opt_len, u_char flags) { const struct mp_join *mpj = (const struct mp_join *) opt; if (!(opt_len == 12 && (flags & TH_SYN)) && !(opt_len == 16 && (flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) && !(opt_len == 24 && (flags & TH_ACK))) return 0; if (opt_len != 24) { if (mpj->sub_b & MP_JOIN_B) ND_PRINT((ndo, " backup")); ND_PRINT((ndo, " id %u", mpj->addr_id)); } switch (opt_len) { case 12: /* SYN */ ND_PRINT((ndo, " token 0x%x" " nonce 0x%x", EXTRACT_32BITS(mpj->u.syn.token), EXTRACT_32BITS(mpj->u.syn.nonce))); break; case 16: /* SYN/ACK */ ND_PRINT((ndo, " hmac 0x%" PRIx64 " nonce 0x%x", EXTRACT_64BITS(mpj->u.synack.mac), EXTRACT_32BITS(mpj->u.synack.nonce))); break; case 24: {/* ACK */ size_t i; ND_PRINT((ndo, " hmac 0x")); for (i = 0; i < sizeof(mpj->u.ack.mac); ++i) ND_PRINT((ndo, "%02x", mpj->u.ack.mac[i])); } default: break; } return 1; }
1
CVE-2017-13040
2,281
vulnerable
CWE-125
name_parse(u8 *packet, int length, int *idx, char *name_out, int name_out_len) { int name_end = -1; int j = *idx; int ptr_count = 0; #define GET32(x) do { if (j + 4 > length) goto err; memcpy(&t32_, packet + j, 4); j += 4; x = ntohl(t32_); } while (0) #define GET16(x) do { if (j + 2 > length) goto err; memcpy(&t_, packet + j, 2); j += 2; x = ntohs(t_); } while (0) #define GET8(x) do { if (j >= length) goto err; x = packet[j++]; } while (0) char *cp = name_out; const char *const end = name_out + name_out_len; /* Normally, names are a series of length prefixed strings terminated */ /* with a length of 0 (the lengths are u8's < 63). */ /* However, the length can start with a pair of 1 bits and that */ /* means that the next 14 bits are a pointer within the current */ /* packet. */ for (;;) { u8 label_len; if (j >= length) return -1; GET8(label_len); if (!label_len) break; if (label_len & 0xc0) { u8 ptr_low; GET8(ptr_low); if (name_end < 0) name_end = j; j = (((int)label_len & 0x3f) << 8) + ptr_low; /* Make sure that the target offset is in-bounds. */ if (j < 0 || j >= length) return -1; /* If we've jumped more times than there are characters in the * message, we must have a loop. */ if (++ptr_count > length) return -1; continue; } if (label_len > 63) return -1; if (cp != name_out) { if (cp + 1 >= end) return -1; *cp++ = '.'; } if (cp + label_len >= end) return -1; memcpy(cp, packet + j, label_len); cp += label_len; j += label_len; } if (cp >= end) return -1; *cp = '\0'; if (name_end < 0) *idx = j; else *idx = name_end; return 0; err: return -1; }
0
CVE-2016-10195
2,220
benign
CWE-125
name_parse(u8 *packet, int length, int *idx, char *name_out, int name_out_len) { int name_end = -1; int j = *idx; int ptr_count = 0; #define GET32(x) do { if (j + 4 > length) goto err; memcpy(&t32_, packet + j, 4); j += 4; x = ntohl(t32_); } while (0) #define GET16(x) do { if (j + 2 > length) goto err; memcpy(&t_, packet + j, 2); j += 2; x = ntohs(t_); } while (0) #define GET8(x) do { if (j >= length) goto err; x = packet[j++]; } while (0) char *cp = name_out; const char *const end = name_out + name_out_len; /* Normally, names are a series of length prefixed strings terminated */ /* with a length of 0 (the lengths are u8's < 63). */ /* However, the length can start with a pair of 1 bits and that */ /* means that the next 14 bits are a pointer within the current */ /* packet. */ for (;;) { u8 label_len; GET8(label_len); if (!label_len) break; if (label_len & 0xc0) { u8 ptr_low; GET8(ptr_low); if (name_end < 0) name_end = j; j = (((int)label_len & 0x3f) << 8) + ptr_low; /* Make sure that the target offset is in-bounds. */ if (j < 0 || j >= length) return -1; /* If we've jumped more times than there are characters in the * message, we must have a loop. */ if (++ptr_count > length) return -1; continue; } if (label_len > 63) return -1; if (cp != name_out) { if (cp + 1 >= end) return -1; *cp++ = '.'; } if (cp + label_len >= end) return -1; if (j + label_len > length) return -1; memcpy(cp, packet + j, label_len); cp += label_len; j += label_len; } if (cp >= end) return -1; *cp = '\0'; if (name_end < 0) *idx = j; else *idx = name_end; return 0; err: return -1; }
1
CVE-2016-10195
2,220
vulnerable
CWE-20
void beforeSleep(struct aeEventLoop *eventLoop) { REDIS_NOTUSED(eventLoop); listNode *ln; redisClient *c; /* Awake clients that got all the swapped keys they requested */ if (server.vm_enabled && listLength(server.io_ready_clients)) { listIter li; listRewind(server.io_ready_clients,&li); while((ln = listNext(&li))) { c = ln->value; struct redisCommand *cmd; /* Resume the client. */ listDelNode(server.io_ready_clients,ln); c->flags &= (~REDIS_IO_WAIT); server.vm_blocked_clients--; aeCreateFileEvent(server.el, c->fd, AE_READABLE, readQueryFromClient, c); cmd = lookupCommand(c->argv[0]->ptr); redisAssert(cmd != NULL); call(c,cmd); resetClient(c); /* There may be more data to process in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } } /* Try to process pending commands for clients that were just unblocked. */ while (listLength(server.unblocked_clients)) { ln = listFirst(server.unblocked_clients); redisAssert(ln != NULL); c = ln->value; listDelNode(server.unblocked_clients,ln); /* Process remaining data in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } /* Write the AOF buffer on disk */ flushAppendOnlyFile(); }
0
CVE-2013-0178
720
benign
CWE-20
void beforeSleep(struct aeEventLoop *eventLoop) { REDIS_NOTUSED(eventLoop); listNode *ln; redisClient *c; /* Awake clients that got all the on disk keys they requested */ if (server.ds_enabled && listLength(server.io_ready_clients)) { listIter li; listRewind(server.io_ready_clients,&li); while((ln = listNext(&li))) { c = ln->value; struct redisCommand *cmd; /* Resume the client. */ listDelNode(server.io_ready_clients,ln); c->flags &= (~REDIS_IO_WAIT); server.cache_blocked_clients--; aeCreateFileEvent(server.el, c->fd, AE_READABLE, readQueryFromClient, c); cmd = lookupCommand(c->argv[0]->ptr); redisAssert(cmd != NULL); call(c,cmd); resetClient(c); /* There may be more data to process in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } } /* Try to process pending commands for clients that were just unblocked. */ while (listLength(server.unblocked_clients)) { ln = listFirst(server.unblocked_clients); redisAssert(ln != NULL); c = ln->value; listDelNode(server.unblocked_clients,ln); /* Process remaining data in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } /* Write the AOF buffer on disk */ flushAppendOnlyFile(); }
1
CVE-2013-0178
720
vulnerable
CWE-119
safe_fprintf(FILE *f, const char *fmt, ...) { char fmtbuff_stack[256]; /* Place to format the printf() string. */ char outbuff[256]; /* Buffer for outgoing characters. */ char *fmtbuff_heap; /* If fmtbuff_stack is too small, we use malloc */ char *fmtbuff; /* Pointer to fmtbuff_stack or fmtbuff_heap. */ int fmtbuff_length; int length, n; va_list ap; const char *p; unsigned i; wchar_t wc; char try_wc; /* Use a stack-allocated buffer if we can, for speed and safety. */ fmtbuff_heap = NULL; fmtbuff_length = sizeof(fmtbuff_stack); fmtbuff = fmtbuff_stack; /* Try formatting into the stack buffer. */ va_start(ap, fmt); length = vsnprintf(fmtbuff, fmtbuff_length, fmt, ap); va_end(ap); /* If the result was too large, allocate a buffer on the heap. */ while (length < 0 || length >= fmtbuff_length) { if (length >= fmtbuff_length) fmtbuff_length = length+1; else if (fmtbuff_length < 8192) fmtbuff_length *= 2; else if (fmtbuff_length < 1000000) fmtbuff_length += fmtbuff_length / 4; else { length = fmtbuff_length; fmtbuff_heap[length-1] = '\0'; break; } free(fmtbuff_heap); fmtbuff_heap = malloc(fmtbuff_length); /* Reformat the result into the heap buffer if we can. */ if (fmtbuff_heap != NULL) { fmtbuff = fmtbuff_heap; va_start(ap, fmt); length = vsnprintf(fmtbuff, fmtbuff_length, fmt, ap); va_end(ap); } else { /* Leave fmtbuff pointing to the truncated * string in fmtbuff_stack. */ length = sizeof(fmtbuff_stack) - 1; break; } } /* Note: mbrtowc() has a cleaner API, but mbtowc() seems a bit * more portable, so we use that here instead. */ if (mbtowc(NULL, NULL, 1) == -1) { /* Reset the shift state. */ /* mbtowc() should never fail in practice, but * handle the theoretical error anyway. */ free(fmtbuff_heap); return; } /* Write data, expanding unprintable characters. */ p = fmtbuff; i = 0; try_wc = 1; while (*p != '\0') { /* Convert to wide char, test if the wide * char is printable in the current locale. */ if (try_wc && (n = mbtowc(&wc, p, length)) != -1) { length -= n; if (iswprint(wc) && wc != L'\\') { /* Printable, copy the bytes through. */ while (n-- > 0) outbuff[i++] = *p++; } else { /* Not printable, format the bytes. */ while (n-- > 0) i += (unsigned)bsdtar_expand_char( outbuff, i, *p++); } } else { /* After any conversion failure, don't bother * trying to convert the rest. */ i += (unsigned)bsdtar_expand_char(outbuff, i, *p++); try_wc = 0; } /* If our output buffer is full, dump it and keep going. */ if (i > (sizeof(outbuff) - 20)) { outbuff[i] = '\0'; fprintf(f, "%s", outbuff); i = 0; } } outbuff[i] = '\0'; fprintf(f, "%s", outbuff); /* If we allocated a heap-based formatting buffer, free it now. */ free(fmtbuff_heap); }
0
CVE-2016-8687
885
benign
CWE-119
safe_fprintf(FILE *f, const char *fmt, ...) { char fmtbuff_stack[256]; /* Place to format the printf() string. */ char outbuff[256]; /* Buffer for outgoing characters. */ char *fmtbuff_heap; /* If fmtbuff_stack is too small, we use malloc */ char *fmtbuff; /* Pointer to fmtbuff_stack or fmtbuff_heap. */ int fmtbuff_length; int length, n; va_list ap; const char *p; unsigned i; wchar_t wc; char try_wc; /* Use a stack-allocated buffer if we can, for speed and safety. */ fmtbuff_heap = NULL; fmtbuff_length = sizeof(fmtbuff_stack); fmtbuff = fmtbuff_stack; /* Try formatting into the stack buffer. */ va_start(ap, fmt); length = vsnprintf(fmtbuff, fmtbuff_length, fmt, ap); va_end(ap); /* If the result was too large, allocate a buffer on the heap. */ while (length < 0 || length >= fmtbuff_length) { if (length >= fmtbuff_length) fmtbuff_length = length+1; else if (fmtbuff_length < 8192) fmtbuff_length *= 2; else if (fmtbuff_length < 1000000) fmtbuff_length += fmtbuff_length / 4; else { length = fmtbuff_length; fmtbuff_heap[length-1] = '\0'; break; } free(fmtbuff_heap); fmtbuff_heap = malloc(fmtbuff_length); /* Reformat the result into the heap buffer if we can. */ if (fmtbuff_heap != NULL) { fmtbuff = fmtbuff_heap; va_start(ap, fmt); length = vsnprintf(fmtbuff, fmtbuff_length, fmt, ap); va_end(ap); } else { /* Leave fmtbuff pointing to the truncated * string in fmtbuff_stack. */ length = sizeof(fmtbuff_stack) - 1; break; } } /* Note: mbrtowc() has a cleaner API, but mbtowc() seems a bit * more portable, so we use that here instead. */ if (mbtowc(NULL, NULL, 1) == -1) { /* Reset the shift state. */ /* mbtowc() should never fail in practice, but * handle the theoretical error anyway. */ free(fmtbuff_heap); return; } /* Write data, expanding unprintable characters. */ p = fmtbuff; i = 0; try_wc = 1; while (*p != '\0') { /* Convert to wide char, test if the wide * char is printable in the current locale. */ if (try_wc && (n = mbtowc(&wc, p, length)) != -1) { length -= n; if (iswprint(wc) && wc != L'\\') { /* Printable, copy the bytes through. */ while (n-- > 0) outbuff[i++] = *p++; } else { /* Not printable, format the bytes. */ while (n-- > 0) i += (unsigned)bsdtar_expand_char( outbuff, i, *p++); } } else { /* After any conversion failure, don't bother * trying to convert the rest. */ i += (unsigned)bsdtar_expand_char(outbuff, i, *p++); try_wc = 0; } /* If our output buffer is full, dump it and keep going. */ if (i > (sizeof(outbuff) - 128)) { outbuff[i] = '\0'; fprintf(f, "%s", outbuff); i = 0; } } outbuff[i] = '\0'; fprintf(f, "%s", outbuff); /* If we allocated a heap-based formatting buffer, free it now. */ free(fmtbuff_heap); }
1
CVE-2016-8687
885
vulnerable
CWE-476
GlyphCache::GlyphCache(const Face & face, const uint32 face_options) : _glyph_loader(new Loader(face, bool(face_options & gr_face_dumbRendering))), _glyphs(_glyph_loader && *_glyph_loader && _glyph_loader->num_glyphs() ? grzeroalloc<const GlyphFace *>(_glyph_loader->num_glyphs()) : 0), _boxes(_glyph_loader && _glyph_loader->has_boxes() && _glyph_loader->num_glyphs() ? grzeroalloc<GlyphBox *>(_glyph_loader->num_glyphs()) : 0), _num_glyphs(_glyphs ? _glyph_loader->num_glyphs() : 0), _num_attrs(_glyphs ? _glyph_loader->num_attrs() : 0), _upem(_glyphs ? _glyph_loader->units_per_em() : 0) { if ((face_options & gr_face_preloadGlyphs) && _glyph_loader && _glyphs) { int numsubs = 0; GlyphFace * const glyphs = new GlyphFace [_num_glyphs]; if (!glyphs) return; // The 0 glyph is definately required. _glyphs[0] = _glyph_loader->read_glyph(0, glyphs[0], &numsubs); // glyphs[0] has the same address as the glyphs array just allocated, // thus assigning the &glyphs[0] to _glyphs[0] means _glyphs[0] points // to the entire array. const GlyphFace * loaded = _glyphs[0]; for (uint16 gid = 1; loaded && gid != _num_glyphs; ++gid) _glyphs[gid] = loaded = _glyph_loader->read_glyph(gid, glyphs[gid], &numsubs); if (!loaded) { _glyphs[0] = 0; delete [] glyphs; } else if (numsubs > 0 && _boxes) { GlyphBox * boxes = (GlyphBox *)gralloc<char>(_num_glyphs * sizeof(GlyphBox) + numsubs * 8 * sizeof(float)); GlyphBox * currbox = boxes; for (uint16 gid = 0; currbox && gid != _num_glyphs; ++gid) { _boxes[gid] = currbox; currbox = _glyph_loader->read_box(gid, currbox, *_glyphs[gid]); } if (!currbox) { free(boxes); _boxes[0] = 0; } } delete _glyph_loader; _glyph_loader = 0; } if (_glyphs && glyph(0) == 0) { free(_glyphs); _glyphs = 0; if (_boxes) { free(_boxes); _boxes = 0; } _num_glyphs = _num_attrs = _upem = 0; } }
0
CVE-2018-7999
3,088
benign
CWE-476
GlyphCache::GlyphCache(const Face & face, const uint32 face_options) : _glyph_loader(new Loader(face)), _glyphs(_glyph_loader && *_glyph_loader && _glyph_loader->num_glyphs() ? grzeroalloc<const GlyphFace *>(_glyph_loader->num_glyphs()) : 0), _boxes(_glyph_loader && _glyph_loader->has_boxes() && _glyph_loader->num_glyphs() ? grzeroalloc<GlyphBox *>(_glyph_loader->num_glyphs()) : 0), _num_glyphs(_glyphs ? _glyph_loader->num_glyphs() : 0), _num_attrs(_glyphs ? _glyph_loader->num_attrs() : 0), _upem(_glyphs ? _glyph_loader->units_per_em() : 0) { if ((face_options & gr_face_preloadGlyphs) && _glyph_loader && _glyphs) { int numsubs = 0; GlyphFace * const glyphs = new GlyphFace [_num_glyphs]; if (!glyphs) return; // The 0 glyph is definately required. _glyphs[0] = _glyph_loader->read_glyph(0, glyphs[0], &numsubs); // glyphs[0] has the same address as the glyphs array just allocated, // thus assigning the &glyphs[0] to _glyphs[0] means _glyphs[0] points // to the entire array. const GlyphFace * loaded = _glyphs[0]; for (uint16 gid = 1; loaded && gid != _num_glyphs; ++gid) _glyphs[gid] = loaded = _glyph_loader->read_glyph(gid, glyphs[gid], &numsubs); if (!loaded) { _glyphs[0] = 0; delete [] glyphs; } else if (numsubs > 0 && _boxes) { GlyphBox * boxes = (GlyphBox *)gralloc<char>(_num_glyphs * sizeof(GlyphBox) + numsubs * 8 * sizeof(float)); GlyphBox * currbox = boxes; for (uint16 gid = 0; currbox && gid != _num_glyphs; ++gid) { _boxes[gid] = currbox; currbox = _glyph_loader->read_box(gid, currbox, *_glyphs[gid]); } if (!currbox) { free(boxes); _boxes[0] = 0; } } delete _glyph_loader; _glyph_loader = 0; } if (_glyphs && glyph(0) == 0) { free(_glyphs); _glyphs = 0; if (_boxes) { free(_boxes); _boxes = 0; } _num_glyphs = _num_attrs = _upem = 0; } }
1
CVE-2018-7999
3,088
vulnerable
CWE-119
void * pvPortMalloc( size_t xWantedSize ) { BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; void * pvReturn = NULL; /* The heap must be initialised before the first call to * prvPortMalloc(). */ configASSERT( pxEnd ); vTaskSuspendAll(); { /* Check the requested block size is not so large that the top bit is * set. The top bit of the block size member of the BlockLink_t structure * is used to determine who owns the block - the application or the * kernel, so it must be free. */ if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) { /* The wanted size is increased so it can contain a BlockLink_t * structure in addition to the requested amount of bytes. */ if( xWantedSize > 0 ) { xWantedSize += xHeapStructSize; /* Ensure that blocks are always aligned to the required number * of bytes. */ if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) { /* Byte alignment required. */ xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) { /* Traverse the list from the start (lowest address) block until * one of adequate size is found. */ pxPreviousBlock = &xStart; pxBlock = xStart.pxNextFreeBlock; while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) { pxPreviousBlock = pxBlock; pxBlock = pxBlock->pxNextFreeBlock; } /* If the end marker was reached then a block of adequate size * was not found. */ if( pxBlock != pxEnd ) { /* Return the memory space pointed to - jumping over the * BlockLink_t structure at its start. */ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); /* This block is being returned for use so must be taken out * of the list of free blocks. */ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; /* If the block is larger than required it can be split into * two. */ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) { /* This block is to be split into two. Create a new * block following the number of bytes requested. The void * cast is used to prevent byte alignment warnings from the * compiler. */ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); /* Calculate the sizes of two blocks split from the * single block. */ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; pxBlock->xBlockSize = xWantedSize; /* Insert the new block into the list of free blocks. */ prvInsertBlockIntoFreeList( ( pxNewBlockLink ) ); } else { mtCOVERAGE_TEST_MARKER(); } xFreeBytesRemaining -= pxBlock->xBlockSize; if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) { xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; } else { mtCOVERAGE_TEST_MARKER(); } /* The block is being returned - it is allocated and owned * by the application and has no "next" block. */ pxBlock->xBlockSize |= xBlockAllocatedBit; pxBlock->pxNextFreeBlock = NULL; xNumberOfSuccessfulAllocations++; } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } traceMALLOC( pvReturn, xWantedSize ); } ( void ) xTaskResumeAll(); #if ( configUSE_MALLOC_FAILED_HOOK == 1 ) { if( pvReturn == NULL ) { extern void vApplicationMallocFailedHook( void ); vApplicationMallocFailedHook(); } else { mtCOVERAGE_TEST_MARKER(); } } #endif /* if ( configUSE_MALLOC_FAILED_HOOK == 1 ) */ return pvReturn; }
0
CVE-2021-32020
1,535
benign
CWE-119
void * pvPortMalloc( size_t xWantedSize ) { BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; void * pvReturn = NULL; /* The heap must be initialised before the first call to * prvPortMalloc(). */ configASSERT( pxEnd ); vTaskSuspendAll(); { /* Check the requested block size is not so large that the top bit is * set. The top bit of the block size member of the BlockLink_t structure * is used to determine who owns the block - the application or the * kernel, so it must be free. */ if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) { /* The wanted size is increased so it can contain a BlockLink_t * structure in addition to the requested amount of bytes. */ if( ( xWantedSize > 0 ) && ( ( xWantedSize + xHeapStructSize ) > xWantedSize ) ) /* Overflow check */ { xWantedSize += xHeapStructSize; /* Ensure that blocks are always aligned */ if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) { /* Byte alignment required. Check for overflow */ if( ( xWantedSize + ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ) ) > xWantedSize ) { xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); } else { xWantedSize = 0; } } else { mtCOVERAGE_TEST_MARKER(); } } else { xWantedSize = 0; } if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) { /* Traverse the list from the start (lowest address) block until * one of adequate size is found. */ pxPreviousBlock = &xStart; pxBlock = xStart.pxNextFreeBlock; while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) { pxPreviousBlock = pxBlock; pxBlock = pxBlock->pxNextFreeBlock; } /* If the end marker was reached then a block of adequate size * was not found. */ if( pxBlock != pxEnd ) { /* Return the memory space pointed to - jumping over the * BlockLink_t structure at its start. */ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); /* This block is being returned for use so must be taken out * of the list of free blocks. */ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; /* If the block is larger than required it can be split into * two. */ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) { /* This block is to be split into two. Create a new * block following the number of bytes requested. The void * cast is used to prevent byte alignment warnings from the * compiler. */ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); /* Calculate the sizes of two blocks split from the * single block. */ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; pxBlock->xBlockSize = xWantedSize; /* Insert the new block into the list of free blocks. */ prvInsertBlockIntoFreeList( ( pxNewBlockLink ) ); } else { mtCOVERAGE_TEST_MARKER(); } xFreeBytesRemaining -= pxBlock->xBlockSize; if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) { xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; } else { mtCOVERAGE_TEST_MARKER(); } /* The block is being returned - it is allocated and owned * by the application and has no "next" block. */ pxBlock->xBlockSize |= xBlockAllocatedBit; pxBlock->pxNextFreeBlock = NULL; xNumberOfSuccessfulAllocations++; } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } traceMALLOC( pvReturn, xWantedSize ); } ( void ) xTaskResumeAll(); #if ( configUSE_MALLOC_FAILED_HOOK == 1 ) { if( pvReturn == NULL ) { extern void vApplicationMallocFailedHook( void ); vApplicationMallocFailedHook(); } else { mtCOVERAGE_TEST_MARKER(); } } #endif /* if ( configUSE_MALLOC_FAILED_HOOK == 1 ) */ return pvReturn; }
1
CVE-2021-32020
1,535
vulnerable
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
0
CVE-2020-15211
876
benign
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
1
CVE-2020-15211
876
vulnerable
CWE-125
PJ_DEF(pj_status_t) pjmedia_rtcp_fb_build_pli( pjmedia_rtcp_session *session, void *buf, pj_size_t *length) { pjmedia_rtcp_common *hdr; unsigned len; PJ_ASSERT_RETURN(session && buf && length, PJ_EINVAL); len = 12; if (len > *length) return PJ_ETOOSMALL; /* Build RTCP-FB PLI header */ hdr = (pjmedia_rtcp_common*)buf; pj_memcpy(hdr, &session->rtcp_rr_pkt.common, sizeof(*hdr)); hdr->pt = RTCP_PSFB; hdr->count = 1; /* FMT = 1 */ hdr->length = pj_htons((pj_uint16_t)(len/4 - 1)); /* Finally */ *length = len; return PJ_SUCCESS; }
0
CVE-2022-24786
1,174
benign
CWE-125
PJ_DEF(pj_status_t) pjmedia_rtcp_fb_build_pli( pjmedia_rtcp_session *session, void *buf, pj_size_t *length) { pjmedia_rtcp_fb_common *hdr; unsigned len; PJ_ASSERT_RETURN(session && buf && length, PJ_EINVAL); len = 12; if (len > *length) return PJ_ETOOSMALL; /* Build RTCP-FB PLI header */ hdr = (pjmedia_rtcp_fb_common*)buf; pj_memcpy(hdr, &session->rtcp_fb_com, sizeof(*hdr)); hdr->rtcp_common.pt = RTCP_PSFB; hdr->rtcp_common.count = 1; /* FMT = 1 */ hdr->rtcp_common.length = pj_htons((pj_uint16_t)(len/4 - 1)); /* Finally */ *length = len; return PJ_SUCCESS; }
1
CVE-2022-24786
1,174
vulnerable
CWE-362
static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; struct net *net; struct bcm_sock *bo; struct bcm_op *op, *next; if (!sk) return 0; net = sock_net(sk); bo = bcm_sk(sk); /* remove bcm_ops, timer, rx_unregister(), etc. */ spin_lock(&bcm_notifier_lock); while (bcm_busy_notifier == bo) { spin_unlock(&bcm_notifier_lock); schedule_timeout_uninterruptible(1); spin_lock(&bcm_notifier_lock); } list_del(&bo->notifier); spin_unlock(&bcm_notifier_lock); lock_sock(sk); list_for_each_entry_safe(op, next, &bo->tx_ops, list) bcm_remove_op(op); list_for_each_entry_safe(op, next, &bo->rx_ops, list) { /* * Don't care if we're bound or not (due to netdev problems) * can_rx_unregister() is always a save thing to do here. */ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(net, NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); bcm_remove_op(op); } #if IS_ENABLED(CONFIG_PROC_FS) /* remove procfs entry */ if (net->can.bcmproc_dir && bo->bcm_proc_read) remove_proc_entry(bo->procname, net->can.bcmproc_dir); #endif /* CONFIG_PROC_FS */ /* remove device reference */ if (bo->bound) { bo->bound = 0; bo->ifindex = 0; } sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; }
0
CVE-2021-3609
3,035
benign
CWE-362
static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; struct net *net; struct bcm_sock *bo; struct bcm_op *op, *next; if (!sk) return 0; net = sock_net(sk); bo = bcm_sk(sk); /* remove bcm_ops, timer, rx_unregister(), etc. */ spin_lock(&bcm_notifier_lock); while (bcm_busy_notifier == bo) { spin_unlock(&bcm_notifier_lock); schedule_timeout_uninterruptible(1); spin_lock(&bcm_notifier_lock); } list_del(&bo->notifier); spin_unlock(&bcm_notifier_lock); lock_sock(sk); list_for_each_entry_safe(op, next, &bo->tx_ops, list) bcm_remove_op(op); list_for_each_entry_safe(op, next, &bo->rx_ops, list) { /* * Don't care if we're bound or not (due to netdev problems) * can_rx_unregister() is always a save thing to do here. */ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(net, NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); } synchronize_rcu(); list_for_each_entry_safe(op, next, &bo->rx_ops, list) bcm_remove_op(op); #if IS_ENABLED(CONFIG_PROC_FS) /* remove procfs entry */ if (net->can.bcmproc_dir && bo->bcm_proc_read) remove_proc_entry(bo->procname, net->can.bcmproc_dir); #endif /* CONFIG_PROC_FS */ /* remove device reference */ if (bo->bound) { bo->bound = 0; bo->ifindex = 0; } sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; }
1
CVE-2021-3609
3,035
vulnerable
CWE-362
mptctl_eventreport (unsigned long arg) { struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg; struct mpt_ioctl_eventreport karg; MPT_ADAPTER *ioc; int iocnum; int numBytes, maxEvents, max; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventreport - " "Unable to read in mpt_ioctl_eventreport struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n", ioc->name)); numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents; /* If fewer than 1 event is requested, there must have * been some type of error. */ if ((max < 1) || !ioc->events) return -ENODATA; /* reset this flag so SIGIO can restart */ ioc->aen_event_read_flag=0; /* Copy the data from kernel memory to user memory */ numBytes = max * sizeof(MPT_IOCTL_EVENTS); if (copy_to_user(uarg->eventData, ioc->events, numBytes)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventreport - " "Unable to write out mpt_ioctl_eventreport struct @ %p\n", ioc->name, __FILE__, __LINE__, ioc->events); return -EFAULT; } return 0; }
0
CVE-2020-12652
809
benign
CWE-362
mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg; struct mpt_ioctl_eventreport karg; int numBytes, maxEvents, max; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventreport - " "Unable to read in mpt_ioctl_eventreport struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n", ioc->name)); numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents; /* If fewer than 1 event is requested, there must have * been some type of error. */ if ((max < 1) || !ioc->events) return -ENODATA; /* reset this flag so SIGIO can restart */ ioc->aen_event_read_flag=0; /* Copy the data from kernel memory to user memory */ numBytes = max * sizeof(MPT_IOCTL_EVENTS); if (copy_to_user(uarg->eventData, ioc->events, numBytes)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventreport - " "Unable to write out mpt_ioctl_eventreport struct @ %p\n", ioc->name, __FILE__, __LINE__, ioc->events); return -EFAULT; } return 0; }
1
CVE-2020-12652
809
vulnerable
CWE-416
static GF_Err BM_ParseGlobalQuantizer(GF_BifsDecoder *codec, GF_BitStream *bs, GF_List *com_list) { GF_Node *node; GF_Command *com; GF_CommandField *inf; node = gf_bifs_dec_node(codec, bs, NDT_SFWorldNode); if (!node) return GF_NON_COMPLIANT_BITSTREAM; /*reset global QP*/ if (codec->scenegraph->global_qp) { gf_node_unregister(codec->scenegraph->global_qp, NULL); } codec->ActiveQP = NULL; codec->scenegraph->global_qp = NULL; if (gf_node_get_tag(node) != TAG_MPEG4_QuantizationParameter) { gf_node_unregister(node, NULL); return GF_NON_COMPLIANT_BITSTREAM; } /*register global QP*/ codec->ActiveQP = (M_QuantizationParameter *) node; codec->ActiveQP->isLocal = 0; codec->scenegraph->global_qp = node; /*register TWICE: once for the command, and for the scenegraph globalQP*/ node->sgprivate->num_instances = 2; com = gf_sg_command_new(codec->current_graph, GF_SG_GLOBAL_QUANTIZER); inf = gf_sg_command_field_new(com); inf->new_node = node; inf->field_ptr = &inf->new_node; inf->fieldType = GF_SG_VRML_SFNODE; gf_list_add(com_list, com); return GF_OK; }
0
CVE-2022-1795
3,186
benign
CWE-416
static GF_Err BM_ParseGlobalQuantizer(GF_BifsDecoder *codec, GF_BitStream *bs, GF_List *com_list) { GF_Node *node; GF_Command *com; GF_CommandField *inf; node = gf_bifs_dec_node(codec, bs, NDT_SFWorldNode); if (!node) return GF_NON_COMPLIANT_BITSTREAM; /*reset global QP*/ if (codec->scenegraph->global_qp) { gf_node_unregister(codec->scenegraph->global_qp, NULL); } codec->ActiveQP = NULL; codec->scenegraph->global_qp = NULL; if (gf_node_get_tag(node) != TAG_MPEG4_QuantizationParameter) { //if node was just created (num_instances == 0), unregister //otherwise (USE node) don't do anything if (!node->sgprivate->num_instances) { node->sgprivate->num_instances = 1; gf_node_unregister(node, NULL); } return GF_NON_COMPLIANT_BITSTREAM; } /*register global QP*/ codec->ActiveQP = (M_QuantizationParameter *) node; codec->ActiveQP->isLocal = 0; codec->scenegraph->global_qp = node; /*register TWICE: once for the command, and for the scenegraph globalQP*/ gf_node_unregister(node, NULL); gf_node_unregister(node, NULL); com = gf_sg_command_new(codec->current_graph, GF_SG_GLOBAL_QUANTIZER); inf = gf_sg_command_field_new(com); inf->new_node = node; inf->field_ptr = &inf->new_node; inf->fieldType = GF_SG_VRML_SFNODE; gf_list_add(com_list, com); return GF_OK; }
1
CVE-2022-1795
3,186
vulnerable
CWE-125
struct r_bin_dyldcache_obj_t* r_bin_dyldcache_from_bytes_new(const ut8* buf, ut64 size) { struct r_bin_dyldcache_obj_t *bin; if (!(bin = malloc (sizeof (struct r_bin_dyldcache_obj_t)))) { return NULL; } memset (bin, 0, sizeof (struct r_bin_dyldcache_obj_t)); if (!buf) { return r_bin_dyldcache_free (bin); } bin->b = r_buf_new(); if (!r_buf_set_bytes (bin->b, buf, size)) { return r_bin_dyldcache_free (bin); } if (!r_bin_dyldcache_init (bin)) { return r_bin_dyldcache_free (bin); } bin->size = size; return bin; }
0
CVE-2018-20458
591
benign
CWE-125
struct r_bin_dyldcache_obj_t* r_bin_dyldcache_from_bytes_new(const ut8* buf, ut64 size) { struct r_bin_dyldcache_obj_t *bin = R_NEW0 (struct r_bin_dyldcache_obj_t); if (!bin) { return NULL; } if (!buf) { return r_bin_dyldcache_free (bin); } bin->b = r_buf_new (); if (!bin->b || !r_buf_set_bytes (bin->b, buf, size)) { return r_bin_dyldcache_free (bin); } if (!r_bin_dyldcache_init (bin)) { return r_bin_dyldcache_free (bin); } bin->size = size; return bin; }
1
CVE-2018-20458
591
vulnerable
CWE-362
mptctl_mpt_command (unsigned long arg) { struct mpt_ioctl_command __user *uarg = (void __user *) arg; struct mpt_ioctl_command karg; MPT_ADAPTER *ioc; int iocnum; int rc; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_mpt_command - " "Unable to read in mpt_ioctl_command struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } rc = mptctl_do_mpt_command (karg, &uarg->MF); return rc; }
0
CVE-2020-12652
815
benign
CWE-362
mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_command __user *uarg = (void __user *) arg; struct mpt_ioctl_command karg; int rc; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_mpt_command - " "Unable to read in mpt_ioctl_command struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF); return rc; }
1
CVE-2020-12652
815
vulnerable
CWE-20
int net_get(int s, void *arg, int *len) { struct net_hdr nh; int plen; if (net_read_exact(s, &nh, sizeof(nh)) == -1) { return -1; } plen = ntohl(nh.nh_len); if (!(plen <= *len)) printf("PLEN %d type %d len %d\n", plen, nh.nh_type, *len); assert(plen <= *len); /* XXX */ *len = plen; if ((*len) && (net_read_exact(s, arg, *len) == -1)) { return -1; } return nh.nh_type; }
0
CVE-2014-8324
464
benign
CWE-20
int net_get(int s, void *arg, int *len) { struct net_hdr nh; int plen; if (net_read_exact(s, &nh, sizeof(nh)) == -1) { return -1; } plen = ntohl(nh.nh_len); if (!(plen <= *len)) printf("PLEN %d type %d len %d\n", plen, nh.nh_type, *len); assert(plen <= *len && plen > 0); /* XXX */ *len = plen; if ((*len) && (net_read_exact(s, arg, *len) == -1)) { return -1; } return nh.nh_type; }
1
CVE-2014-8324
464
vulnerable
CWE-125
ast_for_with_stmt(struct compiling *c, const node *n0, bool is_async) { const node * const n = is_async ? CHILD(n0, 1) : n0; int i, n_items, end_lineno, end_col_offset; asdl_seq *items, *body; REQ(n, with_stmt); n_items = (NCH(n) - 2) / 2; items = _Py_asdl_seq_new(n_items, c->c_arena); if (!items) return NULL; for (i = 1; i < NCH(n) - 2; i += 2) { withitem_ty item = ast_for_with_item(c, CHILD(n, i)); if (!item) return NULL; asdl_seq_SET(items, (i - 1) / 2, item); } body = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!body) return NULL; get_last_end_pos(body, &end_lineno, &end_col_offset); if (is_async) return AsyncWith(items, body, LINENO(n0), n0->n_col_offset, end_lineno, end_col_offset, c->c_arena); else return With(items, body, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); }
0
CVE-2019-19274
1,806
benign
CWE-125
ast_for_with_stmt(struct compiling *c, const node *n0, bool is_async) { const node * const n = is_async ? CHILD(n0, 1) : n0; int i, n_items, nch_minus_type, has_type_comment, end_lineno, end_col_offset; asdl_seq *items, *body; string type_comment; REQ(n, with_stmt); has_type_comment = TYPE(CHILD(n, NCH(n) - 2)) == TYPE_COMMENT; nch_minus_type = NCH(n) - has_type_comment; n_items = (nch_minus_type - 2) / 2; items = _Py_asdl_seq_new(n_items, c->c_arena); if (!items) return NULL; for (i = 1; i < nch_minus_type - 2; i += 2) { withitem_ty item = ast_for_with_item(c, CHILD(n, i)); if (!item) return NULL; asdl_seq_SET(items, (i - 1) / 2, item); } body = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!body) return NULL; get_last_end_pos(body, &end_lineno, &end_col_offset); if (has_type_comment) { type_comment = NEW_TYPE_COMMENT(CHILD(n, NCH(n) - 2)); if (!type_comment) return NULL; } else type_comment = NULL; if (is_async) return AsyncWith(items, body, type_comment, LINENO(n0), n0->n_col_offset, end_lineno, end_col_offset, c->c_arena); else return With(items, body, type_comment, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); }
1
CVE-2019-19274
1,806
vulnerable
CWE-119
unsigned long lh_char_hash(const void *k) { unsigned int h = 0; const char* data = (const char*)k; while( *data!=0 ) h = h*129 + (unsigned int)(*data++) + LH_PRIME; return h; }
0
CVE-2013-6370
238
benign
CWE-119
unsigned long lh_char_hash(const void *k) { static volatile int random_seed = -1; if (random_seed == -1) { int seed; /* we can't use -1 as it is the unitialized sentinel */ while ((seed = json_c_get_random_seed()) == -1); #if defined __GNUC__ __sync_val_compare_and_swap(&random_seed, -1, seed); #elif defined _MSC_VER InterlockedCompareExchange(&random_seed, seed, -1); #else #warning "racy random seed initializtion if used by multiple threads" random_seed = seed; /* potentially racy */ #endif } return hashlittle((const char*)k, strlen((const char*)k), random_seed); }
1
CVE-2013-6370
238
vulnerable
CWE-476
*/ static void php_wddx_serialize_object(wddx_packet *packet, zval *obj) { /* OBJECTS_FIXME */ zval **ent, *fname, **varname; zval *retval = NULL; const char *key; ulong idx; char tmp_buf[WDDX_BUF_LEN]; HashTable *objhash, *sleephash; TSRMLS_FETCH(); MAKE_STD_ZVAL(fname); ZVAL_STRING(fname, "__sleep", 1); /* * We try to call __sleep() method on object. It's supposed to return an * array of property names to be serialized. */ if (call_user_function_ex(CG(function_table), &obj, fname, &retval, 0, 0, 1, NULL TSRMLS_CC) == SUCCESS) { if (retval && (sleephash = HASH_OF(retval))) { PHP_CLASS_ATTRIBUTES; PHP_SET_CLASS_ATTRIBUTES(obj); php_wddx_add_chunk_static(packet, WDDX_STRUCT_S); snprintf(tmp_buf, WDDX_BUF_LEN, WDDX_VAR_S, PHP_CLASS_NAME_VAR); php_wddx_add_chunk(packet, tmp_buf); php_wddx_add_chunk_static(packet, WDDX_STRING_S); php_wddx_add_chunk_ex(packet, class_name, name_len); php_wddx_add_chunk_static(packet, WDDX_STRING_E); php_wddx_add_chunk_static(packet, WDDX_VAR_E); PHP_CLEANUP_CLASS_ATTRIBUTES(); objhash = HASH_OF(obj); for (zend_hash_internal_pointer_reset(sleephash); zend_hash_get_current_data(sleephash, (void **)&varname) == SUCCESS; zend_hash_move_forward(sleephash)) { if (Z_TYPE_PP(varname) != IS_STRING) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "__sleep should return an array only containing the names of instance-variables to serialize."); continue; } if (zend_hash_find(objhash, Z_STRVAL_PP(varname), Z_STRLEN_PP(varname)+1, (void **)&ent) == SUCCESS) { php_wddx_serialize_var(packet, *ent, Z_STRVAL_PP(varname), Z_STRLEN_PP(varname) TSRMLS_CC); } } php_wddx_add_chunk_static(packet, WDDX_STRUCT_E); } } else { uint key_len; PHP_CLASS_ATTRIBUTES; PHP_SET_CLASS_ATTRIBUTES(obj); php_wddx_add_chunk_static(packet, WDDX_STRUCT_S); snprintf(tmp_buf, WDDX_BUF_LEN, WDDX_VAR_S, PHP_CLASS_NAME_VAR); php_wddx_add_chunk(packet, tmp_buf); php_wddx_add_chunk_static(packet, WDDX_STRING_S); php_wddx_add_chunk_ex(packet, class_name, name_len); php_wddx_add_chunk_static(packet, WDDX_STRING_E); php_wddx_add_chunk_static(packet, WDDX_VAR_E); PHP_CLEANUP_CLASS_ATTRIBUTES(); objhash = HASH_OF(obj); for (zend_hash_internal_pointer_reset(objhash); zend_hash_get_current_data(objhash, (void**)&ent) == SUCCESS; zend_hash_move_forward(objhash)) { if (*ent == obj) { continue; } if (zend_hash_get_current_key_ex(objhash, &key, &key_len, &idx, 0, NULL) == HASH_KEY_IS_STRING) { const char *class_name, *prop_name; zend_unmangle_property_name(key, key_len-1, &class_name, &prop_name); php_wddx_serialize_var(packet, *ent, prop_name, strlen(prop_name)+1 TSRMLS_CC); } else { key_len = slprintf(tmp_buf, sizeof(tmp_buf), "%ld", idx); php_wddx_serialize_var(packet, *ent, tmp_buf, key_len TSRMLS_CC); } } php_wddx_add_chunk_static(packet, WDDX_STRUCT_E); } zval_dtor(fname); FREE_ZVAL(fname); if (retval) { zval_ptr_dtor(&retval); }
0
CVE-2016-9934
1,714
benign
CWE-476
*/ static void php_wddx_serialize_object(wddx_packet *packet, zval *obj) { /* OBJECTS_FIXME */ zval **ent, *fname, **varname; zval *retval = NULL; const char *key; ulong idx; char tmp_buf[WDDX_BUF_LEN]; HashTable *objhash, *sleephash; zend_class_entry *ce; PHP_CLASS_ATTRIBUTES; TSRMLS_FETCH(); PHP_SET_CLASS_ATTRIBUTES(obj); ce = Z_OBJCE_P(obj); if (!ce || ce->serialize || ce->unserialize) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Class %s can not be serialized", class_name); PHP_CLEANUP_CLASS_ATTRIBUTES(); return; } MAKE_STD_ZVAL(fname); ZVAL_STRING(fname, "__sleep", 1); /* * We try to call __sleep() method on object. It's supposed to return an * array of property names to be serialized. */ if (call_user_function_ex(CG(function_table), &obj, fname, &retval, 0, 0, 1, NULL TSRMLS_CC) == SUCCESS) { if (retval && (sleephash = HASH_OF(retval))) { php_wddx_add_chunk_static(packet, WDDX_STRUCT_S); snprintf(tmp_buf, WDDX_BUF_LEN, WDDX_VAR_S, PHP_CLASS_NAME_VAR); php_wddx_add_chunk(packet, tmp_buf); php_wddx_add_chunk_static(packet, WDDX_STRING_S); php_wddx_add_chunk_ex(packet, class_name, name_len); php_wddx_add_chunk_static(packet, WDDX_STRING_E); php_wddx_add_chunk_static(packet, WDDX_VAR_E); objhash = HASH_OF(obj); for (zend_hash_internal_pointer_reset(sleephash); zend_hash_get_current_data(sleephash, (void **)&varname) == SUCCESS; zend_hash_move_forward(sleephash)) { if (Z_TYPE_PP(varname) != IS_STRING) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "__sleep should return an array only containing the names of instance-variables to serialize."); continue; } if (zend_hash_find(objhash, Z_STRVAL_PP(varname), Z_STRLEN_PP(varname)+1, (void **)&ent) == SUCCESS) { php_wddx_serialize_var(packet, *ent, Z_STRVAL_PP(varname), Z_STRLEN_PP(varname) TSRMLS_CC); } } php_wddx_add_chunk_static(packet, WDDX_STRUCT_E); } } else { uint key_len; php_wddx_add_chunk_static(packet, WDDX_STRUCT_S); snprintf(tmp_buf, WDDX_BUF_LEN, WDDX_VAR_S, PHP_CLASS_NAME_VAR); php_wddx_add_chunk(packet, tmp_buf); php_wddx_add_chunk_static(packet, WDDX_STRING_S); php_wddx_add_chunk_ex(packet, class_name, name_len); php_wddx_add_chunk_static(packet, WDDX_STRING_E); php_wddx_add_chunk_static(packet, WDDX_VAR_E); objhash = HASH_OF(obj); for (zend_hash_internal_pointer_reset(objhash); zend_hash_get_current_data(objhash, (void**)&ent) == SUCCESS; zend_hash_move_forward(objhash)) { if (*ent == obj) { continue; } if (zend_hash_get_current_key_ex(objhash, &key, &key_len, &idx, 0, NULL) == HASH_KEY_IS_STRING) { const char *class_name, *prop_name; zend_unmangle_property_name(key, key_len-1, &class_name, &prop_name); php_wddx_serialize_var(packet, *ent, prop_name, strlen(prop_name)+1 TSRMLS_CC); } else { key_len = slprintf(tmp_buf, sizeof(tmp_buf), "%ld", idx); php_wddx_serialize_var(packet, *ent, tmp_buf, key_len TSRMLS_CC); } } php_wddx_add_chunk_static(packet, WDDX_STRUCT_E); } PHP_CLEANUP_CLASS_ATTRIBUTES(); zval_dtor(fname); FREE_ZVAL(fname); if (retval) { zval_ptr_dtor(&retval); }
1
CVE-2016-9934
1,714
vulnerable
CWE-362
int PipeSocketHandler::connect(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> mutexGuard(globalMutex); string pipePath = endpoint.name(); sockaddr_un remote; int sockFd = ::socket(AF_UNIX, SOCK_STREAM, 0); FATAL_FAIL(sockFd); initSocket(sockFd); remote.sun_family = AF_UNIX; strcpy(remote.sun_path, pipePath.c_str()); VLOG(3) << "Connecting to " << endpoint << " with fd " << sockFd; int result = ::connect(sockFd, (struct sockaddr*)&remote, sizeof(sockaddr_un)); auto localErrno = GetErrno(); if (result < 0 && localErrno != EINPROGRESS) { VLOG(3) << "Connection result: " << result << " (" << strerror(localErrno) << ")"; #ifdef WIN32 ::shutdown(sockFd, SD_BOTH); #else ::shutdown(sockFd, SHUT_RDWR); #endif #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; SetErrno(localErrno); return sockFd; } fd_set fdset; FD_ZERO(&fdset); FD_SET(sockFd, &fdset); timeval tv; tv.tv_sec = 3; /* 3 second timeout */ tv.tv_usec = 0; VLOG(4) << "Before selecting sockFd"; select(sockFd + 1, NULL, &fdset, NULL, &tv); if (FD_ISSET(sockFd, &fdset)) { VLOG(4) << "sockFd " << sockFd << " is selected"; int so_error; socklen_t len = sizeof so_error; FATAL_FAIL( ::getsockopt(sockFd, SOL_SOCKET, SO_ERROR, (char*)&so_error, &len)); if (so_error == 0) { LOG(INFO) << "Connected to endpoint " << endpoint; // Initialize the socket again once it's blocking to make sure timeouts // are set initSocket(sockFd); // if we get here, we must have connected successfully } else { LOG(INFO) << "Error connecting to " << endpoint << ": " << so_error << " " << strerror(so_error); #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; } } else { auto localErrno = GetErrno(); LOG(INFO) << "Error connecting to " << endpoint << ": " << localErrno << " " << strerror(localErrno); #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; } LOG(INFO) << sockFd << " is a good socket"; if (sockFd >= 0) { addToActiveSockets(sockFd); } return sockFd; }
0
CVE-2022-24949
1,771
benign
CWE-362
int PipeSocketHandler::connect(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> mutexGuard(globalMutex); string pipePath = endpoint.name(); sockaddr_un remote; int sockFd = ::socket(AF_UNIX, SOCK_STREAM, 0); FATAL_FAIL(sockFd); initSocket(sockFd); remote.sun_family = AF_UNIX; strncpy(remote.sun_path, pipePath.c_str(), sizeof(remote.sun_path)); VLOG(3) << "Connecting to " << endpoint << " with fd " << sockFd; int result = ::connect(sockFd, (struct sockaddr*)&remote, sizeof(sockaddr_un)); auto localErrno = GetErrno(); if (result < 0 && localErrno != EINPROGRESS) { VLOG(3) << "Connection result: " << result << " (" << strerror(localErrno) << ")"; #ifdef WIN32 ::shutdown(sockFd, SD_BOTH); #else ::shutdown(sockFd, SHUT_RDWR); #endif #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; SetErrno(localErrno); return sockFd; } fd_set fdset; FD_ZERO(&fdset); FD_SET(sockFd, &fdset); timeval tv; tv.tv_sec = 3; /* 3 second timeout */ tv.tv_usec = 0; VLOG(4) << "Before selecting sockFd"; select(sockFd + 1, NULL, &fdset, NULL, &tv); if (FD_ISSET(sockFd, &fdset)) { VLOG(4) << "sockFd " << sockFd << " is selected"; int so_error; socklen_t len = sizeof so_error; FATAL_FAIL( ::getsockopt(sockFd, SOL_SOCKET, SO_ERROR, (char*)&so_error, &len)); if (so_error == 0) { LOG(INFO) << "Connected to endpoint " << endpoint; // Initialize the socket again once it's blocking to make sure timeouts // are set initSocket(sockFd); // if we get here, we must have connected successfully } else { LOG(INFO) << "Error connecting to " << endpoint << ": " << so_error << " " << strerror(so_error); #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; } } else { auto localErrno = GetErrno(); LOG(INFO) << "Error connecting to " << endpoint << ": " << localErrno << " " << strerror(localErrno); #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; } LOG(INFO) << sockFd << " is a good socket"; if (sockFd >= 0) { addToActiveSockets(sockFd); } return sockFd; }
1
CVE-2022-24949
1,771
vulnerable
CWE-416
struct reloc_t* MACH0_(get_relocs)(struct MACH0_(obj_t)* bin) { struct reloc_t *relocs; int i = 0, len; ulebr ur = {NULL}; int wordsize = MACH0_(get_bits)(bin) / 8; if (bin->dyld_info) { ut8 *opcodes,*end, type = 0, rel_type = 0; int lib_ord, seg_idx = -1, sym_ord = -1; size_t j, count, skip, bind_size, lazy_size; st64 addend = 0; ut64 segmentAddress = 0LL; ut64 addr = 0LL; ut8 done = 0; #define CASE(T) case (T / 8): rel_type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return NULL; } #undef CASE bind_size = bin->dyld_info->bind_size; lazy_size = bin->dyld_info->lazy_bind_size; if (!bind_size || !lazy_size) { return NULL; } if ((bind_size + lazy_size)<1) { return NULL; } if (bin->dyld_info->bind_off > bin->size || bin->dyld_info->bind_off + bind_size > bin->size) return NULL; if (bin->dyld_info->lazy_bind_off > bin->size || \ bin->dyld_info->lazy_bind_off + lazy_size > bin->size) return NULL; if (bin->dyld_info->bind_off+bind_size+lazy_size > bin->size) return NULL; // NOTE(eddyb) it's a waste of memory, but we don't know the actual number of relocs. if (!(relocs = calloc (1, (1 + bind_size + lazy_size) * sizeof (struct reloc_t)))) return NULL; opcodes = calloc (1, bind_size + lazy_size + 1); if (!opcodes) { free (relocs); return NULL; } len = r_buf_read_at (bin->b, bin->dyld_info->bind_off, opcodes, bind_size); i = r_buf_read_at (bin->b, bin->dyld_info->lazy_bind_off, opcodes + bind_size, lazy_size); if (len < 1 || i < 1) { bprintf ("Error: read (dyld_info bind) at 0x%08"PFMT64x"\n", (ut64)(size_t)bin->dyld_info->bind_off); free (opcodes); relocs[i].last = 1; return relocs; } i = 0; // that +2 is a minimum required for uleb128, this may be wrong, // the correct fix would be to make ULEB() must use rutil's // implementation that already checks for buffer boundaries for (ur.p = opcodes, end = opcodes + bind_size + lazy_size ; (ur.p+2 < end) && !done; ) { ut8 imm = *ur.p & BIND_IMMEDIATE_MASK, op = *ur.p & BIND_OPCODE_MASK; ++ur.p; switch (op) { #define ULEB() read_uleb128 (&ur,end) #define SLEB() read_sleb128 (&ur,end) case BIND_OPCODE_DONE: done = 1; break; case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: lib_ord = imm; break; case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: lib_ord = ULEB(); break; case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: lib_ord = imm? (st8)(BIND_OPCODE_MASK | imm) : 0; break; case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: { char *sym_name = (char*)ur.p; //ut8 sym_flags = imm; while (*ur.p++ && ur.p<end) { /* empty loop */ } sym_ord = -1; if (bin->symtab && bin->dysymtab.nundefsym < 0xffff) for (j = 0; j < bin->dysymtab.nundefsym; j++) { int stridx = 0; int iundefsym = bin->dysymtab.iundefsym; if (iundefsym>=0 && iundefsym < bin->nsymtab) { int sidx = iundefsym +j; if (sidx<0 || sidx>= bin->nsymtab) continue; stridx = bin->symtab[sidx].n_strx; if (stridx < 0 || stridx >= bin->symstrlen) continue; } if (!strcmp ((char *)bin->symstr + stridx, sym_name)) { sym_ord = j; break; } } break; } case BIND_OPCODE_SET_TYPE_IMM: type = imm; break; case BIND_OPCODE_SET_ADDEND_SLEB: addend = SLEB(); break; case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: seg_idx = imm; if (seg_idx < 0 || seg_idx >= bin->nsegs) { bprintf ("Error: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB" " has unexistent segment %d\n", seg_idx); addr = 0LL; return 0; // early exit to avoid future mayhem } else { addr = bin->segs[seg_idx].vmaddr + ULEB(); segmentAddress = bin->segs[seg_idx].vmaddr \ + bin->segs[seg_idx].vmsize; } break; case BIND_OPCODE_ADD_ADDR_ULEB: addr += ULEB(); break; #define DO_BIND() do {\ if (sym_ord < 0 || seg_idx < 0 ) break;\ if (i >= (bind_size + lazy_size)) break;\ relocs[i].addr = addr;\ relocs[i].offset = addr - bin->segs[seg_idx].vmaddr + bin->segs[seg_idx].fileoff;\ if (type == BIND_TYPE_TEXT_PCREL32)\ relocs[i].addend = addend - (bin->baddr + addr);\ else relocs[i].addend = addend;\ /* library ordinal ??? */ \ relocs[i].ord = lib_ord;\ relocs[i].ord = sym_ord;\ relocs[i].type = rel_type;\ relocs[i++].last = 0;\ } while (0) case BIND_OPCODE_DO_BIND: if (addr >= segmentAddress) { bprintf ("Error: Malformed DO bind opcode\n"); goto beach; } DO_BIND(); addr += wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: if (addr >= segmentAddress) { bprintf ("Error: Malformed ADDR ULEB bind opcode\n"); goto beach; } DO_BIND(); addr += ULEB() + wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: if (addr >= segmentAddress) { bprintf ("Error: Malformed IMM SCALED bind opcode\n"); goto beach; } DO_BIND(); addr += (ut64)imm * (ut64)wordsize + wordsize; break; case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: count = ULEB(); skip = ULEB(); for (j = 0; j < count; j++) { if (addr >= segmentAddress) { bprintf ("Error: Malformed ULEB TIMES bind opcode\n"); goto beach; } DO_BIND(); addr += skip + wordsize; } break; #undef DO_BIND #undef ULEB #undef SLEB default: bprintf ("Error: unknown bind opcode 0x%02x in dyld_info\n", *ur.p); free (opcodes); relocs[i].last = 1; return relocs; } } free (opcodes); } else { int j; if (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms) return NULL; if (!(relocs = malloc ((bin->dysymtab.nundefsym + 1) * sizeof(struct reloc_t)))) return NULL; for (j = 0; j < bin->dysymtab.nundefsym; j++) { if (parse_import_ptr(bin, &relocs[i], bin->dysymtab.iundefsym + j)) { relocs[i].ord = j; relocs[i++].last = 0; } } } beach: relocs[i].last = 1; return relocs; }
0
CVE-2017-7946
348
benign
CWE-416
struct reloc_t* MACH0_(get_relocs)(struct MACH0_(obj_t)* bin) { struct reloc_t *relocs; int i = 0, len; ulebr ur = {NULL}; int wordsize = MACH0_(get_bits)(bin) / 8; if (bin->dyld_info) { ut8 *opcodes,*end, type = 0, rel_type = 0; int lib_ord, seg_idx = -1, sym_ord = -1; size_t j, count, skip, bind_size, lazy_size; st64 addend = 0; ut64 segmentAddress = 0LL; ut64 addr = 0LL; ut8 done = 0; #define CASE(T) case (T / 8): rel_type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return NULL; } #undef CASE bind_size = bin->dyld_info->bind_size; lazy_size = bin->dyld_info->lazy_bind_size; if (!bind_size || !lazy_size) { return NULL; } if ((bind_size + lazy_size)<1) { return NULL; } if (bin->dyld_info->bind_off > bin->size || bin->dyld_info->bind_off + bind_size > bin->size) { return NULL; } if (bin->dyld_info->lazy_bind_off > bin->size || \ bin->dyld_info->lazy_bind_off + lazy_size > bin->size) { return NULL; } if (bin->dyld_info->bind_off+bind_size+lazy_size > bin->size) { return NULL; } // NOTE(eddyb) it's a waste of memory, but we don't know the actual number of relocs. if (!(relocs = calloc (1, (1 + bind_size + lazy_size) * sizeof (struct reloc_t)))) { return NULL; } opcodes = calloc (1, bind_size + lazy_size + 1); if (!opcodes) { free (relocs); return NULL; } len = r_buf_read_at (bin->b, bin->dyld_info->bind_off, opcodes, bind_size); i = r_buf_read_at (bin->b, bin->dyld_info->lazy_bind_off, opcodes + bind_size, lazy_size); if (len < 1 || i < 1) { bprintf ("Error: read (dyld_info bind) at 0x%08"PFMT64x"\n", (ut64)(size_t)bin->dyld_info->bind_off); free (opcodes); relocs[i].last = 1; return relocs; } i = 0; // that +2 is a minimum required for uleb128, this may be wrong, // the correct fix would be to make ULEB() must use rutil's // implementation that already checks for buffer boundaries for (ur.p = opcodes, end = opcodes + bind_size + lazy_size ; (ur.p+2 < end) && !done; ) { ut8 imm = *ur.p & BIND_IMMEDIATE_MASK, op = *ur.p & BIND_OPCODE_MASK; ++ur.p; switch (op) { #define ULEB() read_uleb128 (&ur,end) #define SLEB() read_sleb128 (&ur,end) case BIND_OPCODE_DONE: done = 1; break; case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: lib_ord = imm; break; case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: lib_ord = ULEB(); break; case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: lib_ord = imm? (st8)(BIND_OPCODE_MASK | imm) : 0; break; case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: { char *sym_name = (char*)ur.p; //ut8 sym_flags = imm; while (*ur.p++ && ur.p<end) { /* empty loop */ } sym_ord = -1; if (bin->symtab && bin->dysymtab.nundefsym < 0xffff) for (j = 0; j < bin->dysymtab.nundefsym; j++) { int stridx = 0; int iundefsym = bin->dysymtab.iundefsym; if (iundefsym>=0 && iundefsym < bin->nsymtab) { int sidx = iundefsym +j; if (sidx<0 || sidx>= bin->nsymtab) continue; stridx = bin->symtab[sidx].n_strx; if (stridx < 0 || stridx >= bin->symstrlen) continue; } if (!strcmp ((char *)bin->symstr + stridx, sym_name)) { sym_ord = j; break; } } break; } case BIND_OPCODE_SET_TYPE_IMM: type = imm; break; case BIND_OPCODE_SET_ADDEND_SLEB: addend = SLEB(); break; case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: seg_idx = imm; if (seg_idx < 0 || seg_idx >= bin->nsegs) { bprintf ("Error: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB" " has unexistent segment %d\n", seg_idx); addr = 0LL; return 0; // early exit to avoid future mayhem } else { addr = bin->segs[seg_idx].vmaddr + ULEB(); segmentAddress = bin->segs[seg_idx].vmaddr \ + bin->segs[seg_idx].vmsize; } break; case BIND_OPCODE_ADD_ADDR_ULEB: addr += ULEB(); break; #define DO_BIND() do {\ if (sym_ord < 0 || seg_idx < 0 ) break;\ if (i >= (bind_size + lazy_size)) break;\ relocs[i].addr = addr;\ relocs[i].offset = addr - bin->segs[seg_idx].vmaddr + bin->segs[seg_idx].fileoff;\ if (type == BIND_TYPE_TEXT_PCREL32)\ relocs[i].addend = addend - (bin->baddr + addr);\ else relocs[i].addend = addend;\ /* library ordinal ??? */ \ relocs[i].ord = lib_ord;\ relocs[i].ord = sym_ord;\ relocs[i].type = rel_type;\ relocs[i++].last = 0;\ } while (0) case BIND_OPCODE_DO_BIND: if (addr >= segmentAddress) { bprintf ("Error: Malformed DO bind opcode\n"); goto beach; } DO_BIND(); addr += wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: if (addr >= segmentAddress) { bprintf ("Error: Malformed ADDR ULEB bind opcode\n"); goto beach; } DO_BIND(); addr += ULEB() + wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: if (addr >= segmentAddress) { bprintf ("Error: Malformed IMM SCALED bind opcode\n"); goto beach; } DO_BIND(); addr += (ut64)imm * (ut64)wordsize + wordsize; break; case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: count = ULEB(); skip = ULEB(); for (j = 0; j < count; j++) { if (addr >= segmentAddress) { bprintf ("Error: Malformed ULEB TIMES bind opcode\n"); goto beach; } DO_BIND(); addr += skip + wordsize; } break; #undef DO_BIND #undef ULEB #undef SLEB default: bprintf ("Error: unknown bind opcode 0x%02x in dyld_info\n", *ur.p); free (opcodes); relocs[i].last = 1; return relocs; } } free (opcodes); } else { int j; if (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms) { return NULL; } if (!(relocs = malloc ((bin->dysymtab.nundefsym + 1) * sizeof(struct reloc_t)))) { return NULL; } for (j = 0; j < bin->dysymtab.nundefsym; j++) { if (parse_import_ptr (bin, &relocs[i], bin->dysymtab.iundefsym + j)) { relocs[i].ord = j; relocs[i++].last = 0; } } } beach: relocs[i].last = 1; return relocs; }
1
CVE-2017-7946
348
vulnerable
CWE-416
static zend_always_inline int process_nested_data(UNSERIALIZE_PARAMETER, HashTable *ht, zend_long elements, int objprops) { while (elements-- > 0) { zval key, *data, d, *old_data; zend_ulong idx; ZVAL_UNDEF(&key); if (!php_var_unserialize_internal(&key, p, max, NULL, classes)) { zval_dtor(&key); return 0; } data = NULL; ZVAL_UNDEF(&d); if (!objprops) { if (Z_TYPE(key) == IS_LONG) { idx = Z_LVAL(key); numeric_key: if (UNEXPECTED((old_data = zend_hash_index_find(ht, idx)) != NULL)) { //??? update hash var_push_dtor(var_hash, old_data); data = zend_hash_index_update(ht, idx, &d); } else { data = zend_hash_index_add_new(ht, idx, &d); } } else if (Z_TYPE(key) == IS_STRING) { if (UNEXPECTED(ZEND_HANDLE_NUMERIC(Z_STR(key), idx))) { goto numeric_key; } if (UNEXPECTED((old_data = zend_hash_find(ht, Z_STR(key))) != NULL)) { //??? update hash var_push_dtor(var_hash, old_data); data = zend_hash_update(ht, Z_STR(key), &d); } else { data = zend_hash_add_new(ht, Z_STR(key), &d); } } else { zval_dtor(&key); return 0; } } else { if (EXPECTED(Z_TYPE(key) == IS_STRING)) { string_key: if ((old_data = zend_hash_find(ht, Z_STR(key))) != NULL) { if (Z_TYPE_P(old_data) == IS_INDIRECT) { old_data = Z_INDIRECT_P(old_data); } var_push_dtor(var_hash, old_data); data = zend_hash_update_ind(ht, Z_STR(key), &d); } else { data = zend_hash_add_new(ht, Z_STR(key), &d); } } else if (Z_TYPE(key) == IS_LONG) { /* object properties should include no integers */ convert_to_string(&key); goto string_key; } else { zval_dtor(&key); return 0; } } if (!php_var_unserialize_internal(data, p, max, var_hash, classes)) { zval_dtor(&key); return 0; } if (UNEXPECTED(Z_ISUNDEF_P(data))) { if (Z_TYPE(key) == IS_LONG) { zend_hash_index_del(ht, Z_LVAL(key)); } else { zend_hash_del_ind(ht, Z_STR(key)); } } else { var_push_dtor(var_hash, data); } zval_dtor(&key); if (elements && *(*p-1) != ';' && *(*p-1) != '}') { (*p)--; return 0; } } return 1; }
0
CVE-2017-12932
710
benign
CWE-416
static zend_always_inline int process_nested_data(UNSERIALIZE_PARAMETER, HashTable *ht, zend_long elements, int objprops) { while (elements-- > 0) { zval key, *data, d, *old_data; zend_ulong idx; ZVAL_UNDEF(&key); if (!php_var_unserialize_internal(&key, p, max, NULL, classes)) { zval_dtor(&key); return 0; } data = NULL; ZVAL_UNDEF(&d); if (!objprops) { if (Z_TYPE(key) == IS_LONG) { idx = Z_LVAL(key); numeric_key: if (UNEXPECTED((old_data = zend_hash_index_find(ht, idx)) != NULL)) { //??? update hash var_push_dtor(var_hash, old_data); data = zend_hash_index_update(ht, idx, &d); } else { data = zend_hash_index_add_new(ht, idx, &d); } } else if (Z_TYPE(key) == IS_STRING) { if (UNEXPECTED(ZEND_HANDLE_NUMERIC(Z_STR(key), idx))) { goto numeric_key; } if (UNEXPECTED((old_data = zend_hash_find(ht, Z_STR(key))) != NULL)) { //??? update hash var_push_dtor(var_hash, old_data); data = zend_hash_update(ht, Z_STR(key), &d); } else { data = zend_hash_add_new(ht, Z_STR(key), &d); } } else { zval_dtor(&key); return 0; } } else { if (EXPECTED(Z_TYPE(key) == IS_STRING)) { string_key: if ((old_data = zend_hash_find(ht, Z_STR(key))) != NULL) { if (Z_TYPE_P(old_data) == IS_INDIRECT) { old_data = Z_INDIRECT_P(old_data); } var_push_dtor(var_hash, old_data); data = zend_hash_update_ind(ht, Z_STR(key), &d); } else { data = zend_hash_add_new(ht, Z_STR(key), &d); } } else if (Z_TYPE(key) == IS_LONG) { /* object properties should include no integers */ convert_to_string(&key); goto string_key; } else { zval_dtor(&key); return 0; } } if (!php_var_unserialize_internal(data, p, max, var_hash, classes)) { zval_dtor(&key); return 0; } var_push_dtor(var_hash, data); zval_dtor(&key); if (elements && *(*p-1) != ';' && *(*p-1) != '}') { (*p)--; return 0; } } return 1; }
1
CVE-2017-12932
710
vulnerable
CWE-119
static ssize_t k90_show_current_profile(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int current_profile; char data[8]; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); if (ret < 0) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); return -EIO; } current_profile = data[7]; if (current_profile < 1 || current_profile > 3) { dev_warn(dev, "Read invalid current profile: %02hhx.\n", data[7]); return -EIO; } return snprintf(buf, PAGE_SIZE, "%d\n", current_profile); }
0
CVE-2017-5547
263
benign
CWE-119
static ssize_t k90_show_current_profile(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int current_profile; char *data; data = kmalloc(8, GFP_KERNEL); if (!data) return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); if (ret < 0) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); ret = -EIO; goto out; } current_profile = data[7]; if (current_profile < 1 || current_profile > 3) { dev_warn(dev, "Read invalid current profile: %02hhx.\n", data[7]); ret = -EIO; goto out; } ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile); out: kfree(data); return ret; }
1
CVE-2017-5547
263
vulnerable
CWE-362
int ip_queue_xmit(struct sk_buff *skb) { struct sock *sk = skb->sk; struct inet_sock *inet = inet_sk(sk); struct ip_options *opt = inet->opt; struct rtable *rt; struct iphdr *iph; int res; /* Skip all of this if the packet is already routed, * f.e. by something like SCTP. */ rcu_read_lock(); rt = skb_rtable(skb); if (rt != NULL) goto packet_routed; /* Make sure we can route this packet. */ rt = (struct rtable *)__sk_dst_check(sk, 0); if (rt == NULL) { __be32 daddr; /* Use correct destination address if we have options. */ daddr = inet->inet_daddr; if(opt && opt->srr) daddr = opt->faddr; /* If this fails, retransmit mechanism of transport layer will * keep trying until route appears or the connection times * itself out. */ rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (IS_ERR(rt)) goto no_route; sk_setup_caps(sk, &rt->dst); } skb_dst_set_noref(skb, &rt->dst); packet_routed: if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) goto no_route; /* OK, we know where to send it, allocate and build IP header. */ skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); skb_reset_network_header(skb); iph = ip_hdr(skb); *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df) iph->frag_off = htons(IP_DF); else iph->frag_off = 0; iph->ttl = ip_select_ttl(inet, &rt->dst); iph->protocol = sk->sk_protocol; iph->saddr = rt->rt_src; iph->daddr = rt->rt_dst; /* Transport layer set skb->h.foo itself. */ if (opt && opt->optlen) { iph->ihl += opt->optlen >> 2; ip_options_build(skb, opt, inet->inet_daddr, rt, 0); } ip_select_ident_more(iph, &rt->dst, sk, (skb_shinfo(skb)->gso_segs ?: 1) - 1); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; res = ip_local_out(skb); rcu_read_unlock(); return res; no_route: rcu_read_unlock(); IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EHOSTUNREACH; }
0
CVE-2012-3552
2,842
benign
CWE-362
int ip_queue_xmit(struct sk_buff *skb) { struct sock *sk = skb->sk; struct inet_sock *inet = inet_sk(sk); struct ip_options_rcu *inet_opt; struct rtable *rt; struct iphdr *iph; int res; /* Skip all of this if the packet is already routed, * f.e. by something like SCTP. */ rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); rt = skb_rtable(skb); if (rt != NULL) goto packet_routed; /* Make sure we can route this packet. */ rt = (struct rtable *)__sk_dst_check(sk, 0); if (rt == NULL) { __be32 daddr; /* Use correct destination address if we have options. */ daddr = inet->inet_daddr; if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; /* If this fails, retransmit mechanism of transport layer will * keep trying until route appears or the connection times * itself out. */ rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (IS_ERR(rt)) goto no_route; sk_setup_caps(sk, &rt->dst); } skb_dst_set_noref(skb, &rt->dst); packet_routed: if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_dst != rt->rt_gateway) goto no_route; /* OK, we know where to send it, allocate and build IP header. */ skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0)); skb_reset_network_header(skb); iph = ip_hdr(skb); *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df) iph->frag_off = htons(IP_DF); else iph->frag_off = 0; iph->ttl = ip_select_ttl(inet, &rt->dst); iph->protocol = sk->sk_protocol; iph->saddr = rt->rt_src; iph->daddr = rt->rt_dst; /* Transport layer set skb->h.foo itself. */ if (inet_opt && inet_opt->opt.optlen) { iph->ihl += inet_opt->opt.optlen >> 2; ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); } ip_select_ident_more(iph, &rt->dst, sk, (skb_shinfo(skb)->gso_segs ?: 1) - 1); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; res = ip_local_out(skb); rcu_read_unlock(); return res; no_route: rcu_read_unlock(); IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EHOSTUNREACH; }
1
CVE-2012-3552
2,842
vulnerable
CWE-20
static void keyring_describe(const struct key *keyring, struct seq_file *m) { if (keyring->description) seq_puts(m, keyring->description); else seq_puts(m, "[anon]"); if (key_is_instantiated(keyring)) { if (keyring->keys.nr_leaves_on_tree != 0) seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); else seq_puts(m, ": empty"); } }
0
CVE-2017-15951
2,099
benign
CWE-20
static void keyring_describe(const struct key *keyring, struct seq_file *m) { if (keyring->description) seq_puts(m, keyring->description); else seq_puts(m, "[anon]"); if (key_is_positive(keyring)) { if (keyring->keys.nr_leaves_on_tree != 0) seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); else seq_puts(m, ": empty"); } }
1
CVE-2017-15951
2,099
vulnerable
CWE-476
GF_Err ilst_item_box_read(GF_Box *s,GF_BitStream *bs) { GF_Err e; u32 sub_type; GF_Box *a = NULL; GF_ListItemBox *ptr = (GF_ListItemBox *)s; /*iTunes way: there's a data atom containing the data*/ sub_type = gf_bs_peek_bits(bs, 32, 4); if (sub_type == GF_ISOM_BOX_TYPE_DATA ) { e = gf_isom_box_parse(&a, bs); if (!e && ptr->size < a->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[isom] not enough bytes in box %s: %d left, reading %d (file %s, line %d)\n", gf_4cc_to_str(ptr->type), ptr->size, a->size, __FILE__, __LINE__ )); \ e = GF_ISOM_INVALID_FILE; } if (e) { if (a) gf_isom_box_del(a); return e; } ISOM_DECREASE_SIZE(ptr, a->size); if (a && ptr->data) gf_isom_box_del_parent(&ptr->child_boxes, (GF_Box *) ptr->data); /* otherwise a->data will always overflow */ if (a && a->size > 4 && a->type != GF_ISOM_BOX_TYPE_VOID) { ptr->data = (GF_DataBox *)a; if (!ptr->child_boxes) ptr->child_boxes = gf_list_new(); gf_list_add(ptr->child_boxes, ptr->data); } else { ptr->data = NULL; gf_isom_box_del(a); } } /*QT way*/ else { u64 pos = gf_bs_get_position(bs); u64 prev_size = s->size; /*try parsing as generic box list*/ e = gf_isom_box_array_read(s, bs, NULL); if (e==GF_OK) return GF_OK; //reset content and retry - this deletes ptr->data !! gf_isom_box_array_del(s->child_boxes); s->child_boxes=NULL; gf_bs_seek(bs, pos); s->size = prev_size; ptr->data = (GF_DataBox *)gf_isom_box_new_parent(&ptr->child_boxes, GF_ISOM_BOX_TYPE_DATA); //nope, check qt-style ptr->data->qt_style = GF_TRUE; ISOM_DECREASE_SIZE(ptr, 2); ptr->data->dataSize = gf_bs_read_u16(bs); gf_bs_read_u16(bs); ptr->data->data = (char *) gf_malloc(sizeof(char)*(ptr->data->dataSize + 1)); gf_bs_read_data(bs, ptr->data->data, ptr->data->dataSize); ptr->data->data[ptr->data->dataSize] = 0; ISOM_DECREASE_SIZE(ptr, ptr->data->dataSize); } return GF_OK; }
0
CVE-2020-19488
407
benign
CWE-476
GF_Err ilst_item_box_read(GF_Box *s,GF_BitStream *bs) { GF_Err e; u32 sub_type; GF_Box *a = NULL; GF_ListItemBox *ptr = (GF_ListItemBox *)s; /*iTunes way: there's a data atom containing the data*/ sub_type = gf_bs_peek_bits(bs, 32, 4); if (sub_type == GF_ISOM_BOX_TYPE_DATA ) { e = gf_isom_box_parse(&a, bs); if (!e && a && (ptr->size < a->size)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[isom] not enough bytes in box %s: %d left, reading %d (file %s, line %d)\n", gf_4cc_to_str(ptr->type), ptr->size, a->size, __FILE__, __LINE__ )); \ e = GF_ISOM_INVALID_FILE; } if (e) { if (a) gf_isom_box_del(a); return e; } if (!a) return GF_NON_COMPLIANT_BITSTREAM; ISOM_DECREASE_SIZE(ptr, a->size); if (a && ptr->data) gf_isom_box_del_parent(&ptr->child_boxes, (GF_Box *) ptr->data); /* otherwise a->data will always overflow */ if (a && a->size > 4 && a->type != GF_ISOM_BOX_TYPE_VOID) { ptr->data = (GF_DataBox *)a; if (!ptr->child_boxes) ptr->child_boxes = gf_list_new(); gf_list_add(ptr->child_boxes, ptr->data); } else { ptr->data = NULL; gf_isom_box_del(a); } } /*QT way*/ else { u64 pos = gf_bs_get_position(bs); u64 prev_size = s->size; /*try parsing as generic box list*/ e = gf_isom_box_array_read(s, bs, NULL); if (e==GF_OK) return GF_OK; //reset content and retry - this deletes ptr->data !! gf_isom_box_array_del(s->child_boxes); s->child_boxes=NULL; gf_bs_seek(bs, pos); s->size = prev_size; ptr->data = (GF_DataBox *)gf_isom_box_new_parent(&ptr->child_boxes, GF_ISOM_BOX_TYPE_DATA); //nope, check qt-style ptr->data->qt_style = GF_TRUE; ISOM_DECREASE_SIZE(ptr, 2); ptr->data->dataSize = gf_bs_read_u16(bs); gf_bs_read_u16(bs); ptr->data->data = (char *) gf_malloc(sizeof(char)*(ptr->data->dataSize + 1)); gf_bs_read_data(bs, ptr->data->data, ptr->data->dataSize); ptr->data->data[ptr->data->dataSize] = 0; ISOM_DECREASE_SIZE(ptr, ptr->data->dataSize); } return GF_OK; }
1
CVE-2020-19488
407
vulnerable
CWE-20
static int cmd_handle_untagged (IMAP_DATA* idata) { char* s; char* pn; unsigned int count; s = imap_next_word (idata->buf); pn = imap_next_word (s); if ((idata->state >= IMAP_SELECTED) && isdigit ((unsigned char) *s)) { pn = s; s = imap_next_word (s); /* EXISTS and EXPUNGE are always related to the SELECTED mailbox for the * connection, so update that one. */ if (ascii_strncasecmp ("EXISTS", s, 6) == 0) { dprint (2, (debugfile, "Handling EXISTS\n")); /* new mail arrived */ mutt_atoui (pn, &count); if ( !(idata->reopen & IMAP_EXPUNGE_PENDING) && count < idata->max_msn) { /* Notes 6.0.3 has a tendency to report fewer messages exist than * it should. */ dprint (1, (debugfile, "Message count is out of sync")); return 0; } /* at least the InterChange server sends EXISTS messages freely, * even when there is no new mail */ else if (count == idata->max_msn) dprint (3, (debugfile, "cmd_handle_untagged: superfluous EXISTS message.\n")); else { if (!(idata->reopen & IMAP_EXPUNGE_PENDING)) { dprint (2, (debugfile, "cmd_handle_untagged: New mail in %s - %d messages total.\n", idata->mailbox, count)); idata->reopen |= IMAP_NEWMAIL_PENDING; } idata->newMailCount = count; } } /* pn vs. s: need initial seqno */ else if (ascii_strncasecmp ("EXPUNGE", s, 7) == 0) cmd_parse_expunge (idata, pn); else if (ascii_strncasecmp ("FETCH", s, 5) == 0) cmd_parse_fetch (idata, pn); } else if (ascii_strncasecmp ("CAPABILITY", s, 10) == 0) cmd_parse_capability (idata, s); else if (!ascii_strncasecmp ("OK [CAPABILITY", s, 14)) cmd_parse_capability (idata, pn); else if (!ascii_strncasecmp ("OK [CAPABILITY", pn, 14)) cmd_parse_capability (idata, imap_next_word (pn)); else if (ascii_strncasecmp ("LIST", s, 4) == 0) cmd_parse_list (idata, s); else if (ascii_strncasecmp ("LSUB", s, 4) == 0) cmd_parse_lsub (idata, s); else if (ascii_strncasecmp ("MYRIGHTS", s, 8) == 0) cmd_parse_myrights (idata, s); else if (ascii_strncasecmp ("SEARCH", s, 6) == 0) cmd_parse_search (idata, s); else if (ascii_strncasecmp ("STATUS", s, 6) == 0) cmd_parse_status (idata, s); else if (ascii_strncasecmp ("ENABLED", s, 7) == 0) cmd_parse_enabled (idata, s); else if (ascii_strncasecmp ("BYE", s, 3) == 0) { dprint (2, (debugfile, "Handling BYE\n")); /* check if we're logging out */ if (idata->status == IMAP_BYE) return 0; /* server shut down our connection */ s += 3; SKIPWS (s); mutt_error ("%s", s); mutt_sleep (2); cmd_handle_fatal (idata); return -1; } else if (option (OPTIMAPSERVERNOISE) && (ascii_strncasecmp ("NO", s, 2) == 0)) { dprint (2, (debugfile, "Handling untagged NO\n")); /* Display the warning message from the server */ mutt_error ("%s", s+3); mutt_sleep (2); } return 0; }
0
CVE-2018-14349
929
benign
CWE-20
static int cmd_handle_untagged (IMAP_DATA* idata) { char* s; char* pn; unsigned int count; s = imap_next_word (idata->buf); pn = imap_next_word (s); if ((idata->state >= IMAP_SELECTED) && isdigit ((unsigned char) *s)) { pn = s; s = imap_next_word (s); /* EXISTS and EXPUNGE are always related to the SELECTED mailbox for the * connection, so update that one. */ if (ascii_strncasecmp ("EXISTS", s, 6) == 0) { dprint (2, (debugfile, "Handling EXISTS\n")); /* new mail arrived */ mutt_atoui (pn, &count); if ( !(idata->reopen & IMAP_EXPUNGE_PENDING) && count < idata->max_msn) { /* Notes 6.0.3 has a tendency to report fewer messages exist than * it should. */ dprint (1, (debugfile, "Message count is out of sync")); return 0; } /* at least the InterChange server sends EXISTS messages freely, * even when there is no new mail */ else if (count == idata->max_msn) dprint (3, (debugfile, "cmd_handle_untagged: superfluous EXISTS message.\n")); else { if (!(idata->reopen & IMAP_EXPUNGE_PENDING)) { dprint (2, (debugfile, "cmd_handle_untagged: New mail in %s - %d messages total.\n", idata->mailbox, count)); idata->reopen |= IMAP_NEWMAIL_PENDING; } idata->newMailCount = count; } } /* pn vs. s: need initial seqno */ else if (ascii_strncasecmp ("EXPUNGE", s, 7) == 0) cmd_parse_expunge (idata, pn); else if (ascii_strncasecmp ("FETCH", s, 5) == 0) cmd_parse_fetch (idata, pn); } else if (ascii_strncasecmp ("CAPABILITY", s, 10) == 0) cmd_parse_capability (idata, s); else if (!ascii_strncasecmp ("OK [CAPABILITY", s, 14)) cmd_parse_capability (idata, pn); else if (!ascii_strncasecmp ("OK [CAPABILITY", pn, 14)) cmd_parse_capability (idata, imap_next_word (pn)); else if (ascii_strncasecmp ("LIST", s, 4) == 0) cmd_parse_list (idata, s); else if (ascii_strncasecmp ("LSUB", s, 4) == 0) cmd_parse_lsub (idata, s); else if (ascii_strncasecmp ("MYRIGHTS", s, 8) == 0) cmd_parse_myrights (idata, s); else if (ascii_strncasecmp ("SEARCH", s, 6) == 0) cmd_parse_search (idata, s); else if (ascii_strncasecmp ("STATUS", s, 6) == 0) cmd_parse_status (idata, s); else if (ascii_strncasecmp ("ENABLED", s, 7) == 0) cmd_parse_enabled (idata, s); else if (ascii_strncasecmp ("BYE", s, 3) == 0) { dprint (2, (debugfile, "Handling BYE\n")); /* check if we're logging out */ if (idata->status == IMAP_BYE) return 0; /* server shut down our connection */ s += 3; SKIPWS (s); mutt_error ("%s", s); mutt_sleep (2); cmd_handle_fatal (idata); return -1; } else if (option (OPTIMAPSERVERNOISE) && (ascii_strncasecmp ("NO", s, 2) == 0)) { dprint (2, (debugfile, "Handling untagged NO\n")); /* Display the warning message from the server */ mutt_error ("%s", s+2); mutt_sleep (2); } return 0; }
1
CVE-2018-14349
929
vulnerable
CWE-119
void jpc_qmfb_split_col(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE]; jpc_fix_t *buf = splitbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; register int m; int hstartcol; /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } if (numrows >= 2) { hstartcol = (numrows + 1 - parity) >> 1; // ORIGINAL (WRONG): m = (parity) ? hstartcol : (numrows - hstartcol); m = numrows - hstartcol; /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { *dstptr = *srcptr; ++dstptr; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartcol * stride]; srcptr = buf; n = m; while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; ++srcptr; } } /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } }
0
CVE-2016-8654
2,141
benign
CWE-119
void jpc_qmfb_split_col(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE]; jpc_fix_t *buf = splitbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; register int m; int hstartrow; /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } if (numrows >= 2) { hstartrow = (numrows + 1 - parity) >> 1; // ORIGINAL (WRONG): m = (parity) ? hstartrow : (numrows - hstartrow); m = numrows - hstartrow; /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { *dstptr = *srcptr; ++dstptr; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartrow * stride]; srcptr = buf; n = m; while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; ++srcptr; } } /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } }
1
CVE-2016-8654
2,141
vulnerable
CWE-20
jas_image_t *jas_image_create0() { jas_image_t *image; if (!(image = jas_malloc(sizeof(jas_image_t)))) { return 0; } image->tlx_ = 0; image->tly_ = 0; image->brx_ = 0; image->bry_ = 0; image->clrspc_ = JAS_CLRSPC_UNKNOWN; image->numcmpts_ = 0; image->maxcmpts_ = 0; image->cmpts_ = 0; image->inmem_ = true; image->cmprof_ = 0; return image; }
0
CVE-2016-9395
2,980
benign
CWE-20
jas_image_t *jas_image_create0() { jas_image_t *image; if (!(image = jas_malloc(sizeof(jas_image_t)))) { return 0; } image->tlx_ = 0; image->tly_ = 0; image->brx_ = 0; image->bry_ = 0; image->clrspc_ = JAS_CLRSPC_UNKNOWN; image->numcmpts_ = 0; image->maxcmpts_ = 0; image->cmpts_ = 0; // image->inmem_ = true; image->cmprof_ = 0; return image; }
1
CVE-2016-9395
2,980
vulnerable
CWE-476
static int f2fs_read_single_page(struct inode *inode, struct page *page, unsigned nr_pages, struct f2fs_map_blocks *map, struct bio **bio_ret, sector_t *last_block_in_bio, bool is_readahead) { struct bio *bio = *bio_ret; const unsigned blkbits = inode->i_blkbits; const unsigned blocksize = 1 << blkbits; sector_t block_in_file; sector_t last_block; sector_t last_block_in_file; sector_t block_nr; int ret = 0; block_in_file = (sector_t)page->index; last_block = block_in_file + nr_pages; last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; if (last_block > last_block_in_file) last_block = last_block_in_file; /* just zeroing out page which is beyond EOF */ if (block_in_file >= last_block) goto zero_out; /* * Map blocks using the previous result first. */ if ((map->m_flags & F2FS_MAP_MAPPED) && block_in_file > map->m_lblk && block_in_file < (map->m_lblk + map->m_len)) goto got_it; /* * Then do more f2fs_map_blocks() calls until we are * done with this page. */ map->m_lblk = block_in_file; map->m_len = last_block - block_in_file; ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT); if (ret) goto out; got_it: if ((map->m_flags & F2FS_MAP_MAPPED)) { block_nr = map->m_pblk + block_in_file - map->m_lblk; SetPageMappedToDisk(page); if (!PageUptodate(page) && !cleancache_get_page(page)) { SetPageUptodate(page); goto confused; } if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr, DATA_GENERIC_ENHANCE_READ)) { ret = -EFSCORRUPTED; goto out; } } else { zero_out: zero_user_segment(page, 0, PAGE_SIZE); if (!PageUptodate(page)) SetPageUptodate(page); unlock_page(page); goto out; } /* * This page will go to BIO. Do we need to send this * BIO off first? */ if (bio && (*last_block_in_bio != block_nr - 1 || !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) { submit_and_realloc: __submit_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; } if (bio == NULL) { bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, is_readahead ? REQ_RAHEAD : 0); if (IS_ERR(bio)) { ret = PTR_ERR(bio); bio = NULL; goto out; } } /* * If the page is under writeback, we need to wait for * its completion to see the correct decrypted data. */ f2fs_wait_on_block_writeback(inode, block_nr); if (bio_add_page(bio, page, blocksize, 0) < blocksize) goto submit_and_realloc; inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA); ClearPageError(page); *last_block_in_bio = block_nr; goto out; confused: if (bio) { __submit_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; } unlock_page(page); out: *bio_ret = bio; return ret; }
0
CVE-2019-19815
2,104
benign
CWE-476
static int f2fs_read_single_page(struct inode *inode, struct page *page, unsigned nr_pages, struct f2fs_map_blocks *map, struct bio **bio_ret, sector_t *last_block_in_bio, bool is_readahead) { struct bio *bio = *bio_ret; const unsigned blkbits = inode->i_blkbits; const unsigned blocksize = 1 << blkbits; sector_t block_in_file; sector_t last_block; sector_t last_block_in_file; sector_t block_nr; int ret = 0; block_in_file = (sector_t)page_index(page); last_block = block_in_file + nr_pages; last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; if (last_block > last_block_in_file) last_block = last_block_in_file; /* just zeroing out page which is beyond EOF */ if (block_in_file >= last_block) goto zero_out; /* * Map blocks using the previous result first. */ if ((map->m_flags & F2FS_MAP_MAPPED) && block_in_file > map->m_lblk && block_in_file < (map->m_lblk + map->m_len)) goto got_it; /* * Then do more f2fs_map_blocks() calls until we are * done with this page. */ map->m_lblk = block_in_file; map->m_len = last_block - block_in_file; ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT); if (ret) goto out; got_it: if ((map->m_flags & F2FS_MAP_MAPPED)) { block_nr = map->m_pblk + block_in_file - map->m_lblk; SetPageMappedToDisk(page); if (!PageUptodate(page) && (!PageSwapCache(page) && !cleancache_get_page(page))) { SetPageUptodate(page); goto confused; } if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr, DATA_GENERIC_ENHANCE_READ)) { ret = -EFSCORRUPTED; goto out; } } else { zero_out: zero_user_segment(page, 0, PAGE_SIZE); if (!PageUptodate(page)) SetPageUptodate(page); unlock_page(page); goto out; } /* * This page will go to BIO. Do we need to send this * BIO off first? */ if (bio && (*last_block_in_bio != block_nr - 1 || !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) { submit_and_realloc: __submit_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; } if (bio == NULL) { bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, is_readahead ? REQ_RAHEAD : 0); if (IS_ERR(bio)) { ret = PTR_ERR(bio); bio = NULL; goto out; } } /* * If the page is under writeback, we need to wait for * its completion to see the correct decrypted data. */ f2fs_wait_on_block_writeback(inode, block_nr); if (bio_add_page(bio, page, blocksize, 0) < blocksize) goto submit_and_realloc; inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA); ClearPageError(page); *last_block_in_bio = block_nr; goto out; confused: if (bio) { __submit_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; } unlock_page(page); out: *bio_ret = bio; return ret; }
1
CVE-2019-19815
2,104
vulnerable
CWE-125
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = GetOutputShape(context, node); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> scoped_output_shape(output_shape, TfLiteIntArrayFree); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Tensorflow's Reshape allows one of the shape components to have the // special -1 value, meaning it will be calculated automatically based on the // input. Here we calculate what that dimension should be so that the number // of output elements in the same as the number of input elements. int num_input_elements = NumElements(input); int num_output_elements = 1; int stretch_dim = -1; for (int i = 0; i < output_shape->size; ++i) { int value = output_shape->data[i]; if (value == -1) { TF_LITE_ENSURE_EQ(context, stretch_dim, -1); stretch_dim = i; } else { num_output_elements *= value; } } if (stretch_dim != -1) { output_shape->data[stretch_dim] = num_input_elements / num_output_elements; num_output_elements *= output_shape->data[stretch_dim]; } TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); return context->ResizeTensor(context, output, scoped_output_shape.release()); }
0
CVE-2020-15211
1,117
benign
CWE-125
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = GetOutputShape(context, node); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> scoped_output_shape(output_shape, TfLiteIntArrayFree); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // Tensorflow's Reshape allows one of the shape components to have the // special -1 value, meaning it will be calculated automatically based on the // input. Here we calculate what that dimension should be so that the number // of output elements in the same as the number of input elements. int num_input_elements = NumElements(input); int num_output_elements = 1; int stretch_dim = -1; for (int i = 0; i < output_shape->size; ++i) { int value = output_shape->data[i]; if (value == -1) { TF_LITE_ENSURE_EQ(context, stretch_dim, -1); stretch_dim = i; } else { num_output_elements *= value; } } if (stretch_dim != -1) { output_shape->data[stretch_dim] = num_input_elements / num_output_elements; num_output_elements *= output_shape->data[stretch_dim]; } TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); return context->ResizeTensor(context, output, scoped_output_shape.release()); }
1
CVE-2020-15211
1,117
vulnerable
CWE-125
TfLiteStatus NonMaxSuppressionSingleClassHelper( TfLiteContext* context, TfLiteNode* node, OpData* op_data, const std::vector<float>& scores, std::vector<int>* selected, int max_detections) { const TfLiteTensor* input_box_encodings = GetInput(context, node, kInputTensorBoxEncodings); const TfLiteTensor* decoded_boxes = &context->tensors[op_data->decoded_boxes_index]; const int num_boxes = input_box_encodings->dims->data[1]; const float non_max_suppression_score_threshold = op_data->non_max_suppression_score_threshold; const float intersection_over_union_threshold = op_data->intersection_over_union_threshold; // Maximum detections should be positive. TF_LITE_ENSURE(context, (max_detections >= 0)); // intersection_over_union_threshold should be positive // and should be less than 1. TF_LITE_ENSURE(context, (intersection_over_union_threshold > 0.0f) && (intersection_over_union_threshold <= 1.0f)); // Validate boxes TF_LITE_ENSURE(context, ValidateBoxes(decoded_boxes, num_boxes)); // threshold scores std::vector<int> keep_indices; // TODO (chowdhery): Remove the dynamic allocation and replace it // with temporaries, esp for std::vector<float> std::vector<float> keep_scores; SelectDetectionsAboveScoreThreshold( scores, non_max_suppression_score_threshold, &keep_scores, &keep_indices); int num_scores_kept = keep_scores.size(); std::vector<int> sorted_indices; sorted_indices.resize(num_scores_kept); DecreasingPartialArgSort(keep_scores.data(), num_scores_kept, num_scores_kept, sorted_indices.data()); const int num_boxes_kept = num_scores_kept; const int output_size = std::min(num_boxes_kept, max_detections); selected->clear(); TfLiteTensor* active_candidate = &context->tensors[op_data->active_candidate_index]; TF_LITE_ENSURE(context, (active_candidate->dims->data[0]) == num_boxes); int num_active_candidate = num_boxes_kept; uint8_t* active_box_candidate = (active_candidate->data.uint8); for (int row = 0; row < num_boxes_kept; row++) { active_box_candidate[row] = 1; } for (int i = 0; i < num_boxes_kept; ++i) { if (num_active_candidate == 0 || selected->size() >= output_size) break; if (active_box_candidate[i] == 1) { selected->push_back(keep_indices[sorted_indices[i]]); active_box_candidate[i] = 0; num_active_candidate--; } else { continue; } for (int j = i + 1; j < num_boxes_kept; ++j) { if (active_box_candidate[j] == 1) { float intersection_over_union = ComputeIntersectionOverUnion( decoded_boxes, keep_indices[sorted_indices[i]], keep_indices[sorted_indices[j]]); if (intersection_over_union > intersection_over_union_threshold) { active_box_candidate[j] = 0; num_active_candidate--; } } } } return kTfLiteOk; }
0
CVE-2020-15211
2,084
benign
CWE-125
TfLiteStatus NonMaxSuppressionSingleClassHelper( TfLiteContext* context, TfLiteNode* node, OpData* op_data, const std::vector<float>& scores, std::vector<int>* selected, int max_detections) { const TfLiteTensor* input_box_encodings; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensorBoxEncodings, &input_box_encodings)); const TfLiteTensor* decoded_boxes = &context->tensors[op_data->decoded_boxes_index]; const int num_boxes = input_box_encodings->dims->data[1]; const float non_max_suppression_score_threshold = op_data->non_max_suppression_score_threshold; const float intersection_over_union_threshold = op_data->intersection_over_union_threshold; // Maximum detections should be positive. TF_LITE_ENSURE(context, (max_detections >= 0)); // intersection_over_union_threshold should be positive // and should be less than 1. TF_LITE_ENSURE(context, (intersection_over_union_threshold > 0.0f) && (intersection_over_union_threshold <= 1.0f)); // Validate boxes TF_LITE_ENSURE(context, ValidateBoxes(decoded_boxes, num_boxes)); // threshold scores std::vector<int> keep_indices; // TODO (chowdhery): Remove the dynamic allocation and replace it // with temporaries, esp for std::vector<float> std::vector<float> keep_scores; SelectDetectionsAboveScoreThreshold( scores, non_max_suppression_score_threshold, &keep_scores, &keep_indices); int num_scores_kept = keep_scores.size(); std::vector<int> sorted_indices; sorted_indices.resize(num_scores_kept); DecreasingPartialArgSort(keep_scores.data(), num_scores_kept, num_scores_kept, sorted_indices.data()); const int num_boxes_kept = num_scores_kept; const int output_size = std::min(num_boxes_kept, max_detections); selected->clear(); TfLiteTensor* active_candidate = &context->tensors[op_data->active_candidate_index]; TF_LITE_ENSURE(context, (active_candidate->dims->data[0]) == num_boxes); int num_active_candidate = num_boxes_kept; uint8_t* active_box_candidate = (active_candidate->data.uint8); for (int row = 0; row < num_boxes_kept; row++) { active_box_candidate[row] = 1; } for (int i = 0; i < num_boxes_kept; ++i) { if (num_active_candidate == 0 || selected->size() >= output_size) break; if (active_box_candidate[i] == 1) { selected->push_back(keep_indices[sorted_indices[i]]); active_box_candidate[i] = 0; num_active_candidate--; } else { continue; } for (int j = i + 1; j < num_boxes_kept; ++j) { if (active_box_candidate[j] == 1) { float intersection_over_union = ComputeIntersectionOverUnion( decoded_boxes, keep_indices[sorted_indices[i]], keep_indices[sorted_indices[j]]); if (intersection_over_union > intersection_over_union_threshold) { active_box_candidate[j] = 0; num_active_candidate--; } } } } return kTfLiteOk; }
1
CVE-2020-15211
2,084
vulnerable
CWE-190
static int parse_exports_table(long long *table_start) { int res; int indexes = SQUASHFS_LOOKUP_BLOCKS(sBlk.s.inodes); long long export_index_table[indexes]; res = read_fs_bytes(fd, sBlk.s.lookup_table_start, SQUASHFS_LOOKUP_BLOCK_BYTES(sBlk.s.inodes), export_index_table); if(res == FALSE) { ERROR("parse_exports_table: failed to read export index table\n"); return FALSE; } SQUASHFS_INSWAP_LOOKUP_BLOCKS(export_index_table, indexes); /* * export_index_table[0] stores the start of the compressed export blocks. * This by definition is also the end of the previous filesystem * table - the fragment table. */ *table_start = export_index_table[0]; return TRUE; }
0
CVE-2015-4645
2,838
benign
CWE-190
static int parse_exports_table(long long *table_start) { /* * Note on overflow limits: * Size of SBlk.s.inodes is 2^32 (unsigned int) * Max indexes is (2^32*8)/8K or 2^22 * Max length is ((2^32*8)/8K)*8 or 2^25 */ int res; int indexes = SQUASHFS_LOOKUP_BLOCKS((long long) sBlk.s.inodes); int length = SQUASHFS_LOOKUP_BLOCK_BYTES((long long) sBlk.s.inodes); long long *export_index_table; /* * The size of the index table (length bytes) should match the * table start and end points */ if(length != (*table_start - sBlk.s.lookup_table_start)) { ERROR("parse_exports_table: Bad inode count in super block\n"); return FALSE; } export_index_table = alloc_index_table(indexes); res = read_fs_bytes(fd, sBlk.s.lookup_table_start, length, export_index_table); if(res == FALSE) { ERROR("parse_exports_table: failed to read export index table\n"); return FALSE; } SQUASHFS_INSWAP_LOOKUP_BLOCKS(export_index_table, indexes); /* * export_index_table[0] stores the start of the compressed export blocks. * This by definition is also the end of the previous filesystem * table - the fragment table. */ *table_start = export_index_table[0]; return TRUE; }
1
CVE-2015-4645
2,838
vulnerable
CWE-416
*/ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) { struct bfq_data *bfqd = container_of(timer, struct bfq_data, idle_slice_timer); struct bfq_queue *bfqq = bfqd->in_service_queue; /* * Theoretical race here: the in-service queue can be NULL or * different from the queue that was idling if a new request * arrives for the current queue and there is a full dispatch * cycle that changes the in-service queue. This can hardly * happen, but in the worst case we just expire a queue too * early. */ if (bfqq) bfq_idle_slice_timer_body(bfqq); return HRTIMER_NORESTART;
0
CVE-2020-12657
3,085
benign
CWE-416
*/ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) { struct bfq_data *bfqd = container_of(timer, struct bfq_data, idle_slice_timer); struct bfq_queue *bfqq = bfqd->in_service_queue; /* * Theoretical race here: the in-service queue can be NULL or * different from the queue that was idling if a new request * arrives for the current queue and there is a full dispatch * cycle that changes the in-service queue. This can hardly * happen, but in the worst case we just expire a queue too * early. */ if (bfqq) bfq_idle_slice_timer_body(bfqd, bfqq); return HRTIMER_NORESTART;
1
CVE-2020-12657
3,085
vulnerable
CWE-125
TfLiteStatus Rfft2dHelper(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); const float* input_data = GetTensorData<float>(input); const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor); const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); complex<float>* output_data = GetTensorData<complex<float>>(output); int fft_height, fft_width; fft_height = fft_length_data[0]; fft_width = fft_length_data[1]; // FFT is processed for every slice on the inner most 2 dimensions. // Count the number of slices in the input tensor. const RuntimeShape input_shape = GetTensorShape(input); const int input_dims_count = input_shape.DimensionsCount(); const auto* input_dims_data = input_shape.DimsData(); int num_slices = 1; for (int i = 0; i < input_dims_count - 2; ++i) { num_slices *= input_dims_data[i]; } int input_height = input_dims_data[input_dims_count - 2]; int input_width = input_dims_data[input_dims_count - 1]; int input_slice_size = input_height * input_width; int output_slice_size = fft_height * (fft_width / 2 + 1); // Create input/output buffer for FFT double** fft_input_output = new double*[fft_height]; for (int i = 0; i < fft_height; ++i) { fft_input_output[i] = new double[fft_width + 2]; } // Get buffer for integer working area. TfLiteTensor* fft_integer_working_area = GetTemporary(context, node, kFftIntegerWorkingAreaTensor); int* fft_integer_working_area_data = GetTensorData<int>(fft_integer_working_area); // Get buffer for double working area. TfLiteTensor* fft_double_working_area = GetTemporary(context, node, kFftDoubleWorkingAreaTensor); // Get double value out of the memory of fft_double_working_area_data. double* fft_double_working_area_data = reinterpret_cast<double*>( GetTensorData<int64_t>(fft_double_working_area)); // Process every slice in the input buffer for (int i = 0; i < num_slices; ++i) { PrepareInputBuffer(input_data, input_height, input_width, fft_height, fft_width, fft_input_output); memset(fft_integer_working_area_data, 0, fft_integer_working_area->bytes); memset(fft_double_working_area_data, 0, fft_double_working_area->bytes); Rfft2dImpl(fft_height, fft_width, fft_input_output, fft_integer_working_area_data, fft_double_working_area_data); PrepareOutputBuffer(output_data, fft_height, fft_width, fft_input_output); input_data += input_slice_size; output_data += output_slice_size; } // Delete the input buffer for (int i = 0; i < fft_height; ++i) { delete[] fft_input_output[i]; } delete[] fft_input_output; return kTfLiteOk; }
0
CVE-2020-15211
1,867
benign
CWE-125
TfLiteStatus Rfft2dHelper(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const float* input_data = GetTensorData<float>(input); const TfLiteTensor* fft_length; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kFftLengthTensor, &fft_length)); const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); complex<float>* output_data = GetTensorData<complex<float>>(output); int fft_height, fft_width; fft_height = fft_length_data[0]; fft_width = fft_length_data[1]; // FFT is processed for every slice on the inner most 2 dimensions. // Count the number of slices in the input tensor. const RuntimeShape input_shape = GetTensorShape(input); const int input_dims_count = input_shape.DimensionsCount(); const auto* input_dims_data = input_shape.DimsData(); int num_slices = 1; for (int i = 0; i < input_dims_count - 2; ++i) { num_slices *= input_dims_data[i]; } int input_height = input_dims_data[input_dims_count - 2]; int input_width = input_dims_data[input_dims_count - 1]; int input_slice_size = input_height * input_width; int output_slice_size = fft_height * (fft_width / 2 + 1); // Create input/output buffer for FFT double** fft_input_output = new double*[fft_height]; for (int i = 0; i < fft_height; ++i) { fft_input_output[i] = new double[fft_width + 2]; } // Get buffer for integer working area. TfLiteTensor* fft_integer_working_area; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor, &fft_integer_working_area)); int* fft_integer_working_area_data = GetTensorData<int>(fft_integer_working_area); // Get buffer for double working area. TfLiteTensor* fft_double_working_area; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor, &fft_double_working_area)); // Get double value out of the memory of fft_double_working_area_data. double* fft_double_working_area_data = reinterpret_cast<double*>( GetTensorData<int64_t>(fft_double_working_area)); // Process every slice in the input buffer for (int i = 0; i < num_slices; ++i) { PrepareInputBuffer(input_data, input_height, input_width, fft_height, fft_width, fft_input_output); memset(fft_integer_working_area_data, 0, fft_integer_working_area->bytes); memset(fft_double_working_area_data, 0, fft_double_working_area->bytes); Rfft2dImpl(fft_height, fft_width, fft_input_output, fft_integer_working_area_data, fft_double_working_area_data); PrepareOutputBuffer(output_data, fft_height, fft_width, fft_input_output); input_data += input_slice_size; output_data += output_slice_size; } // Delete the input buffer for (int i = 0; i < fft_height; ++i) { delete[] fft_input_output[i]; } delete[] fft_input_output; return kTfLiteOk; }
1
CVE-2020-15211
1,867
vulnerable
CWE-125
GF_Err sgpd_dump(GF_Box *a, FILE * trace) { u32 i; GF_SampleGroupDescriptionBox *ptr = (GF_SampleGroupDescriptionBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "SampleGroupDescriptionBox", trace); if (ptr->grouping_type) fprintf(trace, "grouping_type=\"%s\"", gf_4cc_to_str(ptr->grouping_type) ); if (ptr->version==1) fprintf(trace, " default_length=\"%d\"", ptr->default_length); if ((ptr->version>=2) && ptr->default_description_index) fprintf(trace, " default_group_index=\"%d\"", ptr->default_description_index); fprintf(trace, ">\n"); for (i=0; i<gf_list_count(ptr->group_descriptions); i++) { void *entry = gf_list_get(ptr->group_descriptions, i); switch (ptr->grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: fprintf(trace, "<RollRecoveryEntry roll_distance=\"%d\" />\n", ((GF_RollRecoveryEntry*)entry)->roll_distance ); break; case GF_ISOM_SAMPLE_GROUP_PROL: fprintf(trace, "<AudioPreRollEntry roll_distance=\"%d\" />\n", ((GF_RollRecoveryEntry*)entry)->roll_distance ); break; case GF_ISOM_SAMPLE_GROUP_TELE: fprintf(trace, "<TemporalLevelEntry level_independently_decodable=\"%d\"/>\n", ((GF_TemporalLevelEntry*)entry)->level_independently_decodable); break; case GF_ISOM_SAMPLE_GROUP_RAP: fprintf(trace, "<VisualRandomAccessEntry num_leading_samples_known=\"%s\"", ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known ? "yes" : "no"); if (((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known) fprintf(trace, " num_leading_samples=\"%d\"", ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples); fprintf(trace, "/>\n"); break; case GF_ISOM_SAMPLE_GROUP_SYNC: fprintf(trace, "<SyncSampleGroupEntry NAL_unit_type=\"%d\"/>\n", ((GF_SYNCEntry*)entry)->NALU_type); break; case GF_ISOM_SAMPLE_GROUP_SEIG: fprintf(trace, "<CENCSampleEncryptionGroupEntry IsEncrypted=\"%d\" IV_size=\"%d\" KID=\"", ((GF_CENCSampleEncryptionGroupEntry*)entry)->IsProtected, ((GF_CENCSampleEncryptionGroupEntry*)entry)->Per_Sample_IV_size); dump_data_hex(trace, (char *)((GF_CENCSampleEncryptionGroupEntry*)entry)->KID, 16); if ((((GF_CENCSampleEncryptionGroupEntry*)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry*)entry)->Per_Sample_IV_size) { fprintf(trace, "\" constant_IV_size=\"%d\" constant_IV=\"", ((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV_size); dump_data_hex(trace, (char *)((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV, ((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV_size); } fprintf(trace, "\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_OINF: oinf_entry_dump(entry, trace); break; case GF_ISOM_SAMPLE_GROUP_LINF: linf_dump(entry, trace); break; case GF_ISOM_SAMPLE_GROUP_TRIF: trif_dump(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); break; case GF_ISOM_SAMPLE_GROUP_NALM: nalm_dump(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); break; case GF_ISOM_SAMPLE_GROUP_SAP: fprintf(trace, "<SAPEntry dependent_flag=\"%d\" SAP_type=\"%d\" />\n", ((GF_SAPEntry*)entry)->dependent_flag, ((GF_SAPEntry*)entry)->SAP_type); break; default: fprintf(trace, "<DefaultSampleGroupDescriptionEntry size=\"%d\" data=\"", ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); dump_data(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); fprintf(trace, "\"/>\n"); } } if (!ptr->size) { switch (ptr->grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: fprintf(trace, "<RollRecoveryEntry roll_distance=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_PROL: fprintf(trace, "<AudioPreRollEntry roll_distance=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_TELE: fprintf(trace, "<TemporalLevelEntry level_independently_decodable=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_RAP: fprintf(trace, "<VisualRandomAccessEntry num_leading_samples_known=\"yes|no\" num_leading_samples=\"\" />\n"); break; case GF_ISOM_SAMPLE_GROUP_SYNC: fprintf(trace, "<SyncSampleGroupEntry NAL_unit_type=\"\" />\n"); break; case GF_ISOM_SAMPLE_GROUP_SEIG: fprintf(trace, "<CENCSampleEncryptionGroupEntry IsEncrypted=\"\" IV_size=\"\" KID=\"\" constant_IV_size=\"\" constant_IV=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_OINF: oinf_entry_dump(NULL, trace); break; case GF_ISOM_SAMPLE_GROUP_LINF: linf_dump(NULL, trace); break; case GF_ISOM_SAMPLE_GROUP_TRIF: trif_dump(trace, NULL, 0); break; case GF_ISOM_SAMPLE_GROUP_NALM: nalm_dump(trace, NULL, 0); break; case GF_ISOM_SAMPLE_GROUP_SAP: fprintf(trace, "<SAPEntry dependent_flag=\"\" SAP_type=\"\" />\n"); break; default: fprintf(trace, "<DefaultSampleGroupDescriptionEntry size=\"\" data=\"\"/>\n"); } } gf_isom_box_dump_done("SampleGroupDescriptionBox", a, trace); return GF_OK; }
0
CVE-2018-13006
2,114
benign
CWE-125
GF_Err sgpd_dump(GF_Box *a, FILE * trace) { u32 i; GF_SampleGroupDescriptionBox *ptr = (GF_SampleGroupDescriptionBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "SampleGroupDescriptionBox", trace); if (ptr->grouping_type) fprintf(trace, "grouping_type=\"%s\"", gf_4cc_to_str(ptr->grouping_type) ); if (ptr->version==1) fprintf(trace, " default_length=\"%d\"", ptr->default_length); if ((ptr->version>=2) && ptr->default_description_index) fprintf(trace, " default_group_index=\"%d\"", ptr->default_description_index); fprintf(trace, ">\n"); for (i=0; i<gf_list_count(ptr->group_descriptions); i++) { void *entry = gf_list_get(ptr->group_descriptions, i); switch (ptr->grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: fprintf(trace, "<RollRecoveryEntry roll_distance=\"%d\" />\n", ((GF_RollRecoveryEntry*)entry)->roll_distance ); break; case GF_ISOM_SAMPLE_GROUP_PROL: fprintf(trace, "<AudioPreRollEntry roll_distance=\"%d\" />\n", ((GF_RollRecoveryEntry*)entry)->roll_distance ); break; case GF_ISOM_SAMPLE_GROUP_TELE: fprintf(trace, "<TemporalLevelEntry level_independently_decodable=\"%d\"/>\n", ((GF_TemporalLevelEntry*)entry)->level_independently_decodable); break; case GF_ISOM_SAMPLE_GROUP_RAP: fprintf(trace, "<VisualRandomAccessEntry num_leading_samples_known=\"%s\"", ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known ? "yes" : "no"); if (((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known) fprintf(trace, " num_leading_samples=\"%d\"", ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples); fprintf(trace, "/>\n"); break; case GF_ISOM_SAMPLE_GROUP_SYNC: fprintf(trace, "<SyncSampleGroupEntry NAL_unit_type=\"%d\"/>\n", ((GF_SYNCEntry*)entry)->NALU_type); break; case GF_ISOM_SAMPLE_GROUP_SEIG: fprintf(trace, "<CENCSampleEncryptionGroupEntry IsEncrypted=\"%d\" IV_size=\"%d\" KID=\"", ((GF_CENCSampleEncryptionGroupEntry*)entry)->IsProtected, ((GF_CENCSampleEncryptionGroupEntry*)entry)->Per_Sample_IV_size); dump_data_hex(trace, (char *)((GF_CENCSampleEncryptionGroupEntry*)entry)->KID, 16); if ((((GF_CENCSampleEncryptionGroupEntry*)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry*)entry)->Per_Sample_IV_size) { fprintf(trace, "\" constant_IV_size=\"%d\" constant_IV=\"", ((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV_size); dump_data_hex(trace, (char *)((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV, ((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV_size); } fprintf(trace, "\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_OINF: oinf_entry_dump(entry, trace); break; case GF_ISOM_SAMPLE_GROUP_LINF: linf_dump(entry, trace); break; case GF_ISOM_SAMPLE_GROUP_TRIF: trif_dump(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); break; case GF_ISOM_SAMPLE_GROUP_NALM: nalm_dump(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); break; case GF_ISOM_SAMPLE_GROUP_SAP: fprintf(trace, "<SAPEntry dependent_flag=\"%d\" SAP_type=\"%d\" />\n", ((GF_SAPEntry*)entry)->dependent_flag, ((GF_SAPEntry*)entry)->SAP_type); break; default: fprintf(trace, "<DefaultSampleGroupDescriptionEntry size=\"%d\" data=\"", ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); dump_data(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); fprintf(trace, "\"/>\n"); } } if (!ptr->size) { switch (ptr->grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: fprintf(trace, "<RollRecoveryEntry roll_distance=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_PROL: fprintf(trace, "<AudioPreRollEntry roll_distance=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_TELE: fprintf(trace, "<TemporalLevelEntry level_independently_decodable=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_RAP: fprintf(trace, "<VisualRandomAccessEntry num_leading_samples_known=\"yes|no\" num_leading_samples=\"\" />\n"); break; case GF_ISOM_SAMPLE_GROUP_SYNC: fprintf(trace, "<SyncSampleGroupEntry NAL_unit_type=\"\" />\n"); break; case GF_ISOM_SAMPLE_GROUP_SEIG: fprintf(trace, "<CENCSampleEncryptionGroupEntry IsEncrypted=\"\" IV_size=\"\" KID=\"\" constant_IV_size=\"\" constant_IV=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_OINF: oinf_entry_dump(NULL, trace); break; case GF_ISOM_SAMPLE_GROUP_LINF: linf_dump(NULL, trace); break; case GF_ISOM_SAMPLE_GROUP_TRIF: trif_dump(trace, NULL, 0); break; case GF_ISOM_SAMPLE_GROUP_NALM: nalm_dump(trace, NULL, 0); break; case GF_ISOM_SAMPLE_GROUP_SAP: fprintf(trace, "<SAPEntry dependent_flag=\"\" SAP_type=\"\" />\n"); break; default: fprintf(trace, "<DefaultSampleGroupDescriptionEntry size=\"\" data=\"\"/>\n"); } } gf_isom_box_dump_done("SampleGroupDescriptionBox", a, trace); return GF_OK; }
1
CVE-2018-13006
2,114
vulnerable
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = input2->type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } if (output->type == kTfLiteUInt8) { TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); const double real_multiplier = input1->params.scale / (input2->params.scale * output->params.scale); QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, output_size); }
0
CVE-2020-15211
2,516
benign
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = input2->type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } if (output->type == kTfLiteUInt8) { TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); const double real_multiplier = input1->params.scale / (input2->params.scale * output->params.scale); QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, output_size); }
1
CVE-2020-15211
2,516
vulnerable
CWE-125
std::pair<Function *, Function *> ESTreeIRGen::doLazyFunction( hbc::LazyCompilationData *lazyData) { // Create a top level function that will never be executed, because: // 1. IRGen assumes the first function always has global scope // 2. It serves as the root for dummy functions for lexical data Function *topLevel = Builder.createTopLevelFunction(lazyData->strictMode, {}); FunctionContext topLevelFunctionContext{this, topLevel, nullptr}; // Save the top-level context, but ensure it doesn't outlive what it is // pointing to. llvh::SaveAndRestore<FunctionContext *> saveTopLevelContext( topLevelContext, &topLevelFunctionContext); auto *node = cast<ESTree::FunctionLikeNode>(Root); // We restore scoping information in two separate ways: // 1. By adding them to ExternalScopes for resolution here // 2. By adding dummy functions for lexical scoping debug info later // // Instruction selection determines the delta between the ExternalScope // and the dummy function chain, so we add the ExternalScopes with // positive depth. lexicalScopeChain = lazyData->parentScope; materializeScopesInChain( topLevel, lexicalScopeChain, getDepth(lexicalScopeChain) - 1); // If lazyData->closureAlias is specified, we must create an alias binding // between originalName (which must be valid) and the variable identified by // closureAlias. Variable *parentVar = nullptr; if (lazyData->closureAlias.isValid()) { assert(lazyData->originalName.isValid() && "Original name invalid"); assert( lazyData->originalName != lazyData->closureAlias && "Original name must be different from the alias"); // NOTE: the closureAlias target must exist and must be a Variable. parentVar = cast<Variable>(nameTable_.lookup(lazyData->closureAlias)); // Re-create the alias. nameTable_.insert(lazyData->originalName, parentVar); } assert( !llvh::isa<ESTree::ArrowFunctionExpressionNode>(node) && "lazy compilation not supported for arrow functions"); auto *func = genES5Function(lazyData->originalName, parentVar, node); addLexicalDebugInfo(func, topLevel, lexicalScopeChain); return {func, topLevel}; }
0
CVE-2020-1912
1,034
benign
CWE-125
std::pair<Function *, Function *> ESTreeIRGen::doLazyFunction( hbc::LazyCompilationData *lazyData) { // Create a top level function that will never be executed, because: // 1. IRGen assumes the first function always has global scope // 2. It serves as the root for dummy functions for lexical data Function *topLevel = Builder.createTopLevelFunction(lazyData->strictMode, {}); FunctionContext topLevelFunctionContext{this, topLevel, nullptr}; // Save the top-level context, but ensure it doesn't outlive what it is // pointing to. llvh::SaveAndRestore<FunctionContext *> saveTopLevelContext( topLevelContext, &topLevelFunctionContext); auto *node = cast<ESTree::FunctionLikeNode>(Root); // We restore scoping information in two separate ways: // 1. By adding them to ExternalScopes for resolution here // 2. By adding dummy functions for lexical scoping debug info later // // Instruction selection determines the delta between the ExternalScope // and the dummy function chain, so we add the ExternalScopes with // positive depth. lexicalScopeChain = lazyData->parentScope; materializeScopesInChain( topLevel, lexicalScopeChain, getDepth(lexicalScopeChain) - 1); // If lazyData->closureAlias is specified, we must create an alias binding // between originalName (which must be valid) and the variable identified by // closureAlias. Variable *parentVar = nullptr; if (lazyData->closureAlias.isValid()) { assert(lazyData->originalName.isValid() && "Original name invalid"); assert( lazyData->originalName != lazyData->closureAlias && "Original name must be different from the alias"); // NOTE: the closureAlias target must exist and must be a Variable. parentVar = cast<Variable>(nameTable_.lookup(lazyData->closureAlias)); // Re-create the alias. nameTable_.insert(lazyData->originalName, parentVar); } assert( !llvh::isa<ESTree::ArrowFunctionExpressionNode>(node) && "lazy compilation not supported for arrow functions"); auto *func = genES5Function( lazyData->originalName, parentVar, node, lazyData->isGeneratorInnerFunction); addLexicalDebugInfo(func, topLevel, lexicalScopeChain); return {func, topLevel}; }
1
CVE-2020-1912
1,034
vulnerable
CWE-125
get_lisp_indent(void) { pos_T *pos, realpos, paren; int amount; char_u *that; colnr_T col; colnr_T firsttry; int parencount, quotecount; int vi_lisp; // Set vi_lisp to use the vi-compatible method vi_lisp = (vim_strchr(p_cpo, CPO_LISP) != NULL); realpos = curwin->w_cursor; curwin->w_cursor.col = 0; if ((pos = findmatch(NULL, '(')) == NULL) pos = findmatch(NULL, '['); else { paren = *pos; pos = findmatch(NULL, '['); if (pos == NULL || LT_POSP(pos, &paren)) pos = &paren; } if (pos != NULL) { // Extra trick: Take the indent of the first previous non-white // line that is at the same () level. amount = -1; parencount = 0; while (--curwin->w_cursor.lnum >= pos->lnum) { if (linewhite(curwin->w_cursor.lnum)) continue; for (that = ml_get_curline(); *that != NUL; ++that) { if (*that == ';') { while (*(that + 1) != NUL) ++that; continue; } if (*that == '\\') { if (*(that + 1) != NUL) ++that; continue; } if (*that == '"' && *(that + 1) != NUL) { while (*++that && *that != '"') { // skipping escaped characters in the string if (*that == '\\') { if (*++that == NUL) break; if (that[1] == NUL) { ++that; break; } } } if (*that == NUL) break; } if (*that == '(' || *that == '[') ++parencount; else if (*that == ')' || *that == ']') --parencount; } if (parencount == 0) { amount = get_indent(); break; } } if (amount == -1) { curwin->w_cursor.lnum = pos->lnum; curwin->w_cursor.col = pos->col; col = pos->col; that = ml_get_curline(); if (vi_lisp && get_indent() == 0) amount = 2; else { char_u *line = that; amount = 0; while (*that && col) { amount += lbr_chartabsize_adv(line, &that, (colnr_T)amount); col--; } // Some keywords require "body" indenting rules (the // non-standard-lisp ones are Scheme special forms): // // (let ((a 1)) instead (let ((a 1)) // (...)) of (...)) if (!vi_lisp && (*that == '(' || *that == '[') && lisp_match(that + 1)) amount += 2; else { that++; amount++; firsttry = amount; while (VIM_ISWHITE(*that)) { amount += lbr_chartabsize(line, that, (colnr_T)amount); ++that; } if (*that && *that != ';') // not a comment line { // test *that != '(' to accommodate first let/do // argument if it is more than one line if (!vi_lisp && *that != '(' && *that != '[') firsttry++; parencount = 0; quotecount = 0; if (vi_lisp || (*that != '"' && *that != '\'' && *that != '#' && (*that < '0' || *that > '9'))) { while (*that && (!VIM_ISWHITE(*that) || quotecount || parencount) && (!((*that == '(' || *that == '[') && !quotecount && !parencount && vi_lisp))) { if (*that == '"') quotecount = !quotecount; if ((*that == '(' || *that == '[') && !quotecount) ++parencount; if ((*that == ')' || *that == ']') && !quotecount) --parencount; if (*that == '\\' && *(that+1) != NUL) amount += lbr_chartabsize_adv( line, &that, (colnr_T)amount); amount += lbr_chartabsize_adv( line, &that, (colnr_T)amount); } } while (VIM_ISWHITE(*that)) { amount += lbr_chartabsize( line, that, (colnr_T)amount); that++; } if (!*that || *that == ';') amount = firsttry; } } } } } else amount = 0; // no matching '(' or '[' found, use zero indent curwin->w_cursor = realpos; return amount; }
0
CVE-2022-2183
1,131
benign
CWE-125
get_lisp_indent(void) { pos_T *pos, realpos, paren; int amount; char_u *that; colnr_T col; colnr_T firsttry; int parencount, quotecount; int vi_lisp; // Set vi_lisp to use the vi-compatible method vi_lisp = (vim_strchr(p_cpo, CPO_LISP) != NULL); realpos = curwin->w_cursor; curwin->w_cursor.col = 0; if ((pos = findmatch(NULL, '(')) == NULL) pos = findmatch(NULL, '['); else { paren = *pos; pos = findmatch(NULL, '['); if (pos == NULL || LT_POSP(pos, &paren)) pos = &paren; } if (pos != NULL) { // Extra trick: Take the indent of the first previous non-white // line that is at the same () level. amount = -1; parencount = 0; while (--curwin->w_cursor.lnum >= pos->lnum) { if (linewhite(curwin->w_cursor.lnum)) continue; for (that = ml_get_curline(); *that != NUL; ++that) { if (*that == ';') { while (*(that + 1) != NUL) ++that; continue; } if (*that == '\\') { if (*(that + 1) != NUL) ++that; continue; } if (*that == '"' && *(that + 1) != NUL) { while (*++that && *that != '"') { // skipping escaped characters in the string if (*that == '\\') { if (*++that == NUL) break; if (that[1] == NUL) { ++that; break; } } } if (*that == NUL) break; } if (*that == '(' || *that == '[') ++parencount; else if (*that == ')' || *that == ']') --parencount; } if (parencount == 0) { amount = get_indent(); break; } } if (amount == -1) { curwin->w_cursor.lnum = pos->lnum; curwin->w_cursor.col = pos->col; col = pos->col; that = ml_get_curline(); if (vi_lisp && get_indent() == 0) amount = 2; else { char_u *line = that; amount = 0; while (*that && col) { amount += lbr_chartabsize_adv(line, &that, (colnr_T)amount); col--; } // Some keywords require "body" indenting rules (the // non-standard-lisp ones are Scheme special forms): // // (let ((a 1)) instead (let ((a 1)) // (...)) of (...)) if (!vi_lisp && (*that == '(' || *that == '[') && lisp_match(that + 1)) amount += 2; else { if (*that != NUL) { that++; amount++; } firsttry = amount; while (VIM_ISWHITE(*that)) { amount += lbr_chartabsize(line, that, (colnr_T)amount); ++that; } if (*that && *that != ';') // not a comment line { // test *that != '(' to accommodate first let/do // argument if it is more than one line if (!vi_lisp && *that != '(' && *that != '[') firsttry++; parencount = 0; quotecount = 0; if (vi_lisp || (*that != '"' && *that != '\'' && *that != '#' && (*that < '0' || *that > '9'))) { while (*that && (!VIM_ISWHITE(*that) || quotecount || parencount) && (!((*that == '(' || *that == '[') && !quotecount && !parencount && vi_lisp))) { if (*that == '"') quotecount = !quotecount; if ((*that == '(' || *that == '[') && !quotecount) ++parencount; if ((*that == ')' || *that == ']') && !quotecount) --parencount; if (*that == '\\' && *(that+1) != NUL) amount += lbr_chartabsize_adv( line, &that, (colnr_T)amount); amount += lbr_chartabsize_adv( line, &that, (colnr_T)amount); } } while (VIM_ISWHITE(*that)) { amount += lbr_chartabsize( line, that, (colnr_T)amount); that++; } if (!*that || *that == ';') amount = firsttry; } } } } } else amount = 0; // no matching '(' or '[' found, use zero indent curwin->w_cursor = realpos; return amount; }
1
CVE-2022-2183
1,131
vulnerable
CWE-119
static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); struct usb_device_descriptor *udesc; __u16 bcdDevice, rev_maj, rev_min; if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) { hid_info(hdev, "fixing up Logitech keyboard report descriptor\n"); rdesc[84] = rdesc[89] = 0x4d; rdesc[85] = rdesc[90] = 0x10; } if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 && rdesc[32] == 0x81 && rdesc[33] == 0x06 && rdesc[49] == 0x81 && rdesc[50] == 0x06) { hid_info(hdev, "fixing up rel/abs in Logitech report descriptor\n"); rdesc[33] = rdesc[50] = 0x02; } switch (hdev->product) { /* Several wheels report as this id when operating in emulation mode. */ case USB_DEVICE_ID_LOGITECH_WHEEL: udesc = &(hid_to_usb_dev(hdev)->descriptor); if (!udesc) { hid_err(hdev, "NULL USB device descriptor\n"); break; } bcdDevice = le16_to_cpu(udesc->bcdDevice); rev_maj = bcdDevice >> 8; rev_min = bcdDevice & 0xff; /* Update the report descriptor for only the Driving Force wheel */ if (rev_maj == 1 && rev_min == 2 && *rsize == DF_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force report descriptor\n"); rdesc = df_rdesc_fixed; *rsize = sizeof(df_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: if (*rsize == MOMO_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Force (Red) report descriptor\n"); rdesc = momo_rdesc_fixed; *rsize = sizeof(momo_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: if (*rsize == MOMO2_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Racing Force (Black) report descriptor\n"); rdesc = momo2_rdesc_fixed; *rsize = sizeof(momo2_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: if (*rsize == FV_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Formula Vibration report descriptor\n"); rdesc = fv_rdesc_fixed; *rsize = sizeof(fv_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: if (*rsize == DFP_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force Pro report descriptor\n"); rdesc = dfp_rdesc_fixed; *rsize = sizeof(dfp_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_WII_WHEEL: if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B && rdesc[47] == 0x05 && rdesc[48] == 0x09) { hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n"); rdesc[41] = 0x05; rdesc[42] = 0x09; rdesc[47] = 0x95; rdesc[48] = 0x0B; } break; } return rdesc; }
0
CVE-2014-3184
267
benign
CWE-119
static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); struct usb_device_descriptor *udesc; __u16 bcdDevice, rev_maj, rev_min; if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) { hid_info(hdev, "fixing up Logitech keyboard report descriptor\n"); rdesc[84] = rdesc[89] = 0x4d; rdesc[85] = rdesc[90] = 0x10; } if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 && rdesc[32] == 0x81 && rdesc[33] == 0x06 && rdesc[49] == 0x81 && rdesc[50] == 0x06) { hid_info(hdev, "fixing up rel/abs in Logitech report descriptor\n"); rdesc[33] = rdesc[50] = 0x02; } switch (hdev->product) { /* Several wheels report as this id when operating in emulation mode. */ case USB_DEVICE_ID_LOGITECH_WHEEL: udesc = &(hid_to_usb_dev(hdev)->descriptor); if (!udesc) { hid_err(hdev, "NULL USB device descriptor\n"); break; } bcdDevice = le16_to_cpu(udesc->bcdDevice); rev_maj = bcdDevice >> 8; rev_min = bcdDevice & 0xff; /* Update the report descriptor for only the Driving Force wheel */ if (rev_maj == 1 && rev_min == 2 && *rsize == DF_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force report descriptor\n"); rdesc = df_rdesc_fixed; *rsize = sizeof(df_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: if (*rsize == MOMO_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Force (Red) report descriptor\n"); rdesc = momo_rdesc_fixed; *rsize = sizeof(momo_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: if (*rsize == MOMO2_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Racing Force (Black) report descriptor\n"); rdesc = momo2_rdesc_fixed; *rsize = sizeof(momo2_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: if (*rsize == FV_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Formula Vibration report descriptor\n"); rdesc = fv_rdesc_fixed; *rsize = sizeof(fv_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: if (*rsize == DFP_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force Pro report descriptor\n"); rdesc = dfp_rdesc_fixed; *rsize = sizeof(dfp_rdesc_fixed); } break; case USB_DEVICE_ID_LOGITECH_WII_WHEEL: if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B && rdesc[47] == 0x05 && rdesc[48] == 0x09) { hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n"); rdesc[41] = 0x05; rdesc[42] = 0x09; rdesc[47] = 0x95; rdesc[48] = 0x0B; } break; } return rdesc; }
1
CVE-2014-3184
267
vulnerable
CWE-20
int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); int is_udp4; bool slow; if (addr_len) *addr_len = sizeof(struct sockaddr_in6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; is_udp4 = (skb->protocol == htons(ETH_P_IP)); /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udpv6_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) { if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); } sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (msg->msg_name) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *) msg->msg_name; sin6->sin6_family = AF_INET6; sin6->sin6_port = udp_hdr(skb)->source; sin6->sin6_flowinfo = 0; if (is_udp4) { ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &sin6->sin6_addr); sin6->sin6_scope_id = 0; } else { sin6->sin6_addr = ipv6_hdr(skb)->saddr; sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); } } if (is_udp4) { if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); } else { if (np->rxopt.all) ip6_datagram_recv_ctl(sk, msg, skb); } err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { if (is_udp4) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } else { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; }
0
CVE-2013-7263
1,981
benign
CWE-20
int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); int is_udp4; bool slow; if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; is_udp4 = (skb->protocol == htons(ETH_P_IP)); /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udpv6_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) { if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); } sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (msg->msg_name) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *) msg->msg_name; sin6->sin6_family = AF_INET6; sin6->sin6_port = udp_hdr(skb)->source; sin6->sin6_flowinfo = 0; if (is_udp4) { ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &sin6->sin6_addr); sin6->sin6_scope_id = 0; } else { sin6->sin6_addr = ipv6_hdr(skb)->saddr; sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); } *addr_len = sizeof(*sin6); } if (is_udp4) { if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); } else { if (np->rxopt.all) ip6_datagram_recv_ctl(sk, msg, skb); } err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { if (is_udp4) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } else { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; }
1
CVE-2013-7263
1,981
vulnerable
CWE-125
ast_for_expr(struct compiling *c, const node *n) { /* handle the full range of simple expressions test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom_expr ['**' factor] atom_expr: [AWAIT] atom trailer* yield_expr: 'yield' [yield_arg] */ asdl_seq *seq; int i; loop: switch (TYPE(n)) { case test: case test_nocond: if (TYPE(CHILD(n, 0)) == lambdef || TYPE(CHILD(n, 0)) == lambdef_nocond) return ast_for_lambdef(c, CHILD(n, 0)); else if (NCH(n) > 1) return ast_for_ifexpr(c, n); /* Fallthrough */ case or_test: case and_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); } if (!strcmp(STR(CHILD(n, 1)), "and")) return BoolOp(And, seq, LINENO(n), n->n_col_offset, c->c_arena); assert(!strcmp(STR(CHILD(n, 1)), "or")); return BoolOp(Or, seq, LINENO(n), n->n_col_offset, c->c_arena); case not_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return UnaryOp(Not, expression, LINENO(n), n->n_col_offset, c->c_arena); } case comparison: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression; asdl_int_seq *ops; asdl_seq *cmps; ops = _Ta3_asdl_int_seq_new(NCH(n) / 2, c->c_arena); if (!ops) return NULL; cmps = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!cmps) { return NULL; } for (i = 1; i < NCH(n); i += 2) { cmpop_ty newoperator; newoperator = ast_for_comp_op(c, CHILD(n, i)); if (!newoperator) { return NULL; } expression = ast_for_expr(c, CHILD(n, i + 1)); if (!expression) { return NULL; } asdl_seq_SET(ops, i / 2, newoperator); asdl_seq_SET(cmps, i / 2, expression); } expression = ast_for_expr(c, CHILD(n, 0)); if (!expression) { return NULL; } return Compare(expression, ops, cmps, LINENO(n), n->n_col_offset, c->c_arena); } break; case star_expr: return ast_for_starred(c, n); /* The next five cases all handle BinOps. The main body of code is the same in each case, but the switch turned inside out to reuse the code for each type of operator. */ case expr: case xor_expr: case and_expr: case shift_expr: case arith_expr: case term: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_binop(c, n); case yield_expr: { node *an = NULL; node *en = NULL; int is_from = 0; expr_ty exp = NULL; if (NCH(n) > 1) an = CHILD(n, 1); /* yield_arg */ if (an) { en = CHILD(an, NCH(an) - 1); if (NCH(an) == 2) { is_from = 1; exp = ast_for_expr(c, en); } else exp = ast_for_testlist(c, en); if (!exp) return NULL; } if (is_from) return YieldFrom(exp, LINENO(n), n->n_col_offset, c->c_arena); return Yield(exp, LINENO(n), n->n_col_offset, c->c_arena); } case factor: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_factor(c, n); case power: return ast_for_power(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled expr: %d", TYPE(n)); return NULL; } /* should never get here unless if error is set */ return NULL; }
0
CVE-2019-19274
2,604
benign
CWE-125
ast_for_expr(struct compiling *c, const node *n) { /* handle the full range of simple expressions test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom_expr ['**' factor] atom_expr: ['await'] atom trailer* yield_expr: 'yield' [yield_arg] */ asdl_seq *seq; int i; loop: switch (TYPE(n)) { case test: case test_nocond: if (TYPE(CHILD(n, 0)) == lambdef || TYPE(CHILD(n, 0)) == lambdef_nocond) return ast_for_lambdef(c, CHILD(n, 0)); else if (NCH(n) > 1) return ast_for_ifexpr(c, n); /* Fallthrough */ case or_test: case and_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); } if (!strcmp(STR(CHILD(n, 1)), "and")) return BoolOp(And, seq, LINENO(n), n->n_col_offset, c->c_arena); assert(!strcmp(STR(CHILD(n, 1)), "or")); return BoolOp(Or, seq, LINENO(n), n->n_col_offset, c->c_arena); case not_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return UnaryOp(Not, expression, LINENO(n), n->n_col_offset, c->c_arena); } case comparison: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression; asdl_int_seq *ops; asdl_seq *cmps; ops = _Ta3_asdl_int_seq_new(NCH(n) / 2, c->c_arena); if (!ops) return NULL; cmps = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!cmps) { return NULL; } for (i = 1; i < NCH(n); i += 2) { cmpop_ty newoperator; newoperator = ast_for_comp_op(c, CHILD(n, i)); if (!newoperator) { return NULL; } expression = ast_for_expr(c, CHILD(n, i + 1)); if (!expression) { return NULL; } asdl_seq_SET(ops, i / 2, newoperator); asdl_seq_SET(cmps, i / 2, expression); } expression = ast_for_expr(c, CHILD(n, 0)); if (!expression) { return NULL; } return Compare(expression, ops, cmps, LINENO(n), n->n_col_offset, c->c_arena); } break; case star_expr: return ast_for_starred(c, n); /* The next five cases all handle BinOps. The main body of code is the same in each case, but the switch turned inside out to reuse the code for each type of operator. */ case expr: case xor_expr: case and_expr: case shift_expr: case arith_expr: case term: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_binop(c, n); case yield_expr: { node *an = NULL; node *en = NULL; int is_from = 0; expr_ty exp = NULL; if (NCH(n) > 1) an = CHILD(n, 1); /* yield_arg */ if (an) { en = CHILD(an, NCH(an) - 1); if (NCH(an) == 2) { is_from = 1; exp = ast_for_expr(c, en); } else exp = ast_for_testlist(c, en); if (!exp) return NULL; } if (is_from) return YieldFrom(exp, LINENO(n), n->n_col_offset, c->c_arena); return Yield(exp, LINENO(n), n->n_col_offset, c->c_arena); } case factor: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_factor(c, n); case power: return ast_for_power(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled expr: %d", TYPE(n)); return NULL; } /* should never get here unless if error is set */ return NULL; }
1
CVE-2019-19274
2,604
vulnerable
CWE-476
void Context::onDone() { if (wasm_->onDone_) { wasm_->onDone_(this, id_); } }
0
CVE-2020-10739
1,633
benign
CWE-476
void Context::onDone() { if (in_vm_context_created_ && wasm_->onDone_) { wasm_->onDone_(this, id_); } }
1
CVE-2020-10739
1,633
vulnerable
CWE-787
FdInStream::FdInStream(int fd_, FdInStreamBlockCallback* blockCallback_, int bufSize_) : fd(fd_), timeoutms(0), blockCallback(blockCallback_), timing(false), timeWaitedIn100us(5), timedKbits(0), bufSize(bufSize_ ? bufSize_ : DEFAULT_BUF_SIZE), offset(0) { ptr = end = start = new U8[bufSize]; }
0
CVE-2019-15694
1,107
benign
CWE-787
FdInStream::FdInStream(int fd_, int timeoutms_, size_t bufSize_, bool closeWhenDone_) : fd(fd_), closeWhenDone(closeWhenDone_), timeoutms(timeoutms_), blockCallback(0), timing(false), timeWaitedIn100us(5), timedKbits(0), bufSize(bufSize_ ? bufSize_ : DEFAULT_BUF_SIZE), offset(0) { ptr = end = start = new U8[bufSize]; }
1
CVE-2019-15694
1,107
vulnerable
CWE-20
static int try_read_command(conn *c) { assert(c != NULL); assert(c->rcurr <= (c->rbuf + c->rsize)); assert(c->rbytes > 0); if (c->protocol == negotiating_prot || c->transport == udp_transport) { if ((unsigned char)c->rbuf[0] == (unsigned char)PROTOCOL_BINARY_REQ) { c->protocol = binary_prot; } else { c->protocol = ascii_prot; } if (settings.verbose > 1) { fprintf(stderr, "%d: Client using the %s protocol\n", c->sfd, prot_text(c->protocol)); } } if (c->protocol == binary_prot) { /* Do we have the complete packet header? */ if (c->rbytes < sizeof(c->binary_header)) { /* need more data! */ return 0; } else { #ifdef NEED_ALIGN if (((long)(c->rcurr)) % 8 != 0) { /* must realign input buffer */ memmove(c->rbuf, c->rcurr, c->rbytes); c->rcurr = c->rbuf; if (settings.verbose > 1) { fprintf(stderr, "%d: Realign input buffer\n", c->sfd); } } #endif protocol_binary_request_header* req; req = (protocol_binary_request_header*)c->rcurr; if (settings.verbose > 1) { /* Dump the packet before we convert it to host order */ int ii; fprintf(stderr, "<%d Read binary protocol data:", c->sfd); for (ii = 0; ii < sizeof(req->bytes); ++ii) { if (ii % 4 == 0) { fprintf(stderr, "\n<%d ", c->sfd); } fprintf(stderr, " 0x%02x", req->bytes[ii]); } fprintf(stderr, "\n"); } c->binary_header = *req; c->binary_header.request.keylen = ntohs(req->request.keylen); c->binary_header.request.bodylen = ntohl(req->request.bodylen); c->binary_header.request.cas = ntohll(req->request.cas); if (c->binary_header.request.magic != PROTOCOL_BINARY_REQ) { if (settings.verbose) { fprintf(stderr, "Invalid magic: %x\n", c->binary_header.request.magic); } conn_set_state(c, conn_closing); return -1; } c->msgcurr = 0; c->msgused = 0; c->iovused = 0; if (add_msghdr(c) != 0) { out_string(c, "SERVER_ERROR out of memory"); return 0; } c->cmd = c->binary_header.request.opcode; c->keylen = c->binary_header.request.keylen; c->opaque = c->binary_header.request.opaque; /* clear the returned cas value */ c->cas = 0; dispatch_bin_command(c); c->rbytes -= sizeof(c->binary_header); c->rcurr += sizeof(c->binary_header); } } else { char *el, *cont; if (c->rbytes == 0) return 0; el = memchr(c->rcurr, '\n', c->rbytes); if (!el) { if (c->rbytes > 1024) { /* * We didn't have a '\n' in the first k. This _has_ to be a * large multiget, if not we should just nuke the connection. */ char *ptr = c->rcurr; while (*ptr == ' ') { /* ignore leading whitespaces */ ++ptr; } if (strcmp(ptr, "get ") && strcmp(ptr, "gets ")) { conn_set_state(c, conn_closing); return 1; } } return 0; } cont = el + 1; if ((el - c->rcurr) > 1 && *(el - 1) == '\r') { el--; } *el = '\0'; assert(cont <= (c->rcurr + c->rbytes)); process_command(c, c->rcurr); c->rbytes -= (cont - c->rcurr); c->rcurr = cont; assert(c->rcurr <= (c->rbuf + c->rsize)); } return 1; }
0
CVE-2010-1152
847
benign
CWE-20
static int try_read_command(conn *c) {
  assert(c != NULL);
  assert(c->rcurr <= (c->rbuf + c->rsize));
  assert(c->rbytes > 0);

  if (c->protocol == negotiating_prot || c->transport == udp_transport) {
    if ((unsigned char)c->rbuf[0] == (unsigned char)PROTOCOL_BINARY_REQ) {
      c->protocol = binary_prot;
    } else {
      c->protocol = ascii_prot;
    }

    if (settings.verbose > 1) {
      fprintf(stderr, "%d: Client using the %s protocol\n", c->sfd,
              prot_text(c->protocol));
    }
  }

  if (c->protocol == binary_prot) {
    /* Do we have the complete packet header? */
    if (c->rbytes < sizeof(c->binary_header)) {
      /* need more data! */
      return 0;
    } else {
#ifdef NEED_ALIGN
      if (((long)(c->rcurr)) % 8 != 0) {
        /* must realign input buffer */
        memmove(c->rbuf, c->rcurr, c->rbytes);
        c->rcurr = c->rbuf;
        if (settings.verbose > 1) {
          fprintf(stderr, "%d: Realign input buffer\n", c->sfd);
        }
      }
#endif
      protocol_binary_request_header* req;
      req = (protocol_binary_request_header*)c->rcurr;

      if (settings.verbose > 1) {
        /* Dump the packet before we convert it to host order */
        int ii;
        fprintf(stderr, "<%d Read binary protocol data:", c->sfd);
        for (ii = 0; ii < sizeof(req->bytes); ++ii) {
          if (ii % 4 == 0) {
            fprintf(stderr, "\n<%d ", c->sfd);
          }
          fprintf(stderr, " 0x%02x", req->bytes[ii]);
        }
        fprintf(stderr, "\n");
      }

      c->binary_header = *req;
      c->binary_header.request.keylen = ntohs(req->request.keylen);
      c->binary_header.request.bodylen = ntohl(req->request.bodylen);
      c->binary_header.request.cas = ntohll(req->request.cas);

      if (c->binary_header.request.magic != PROTOCOL_BINARY_REQ) {
        if (settings.verbose) {
          fprintf(stderr, "Invalid magic: %x\n",
                  c->binary_header.request.magic);
        }
        conn_set_state(c, conn_closing);
        return -1;
      }

      c->msgcurr = 0;
      c->msgused = 0;
      c->iovused = 0;
      if (add_msghdr(c) != 0) {
        out_string(c, "SERVER_ERROR out of memory");
        return 0;
      }

      c->cmd = c->binary_header.request.opcode;
      c->keylen = c->binary_header.request.keylen;
      c->opaque = c->binary_header.request.opaque;
      /* clear the returned cas value */
      c->cas = 0;

      dispatch_bin_command(c);

      c->rbytes -= sizeof(c->binary_header);
      c->rcurr += sizeof(c->binary_header);
    }
  } else {
    char *el, *cont;

    if (c->rbytes == 0)
      return 0;
    el = memchr(c->rcurr, '\n', c->rbytes);
    if (!el) {
      if (c->rbytes > 1024) {
        /*
         * We didn't have a '\n' in the first k. This _has_ to be a
         * large multiget, if not we should just nuke the connection.
         */
        char *ptr = c->rcurr;
        while (*ptr == ' ') { /* ignore leading whitespaces */
          ++ptr;
        }

        if (ptr - c->rcurr > 100 ||
            (strncmp(ptr, "get ", 4) && strncmp(ptr, "gets ", 5))) {
          conn_set_state(c, conn_closing);
          return 1;
        }
      }

      return 0;
    }
    cont = el + 1;
    if ((el - c->rcurr) > 1 && *(el - 1) == '\r') {
      el--;
    }
    *el = '\0';

    assert(cont <= (c->rcurr + c->rbytes));

    process_command(c, c->rcurr);

    c->rbytes -= (cont - c->rcurr);
    c->rcurr = cont;

    assert(c->rcurr <= (c->rbuf + c->rsize));
  }

  return 1;
}
1
CVE-2010-1152
847
vulnerable
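Note: the functional delta between this try_read_command pair sits in the multiget heuristic. One variant compares the whole (not necessarily NUL-terminated) read buffer with strcmp after an unbounded whitespace scan; the other bounds the scan at 100 bytes and matches only the command prefix with strncmp. Below is a standalone sketch of the bounded-prefix form; the function name and buf/len parameters are hypothetical, introduced only to make the snippet self-contained.

    #include <string.h>

    /* Bounded multiget heuristic: skip at most 100 leading spaces, then
     * match only the "get "/"gets " prefix; never walk or compare past
     * the end of the (unterminated) read buffer. Returns 1 if the line
     * plausibly starts a large multiget. */
    static int looks_like_multiget(const char *buf, size_t len) {
        const char *ptr = buf;
        while (ptr < buf + len && *ptr == ' ')  /* ignore leading spaces */
            ++ptr;
        if (ptr - buf > 100)                    /* too much padding */
            return 0;
        return (ptr + 4 <= buf + len && strncmp(ptr, "get ", 4) == 0) ||
               (ptr + 5 <= buf + len && strncmp(ptr, "gets ", 5) == 0);
    }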
CWE-787
exif_data_load_data_entry (ExifData *data, ExifEntry *entry,
                           const unsigned char *d,
                           unsigned int size, unsigned int offset)
{
  unsigned int s, doff;

  entry->tag = exif_get_short (d + offset + 0, data->priv->order);
  entry->format = exif_get_short (d + offset + 2, data->priv->order);
  entry->components = exif_get_long (d + offset + 4, data->priv->order);

  /* FIXME: should use exif_tag_get_name_in_ifd here but entry->parent
   * has not been set yet */
  exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
            "Loading entry 0x%x ('%s')...", entry->tag,
            exif_tag_get_name (entry->tag));

  /* {0,1,2,4,8} x { 0x00000000 .. 0xffffffff }
   * -> { 0x000000000 .. 0x7fffffff8 } */
  s = exif_format_get_size(entry->format) * entry->components;
  if ((s < entry->components) || (s == 0)){
    return 0;
  }

  /*
   * Size? If bigger than 4 bytes, the actual data is not
   * in the entry but somewhere else (offset).
   */
  if (s > 4)
    doff = exif_get_long (d + offset + 8, data->priv->order);
  else
    doff = offset + 8;

  /* Sanity checks */
  if ((doff + s < doff) || (doff + s < s) || (doff + s > size)) {
    exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
              "Tag data past end of buffer (%u > %u)", doff+s, size);
    return 0;
  }

  entry->data = exif_data_alloc (data, s);
  if (entry->data) {
    entry->size = s;
    memcpy (entry->data, d + doff, s);
  } else {
    EXIF_LOG_NO_MEMORY(data->priv->log, "ExifData", s);
    return 0;
  }

  /* If this is the MakerNote, remember the offset */
  if (entry->tag == EXIF_TAG_MAKER_NOTE) {
    if (!entry->data) {
      exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
                "MakerNote found with empty data");
    } else if (entry->size > 6) {
      exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
                "MakerNote found (%02x %02x %02x %02x "
                "%02x %02x %02x...).",
                entry->data[0], entry->data[1],
                entry->data[2], entry->data[3],
                entry->data[4], entry->data[5],
                entry->data[6]);
    }
    data->priv->offset_mnote = doff;
  }
  return 1;
}
0
CVE-2019-9278
2,252
benign
CWE-787
exif_data_load_data_entry (ExifData *data, ExifEntry *entry,
                           const unsigned char *d,
                           unsigned int size, unsigned int offset)
{
  unsigned int s, doff;

  entry->tag = exif_get_short (d + offset + 0, data->priv->order);
  entry->format = exif_get_short (d + offset + 2, data->priv->order);
  entry->components = exif_get_long (d + offset + 4, data->priv->order);

  /* FIXME: should use exif_tag_get_name_in_ifd here but entry->parent
   * has not been set yet */
  exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
            "Loading entry 0x%x ('%s')...", entry->tag,
            exif_tag_get_name (entry->tag));

  /* {0,1,2,4,8} x { 0x00000000 .. 0xffffffff }
   * -> { 0x000000000 .. 0x7fffffff8 } */
  s = exif_format_get_size(entry->format) * entry->components;
  if ((s < entry->components) || (s == 0)){
    return 0;
  }

  /*
   * Size? If bigger than 4 bytes, the actual data is not
   * in the entry but somewhere else (offset).
   */
  if (s > 4)
    doff = exif_get_long (d + offset + 8, data->priv->order);
  else
    doff = offset + 8;

  /* Sanity checks */
  if (doff >= size) {
    exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
              "Tag starts past end of buffer (%u > %u)", doff, size);
    return 0;
  }

  if (s > size - doff) {
    exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
              "Tag data goes past end of buffer (%u > %u)", doff+s, size);
    return 0;
  }

  entry->data = exif_data_alloc (data, s);
  if (entry->data) {
    entry->size = s;
    memcpy (entry->data, d + doff, s);
  } else {
    EXIF_LOG_NO_MEMORY(data->priv->log, "ExifData", s);
    return 0;
  }

  /* If this is the MakerNote, remember the offset */
  if (entry->tag == EXIF_TAG_MAKER_NOTE) {
    if (!entry->data) {
      exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
                "MakerNote found with empty data");
    } else if (entry->size > 6) {
      exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
                "MakerNote found (%02x %02x %02x %02x "
                "%02x %02x %02x...).",
                entry->data[0], entry->data[1],
                entry->data[2], entry->data[3],
                entry->data[4], entry->data[5],
                entry->data[6]);
    }
    data->priv->offset_mnote = doff;
  }
  return 1;
}
1
CVE-2019-9278
2,252
vulnerable
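Note: the delta in this exif_data_load_data_entry pair is the sanity check on doff and s. The combined form computes doff + s, an unsigned sum that can wrap around before the comparison runs; the split form first rejects doff >= size and then compares s against the remaining space size - doff, so no addition can overflow. A minimal sketch of the overflow-safe shape follows; the helper name is hypothetical.

    /* Overflow-safe bounds check: never compute doff + s directly. */
    static int entry_in_bounds(unsigned int doff, unsigned int s,
                               unsigned int size) {
        if (doff >= size)      /* data would start past end of buffer */
            return 0;
        if (s > size - doff)   /* data would run past end of buffer */
            return 0;
        return 1;
    }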
CWE-20
int treeRead(struct READER *reader, struct DATAOBJECT *data) {

  int i, j, err, olen, elements, size, x, y, z, b, e, dy, dz, sx, sy, sz,
      dzy, szy;
  char *input, *output;

  uint8_t node_type, node_level;
  uint16_t entries_used;
  uint32_t size_of_chunk;
  uint32_t filter_mask;
  uint64_t address_of_left_sibling, address_of_right_sibling, start[4],
      child_pointer, key, store;

  char buf[4];

  UNUSED(node_level);
  UNUSED(address_of_right_sibling);
  UNUSED(address_of_left_sibling);
  UNUSED(key);

  if (data->ds.dimensionality > 3) {
    log("TREE dimensions > 3");
    return MYSOFA_INVALID_FORMAT;
  }

  /* read signature */
  if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "TREE", 4)) {
    log("cannot read signature of TREE\n");
    return MYSOFA_INVALID_FORMAT;
  }
  log("%08lX %.4s\n", (uint64_t )ftell(reader->fhd) - 4, buf);

  node_type = (uint8_t)fgetc(reader->fhd);
  node_level = (uint8_t)fgetc(reader->fhd);
  entries_used = (uint16_t)readValue(reader, 2);
  if(entries_used>0x1000)
    return MYSOFA_UNSUPPORTED_FORMAT;
  address_of_left_sibling = readValue(reader,
      reader->superblock.size_of_offsets);
  address_of_right_sibling = readValue(reader,
      reader->superblock.size_of_offsets);

  elements = 1;
  for (j = 0; j < data->ds.dimensionality; j++)
    elements *= data->datalayout_chunk[j];
  dy = data->datalayout_chunk[1];
  dz = data->datalayout_chunk[2];
  sx = data->ds.dimension_size[0];
  sy = data->ds.dimension_size[1];
  sz = data->ds.dimension_size[2];
  dzy = dz * dy;
  szy = sz * sy;

  size = data->datalayout_chunk[data->ds.dimensionality];

  log("elements %d size %d\n",elements,size);
  if (!(output = malloc(elements * size))) {
    return MYSOFA_NO_MEMORY;
  }

  for (e = 0; e < entries_used * 2; e++) {
    if (node_type == 0) {
      key = readValue(reader, reader->superblock.size_of_lengths);
    } else {
      size_of_chunk = (uint32_t)readValue(reader, 4);
      filter_mask = (uint32_t)readValue(reader, 4);
      if (filter_mask) {
        log("TREE all filters must be enabled\n");
        free(output);
        return MYSOFA_INVALID_FORMAT;
      }

      for (j = 0; j < data->ds.dimensionality; j++) {
        start[j] = readValue(reader, 8);
        log("start %d %lu\n",j,start[j]);
      }

      if (readValue(reader, 8)) {
        break;
      }

      child_pointer = readValue(reader, reader->superblock.size_of_offsets);
      log(" data at %lX len %u\n", child_pointer, size_of_chunk);

      /* read data */
      store = ftell(reader->fhd);
      if (fseek(reader->fhd, child_pointer, SEEK_SET)<0) {
        free(output);
        return errno;
      }

      if (!(input = malloc(size_of_chunk))) {
        free(output);
        return MYSOFA_NO_MEMORY;
      }
      if (fread(input, 1, size_of_chunk, reader->fhd) != size_of_chunk) {
        free(output);
        free(input);
        return MYSOFA_INVALID_FORMAT;
      }

      olen = elements * size;
      err = gunzip(size_of_chunk, input, &olen, output);
      free(input);

      log(" gunzip %d %d %d\n",err, olen, elements*size);
      if (err || olen != elements * size) {
        free(output);
        return MYSOFA_INVALID_FORMAT;
      }

      switch (data->ds.dimensionality) {
      case 1:
        for (i = 0; i < olen; i++) {
          b = i / elements;
          x = i % elements + start[0];
          if (x < sx) {
            j = x * size + b;
            ((char*)data->data)[j] = output[i];
          }
        }
        break;
      case 2:
        for (i = 0; i < olen; i++) {
          b = i / elements;
          x = i % elements;
          y = x % dy + start[1];
          x = x / dy + start[0];
          if (y < sy && x < sx) {
            j = ((x * sy + y) * size) + b;
            ((char*)data->data)[j] = output[i];
          }
        }
        break;
      case 3:
        for (i = 0; i < olen; i++) {
          b = i / elements;
          x = i % elements;
          z = x % dz + start[2];
          y = (x / dz) % dy + start[1];
          x = (x / dzy) + start[0];
          if (z < sz && y < sy && x < sx) {
            j = (x * szy + y * sz + z) * size + b;
            ((char*)data->data)[j] = output[i];
          }
        }
        break;
      default:
        log("invalid dim\n");
        return MYSOFA_INTERNAL_ERROR;
      }

      if(fseek(reader->fhd, store, SEEK_SET)<0) {
        free(output);
        return errno;
      }
    }
  }

  free(output);

  if(fseek(reader->fhd, 4, SEEK_CUR)<0) /* skip checksum */
    return errno;

  return MYSOFA_OK;
}
0
CVE-2019-10672
2,042
benign
CWE-20
int treeRead(struct READER *reader, struct DATAOBJECT *data) {

  int i, j, err, olen, elements, size, x, y, z, b, e, dy, dz, sx, sy, sz,
      dzy, szy;
  char *input, *output;

  uint8_t node_type, node_level;
  uint16_t entries_used;
  uint32_t size_of_chunk;
  uint32_t filter_mask;
  uint64_t address_of_left_sibling, address_of_right_sibling, start[4],
      child_pointer, key, store;

  char buf[4];

  UNUSED(node_level);
  UNUSED(address_of_right_sibling);
  UNUSED(address_of_left_sibling);
  UNUSED(key);

  if (data->ds.dimensionality > 3) {
    log("TREE dimensions > 3");
    return MYSOFA_INVALID_FORMAT;
  }

  /* read signature */
  if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "TREE", 4)) {
    log("cannot read signature of TREE\n");
    return MYSOFA_INVALID_FORMAT;
  }
  log("%08lX %.4s\n", (uint64_t )ftell(reader->fhd) - 4, buf);

  node_type = (uint8_t)fgetc(reader->fhd);
  node_level = (uint8_t)fgetc(reader->fhd);
  entries_used = (uint16_t)readValue(reader, 2);
  if(entries_used>0x1000)
    return MYSOFA_UNSUPPORTED_FORMAT;
  address_of_left_sibling = readValue(reader,
      reader->superblock.size_of_offsets);
  address_of_right_sibling = readValue(reader,
      reader->superblock.size_of_offsets);

  elements = 1;
  for (j = 0; j < data->ds.dimensionality; j++)
    elements *= data->datalayout_chunk[j];
  dy = data->datalayout_chunk[1];
  dz = data->datalayout_chunk[2];
  sx = data->ds.dimension_size[0];
  sy = data->ds.dimension_size[1];
  sz = data->ds.dimension_size[2];
  dzy = dz * dy;
  szy = sz * sy;

  size = data->datalayout_chunk[data->ds.dimensionality];

  log("elements %d size %d\n",elements,size);
  if (!(output = malloc(elements * size))) {
    return MYSOFA_NO_MEMORY;
  }

  for (e = 0; e < entries_used * 2; e++) {
    if (node_type == 0) {
      key = readValue(reader, reader->superblock.size_of_lengths);
    } else {
      size_of_chunk = (uint32_t)readValue(reader, 4);
      filter_mask = (uint32_t)readValue(reader, 4);
      if (filter_mask) {
        log("TREE all filters must be enabled\n");
        free(output);
        return MYSOFA_INVALID_FORMAT;
      }

      for (j = 0; j < data->ds.dimensionality; j++) {
        start[j] = readValue(reader, 8);
        log("start %d %lu\n",j,start[j]);
      }

      if (readValue(reader, 8)) {
        break;
      }

      child_pointer = readValue(reader, reader->superblock.size_of_offsets);
      log(" data at %lX len %u\n", child_pointer, size_of_chunk);

      /* read data */
      store = ftell(reader->fhd);
      if (fseek(reader->fhd, child_pointer, SEEK_SET)<0) {
        free(output);
        return errno;
      }

      if (!(input = malloc(size_of_chunk))) {
        free(output);
        return MYSOFA_NO_MEMORY;
      }
      if (fread(input, 1, size_of_chunk, reader->fhd) != size_of_chunk) {
        free(output);
        free(input);
        return MYSOFA_INVALID_FORMAT;
      }

      olen = elements * size;
      err = gunzip(size_of_chunk, input, &olen, output);
      free(input);

      log(" gunzip %d %d %d\n",err, olen, elements*size);
      if (err || olen != elements * size) {
        free(output);
        return MYSOFA_INVALID_FORMAT;
      }

      switch (data->ds.dimensionality) {
      case 1:
        for (i = 0; i < olen; i++) {
          b = i / elements;
          x = i % elements + start[0];
          j = x * size + b;
          if (j>=0 && j < elements * size) {
            ((char*)data->data)[j] = output[i];
          }
        }
        break;
      case 2:
        for (i = 0; i < olen; i++) {
          b = i / elements;
          x = i % elements;
          y = x % dy + start[1];
          x = x / dy + start[0];
          j = ((x * sy + y) * size) + b;
          if (j>=0 && j < elements * size) {
            ((char*)data->data)[j] = output[i];
          }
        }
        break;
      case 3:
        for (i = 0; i < olen; i++) {
          b = i / elements;
          x = i % elements;
          z = x % dz + start[2];
          y = (x / dz) % dy + start[1];
          x = (x / dzy) + start[0];
          j = (x * szy + y * sz + z) * size + b;
          if (j>=0 && j < elements * size) {
            ((char*)data->data)[j] = output[i];
          }
        }
        break;
      default:
        log("invalid dim\n");
        return MYSOFA_INTERNAL_ERROR;
      }

      if(fseek(reader->fhd, store, SEEK_SET)<0) {
        free(output);
        return errno;
      }
    }
  }

  free(output);

  if(fseek(reader->fhd, 4, SEEK_CUR)<0) /* skip checksum */
    return errno;

  return MYSOFA_OK;
}
1
CVE-2019-10672
2,042
vulnerable
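Note: the two treeRead variants guard the scatter-write into data->data differently. One validates each decoded coordinate against the dataset's dimension sizes (x < sx, y < sy, z < sz) before forming the index; the other range-checks the flat index j against elements * size, which is the size of the decompression chunk rather than of the destination array, so indices derived from the file-controlled start[] offsets are not necessarily confined to data->data. A sketch of the coordinate-validated store follows; the helper name and its contract (dst holds sx*sy*sz samples of `size` bytes) are illustrative assumptions.

    /* Coordinate-validated store: out-of-range coordinates are dropped
     * instead of written, so dst is never indexed past its extent. */
    static void store_sample(char *dst, int x, int y, int z, int b,
                             int sx, int sy, int sz, int size, char v) {
        if (x >= 0 && x < sx && y >= 0 && y < sy && z >= 0 && z < sz)
            dst[((x * sy + y) * sz + z) * size + b] = v;
    }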
CWE-476
long keyctl_update_key(key_serial_t id,
                       const void __user *_payload,
                       size_t plen)
{
  key_ref_t key_ref;
  void *payload;
  long ret;

  ret = -EINVAL;
  if (plen > PAGE_SIZE)
    goto error;

  /* pull the payload in if one was supplied */
  payload = NULL;
  if (_payload) {
    ret = -ENOMEM;
    payload = kmalloc(plen, GFP_KERNEL);
    if (!payload)
      goto error;

    ret = -EFAULT;
    if (copy_from_user(payload, _payload, plen) != 0)
      goto error2;
  }

  /* find the target key (which must be writable) */
  key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
  if (IS_ERR(key_ref)) {
    ret = PTR_ERR(key_ref);
    goto error2;
  }

  /* update the key */
  ret = key_update(key_ref, payload, plen);
  key_ref_put(key_ref);

error2:
  kfree(payload);
error:
  return ret;
}
0
CVE-2017-15274
234
benign
CWE-476
long keyctl_update_key(key_serial_t id,
                       const void __user *_payload,
                       size_t plen)
{
  key_ref_t key_ref;
  void *payload;
  long ret;

  ret = -EINVAL;
  if (plen > PAGE_SIZE)
    goto error;

  /* pull the payload in if one was supplied */
  payload = NULL;
  if (plen) {
    ret = -ENOMEM;
    payload = kmalloc(plen, GFP_KERNEL);
    if (!payload)
      goto error;

    ret = -EFAULT;
    if (copy_from_user(payload, _payload, plen) != 0)
      goto error2;
  }

  /* find the target key (which must be writable) */
  key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
  if (IS_ERR(key_ref)) {
    ret = PTR_ERR(key_ref);
    goto error2;
  }

  /* update the key */
  ret = key_update(key_ref, payload, plen);
  key_ref_put(key_ref);

error2:
  kfree(payload);
error:
  return ret;
}
1
CVE-2017-15274
234
vulnerable
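Note: this keyctl_update_key pair differs only in the guard around the payload copy: one variant allocates whenever userspace passed a payload pointer, the other whenever plen is nonzero. The distinction matters for the NULL-dereference case (CWE-476) where _payload is NULL but plen is nonzero: gating on plen forces the copy_from_user path, which fails cleanly with -EFAULT instead of handing key_update a NULL payload with a nonzero length. Below is a userspace model of that guard, not kernel code; the function name is hypothetical and copy_from_user failure is simulated with an explicit pointer check.

    #include <stdlib.h>
    #include <string.h>

    /* Userspace model: gate on the length, not the pointer, so a NULL
     * pointer with nonzero length fails at the copy instead of flowing
     * a NULL payload into the consumer. Returns NULL on any failure;
     * a zero-length update legitimately yields a NULL payload. */
    static void *pull_payload(const void *user_ptr, size_t plen) {
        void *payload = NULL;
        if (plen) {
            payload = malloc(plen);
            if (!payload)
                return NULL;               /* models -ENOMEM */
            if (!user_ptr) {               /* models copy_from_user -EFAULT */
                free(payload);
                return NULL;
            }
            memcpy(payload, user_ptr, plen);
        }
        return payload;
    }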