Dataset columns:

cwe_id      string, 8 classes
func        string, 40 to 61.2k characters
label       int64, 0 or 1
cve_id      string, 13 to 16 characters
id          int64, 0 to 3.29k
text_label  string, 2 classes ("benign" / "vulnerable")
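In the rows shown below, each function appears twice under the same cve_id and id: once labelled benign (label 0, text_label "benign") and once labelled vulnerable (label 1, text_label "vulnerable"), so rows can be joined into pairs on (cve_id, id). A minimal loading sketch, assuming the split is stored as JSON Lines with exactly the columns above; the file name "train.jsonl" is a placeholder, not a documented part of the dataset:

```python
import json

def load_pairs(path="train.jsonl"):
    """Group the two versions of each function by (cve_id, id).

    Assumes one JSON object per line with the fields cwe_id, func,
    label, cve_id, id, and text_label described in the schema above.
    """
    pairs = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            key = (row["cve_id"], row["id"])
            # label 0 corresponds to text_label "benign",
            # label 1 to text_label "vulnerable".
            pairs.setdefault(key, {})[row["text_label"]] = row["func"]
    return pairs

if __name__ == "__main__":
    for (cve, rid), versions in load_pairs().items():
        if {"benign", "vulnerable"} <= versions.keys():
            print(cve, rid, "has both versions")
```

The example rows follow, one field per line, in the same order as the schema.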
cwe_id: CWE-125
func:
ssize_t enc_untrusted_read(int fd, void *buf, size_t count) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_read, fd, buf, count)); }
label: 0
cve_id: CVE-2020-8942
id: 1577
text_label: benign

cwe_id: CWE-125
func:
ssize_t enc_untrusted_read(int fd, void *buf, size_t count) { ssize_t ret = static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_read, fd, buf, count)); if (ret != -1 && ret > count) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_read: read result exceeds requested"); } return ret; }
label: 1
cve_id: CVE-2020-8942
id: 1577
text_label: vulnerable

cwe_id: CWE-416
func:
njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval) { njs_int_t ret; njs_value_t ctor; njs_native_frame_t *frame; njs_promise_capability_t *capability; frame = vm->top_frame; frame->retval = retval; njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]); capability = njs_promise_new_capability(vm, &ctor); if (njs_slow_path(capability == NULL)) { return NJS_ERROR; } frame->function->context = capability; ret = njs_function_lambda_call(vm); if (ret == NJS_OK) { ret = njs_function_call(vm, njs_function(&capability->resolve), &njs_value_undefined, retval, 1, &vm->retval); } else if (ret == NJS_AGAIN) { ret = NJS_OK; } else if (ret == NJS_ERROR) { if (njs_is_memory_error(vm, &vm->retval)) { return NJS_ERROR; } ret = njs_function_call(vm, njs_function(&capability->reject), &njs_value_undefined, &vm->retval, 1, &vm->retval); } *retval = capability->promise; return ret; }
label: 0
cve_id: CVE-2022-25139
id: 881
text_label: benign

cwe_id: CWE-416
func:
njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval) { njs_int_t ret; njs_value_t ctor; njs_native_frame_t *frame; njs_promise_capability_t *capability; frame = vm->top_frame; frame->retval = retval; njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]); capability = njs_promise_new_capability(vm, &ctor); if (njs_slow_path(capability == NULL)) { return NJS_ERROR; } ret = njs_function_lambda_call(vm, capability, NULL); if (ret == NJS_OK) { ret = njs_function_call(vm, njs_function(&capability->resolve), &njs_value_undefined, retval, 1, &vm->retval); } else if (ret == NJS_AGAIN) { ret = NJS_OK; } else if (ret == NJS_ERROR) { if (njs_is_memory_error(vm, &vm->retval)) { return NJS_ERROR; } ret = njs_function_call(vm, njs_function(&capability->reject), &njs_value_undefined, &vm->retval, 1, &vm->retval); } *retval = capability->promise; return ret; }
label: 1
cve_id: CVE-2022-25139
id: 881
text_label: vulnerable

cwe_id: CWE-476
func:
RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (true) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; }
label: 0
cve_id: CVE-2022-1283
id: 187
text_label: benign

cwe_id: CWE-476
func:
RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (entries) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; }
label: 1
cve_id: CVE-2022-1283
id: 187
text_label: vulnerable

cwe_id: CWE-119
func:
void ReadJPEG( JPEGReader* pJPEGReader, void* pInputStream, long* pLines, Size const & previewSize ) { jpeg_decompress_struct cinfo; ErrorManagerStruct jerr; if ( setjmp( jerr.setjmp_buffer ) ) { jpeg_destroy_decompress( &cinfo ); return; } cinfo.err = jpeg_std_error( &jerr.pub ); jerr.pub.error_exit = errorExit; jerr.pub.output_message = outputMessage; jpeg_create_decompress( &cinfo ); jpeg_svstream_src( &cinfo, pInputStream ); SourceManagerStruct *source = reinterpret_cast<SourceManagerStruct*>(cinfo.src); jpeg_read_header( &cinfo, TRUE ); cinfo.scale_num = 1; cinfo.scale_denom = 1; cinfo.output_gamma = 1.0; cinfo.raw_data_out = FALSE; cinfo.quantize_colors = FALSE; /* change scale for preview import */ long nPreviewWidth = previewSize.Width(); long nPreviewHeight = previewSize.Height(); if( nPreviewWidth || nPreviewHeight ) { if( nPreviewWidth == 0 ) { nPreviewWidth = ( cinfo.image_width * nPreviewHeight ) / cinfo.image_height; if( nPreviewWidth <= 0 ) { nPreviewWidth = 1; } } else if( nPreviewHeight == 0 ) { nPreviewHeight = ( cinfo.image_height * nPreviewWidth ) / cinfo.image_width; if( nPreviewHeight <= 0 ) { nPreviewHeight = 1; } } for( cinfo.scale_denom = 1; cinfo.scale_denom < 8; cinfo.scale_denom *= 2 ) { if( cinfo.image_width < nPreviewWidth * cinfo.scale_denom ) break; if( cinfo.image_height < nPreviewHeight * cinfo.scale_denom ) break; } if( cinfo.scale_denom > 1 ) { cinfo.dct_method = JDCT_FASTEST; cinfo.do_fancy_upsampling = FALSE; cinfo.do_block_smoothing = FALSE; } } jpeg_start_decompress( &cinfo ); long nWidth = cinfo.output_width; long nHeight = cinfo.output_height; bool bGray = (cinfo.output_components == 1); JPEGCreateBitmapParam aCreateBitmapParam; aCreateBitmapParam.nWidth = nWidth; aCreateBitmapParam.nHeight = nHeight; aCreateBitmapParam.density_unit = cinfo.density_unit; aCreateBitmapParam.X_density = cinfo.X_density; aCreateBitmapParam.Y_density = cinfo.Y_density; aCreateBitmapParam.bGray = bGray; bool bBitmapCreated = pJPEGReader->CreateBitmap(aCreateBitmapParam); if (bBitmapCreated) { Bitmap::ScopedWriteAccess pAccess(pJPEGReader->GetBitmap()); if (pAccess) { int nPixelSize = 3; J_COLOR_SPACE best_out_color_space = JCS_RGB; ScanlineFormat eScanlineFormat = ScanlineFormat::N24BitTcRgb; ScanlineFormat eFinalFormat = pAccess->GetScanlineFormat(); if (eFinalFormat == ScanlineFormat::N32BitTcBgra) { best_out_color_space = JCS_EXT_BGRA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcRgba) { best_out_color_space = JCS_EXT_RGBA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcArgb) { best_out_color_space = JCS_EXT_ARGB; eScanlineFormat = eFinalFormat; nPixelSize = 4; } if ( cinfo.jpeg_color_space == JCS_YCbCr ) cinfo.out_color_space = best_out_color_space; else if ( cinfo.jpeg_color_space == JCS_YCCK ) cinfo.out_color_space = JCS_CMYK; if (cinfo.out_color_space != JCS_CMYK && cinfo.out_color_space != JCS_GRAYSCALE && cinfo.out_color_space != best_out_color_space) { SAL_WARN("vcl.filter", "jpg with unknown out color space, forcing to :" << best_out_color_space); cinfo.out_color_space = best_out_color_space; } JSAMPLE* aRangeLimit = cinfo.sample_range_limit; std::vector<sal_uInt8> pScanLineBuffer(nWidth * (bGray ? 
1 : nPixelSize)); std::vector<sal_uInt8> pCYMKBuffer; if (cinfo.out_color_space == JCS_CMYK) { pCYMKBuffer.resize(nWidth * 4); } std::unique_ptr<BitmapColor[]> pCols; if (bGray) { pCols.reset(new BitmapColor[256]); for (sal_uInt16 n = 0; n < 256; n++) { const sal_uInt8 cGray = n; pCols[n] = pAccess->GetBestMatchingColor(BitmapColor(cGray, cGray, cGray)); } } for (*pLines = 0; *pLines < nHeight && !source->no_data_available; (*pLines)++) { size_t yIndex = *pLines; sal_uInt8* p = (cinfo.out_color_space == JCS_CMYK) ? pCYMKBuffer.data() : pScanLineBuffer.data(); jpeg_read_scanlines(&cinfo, reinterpret_cast<JSAMPARRAY>(&p), 1); if (bGray) { for (long x = 0; x < nWidth; ++x) { sal_uInt8 nColorGray = pScanLineBuffer[x]; pAccess->SetPixel(yIndex, x, pCols[nColorGray]); } } else if (cinfo.out_color_space == JCS_CMYK) { // convert CMYK to RGB for (long cmyk = 0, x = 0; cmyk < nWidth * 4; cmyk += 4, ++x) { int color_C = 255 - pCYMKBuffer[cmyk + 0]; int color_M = 255 - pCYMKBuffer[cmyk + 1]; int color_Y = 255 - pCYMKBuffer[cmyk + 2]; int color_K = 255 - pCYMKBuffer[cmyk + 3]; sal_uInt8 cRed = aRangeLimit[255L - (color_C + color_K)]; sal_uInt8 cGreen = aRangeLimit[255L - (color_M + color_K)]; sal_uInt8 cBlue = aRangeLimit[255L - (color_Y + color_K)]; pAccess->SetPixel(yIndex, x, BitmapColor(cRed, cGreen, cBlue)); } } else { pAccess->CopyScanline(yIndex, pScanLineBuffer.data(), eScanlineFormat, pScanLineBuffer.size()); } /* PENDING ??? */ if (cinfo.err->msg_code == 113) break; } } } if (bBitmapCreated) { jpeg_finish_decompress( &cinfo ); } else { jpeg_abort_decompress( &cinfo ); } jpeg_destroy_decompress( &cinfo ); }
label: 0
cve_id: CVE-2017-8358
id: 186
text_label: benign

cwe_id: CWE-119
func:
void ReadJPEG( JPEGReader* pJPEGReader, void* pInputStream, long* pLines, Size const & previewSize ) { jpeg_decompress_struct cinfo; ErrorManagerStruct jerr; if ( setjmp( jerr.setjmp_buffer ) ) { jpeg_destroy_decompress( &cinfo ); return; } cinfo.err = jpeg_std_error( &jerr.pub ); jerr.pub.error_exit = errorExit; jerr.pub.output_message = outputMessage; jpeg_create_decompress( &cinfo ); jpeg_svstream_src( &cinfo, pInputStream ); SourceManagerStruct *source = reinterpret_cast<SourceManagerStruct*>(cinfo.src); jpeg_read_header( &cinfo, TRUE ); cinfo.scale_num = 1; cinfo.scale_denom = 1; cinfo.output_gamma = 1.0; cinfo.raw_data_out = FALSE; cinfo.quantize_colors = FALSE; /* change scale for preview import */ long nPreviewWidth = previewSize.Width(); long nPreviewHeight = previewSize.Height(); if( nPreviewWidth || nPreviewHeight ) { if( nPreviewWidth == 0 ) { nPreviewWidth = ( cinfo.image_width * nPreviewHeight ) / cinfo.image_height; if( nPreviewWidth <= 0 ) { nPreviewWidth = 1; } } else if( nPreviewHeight == 0 ) { nPreviewHeight = ( cinfo.image_height * nPreviewWidth ) / cinfo.image_width; if( nPreviewHeight <= 0 ) { nPreviewHeight = 1; } } for( cinfo.scale_denom = 1; cinfo.scale_denom < 8; cinfo.scale_denom *= 2 ) { if( cinfo.image_width < nPreviewWidth * cinfo.scale_denom ) break; if( cinfo.image_height < nPreviewHeight * cinfo.scale_denom ) break; } if( cinfo.scale_denom > 1 ) { cinfo.dct_method = JDCT_FASTEST; cinfo.do_fancy_upsampling = FALSE; cinfo.do_block_smoothing = FALSE; } } jpeg_calc_output_dimensions(&cinfo); long nWidth = cinfo.output_width; long nHeight = cinfo.output_height; bool bGray = (cinfo.output_components == 1); JPEGCreateBitmapParam aCreateBitmapParam; aCreateBitmapParam.nWidth = nWidth; aCreateBitmapParam.nHeight = nHeight; aCreateBitmapParam.density_unit = cinfo.density_unit; aCreateBitmapParam.X_density = cinfo.X_density; aCreateBitmapParam.Y_density = cinfo.Y_density; aCreateBitmapParam.bGray = bGray; bool bBitmapCreated = pJPEGReader->CreateBitmap(aCreateBitmapParam); if (bBitmapCreated) { Bitmap::ScopedWriteAccess pAccess(pJPEGReader->GetBitmap()); if (pAccess) { int nPixelSize = 3; J_COLOR_SPACE best_out_color_space = JCS_RGB; ScanlineFormat eScanlineFormat = ScanlineFormat::N24BitTcRgb; ScanlineFormat eFinalFormat = pAccess->GetScanlineFormat(); if (eFinalFormat == ScanlineFormat::N32BitTcBgra) { best_out_color_space = JCS_EXT_BGRA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcRgba) { best_out_color_space = JCS_EXT_RGBA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcArgb) { best_out_color_space = JCS_EXT_ARGB; eScanlineFormat = eFinalFormat; nPixelSize = 4; } if ( cinfo.jpeg_color_space == JCS_YCbCr ) cinfo.out_color_space = best_out_color_space; else if ( cinfo.jpeg_color_space == JCS_YCCK ) cinfo.out_color_space = JCS_CMYK; if (cinfo.out_color_space != JCS_CMYK && cinfo.out_color_space != JCS_GRAYSCALE && cinfo.out_color_space != best_out_color_space) { SAL_WARN("vcl.filter", "jpg with unknown out color space, forcing to :" << best_out_color_space << " gray "); cinfo.out_color_space = best_out_color_space; } jpeg_start_decompress(&cinfo); JSAMPLE* aRangeLimit = cinfo.sample_range_limit; std::vector<sal_uInt8> pScanLineBuffer(nWidth * (bGray ? 
1 : nPixelSize)); std::vector<sal_uInt8> pCYMKBuffer; if (cinfo.out_color_space == JCS_CMYK) { pCYMKBuffer.resize(nWidth * 4); } std::unique_ptr<BitmapColor[]> pCols; if (bGray) { pCols.reset(new BitmapColor[256]); for (sal_uInt16 n = 0; n < 256; n++) { const sal_uInt8 cGray = n; pCols[n] = pAccess->GetBestMatchingColor(BitmapColor(cGray, cGray, cGray)); } } for (*pLines = 0; *pLines < nHeight && !source->no_data_available; (*pLines)++) { size_t yIndex = *pLines; sal_uInt8* p = (cinfo.out_color_space == JCS_CMYK) ? pCYMKBuffer.data() : pScanLineBuffer.data(); jpeg_read_scanlines(&cinfo, reinterpret_cast<JSAMPARRAY>(&p), 1); if (bGray) { for (long x = 0; x < nWidth; ++x) { sal_uInt8 nColorGray = pScanLineBuffer[x]; pAccess->SetPixel(yIndex, x, pCols[nColorGray]); } } else if (cinfo.out_color_space == JCS_CMYK) { // convert CMYK to RGB for (long cmyk = 0, x = 0; cmyk < nWidth * 4; cmyk += 4, ++x) { int color_C = 255 - pCYMKBuffer[cmyk + 0]; int color_M = 255 - pCYMKBuffer[cmyk + 1]; int color_Y = 255 - pCYMKBuffer[cmyk + 2]; int color_K = 255 - pCYMKBuffer[cmyk + 3]; sal_uInt8 cRed = aRangeLimit[255L - (color_C + color_K)]; sal_uInt8 cGreen = aRangeLimit[255L - (color_M + color_K)]; sal_uInt8 cBlue = aRangeLimit[255L - (color_Y + color_K)]; pAccess->SetPixel(yIndex, x, BitmapColor(cRed, cGreen, cBlue)); } } else { pAccess->CopyScanline(yIndex, pScanLineBuffer.data(), eScanlineFormat, pScanLineBuffer.size()); } /* PENDING ??? */ if (cinfo.err->msg_code == 113) break; } } } if (bBitmapCreated) { jpeg_finish_decompress( &cinfo ); } else { jpeg_abort_decompress( &cinfo ); } jpeg_destroy_decompress( &cinfo ); }
label: 1
cve_id: CVE-2017-8358
id: 186
text_label: vulnerable

cwe_id: CWE-20
func:
static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; zval *rv, *value = NULL, **tmp; if (check_inherited && intern->fptr_offset_has) { zval *offset_tmp = offset; SEPARATE_ARG_IF_REF(offset_tmp); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset_tmp); zval_ptr_dtor(&offset_tmp); if (rv && zend_is_true(rv)) { zval_ptr_dtor(&rv); if (check_empty != 1) { return 1; } else if (intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } } else { if (rv) { zval_ptr_dtor(&rv); } return 0; } } if (!value) { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); switch(Z_TYPE_P(offset)) { case IS_STRING: if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; default: zend_error(E_WARNING, "Illegal offset type"); return 0; } if (check_empty && check_inherited && intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } else { value = *tmp; } } return check_empty ? zend_is_true(value) : Z_TYPE_P(value) != IS_NULL; } /* }}} */
label: 0
cve_id: CVE-2016-7417
id: 1298
text_label: benign

cwe_id: CWE-20
func:
static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; zval *rv, *value = NULL, **tmp; if (check_inherited && intern->fptr_offset_has) { zval *offset_tmp = offset; SEPARATE_ARG_IF_REF(offset_tmp); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset_tmp); zval_ptr_dtor(&offset_tmp); if (rv && zend_is_true(rv)) { zval_ptr_dtor(&rv); if (check_empty != 1) { return 1; } else if (intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } } else { if (rv) { zval_ptr_dtor(&rv); } return 0; } } if (!value) { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); switch(Z_TYPE_P(offset)) { case IS_STRING: if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; default: zend_error(E_WARNING, "Illegal offset type"); return 0; } if (check_empty && check_inherited && intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } else { value = *tmp; } } return check_empty ? zend_is_true(value) : Z_TYPE_P(value) != IS_NULL; } /* }}} */
label: 1
cve_id: CVE-2016-7417
id: 1298
text_label: vulnerable

cwe_id: CWE-362
func:
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct iphdr *iph; int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); struct ipcm_cookie ipc; __be32 saddr; u8 tos; struct net *net; struct sock *sk; if (!rt) goto out; net = dev_net(rt->dst.dev); /* * Find the original header. It is expected to be valid, of course. * Check this, icmp_send is called from the most obscure devices * sometimes. */ iph = ip_hdr(skb_in); if ((u8 *)iph < skb_in->head || (skb_in->network_header + sizeof(*iph)) > skb_in->tail) goto out; /* * No replies to physical multicast/broadcast */ if (skb_in->pkt_type != PACKET_HOST) goto out; /* * Now check at the protocol level */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto out; /* * Only reply to fragment 0. We byte re-order the constant * mask for efficiency. */ if (iph->frag_off & htons(IP_OFFSET)) goto out; /* * If we send an ICMP error to an ICMP error a mess would result.. */ if (icmp_pointers[type].error) { /* * We are an error, check if we are replying to an * ICMP error */ if (iph->protocol == IPPROTO_ICMP) { u8 _inner_type, *itp; itp = skb_header_pointer(skb_in, skb_network_header(skb_in) + (iph->ihl << 2) + offsetof(struct icmphdr, type) - skb_in->data, sizeof(_inner_type), &_inner_type); if (itp == NULL) goto out; /* * Assume any unknown ICMP type is an error. This * isn't specified by the RFC, but think about it.. */ if (*itp > NR_ICMP_TYPES || icmp_pointers[*itp].error) goto out; } } sk = icmp_xmit_lock(net); if (sk == NULL) return; /* * Construct source address and options. */ saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) { struct net_device *dev = NULL; rcu_read_lock(); if (rt_is_input_route(rt) && net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) dev = dev_get_by_index_rcu(net, rt->rt_iif); if (dev) saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); else saddr = 0; rcu_read_unlock(); } tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : iph->tos; if (ip_options_echo(&icmp_param.replyopts, skb_in)) goto out_unlock; /* * Prepare data for ICMP header. */ icmp_param.data.icmph.type = type; icmp_param.data.icmph.code = code; icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts; ipc.tx_flags = 0; rt = icmp_route_lookup(net, skb_in, iph, saddr, tos, type, code, &icmp_param); if (IS_ERR(rt)) goto out_unlock; if (!icmpv4_xrlim_allow(net, rt, type, code)) goto ende; /* RFC says return as much as we can without exceeding 576 bytes. */ room = dst_mtu(&rt->dst); if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; room -= sizeof(struct icmphdr); icmp_param.data_len = skb_in->len - icmp_param.offset; if (icmp_param.data_len > room) icmp_param.data_len = room; icmp_param.head_len = sizeof(struct icmphdr); icmp_push_reply(&icmp_param, &ipc, &rt); ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); out:; }
label: 0
cve_id: CVE-2012-3552
id: 312
text_label: benign

cwe_id: CWE-362
func:
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct iphdr *iph; int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); struct ipcm_cookie ipc; __be32 saddr; u8 tos; struct net *net; struct sock *sk; if (!rt) goto out; net = dev_net(rt->dst.dev); /* * Find the original header. It is expected to be valid, of course. * Check this, icmp_send is called from the most obscure devices * sometimes. */ iph = ip_hdr(skb_in); if ((u8 *)iph < skb_in->head || (skb_in->network_header + sizeof(*iph)) > skb_in->tail) goto out; /* * No replies to physical multicast/broadcast */ if (skb_in->pkt_type != PACKET_HOST) goto out; /* * Now check at the protocol level */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto out; /* * Only reply to fragment 0. We byte re-order the constant * mask for efficiency. */ if (iph->frag_off & htons(IP_OFFSET)) goto out; /* * If we send an ICMP error to an ICMP error a mess would result.. */ if (icmp_pointers[type].error) { /* * We are an error, check if we are replying to an * ICMP error */ if (iph->protocol == IPPROTO_ICMP) { u8 _inner_type, *itp; itp = skb_header_pointer(skb_in, skb_network_header(skb_in) + (iph->ihl << 2) + offsetof(struct icmphdr, type) - skb_in->data, sizeof(_inner_type), &_inner_type); if (itp == NULL) goto out; /* * Assume any unknown ICMP type is an error. This * isn't specified by the RFC, but think about it.. */ if (*itp > NR_ICMP_TYPES || icmp_pointers[*itp].error) goto out; } } sk = icmp_xmit_lock(net); if (sk == NULL) return; /* * Construct source address and options. */ saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) { struct net_device *dev = NULL; rcu_read_lock(); if (rt_is_input_route(rt) && net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) dev = dev_get_by_index_rcu(net, rt->rt_iif); if (dev) saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); else saddr = 0; rcu_read_unlock(); } tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : iph->tos; if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in)) goto out_unlock; /* * Prepare data for ICMP header. */ icmp_param.data.icmph.type = type; icmp_param.data.icmph.code = code; icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; ipc.tx_flags = 0; rt = icmp_route_lookup(net, skb_in, iph, saddr, tos, type, code, &icmp_param); if (IS_ERR(rt)) goto out_unlock; if (!icmpv4_xrlim_allow(net, rt, type, code)) goto ende; /* RFC says return as much as we can without exceeding 576 bytes. */ room = dst_mtu(&rt->dst); if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen; room -= sizeof(struct icmphdr); icmp_param.data_len = skb_in->len - icmp_param.offset; if (icmp_param.data_len > room) icmp_param.data_len = room; icmp_param.head_len = sizeof(struct icmphdr); icmp_push_reply(&icmp_param, &ipc, &rt); ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); out:; }
label: 1
cve_id: CVE-2012-3552
id: 312
text_label: vulnerable

cwe_id: CWE-119
func:
void show_object_with_name(FILE *out, struct object *obj, struct strbuf *path, const char *component) { char *name = path_name(path, component); char *p; fprintf(out, "%s ", oid_to_hex(&obj->oid)); for (p = name; *p && *p != '\n'; p++) fputc(*p, out); fputc('\n', out); free(name); }
label: 0
cve_id: CVE-2016-2315
id: 978
text_label: benign

cwe_id: CWE-119
func:
void show_object_with_name(FILE *out, struct object *obj, const char *name) { const char *p; fprintf(out, "%s ", oid_to_hex(&obj->oid)); for (p = name; *p && *p != '\n'; p++) fputc(*p, out); fputc('\n', out); }
label: 1
cve_id: CVE-2016-2315
id: 978
text_label: vulnerable

cwe_id: CWE-125
func:
isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 1)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? ", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); }
label: 0
cve_id: CVE-2017-12998
id: 2986
text_label: benign

cwe_id: CWE-125
func:
isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? ", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); }
label: 1
cve_id: CVE-2017-12998
id: 2986
text_label: vulnerable

cwe_id: CWE-190
func:
static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt) { int n; assert(cnt >= 0); assert(buf); JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt)); jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; n = m->len_ - m->pos_; cnt = JAS_MIN(n, cnt); memcpy(buf, &m->buf_[m->pos_], cnt); m->pos_ += cnt; return cnt; }
label: 0
cve_id: CVE-2016-9262
id: 2910
text_label: benign

cwe_id: CWE-190
func:
static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt) { ssize_t n; assert(cnt >= 0); assert(buf); JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt)); jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; n = m->len_ - m->pos_; cnt = JAS_MIN(n, cnt); memcpy(buf, &m->buf_[m->pos_], cnt); m->pos_ += cnt; return cnt; }
label: 1
cve_id: CVE-2016-9262
id: 2910
text_label: vulnerable

cwe_id: CWE-125
func:
static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) { int i, j, sym, wordsize; ut32 stype; wordsize = MACH0_(get_bits)(bin) / 8; if (idx < 0 || idx >= bin->nsymtab) { return 0; } if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) { stype = S_LAZY_SYMBOL_POINTERS; } else { stype = S_NON_LAZY_SYMBOL_POINTERS; } reloc->offset = 0; reloc->addr = 0; reloc->addend = 0; #define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return false; } #undef CASE for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == stype) { for (j=0, sym=-1; bin->sects[i].reserved1+j < bin->nindirectsyms; j++) if (idx == bin->indirectsyms[bin->sects[i].reserved1 + j]) { sym = j; break; } reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize; reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize; return true; } } return false; }
label: 0
cve_id: CVE-2018-11380
id: 253
text_label: benign

cwe_id: CWE-125
func:
static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) { int i, j, sym, wordsize; ut32 stype; wordsize = MACH0_(get_bits)(bin) / 8; if (idx < 0 || idx >= bin->nsymtab) { return 0; } if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) { stype = S_LAZY_SYMBOL_POINTERS; } else { stype = S_NON_LAZY_SYMBOL_POINTERS; } reloc->offset = 0; reloc->addr = 0; reloc->addend = 0; #define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return false; } #undef CASE for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == stype) { for (j = 0, sym = -1; bin->sects[i].reserved1 + j < bin->nindirectsyms; j++) { int indidx = bin->sects[i].reserved1 + j; if (indidx < 0 || indidx >= bin->nindirectsyms) { break; } if (idx == bin->indirectsyms[indidx]) { sym = j; break; } } reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize; reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize; return true; } } return false; }
label: 1
cve_id: CVE-2018-11380
id: 253
text_label: vulnerable

cwe_id: CWE-125
func:
snmp_api_set_time_ticks(snmp_varbind_t *varbind, uint32_t *oid, uint32_t integer) { snmp_api_replace_oid(varbind, oid); varbind->value_type = SNMP_DATA_TYPE_TIME_TICKS; varbind->value.integer = integer; }
label: 0
cve_id: CVE-2020-12141
id: 2128
text_label: benign

cwe_id: CWE-125
func:
snmp_api_set_time_ticks(snmp_varbind_t *varbind, snmp_oid_t *oid, uint32_t integer) { memcpy(&varbind->oid, oid, sizeof(snmp_oid_t)); varbind->value_type = BER_DATA_TYPE_TIMETICKS; varbind->value.integer = integer; }
label: 1
cve_id: CVE-2020-12141
id: 2128
text_label: vulnerable

cwe_id: CWE-416
func:
eval_next_line(char_u *arg, evalarg_T *evalarg) { garray_T *gap = &evalarg->eval_ga; char_u *line; if (arg != NULL) { if (*arg == NL) return newline_skip_comments(arg); // Truncate before a trailing comment, so that concatenating the lines // won't turn the rest into a comment. if (*skipwhite(arg) == '#') *arg = NUL; } if (evalarg->eval_cookie != NULL) line = evalarg->eval_getline(0, evalarg->eval_cookie, 0, GETLINE_CONCAT_ALL); else line = next_line_from_context(evalarg->eval_cctx, TRUE); if (line == NULL) return NULL; ++evalarg->eval_break_count; if (gap->ga_itemsize > 0 && ga_grow(gap, 1) == OK) { char_u *p = skipwhite(line); // Going to concatenate the lines after parsing. For an empty or // comment line use an empty string. if (*p == NUL || vim9_comment_start(p)) { vim_free(line); line = vim_strsave((char_u *)""); } ((char_u **)gap->ga_data)[gap->ga_len] = line; ++gap->ga_len; } else if (evalarg->eval_cookie != NULL) { vim_free(evalarg->eval_tofree); evalarg->eval_tofree = line; } // Advanced to the next line, "arg" no longer points into the previous // line. evalarg->eval_using_cmdline = FALSE; return skipwhite(line); }
label: 0
cve_id: CVE-2022-2889
id: 2814
text_label: benign

cwe_id: CWE-416
func:
eval_next_line(char_u *arg, evalarg_T *evalarg) { garray_T *gap = &evalarg->eval_ga; char_u *line; if (arg != NULL) { if (*arg == NL) return newline_skip_comments(arg); // Truncate before a trailing comment, so that concatenating the lines // won't turn the rest into a comment. if (*skipwhite(arg) == '#') *arg = NUL; } if (evalarg->eval_cookie != NULL) line = evalarg->eval_getline(0, evalarg->eval_cookie, 0, GETLINE_CONCAT_ALL); else line = next_line_from_context(evalarg->eval_cctx, TRUE); if (line == NULL) return NULL; ++evalarg->eval_break_count; if (gap->ga_itemsize > 0 && ga_grow(gap, 1) == OK) { char_u *p = skipwhite(line); // Going to concatenate the lines after parsing. For an empty or // comment line use an empty string. if (*p == NUL || vim9_comment_start(p)) { vim_free(line); line = vim_strsave((char_u *)""); } ((char_u **)gap->ga_data)[gap->ga_len] = line; ++gap->ga_len; } else if (evalarg->eval_cookie != NULL) { free_eval_tofree_later(evalarg); evalarg->eval_tofree = line; } // Advanced to the next line, "arg" no longer points into the previous // line. evalarg->eval_using_cmdline = FALSE; return skipwhite(line); }
label: 1
cve_id: CVE-2022-2889
id: 2814
text_label: vulnerable

cwe_id: CWE-476
func:
static void gf_dump_vrml_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field) { u32 i, sf_type; Bool needs_field_container; GF_ChildNodeItem *list; void *slot_ptr; switch (field.fieldType) { case GF_SG_VRML_SFNODE: assert ( *(GF_Node **)field.far_ptr); if (sdump->XMLDump) { if (!sdump->X3DDump) { StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; } } else { StartAttribute(sdump, field.name); } gf_dump_vrml_node(sdump, *(GF_Node **)field.far_ptr, 0, NULL); if (sdump->XMLDump) { if (!sdump->X3DDump) { sdump->indent--; EndElement(sdump, (char *) field.name, 1); } } else { EndAttribute(sdump); } return; case GF_SG_VRML_MFNODE: needs_field_container = 0; if (sdump->XMLDump && sdump->X3DDump) { u32 count, nb_ndt; GF_FieldInfo info; if (!strcmp(field.name, "children")) { needs_field_container = 0; } else { nb_ndt = 0; count = gf_node_get_field_count(node); for (i=0; i<count; i++) { gf_node_get_field(node, i, &info); if ((info.eventType==GF_SG_EVENT_IN) || (info.eventType==GF_SG_EVENT_OUT)) continue; if (info.NDTtype==field.NDTtype) nb_ndt++; } needs_field_container = (nb_ndt>1) ? 1 : 0; } } #ifndef GPAC_DISABLE_X3D if (!sdump->X3DDump) { if (gf_node_get_tag(node)==TAG_X3D_Switch) field.name = "choice"; } #endif list = * ((GF_ChildNodeItem **) field.far_ptr); assert(list); if (!sdump->XMLDump || !sdump->X3DDump) StartList(sdump, field.name); sdump->indent++; while (list) { gf_dump_vrml_node(sdump, list->node, 1, needs_field_container ? (char *) field.name : NULL); list = list->next; } sdump->indent--; if (!sdump->XMLDump || !sdump->X3DDump) EndList(sdump, field.name); return; case GF_SG_VRML_SFCOMMANDBUFFER: { SFCommandBuffer *cb = (SFCommandBuffer *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; if (!gf_list_count(cb->commandList)) { /*the arch does not allow for that (we would need a codec and so on, or decompress the command list in all cases...)*/ if (sdump->trace && cb->bufferSize) { if (sdump->XMLDump) gf_fprintf(sdump->trace, "<!--SFCommandBuffer cannot be dumped while playing - use MP4Box instead-->\n"); else gf_fprintf(sdump->trace, "#SFCommandBuffer cannot be dumped while playing - use MP4Box instead\n"); } } else { gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent, 0); } sdump->indent--; EndElement(sdump, (char *) field.name, 1); } return; case GF_SG_VRML_MFATTRREF: if (sdump->XMLDump) { MFAttrRef *ar = (MFAttrRef *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; for (i=0; i<ar->count; i++) { if (ar->vals[i].node) { GF_FieldInfo pinfo; DUMP_IND(sdump); gf_node_get_field(ar->vals[i].node, ar->vals[i].fieldIndex, &pinfo); gf_fprintf(sdump->trace, "<store node=\""); scene_dump_vrml_id(sdump, ar->vals[i].node); gf_fprintf(sdump->trace, "\" field=\"%s\"/>\n", pinfo.name); } } sdump->indent--; EndElement(sdump, (char *) field.name, 1); return; } break; } if (gf_sg_vrml_is_sf_field(field.fieldType)) { StartAttribute(sdump, field.name); gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node); EndAttribute(sdump); } else { GenMFField *mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, " %s=\'", (char *) field.name); break; default: StartAttribute(sdump, field.name); break; } } else { StartAttribute(sdump, field.name); } if 
(!sdump->XMLDump) gf_fprintf(sdump->trace, "["); if (mffield) { for (i=0; i<mffield->count; i++) { if (i) gf_fprintf(sdump->trace, " "); gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, node); } } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "]"); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, "\'"); break; default: EndAttribute(sdump); break; } } else { EndAttribute(sdump); } } }
label: 0
cve_id: CVE-2022-2549
id: 20
text_label: benign

cwe_id: CWE-476
func:
static void gf_dump_vrml_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field) { u32 i, sf_type; Bool needs_field_container; GF_ChildNodeItem *list; void *slot_ptr; switch (field.fieldType) { case GF_SG_VRML_SFNODE: assert ( *(GF_Node **)field.far_ptr); if (sdump->XMLDump) { if (!sdump->X3DDump) { StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; } } else { StartAttribute(sdump, field.name); } gf_dump_vrml_node(sdump, *(GF_Node **)field.far_ptr, 0, NULL); if (sdump->XMLDump) { if (!sdump->X3DDump) { sdump->indent--; EndElement(sdump, (char *) field.name, 1); } } else { EndAttribute(sdump); } return; case GF_SG_VRML_MFNODE: needs_field_container = 0; if (sdump->XMLDump && sdump->X3DDump) { u32 count, nb_ndt; GF_FieldInfo info; if (!strcmp(field.name, "children")) { needs_field_container = 0; } else { nb_ndt = 0; count = gf_node_get_field_count(node); for (i=0; i<count; i++) { gf_node_get_field(node, i, &info); if ((info.eventType==GF_SG_EVENT_IN) || (info.eventType==GF_SG_EVENT_OUT)) continue; if (info.NDTtype==field.NDTtype) nb_ndt++; } needs_field_container = (nb_ndt>1) ? 1 : 0; } } #ifndef GPAC_DISABLE_X3D if (!sdump->X3DDump) { if (gf_node_get_tag(node)==TAG_X3D_Switch) field.name = "choice"; } #endif list = * ((GF_ChildNodeItem **) field.far_ptr); assert(list); if (!sdump->XMLDump || !sdump->X3DDump) StartList(sdump, field.name); sdump->indent++; while (list) { gf_dump_vrml_node(sdump, list->node, 1, needs_field_container ? (char *) field.name : NULL); list = list->next; } sdump->indent--; if (!sdump->XMLDump || !sdump->X3DDump) EndList(sdump, field.name); return; case GF_SG_VRML_SFCOMMANDBUFFER: { SFCommandBuffer *cb = (SFCommandBuffer *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; if (!gf_list_count(cb->commandList)) { /*the arch does not allow for that (we would need a codec and so on, or decompress the command list in all cases...)*/ if (sdump->trace && cb->bufferSize) { if (sdump->XMLDump) gf_fprintf(sdump->trace, "<!--SFCommandBuffer cannot be dumped while playing - use MP4Box instead-->\n"); else gf_fprintf(sdump->trace, "#SFCommandBuffer cannot be dumped while playing - use MP4Box instead\n"); } } else { gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent, 0); } sdump->indent--; EndElement(sdump, (char *) field.name, 1); } return; case GF_SG_VRML_MFATTRREF: if (sdump->XMLDump) { MFAttrRef *ar = (MFAttrRef *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; for (i=0; i<ar->count; i++) { if (ar->vals[i].node) { GF_FieldInfo pinfo; DUMP_IND(sdump); gf_node_get_field(ar->vals[i].node, ar->vals[i].fieldIndex, &pinfo); gf_fprintf(sdump->trace, "<store node=\""); scene_dump_vrml_id(sdump, ar->vals[i].node); gf_fprintf(sdump->trace, "\" field=\"%s\"/>\n", pinfo.name); } } sdump->indent--; EndElement(sdump, (char *) field.name, 1); return; } break; } if (gf_sg_vrml_is_sf_field(field.fieldType)) { StartAttribute(sdump, field.name); gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node); EndAttribute(sdump); } else { GenMFField *mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, " %s=\'", (char *) field.name); break; default: StartAttribute(sdump, field.name); break; } } else { StartAttribute(sdump, field.name); } if 
(!sdump->XMLDump) gf_fprintf(sdump->trace, "["); for (i=0; mffield && (i<mffield->count); i++) { if (i) gf_fprintf(sdump->trace, " "); gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, node); } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "]"); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, "\'"); break; default: EndAttribute(sdump); break; } } else { EndAttribute(sdump); } } }
label: 1
cve_id: CVE-2022-2549
id: 20
text_label: vulnerable

cwe_id: CWE-787
func:
void TightDecoder::FilterGradient(const rdr::U8* inbuf, const PixelFormat& pf, PIXEL_T* outbuf, int stride, const Rect& r) { int x, y, c; static rdr::U8 prevRow[TIGHT_MAX_WIDTH*3]; static rdr::U8 thisRow[TIGHT_MAX_WIDTH*3]; rdr::U8 pix[3]; int est[3]; memset(prevRow, 0, sizeof(prevRow)); // Set up shortcut variables int rectHeight = r.height(); int rectWidth = r.width(); for (y = 0; y < rectHeight; y++) { /* First pixel in a row */ pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1); for (c = 0; c < 3; c++) pix[c] += prevRow[c]; memcpy(thisRow, pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1); /* Remaining pixels of a row */ for (x = 1; x < rectWidth; x++) { for (c = 0; c < 3; c++) { est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c]; if (est[c] > 255) { est[c] = 255; } else if (est[c] < 0) { est[c] = 0; } } pf.rgbFromBuffer(pix, &inbuf[y*rectWidth+x], 1); for (c = 0; c < 3; c++) pix[c] += est[c]; memcpy(&thisRow[x*3], pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1); } memcpy(prevRow, thisRow, sizeof(prevRow)); } }
label: 0
cve_id: CVE-2019-15693
id: 1286
text_label: benign

cwe_id: CWE-787
func:
void TightDecoder::FilterGradient(const rdr::U8* inbuf, const PixelFormat& pf, PIXEL_T* outbuf, int stride, const Rect& r) { int x, y, c; static rdr::U8 prevRow[TIGHT_MAX_WIDTH*3]; static rdr::U8 thisRow[TIGHT_MAX_WIDTH*3]; rdr::U8 pix[3]; int est[3]; memset(prevRow, 0, sizeof(prevRow)); // Set up shortcut variables int rectHeight = r.height(); int rectWidth = r.width(); for (y = 0; y < rectHeight; y++) { for (x = 0; x < rectWidth; x++) { /* First pixel in a row */ if (x == 0) { pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1); for (c = 0; c < 3; c++) pix[c] += prevRow[c]; memcpy(thisRow, pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1); continue; } for (c = 0; c < 3; c++) { est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c]; if (est[c] > 255) { est[c] = 255; } else if (est[c] < 0) { est[c] = 0; } } pf.rgbFromBuffer(pix, &inbuf[y*rectWidth+x], 1); for (c = 0; c < 3; c++) pix[c] += est[c]; memcpy(&thisRow[x*3], pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1); } memcpy(prevRow, thisRow, sizeof(prevRow)); } }
label: 1
cve_id: CVE-2019-15693
id: 1286
text_label: vulnerable

cwe_id: CWE-125
func:
S_grok_bslash_N(pTHX_ RExC_state_t *pRExC_state, regnode ** node_p, UV * code_point_p, int * cp_count, I32 * flagp, const bool strict, const U32 depth ) { /* This routine teases apart the various meanings of \N and returns * accordingly. The input parameters constrain which meaning(s) is/are valid * in the current context. * * Exactly one of <node_p> and <code_point_p> must be non-NULL. * * If <code_point_p> is not NULL, the context is expecting the result to be a * single code point. If this \N instance turns out to a single code point, * the function returns TRUE and sets *code_point_p to that code point. * * If <node_p> is not NULL, the context is expecting the result to be one of * the things representable by a regnode. If this \N instance turns out to be * one such, the function generates the regnode, returns TRUE and sets *node_p * to point to that regnode. * * If this instance of \N isn't legal in any context, this function will * generate a fatal error and not return. * * On input, RExC_parse should point to the first char following the \N at the * time of the call. On successful return, RExC_parse will have been updated * to point to just after the sequence identified by this routine. Also * *flagp has been updated as needed. * * When there is some problem with the current context and this \N instance, * the function returns FALSE, without advancing RExC_parse, nor setting * *node_p, nor *code_point_p, nor *flagp. * * If <cp_count> is not NULL, the caller wants to know the length (in code * points) that this \N sequence matches. This is set even if the function * returns FALSE, as detailed below. * * There are 5 possibilities here, as detailed in the next 5 paragraphs. * * Probably the most common case is for the \N to specify a single code point. * *cp_count will be set to 1, and *code_point_p will be set to that code * point. * * Another possibility is for the input to be an empty \N{}, which for * backwards compatibility we accept. *cp_count will be set to 0. *node_p * will be set to a generated NOTHING node. * * Still another possibility is for the \N to mean [^\n]. *cp_count will be * set to 0. *node_p will be set to a generated REG_ANY node. * * The fourth possibility is that \N resolves to a sequence of more than one * code points. *cp_count will be set to the number of code points in the * sequence. *node_p * will be set to a generated node returned by this * function calling S_reg(). * * The final possibility is that it is premature to be calling this function; * that pass1 needs to be restarted. This can happen when this changes from * /d to /u rules, or when the pattern needs to be upgraded to UTF-8. The * latter occurs only when the fourth possibility would otherwise be in * effect, and is because one of those code points requires the pattern to be * recompiled as UTF-8. The function returns FALSE, and sets the * RESTART_PASS1 and NEED_UTF8 flags in *flagp, as appropriate. When this * happens, the caller needs to desist from continuing parsing, and return * this information to its caller. This is not set for when there is only one * code point, as this can be called as part of an ANYOF node, and they can * store above-Latin1 code points without the pattern having to be in UTF-8. * * For non-single-quoted regexes, the tokenizer has resolved character and * sequence names inside \N{...} into their Unicode values, normalizing the * result into what we should see here: '\N{U+c1.c2...}', where c1... are the * hex-represented code points in the sequence. 
This is done there because * the names can vary based on what charnames pragma is in scope at the time, * so we need a way to take a snapshot of what they resolve to at the time of * the original parse. [perl #56444]. * * That parsing is skipped for single-quoted regexes, so we may here get * '\N{NAME}'. This is a fatal error. These names have to be resolved by the * parser. But if the single-quoted regex is something like '\N{U+41}', that * is legal and handled here. The code point is Unicode, and has to be * translated into the native character set for non-ASCII platforms. */ char * endbrace; /* points to '}' following the name */ char *endchar; /* Points to '.' or '}' ending cur char in the input stream */ char* p = RExC_parse; /* Temporary */ GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_GROK_BSLASH_N; GET_RE_DEBUG_FLAGS; assert(cBOOL(node_p) ^ cBOOL(code_point_p)); /* Exactly one should be set */ assert(! (node_p && cp_count)); /* At most 1 should be set */ if (cp_count) { /* Initialize return for the most common case */ *cp_count = 1; } /* The [^\n] meaning of \N ignores spaces and comments under the /x * modifier. The other meanings do not, so use a temporary until we find * out which we are being called with */ skip_to_be_ignored_text(pRExC_state, &p, FALSE /* Don't force to /x */ ); /* Disambiguate between \N meaning a named character versus \N meaning * [^\n]. The latter is assumed when the {...} following the \N is a legal * quantifier, or there is no '{' at all */ if (*p != '{' || regcurly(p)) { RExC_parse = p; if (cp_count) { *cp_count = -1; } if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state, REG_ANY); *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); Set_Node_Length(*node_p, 1); /* MJD */ return TRUE; } /* Here, we have decided it should be a named character or sequence */ /* The test above made sure that the next real character is a '{', but * under the /x modifier, it could be separated by space (or a comment and * \n) and this is not allowed (for consistency with \x{...} and the * tokenizer handling of \N{NAME}). */ if (*RExC_parse != '{') { vFAIL("Missing braces on \\N{}"); } RExC_parse++; /* Skip past the '{' */ endbrace = strchr(RExC_parse, '}'); if (! endbrace) { /* no trailing brace */ vFAIL2("Missing right brace on \\%c{}", 'N'); } else if (!( endbrace == RExC_parse /* nothing between the {} */ || memBEGINs(RExC_parse, /* U+ (bad hex is checked below for a better error msg) */ (STRLEN) (RExC_end - RExC_parse), "U+"))) { RExC_parse = endbrace; /* position msg's '<--HERE' */ vFAIL("\\N{NAME} must be resolved by the lexer"); } REQUIRE_UNI_RULES(flagp, FALSE); /* Unicode named chars imply Unicode semantics */ if (endbrace == RExC_parse) { /* empty: \N{} */ if (strict) { RExC_parse++; /* Position after the "}" */ vFAIL("Zero length \\N{}"); } if (cp_count) { *cp_count = 0; } nextchar(pRExC_state); if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state,NOTHING); return TRUE; } RExC_parse += 2; /* Skip past the 'U+' */ /* Because toke.c has generated a special construct for us guaranteed not * to have NULs, we can use a str function */ endchar = RExC_parse + strcspn(RExC_parse, ".}"); /* Code points are separated by dots. If none, there is only one code * point, and is terminated by the brace */ if (endchar >= endbrace) { STRLEN length_of_hex; I32 grok_hex_flags; /* Here, exactly one code point. If that isn't what is wanted, fail */ if (! 
code_point_p) { RExC_parse = p; return FALSE; } /* Convert code point from hex */ length_of_hex = (STRLEN)(endchar - RExC_parse); grok_hex_flags = PERL_SCAN_ALLOW_UNDERSCORES | PERL_SCAN_DISALLOW_PREFIX /* No errors in the first pass (See [perl * #122671].) We let the code below find the * errors when there are multiple chars. */ | ((SIZE_ONLY) ? PERL_SCAN_SILENT_ILLDIGIT : 0); /* This routine is the one place where both single- and double-quotish * \N{U+xxxx} are evaluated. The value is a Unicode code point which * must be converted to native. */ *code_point_p = UNI_TO_NATIVE(grok_hex(RExC_parse, &length_of_hex, &grok_hex_flags, NULL)); /* The tokenizer should have guaranteed validity, but it's possible to * bypass it by using single quoting, so check. Don't do the check * here when there are multiple chars; we do it below anyway. */ if (length_of_hex == 0 || length_of_hex != (STRLEN)(endchar - RExC_parse) ) { RExC_parse += length_of_hex; /* Includes all the valid */ RExC_parse += (RExC_orig_utf8) /* point to after 1st invalid */ ? UTF8SKIP(RExC_parse) : 1; /* Guard against malformed utf8 */ if (RExC_parse >= endchar) { RExC_parse = endchar; } vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = endbrace + 1; return TRUE; } else { /* Is a multiple character sequence */ SV * substitute_parse; STRLEN len; char *orig_end = RExC_end; char *save_start = RExC_start; I32 flags; /* Count the code points, if desired, in the sequence */ if (cp_count) { *cp_count = 0; while (RExC_parse < endbrace) { /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); (*cp_count)++; } } /* Fail if caller doesn't want to handle a multi-code-point sequence. * But don't backup up the pointer if the caller wants to know how many * code points there are (they can then handle things) */ if (! node_p) { if (! cp_count) { RExC_parse = p; } return FALSE; } /* What is done here is to convert this to a sub-pattern of the form * \x{char1}\x{char2}... and then call reg recursively to parse it * (enclosing in "(?: ... )" ). That way, it retains its atomicness, * while not having to worry about special handling that some code * points may have. */ substitute_parse = newSVpvs("?:"); while (RExC_parse < endbrace) { /* Convert to notation the rest of the code understands */ sv_catpv(substitute_parse, "\\x{"); sv_catpvn(substitute_parse, RExC_parse, endchar - RExC_parse); sv_catpv(substitute_parse, "}"); /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); } sv_catpv(substitute_parse, ")"); len = SvCUR(substitute_parse); /* Don't allow empty number */ if (len < (STRLEN) 8) { RExC_parse = endbrace; vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = RExC_start = RExC_adjusted_start = SvPV_nolen(substitute_parse); RExC_end = RExC_parse + len; /* The values are Unicode, and therefore not subject to recoding, but * have to be converted to native on a non-Unicode (meaning non-ASCII) * platform. */ #ifdef EBCDIC RExC_recode_x_to_native = 1; #endif *node_p = reg(pRExC_state, 1, &flags, depth+1); /* Restore the saved values */ RExC_start = RExC_adjusted_start = save_start; RExC_parse = endbrace; RExC_end = orig_end; #ifdef EBCDIC RExC_recode_x_to_native = 0; #endif SvREFCNT_dec_NN(substitute_parse); if (! 
*node_p) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return FALSE; } FAIL2("panic: reg returned NULL to grok_bslash_N, flags=%#" UVxf, (UV) flags); } *flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED); nextchar(pRExC_state); return TRUE; } }
0
CVE-2018-18313
1,574
benign
CWE-125
S_grok_bslash_N(pTHX_ RExC_state_t *pRExC_state, regnode ** node_p, UV * code_point_p, int * cp_count, I32 * flagp, const bool strict, const U32 depth ) { /* This routine teases apart the various meanings of \N and returns * accordingly. The input parameters constrain which meaning(s) is/are valid * in the current context. * * Exactly one of <node_p> and <code_point_p> must be non-NULL. * * If <code_point_p> is not NULL, the context is expecting the result to be a * single code point. If this \N instance turns out to a single code point, * the function returns TRUE and sets *code_point_p to that code point. * * If <node_p> is not NULL, the context is expecting the result to be one of * the things representable by a regnode. If this \N instance turns out to be * one such, the function generates the regnode, returns TRUE and sets *node_p * to point to that regnode. * * If this instance of \N isn't legal in any context, this function will * generate a fatal error and not return. * * On input, RExC_parse should point to the first char following the \N at the * time of the call. On successful return, RExC_parse will have been updated * to point to just after the sequence identified by this routine. Also * *flagp has been updated as needed. * * When there is some problem with the current context and this \N instance, * the function returns FALSE, without advancing RExC_parse, nor setting * *node_p, nor *code_point_p, nor *flagp. * * If <cp_count> is not NULL, the caller wants to know the length (in code * points) that this \N sequence matches. This is set even if the function * returns FALSE, as detailed below. * * There are 5 possibilities here, as detailed in the next 5 paragraphs. * * Probably the most common case is for the \N to specify a single code point. * *cp_count will be set to 1, and *code_point_p will be set to that code * point. * * Another possibility is for the input to be an empty \N{}, which for * backwards compatibility we accept. *cp_count will be set to 0. *node_p * will be set to a generated NOTHING node. * * Still another possibility is for the \N to mean [^\n]. *cp_count will be * set to 0. *node_p will be set to a generated REG_ANY node. * * The fourth possibility is that \N resolves to a sequence of more than one * code points. *cp_count will be set to the number of code points in the * sequence. *node_p * will be set to a generated node returned by this * function calling S_reg(). * * The final possibility is that it is premature to be calling this function; * that pass1 needs to be restarted. This can happen when this changes from * /d to /u rules, or when the pattern needs to be upgraded to UTF-8. The * latter occurs only when the fourth possibility would otherwise be in * effect, and is because one of those code points requires the pattern to be * recompiled as UTF-8. The function returns FALSE, and sets the * RESTART_PASS1 and NEED_UTF8 flags in *flagp, as appropriate. When this * happens, the caller needs to desist from continuing parsing, and return * this information to its caller. This is not set for when there is only one * code point, as this can be called as part of an ANYOF node, and they can * store above-Latin1 code points without the pattern having to be in UTF-8. * * For non-single-quoted regexes, the tokenizer has resolved character and * sequence names inside \N{...} into their Unicode values, normalizing the * result into what we should see here: '\N{U+c1.c2...}', where c1... are the * hex-represented code points in the sequence. 
This is done there because * the names can vary based on what charnames pragma is in scope at the time, * so we need a way to take a snapshot of what they resolve to at the time of * the original parse. [perl #56444]. * * That parsing is skipped for single-quoted regexes, so we may here get * '\N{NAME}'. This is a fatal error. These names have to be resolved by the * parser. But if the single-quoted regex is something like '\N{U+41}', that * is legal and handled here. The code point is Unicode, and has to be * translated into the native character set for non-ASCII platforms. */ char * endbrace; /* points to '}' following the name */ char *endchar; /* Points to '.' or '}' ending cur char in the input stream */ char* p = RExC_parse; /* Temporary */ GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_GROK_BSLASH_N; GET_RE_DEBUG_FLAGS; assert(cBOOL(node_p) ^ cBOOL(code_point_p)); /* Exactly one should be set */ assert(! (node_p && cp_count)); /* At most 1 should be set */ if (cp_count) { /* Initialize return for the most common case */ *cp_count = 1; } /* The [^\n] meaning of \N ignores spaces and comments under the /x * modifier. The other meanings do not, so use a temporary until we find * out which we are being called with */ skip_to_be_ignored_text(pRExC_state, &p, FALSE /* Don't force to /x */ ); /* Disambiguate between \N meaning a named character versus \N meaning * [^\n]. The latter is assumed when the {...} following the \N is a legal * quantifier, or there is no '{' at all */ if (*p != '{' || regcurly(p)) { RExC_parse = p; if (cp_count) { *cp_count = -1; } if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state, REG_ANY); *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); Set_Node_Length(*node_p, 1); /* MJD */ return TRUE; } /* Here, we have decided it should be a named character or sequence */ /* The test above made sure that the next real character is a '{', but * under the /x modifier, it could be separated by space (or a comment and * \n) and this is not allowed (for consistency with \x{...} and the * tokenizer handling of \N{NAME}). */ if (*RExC_parse != '{') { vFAIL("Missing braces on \\N{}"); } RExC_parse++; /* Skip past the '{' */ endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse); if (! endbrace) { /* no trailing brace */ vFAIL2("Missing right brace on \\%c{}", 'N'); } else if (!( endbrace == RExC_parse /* nothing between the {} */ || memBEGINs(RExC_parse, /* U+ (bad hex is checked below for a better error msg) */ (STRLEN) (RExC_end - RExC_parse), "U+"))) { RExC_parse = endbrace; /* position msg's '<--HERE' */ vFAIL("\\N{NAME} must be resolved by the lexer"); } REQUIRE_UNI_RULES(flagp, FALSE); /* Unicode named chars imply Unicode semantics */ if (endbrace == RExC_parse) { /* empty: \N{} */ if (strict) { RExC_parse++; /* Position after the "}" */ vFAIL("Zero length \\N{}"); } if (cp_count) { *cp_count = 0; } nextchar(pRExC_state); if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state,NOTHING); return TRUE; } RExC_parse += 2; /* Skip past the 'U+' */ /* Because toke.c has generated a special construct for us guaranteed not * to have NULs, we can use a str function */ endchar = RExC_parse + strcspn(RExC_parse, ".}"); /* Code points are separated by dots. If none, there is only one code * point, and is terminated by the brace */ if (endchar >= endbrace) { STRLEN length_of_hex; I32 grok_hex_flags; /* Here, exactly one code point. If that isn't what is wanted, fail */ if (! 
code_point_p) { RExC_parse = p; return FALSE; } /* Convert code point from hex */ length_of_hex = (STRLEN)(endchar - RExC_parse); grok_hex_flags = PERL_SCAN_ALLOW_UNDERSCORES | PERL_SCAN_DISALLOW_PREFIX /* No errors in the first pass (See [perl * #122671].) We let the code below find the * errors when there are multiple chars. */ | ((SIZE_ONLY) ? PERL_SCAN_SILENT_ILLDIGIT : 0); /* This routine is the one place where both single- and double-quotish * \N{U+xxxx} are evaluated. The value is a Unicode code point which * must be converted to native. */ *code_point_p = UNI_TO_NATIVE(grok_hex(RExC_parse, &length_of_hex, &grok_hex_flags, NULL)); /* The tokenizer should have guaranteed validity, but it's possible to * bypass it by using single quoting, so check. Don't do the check * here when there are multiple chars; we do it below anyway. */ if (length_of_hex == 0 || length_of_hex != (STRLEN)(endchar - RExC_parse) ) { RExC_parse += length_of_hex; /* Includes all the valid */ RExC_parse += (RExC_orig_utf8) /* point to after 1st invalid */ ? UTF8SKIP(RExC_parse) : 1; /* Guard against malformed utf8 */ if (RExC_parse >= endchar) { RExC_parse = endchar; } vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = endbrace + 1; return TRUE; } else { /* Is a multiple character sequence */ SV * substitute_parse; STRLEN len; char *orig_end = RExC_end; char *save_start = RExC_start; I32 flags; /* Count the code points, if desired, in the sequence */ if (cp_count) { *cp_count = 0; while (RExC_parse < endbrace) { /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); (*cp_count)++; } } /* Fail if caller doesn't want to handle a multi-code-point sequence. * But don't back up the pointer if the caller wants to know how many * code points there are (they can then handle things) */ if (! node_p) { if (! cp_count) { RExC_parse = p; } return FALSE; } /* What is done here is to convert this to a sub-pattern of the form * \x{char1}\x{char2}... and then call reg recursively to parse it * (enclosing in "(?: ... )" ). That way, it retains its atomicness, * while not having to worry about special handling that some code * points may have. */ substitute_parse = newSVpvs("?:"); while (RExC_parse < endbrace) { /* Convert to notation the rest of the code understands */ sv_catpv(substitute_parse, "\\x{"); sv_catpvn(substitute_parse, RExC_parse, endchar - RExC_parse); sv_catpv(substitute_parse, "}"); /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); } sv_catpv(substitute_parse, ")"); len = SvCUR(substitute_parse); /* Don't allow empty number */ if (len < (STRLEN) 8) { RExC_parse = endbrace; vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = RExC_start = RExC_adjusted_start = SvPV_nolen(substitute_parse); RExC_end = RExC_parse + len; /* The values are Unicode, and therefore not subject to recoding, but * have to be converted to native on a non-Unicode (meaning non-ASCII) * platform. */ #ifdef EBCDIC RExC_recode_x_to_native = 1; #endif *node_p = reg(pRExC_state, 1, &flags, depth+1); /* Restore the saved values */ RExC_start = RExC_adjusted_start = save_start; RExC_parse = endbrace; RExC_end = orig_end; #ifdef EBCDIC RExC_recode_x_to_native = 0; #endif SvREFCNT_dec_NN(substitute_parse); if (!
*node_p) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return FALSE; } FAIL2("panic: reg returned NULL to grok_bslash_N, flags=%#" UVxf, (UV) flags); } *flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED); nextchar(pRExC_state); return TRUE; } }
1
CVE-2018-18313
1,574
vulnerable
CWE-787
int ReadJpegSections (FILE * infile, ReadMode_t ReadMode) { int a; int HaveCom = FALSE; a = fgetc(infile); if (a != 0xff || fgetc(infile) != M_SOI){ return FALSE; } ImageInfo.JfifHeader.XDensity = ImageInfo.JfifHeader.YDensity = 300; ImageInfo.JfifHeader.ResolutionUnits = 1; for(;;){ int itemlen; int prev; int marker = 0; int ll,lh, got; uchar * Data; CheckSectionsAllocated(); prev = 0; for (a=0;;a++){ marker = fgetc(infile); if (marker != 0xff && prev == 0xff) break; if (marker == EOF){ ErrFatal("Unexpected end of file"); } prev = marker; } if (a > 10){ ErrNonfatal("Extraneous %d padding bytes before section %02X",a-1,marker); } Sections[SectionsRead].Type = marker; // Read the length of the section. lh = fgetc(infile); ll = fgetc(infile); if (lh == EOF || ll == EOF){ ErrFatal("Unexpected end of file"); } itemlen = (lh << 8) | ll; if (itemlen < 2){ ErrFatal("invalid marker"); } Sections[SectionsRead].Size = itemlen; Data = (uchar *)malloc(itemlen); if (Data == NULL){ ErrFatal("Could not allocate memory"); } Sections[SectionsRead].Data = Data; // Store first two pre-read bytes. Data[0] = (uchar)lh; Data[1] = (uchar)ll; got = fread(Data+2, 1, itemlen-2, infile); // Read the whole section. if (got != itemlen-2){ ErrFatal("Premature end of file?"); } SectionsRead += 1; switch(marker){ case M_SOS: // stop before hitting compressed data // If reading entire image is requested, read the rest of the data. if (ReadMode & READ_IMAGE){ int cp, ep, size; // Determine how much file is left. cp = ftell(infile); fseek(infile, 0, SEEK_END); ep = ftell(infile); fseek(infile, cp, SEEK_SET); size = ep-cp; Data = (uchar *)malloc(size); if (Data == NULL){ ErrFatal("could not allocate data for entire image"); } got = fread(Data, 1, size, infile); if (got != size){ ErrFatal("could not read the rest of the image"); } CheckSectionsAllocated(); Sections[SectionsRead].Data = Data; Sections[SectionsRead].Size = size; Sections[SectionsRead].Type = PSEUDO_IMAGE_MARKER; SectionsRead ++; HaveAll = 1; } return TRUE; case M_DQT: // Use for jpeg quality guessing process_DQT(Data, itemlen); break; case M_DHT: // Use for jpeg quality guessing process_DHT(Data, itemlen); break; case M_EOI: // in case it's a tables-only JPEG stream fprintf(stderr,"No image in jpeg!\n"); return FALSE; case M_COM: // Comment section if (HaveCom || ((ReadMode & READ_METADATA) == 0)){ // Discard this section. free(Sections[--SectionsRead].Data); }else{ process_COM(Data, itemlen); HaveCom = TRUE; } break; case M_JFIF: // Regular jpegs always have this tag, exif images have the exif // marker instead, although ACDsee will write images with both markers. // this program will re-create this marker on absence of exif marker. // hence no need to keep the copy from the file. 
if (itemlen < 16){ fprintf(stderr,"Jfif header too short\n"); goto ignore; } if (memcmp(Data+2, "JFIF\0",5)){ fprintf(stderr,"Header missing JFIF marker\n"); } ImageInfo.JfifHeader.Present = TRUE; ImageInfo.JfifHeader.ResolutionUnits = Data[9]; ImageInfo.JfifHeader.XDensity = (Data[10]<<8) | Data[11]; ImageInfo.JfifHeader.YDensity = (Data[12]<<8) | Data[13]; if (ShowTags){ printf("JFIF SOI marker: Units: %d ",ImageInfo.JfifHeader.ResolutionUnits); switch(ImageInfo.JfifHeader.ResolutionUnits){ case 0: printf("(aspect ratio)"); break; case 1: printf("(dots per inch)"); break; case 2: printf("(dots per cm)"); break; default: printf("(unknown)"); break; } printf(" X-density=%d Y-density=%d\n",ImageInfo.JfifHeader.XDensity, ImageInfo.JfifHeader.YDensity); if (Data[14] || Data[15]){ fprintf(stderr,"Ignoring jfif header thumbnail\n"); } } ignore: free(Sections[--SectionsRead].Data); break; case M_EXIF: // There can be different sections using the same marker. if (ReadMode & READ_METADATA){ if (memcmp(Data+2, "Exif", 4) == 0){ process_EXIF(Data, itemlen); break; }else if (memcmp(Data+2, "http:", 5) == 0){ Sections[SectionsRead-1].Type = M_XMP; // Change tag for internal purposes. if (ShowTags){ printf("Image contains XMP section, %d bytes long\n", itemlen); if (ShowTags){ ShowXmp(Sections[SectionsRead-1]); } } break; } } // Otherwise, discard this section. free(Sections[--SectionsRead].Data); break; case M_IPTC: if (ReadMode & READ_METADATA){ if (ShowTags){ printf("Image contains IPTC section, %d bytes long\n", itemlen); } // Note: We just store the IPTC section. It's relatively straightforward // and we don't act on any part of it, so just display it at parse time. }else{ free(Sections[--SectionsRead].Data); } break; case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: if (itemlen < 8){ fprintf(stderr,"Section too short\n"); break; } process_SOFn(Data, marker); break; default: // Skip any other sections. if (ShowTags){ printf("Jpeg section marker 0x%02x size %d\n",marker, itemlen); } break; } } return TRUE; }
0
CVE-2020-26208
1,203
benign
CWE-787
int ReadJpegSections (FILE * infile, ReadMode_t ReadMode) { int a; int HaveCom = FALSE; a = fgetc(infile); if (a != 0xff || fgetc(infile) != M_SOI){ return FALSE; } ImageInfo.JfifHeader.XDensity = ImageInfo.JfifHeader.YDensity = 300; ImageInfo.JfifHeader.ResolutionUnits = 1; for(;;){ int itemlen; int prev; int marker = 0; int ll,lh, got; uchar * Data; CheckSectionsAllocated(); prev = 0; for (a=0;;a++){ marker = fgetc(infile); if (marker != 0xff && prev == 0xff) break; if (marker == EOF){ ErrFatal("Unexpected end of file"); } prev = marker; } if (a > 10){ ErrNonfatal("Extraneous %d padding bytes before section %02X",a-1,marker); } Sections[SectionsRead].Type = marker; // Read the length of the section. lh = fgetc(infile); ll = fgetc(infile); if (lh == EOF || ll == EOF){ ErrFatal("Unexpected end of file"); } itemlen = (lh << 8) | ll; if (itemlen < 2){ ErrFatal("invalid marker"); } Sections[SectionsRead].Size = itemlen; // Allocate an extra 20 bytes more than needed, because sometimes when reading structures, // if the section erroneously ends before short structures that should be there, that can trip // memory checkers in combination with fuzzers. Data = (uchar *)malloc(itemlen+20); if (Data == NULL){ ErrFatal("Could not allocate memory"); } Sections[SectionsRead].Data = Data; // Store first two pre-read bytes. Data[0] = (uchar)lh; Data[1] = (uchar)ll; got = fread(Data+2, 1, itemlen-2, infile); // Read the whole section. if (got != itemlen-2){ ErrFatal("Premature end of file?"); } SectionsRead += 1; switch(marker){ case M_SOS: // stop before hitting compressed data // If reading entire image is requested, read the rest of the data. if (ReadMode & READ_IMAGE){ int cp, ep, size; // Determine how much file is left. cp = ftell(infile); fseek(infile, 0, SEEK_END); ep = ftell(infile); fseek(infile, cp, SEEK_SET); size = ep-cp; Data = (uchar *)malloc(size); if (Data == NULL){ ErrFatal("could not allocate data for entire image"); } got = fread(Data, 1, size, infile); if (got != size){ ErrFatal("could not read the rest of the image"); } CheckSectionsAllocated(); Sections[SectionsRead].Data = Data; Sections[SectionsRead].Size = size; Sections[SectionsRead].Type = PSEUDO_IMAGE_MARKER; SectionsRead ++; HaveAll = 1; } return TRUE; case M_DQT: // Use for jpeg quality guessing process_DQT(Data, itemlen); break; case M_DHT: // Use for jpeg quality guessing process_DHT(Data, itemlen); break; case M_EOI: // in case it's a tables-only JPEG stream fprintf(stderr,"No image in jpeg!\n"); return FALSE; case M_COM: // Comment section if (HaveCom || ((ReadMode & READ_METADATA) == 0)){ // Discard this section. free(Sections[--SectionsRead].Data); }else{ process_COM(Data, itemlen); HaveCom = TRUE; } break; case M_JFIF: // Regular jpegs always have this tag, exif images have the exif // marker instead, although ACDsee will write images with both markers. // this program will re-create this marker on absence of exif marker. // hence no need to keep the copy from the file. 
if (itemlen < 16){ fprintf(stderr,"Jfif header too short\n"); goto ignore; } if (memcmp(Data+2, "JFIF\0",5)){ fprintf(stderr,"Header missing JFIF marker\n"); } ImageInfo.JfifHeader.Present = TRUE; ImageInfo.JfifHeader.ResolutionUnits = Data[9]; ImageInfo.JfifHeader.XDensity = (Data[10]<<8) | Data[11]; ImageInfo.JfifHeader.YDensity = (Data[12]<<8) | Data[13]; if (ShowTags){ printf("JFIF SOI marker: Units: %d ",ImageInfo.JfifHeader.ResolutionUnits); switch(ImageInfo.JfifHeader.ResolutionUnits){ case 0: printf("(aspect ratio)"); break; case 1: printf("(dots per inch)"); break; case 2: printf("(dots per cm)"); break; default: printf("(unknown)"); break; } printf(" X-density=%d Y-density=%d\n",ImageInfo.JfifHeader.XDensity, ImageInfo.JfifHeader.YDensity); if (Data[14] || Data[15]){ fprintf(stderr,"Ignoring jfif header thumbnail\n"); } } ignore: free(Sections[--SectionsRead].Data); break; case M_EXIF: // There can be different sections using the same marker. if (ReadMode & READ_METADATA){ if (memcmp(Data+2, "Exif", 4) == 0){ process_EXIF(Data, itemlen); break; }else if (memcmp(Data+2, "http:", 5) == 0){ Sections[SectionsRead-1].Type = M_XMP; // Change tag for internal purposes. if (ShowTags){ printf("Image contains XMP section, %d bytes long\n", itemlen); if (ShowTags){ ShowXmp(Sections[SectionsRead-1]); } } break; } } // Otherwise, discard this section. free(Sections[--SectionsRead].Data); break; case M_IPTC: if (ReadMode & READ_METADATA){ if (ShowTags){ printf("Image contains IPTC section, %d bytes long\n", itemlen); } // Note: We just store the IPTC section. It's relatively straightforward // and we don't act on any part of it, so just display it at parse time. }else{ free(Sections[--SectionsRead].Data); } break; case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: if (itemlen < 8){ fprintf(stderr,"Section too short\n"); break; } process_SOFn(Data, marker); break; default: // Skip any other sections. if (ShowTags){ printf("Jpeg section marker 0x%02x size %d\n",marker, itemlen); } break; } } return TRUE; }
1
CVE-2020-26208
1,203
vulnerable
CWE-119
static void show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) { char *name = path_name(path, last); bitmap_pos = ext_index_add_object(object, name); free(name); } bitmap_set(base, bitmap_pos); }
0
CVE-2016-2315
2,407
benign
CWE-119
static void show_object(struct object *object, const char *name, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object(object, name); bitmap_set(base, bitmap_pos); }
1
CVE-2016-2315
2,407
vulnerable
CWE-125
static void sycc444_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; unsigned int maxw, maxh, max, i; int offset, upb; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * (size_t)max); d1 = g = (int*)malloc(sizeof(int) * (size_t)max); d2 = b = (int*)malloc(sizeof(int) * (size_t)max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i = 0U; i < max; ++i) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++cb; ++cr; ++r; ++g; ++b; } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; return; fails: if(r) free(r); if(g) free(g); if(b) free(b); }/* sycc444_to_rgb() */
0
CVE-2016-3183
3,247
benign
CWE-125
static void sycc444_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; size_t maxw, maxh, max, i; int offset, upb; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * max); d1 = g = (int*)malloc(sizeof(int) * max); d2 = b = (int*)malloc(sizeof(int) * max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i = 0U; i < max; ++i) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++cb; ++cr; ++r; ++g; ++b; } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; img->color_space = OPJ_CLRSPC_SRGB; return; fails: free(r); free(g); free(b); }/* sycc444_to_rgb() */
1
CVE-2016-3183
3,247
vulnerable
CWE-119
static int filter_frame(AVFilterLink *inlink, AVFrame *in) { DelogoContext *s = inlink->dst->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); AVFrame *out; int hsub0 = desc->log2_chroma_w; int vsub0 = desc->log2_chroma_h; int direct = 0; int plane; AVRational sar; if (av_frame_is_writable(in)) { direct = 1; out = in; } else { out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); } sar = in->sample_aspect_ratio; /* Assume square pixels if SAR is unknown */ if (!sar.num) sar.num = sar.den = 1; for (plane = 0; plane < 4 && in->data[plane]; plane++) { int hsub = plane == 1 || plane == 2 ? hsub0 : 0; int vsub = plane == 1 || plane == 2 ? vsub0 : 0; apply_delogo(out->data[plane], out->linesize[plane], in ->data[plane], in ->linesize[plane], FF_CEIL_RSHIFT(inlink->w, hsub), FF_CEIL_RSHIFT(inlink->h, vsub), sar, s->x>>hsub, s->y>>vsub, /* Up and left borders were rounded down, inject lost bits * into width and height to avoid error accumulation */ FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub), FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub), s->band>>FFMIN(hsub, vsub), s->show, direct); } if (!direct) av_frame_free(&in); return ff_filter_frame(outlink, out); }
0
CVE-2013-4263
3,205
benign
CWE-119
static int filter_frame(AVFilterLink *inlink, AVFrame *in) { DelogoContext *s = inlink->dst->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); AVFrame *out; int hsub0 = desc->log2_chroma_w; int vsub0 = desc->log2_chroma_h; int direct = 0; int plane; AVRational sar; if (av_frame_is_writable(in)) { direct = 1; out = in; } else { out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); } sar = in->sample_aspect_ratio; /* Assume square pixels if SAR is unknown */ if (!sar.num) sar.num = sar.den = 1; for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) { int hsub = plane == 1 || plane == 2 ? hsub0 : 0; int vsub = plane == 1 || plane == 2 ? vsub0 : 0; apply_delogo(out->data[plane], out->linesize[plane], in ->data[plane], in ->linesize[plane], FF_CEIL_RSHIFT(inlink->w, hsub), FF_CEIL_RSHIFT(inlink->h, vsub), sar, s->x>>hsub, s->y>>vsub, /* Up and left borders were rounded down, inject lost bits * into width and height to avoid error accumulation */ FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub), FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub), s->band>>FFMIN(hsub, vsub), s->show, direct); } if (!direct) av_frame_free(&in); return ff_filter_frame(outlink, out); }
1
CVE-2013-4263
3,205
vulnerable
CWE-20
int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); bool slow; /* * Check any passed addresses */ if (addr_len) *addr_len = sizeof(*sin); if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; }
0
CVE-2013-7263
2,155
benign
CWE-20
int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); bool slow; if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; }
1
CVE-2013-7263
2,155
vulnerable
CWE-125
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* positions; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputPositions, &positions)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (positions->type == kTfLiteInt32) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int32_t>(*params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int32_t>(*params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int32_t>(*params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int32_t>(*params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int32_t>(*params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int32_t>(*params, input, positions, output); case kTfLiteBool: return Gather<bool, int32_t>(*params, input, positions, output); case kTfLiteString: return GatherStrings<int32_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } if (positions->type == kTfLiteInt64) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int64_t>(*params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int64_t>(*params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int64_t>(*params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int64_t>(*params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int64_t>(*params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int64_t>(*params, input, positions, output); case kTfLiteBool: return Gather<bool, int64_t>(*params, input, positions, output); case kTfLiteString: return GatherStrings<int64_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } context->ReportError(context, "Positions of type '%s' are not supported by gather.", TfLiteTypeGetName(positions->type)); return kTfLiteError; }
0
CVE-2021-37687
1,887
benign
CWE-125
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* positions; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputPositions, &positions)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (positions->type == kTfLiteInt32) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int32_t>(context, *params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int32_t>(context, *params, input, positions, output); case kTfLiteBool: return Gather<bool, int32_t>(context, *params, input, positions, output); case kTfLiteString: return GatherStrings<int32_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } if (positions->type == kTfLiteInt64) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int64_t>(context, *params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int64_t>(context, *params, input, positions, output); case kTfLiteBool: return Gather<bool, int64_t>(context, *params, input, positions, output); case kTfLiteString: return GatherStrings<int64_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } context->ReportError(context, "Positions of type '%s' are not supported by gather.", TfLiteTypeGetName(positions->type)); return kTfLiteError; }
1
CVE-2021-37687
1,887
vulnerable
CWE-125
gif_read_lzw(FILE *fp, /* I - File to read from */ int first_time, /* I - 1 = first time, 0 = not first time */ int input_code_size) /* I - Code size in bits */ { int i, /* Looping var */ code, /* Current code */ incode; /* Input code */ static short fresh = 0, /* 1 = empty buffers */ code_size = 0, /* Current code size */ set_code_size = 0, /* Initial code size set */ max_code = 0, /* Maximum code used */ max_code_size = 0, /* Maximum code size */ firstcode = 0, /* First code read */ oldcode = 0, /* Last code read */ clear_code = 0, /* Clear code for LZW input */ end_code = 0, /* End code for LZW input */ table[2][4096], /* String table */ stack[8192], /* Output stack */ *sp = stack; /* Current stack pointer */ if (first_time) { /* * Setup LZW state... */ set_code_size = (short)input_code_size; code_size = set_code_size + 1; clear_code = (short)(1 << set_code_size); end_code = clear_code + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; /* * Initialize input buffers... */ gif_get_code(fp, 0, 1); /* * Wipe the decompressor table... */ fresh = 1; for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][0] = 0; sp = stack; return (0); } else if (fresh) { fresh = 0; do firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); while (firstcode == clear_code); return (firstcode); } if (sp > stack) return (*--sp); while ((code = gif_get_code (fp, code_size, 0)) >= 0) { if (code == clear_code) { for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][i] = 0; code_size = set_code_size + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; sp = stack; firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); return (firstcode); } else if (code == end_code) { uchar buf[260]; if (!gif_eof) while (gif_get_block(fp, buf) > 0); return (-2); } incode = code; if (code >= max_code) { *sp++ = firstcode; code = oldcode; } while (code >= clear_code) { *sp++ = table[1][code]; if (code == table[0][code]) return (255); code = table[0][code]; } *sp++ = firstcode = table[1][code]; code = max_code; if (code < 4096) { table[0][code] = oldcode; table[1][code] = firstcode; max_code ++; if (max_code >= max_code_size && max_code_size < 4096) { max_code_size *= 2; code_size ++; } } oldcode = (short)incode; if (sp > stack) return (*--sp); } return (code); }
0
CVE-2022-0534
3,240
benign
CWE-125
gif_read_lzw(FILE *fp, /* I - File to read from */ int first_time, /* I - 1 = first time, 0 = not first time */ int input_code_size) /* I - Code size in bits */ { int i, /* Looping var */ code, /* Current code */ incode; /* Input code */ static short fresh = 0, /* 1 = empty buffers */ code_size = 0, /* Current code size */ set_code_size = 0, /* Initial code size set */ max_code = 0, /* Maximum code used */ max_code_size = 0, /* Maximum code size */ firstcode = 0, /* First code read */ oldcode = 0, /* Last code read */ clear_code = 0, /* Clear code for LZW input */ end_code = 0, /* End code for LZW input */ table[2][4096], /* String table */ stack[8192], /* Output stack */ *sp = stack; /* Current stack pointer */ if (first_time) { /* * Setup LZW state... */ set_code_size = (short)input_code_size; code_size = set_code_size + 1; clear_code = (short)(1 << set_code_size); end_code = clear_code + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; /* * Initialize input buffers... */ gif_get_code(fp, 0, 1); /* * Wipe the decompressor table... */ fresh = 1; for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][0] = 0; sp = stack; return (0); } else if (fresh) { fresh = 0; do firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); while (firstcode == clear_code); return (firstcode); } if (sp > stack) return (*--sp); while ((code = gif_get_code(fp, code_size, 0)) >= 0) { if (code == clear_code) { for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][i] = 0; code_size = set_code_size + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; sp = stack; firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); return (firstcode); } else if (code == end_code) { uchar buf[260]; if (!gif_eof) while (gif_get_block(fp, buf) > 0); return (-2); } incode = code; if (code >= max_code) { *sp++ = firstcode; code = oldcode; } while (code >= clear_code) { *sp++ = table[1][code]; if (code == table[0][code]) return (255); code = table[0][code]; } *sp++ = firstcode = table[1][code]; code = max_code; if (code < 4096) { table[0][code] = oldcode; table[1][code] = firstcode; max_code ++; if (max_code >= max_code_size && max_code_size < 4096) { max_code_size *= 2; code_size ++; } } oldcode = (short)incode; if (sp > stack) return (*--sp); } return (code); }
1
CVE-2022-0534
3,240
vulnerable
CWE-119
do_bid_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap __attribute__((__unused__)), uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && type == NT_GNU_BUILD_ID && (descsz >= 4 || descsz <= 20)) { uint8_t desc[20]; const char *btype; uint32_t i; *flags |= FLAGS_DID_BUILD_ID; switch (descsz) { case 8: btype = "xxHash"; break; case 16: btype = "md5/uuid"; break; case 20: btype = "sha1"; break; default: btype = "unknown"; break; } if (file_printf(ms, ", BuildID[%s]=", btype) == -1) return 1; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return 1; return 1; } return 0; }
0
CVE-2017-1000249
1,486
benign
CWE-119
do_bid_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap __attribute__((__unused__)), uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && type == NT_GNU_BUILD_ID && (descsz >= 4 && descsz <= 20)) { uint8_t desc[20]; const char *btype; uint32_t i; *flags |= FLAGS_DID_BUILD_ID; switch (descsz) { case 8: btype = "xxHash"; break; case 16: btype = "md5/uuid"; break; case 20: btype = "sha1"; break; default: btype = "unknown"; break; } if (file_printf(ms, ", BuildID[%s]=", btype) == -1) return 1; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return 1; return 1; } return 0; }
1
CVE-2017-1000249
1,486
vulnerable
CWE-190
void BlockCodec::runPull() { AFframecount framesToRead = m_outChunk->frameCount; AFframecount framesRead = 0; assert(framesToRead % m_framesPerPacket == 0); int blockCount = framesToRead / m_framesPerPacket; // Read the compressed data. ssize_t bytesRead = read(m_inChunk->buffer, m_bytesPerPacket * blockCount); int blocksRead = bytesRead >= 0 ? bytesRead / m_bytesPerPacket : 0; // Decompress into m_outChunk. for (int i=0; i<blocksRead; i++) { decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket, static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount); framesRead += m_framesPerPacket; } m_track->nextfframe += framesRead; assert(tell() == m_track->fpos_next_frame); if (framesRead < framesToRead) reportReadError(framesRead, framesToRead); m_outChunk->frameCount = framesRead; }
0
CVE-2017-6839
761
benign
CWE-190
void BlockCodec::runPull() { AFframecount framesToRead = m_outChunk->frameCount; AFframecount framesRead = 0; assert(framesToRead % m_framesPerPacket == 0); int blockCount = framesToRead / m_framesPerPacket; // Read the compressed data. ssize_t bytesRead = read(m_inChunk->buffer, m_bytesPerPacket * blockCount); int blocksRead = bytesRead >= 0 ? bytesRead / m_bytesPerPacket : 0; // Decompress into m_outChunk. for (int i=0; i<blocksRead; i++) { if (decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket, static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount)==0) break; framesRead += m_framesPerPacket; } m_track->nextfframe += framesRead; assert(tell() == m_track->fpos_next_frame); if (framesRead < framesToRead) reportReadError(framesRead, framesToRead); m_outChunk->frameCount = framesRead; }
1
CVE-2017-6839
761
vulnerable
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); // TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't // fully support 0-output ops yet. Currently it works if we manually craft // a TFLite graph that contains variable ops. Note: // * The TFLite Converter needs to be changed to be able to produce an op // with 0 output. // * The delegation code needs to be changed to handle 0 output ops. However // everything still works fine when variable ops aren't used. TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); return kTfLiteOk; }
0
CVE-2020-15211
2,735
benign
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); // TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't // fully support 0-output ops yet. Currently it works if we manually craft // a TFLite graph that contains variable ops. Note: // * The TFLite Converter needs to be changed to be able to produce an op // with 0 output. // * The delegation code needs to be changed to handle 0 output ops. However // everything still works fine when variable ops aren't used. TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); const TfLiteTensor* input_resource_id_tensor; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputVariableId, &input_resource_id_tensor)); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); return kTfLiteOk; }
1
CVE-2020-15211
2,735
vulnerable
CWE-787
NO_INLINE JsVar *jspeFactorDelete() { JSP_ASSERT_MATCH(LEX_R_DELETE); JsVar *parent = 0; JsVar *a = jspeFactorMember(jspeFactor(), &parent); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { bool ok = false; if (jsvIsName(a) && !jsvIsNewChild(a)) { // if no parent, check in root? if (!parent && jsvIsChild(execInfo.root, a)) parent = jsvLockAgain(execInfo.root); if (jsvHasChildren(parent)) { // else remove properly. if (jsvIsArray(parent)) { // For arrays, we must make sure we don't change the length JsVarInt l = jsvGetArrayLength(parent); jsvRemoveChild(parent, a); jsvSetArrayLength(parent, l, false); } else { jsvRemoveChild(parent, a); } ok = true; } } result = jsvNewFromBool(ok); } jsvUnLock2(a, parent); return result; }
0
CVE-2022-25044
794
benign
CWE-787
NO_INLINE JsVar *jspeFactorDelete() { JSP_ASSERT_MATCH(LEX_R_DELETE); JsVar *parent = 0; JsVar *a = jspeFactorMember(jspeFactor(), &parent); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { bool ok = false; if (jsvIsName(a) && !jsvIsNewChild(a)) { // if no parent, check in root? if (!parent && jsvIsChild(execInfo.root, a)) parent = jsvLockAgain(execInfo.root); #ifdef DEBUG if (jsvHasChildren(parent)) assert(jsvIsChild(parent, a)); #endif if (jsvHasChildren(parent) && jsvIsChild(parent, a)) { // else remove properly. /* we use jsvIsChild here just in case. delete probably isn't called that often so it pays to be safe */ if (jsvIsArray(parent)) { // For arrays, we must make sure we don't change the length JsVarInt l = jsvGetArrayLength(parent); jsvRemoveChild(parent, a); jsvSetArrayLength(parent, l, false); } else { jsvRemoveChild(parent, a); } ok = true; } } result = jsvNewFromBool(ok); } jsvUnLock2(a, parent); return result; }
1
CVE-2022-25044
794
vulnerable
CWE-125
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
0
CVE-2020-15211
3,055
benign
CWE-125
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
1
CVE-2020-15211
3,055
vulnerable
CWE-787
bool Unpack::ProcessDecoded(UnpackThreadData &D) { UnpackDecodedItem *Item=D.Decoded,*Border=D.Decoded+D.DecodedSize; while (Item<Border) { UnpPtr&=MaxWinMask; if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_LZ_MATCH+3 && WriteBorder!=UnpPtr) { UnpWriteBuf(); if (WrittenFileSize>DestUnpSize) return false; } if (Item->Type==UNPDT_LITERAL) { #if defined(LITTLE_ENDIAN) && defined(ALLOW_MISALIGNED) if (Item->Length==3 && UnpPtr<MaxWinSize-4) { *(uint32 *)(Window+UnpPtr)=*(uint32 *)Item->Literal; UnpPtr+=4; } else #endif for (uint I=0;I<=Item->Length;I++) Window[UnpPtr++ & MaxWinMask]=Item->Literal[I]; } else if (Item->Type==UNPDT_MATCH) { InsertOldDist(Item->Distance); LastLength=Item->Length; CopyString(Item->Length,Item->Distance); } else if (Item->Type==UNPDT_REP) { uint Distance=OldDist[Item->Distance]; for (uint I=Item->Distance;I>0;I--) OldDist[I]=OldDist[I-1]; OldDist[0]=Distance; LastLength=Item->Length; CopyString(Item->Length,Distance); } else if (Item->Type==UNPDT_FULLREP) { if (LastLength!=0) CopyString(LastLength,OldDist[0]); } else if (Item->Type==UNPDT_FILTER) { UnpackFilter Filter; Filter.Type=(byte)Item->Length; Filter.BlockStart=Item->Distance; Item++; Filter.Channels=(byte)Item->Length; Filter.BlockLength=Item->Distance; AddFilter(Filter); } Item++; } return true; }
0
CVE-2017-20006
16
benign
CWE-787
bool Unpack::ProcessDecoded(UnpackThreadData &D) { UnpackDecodedItem *Item=D.Decoded,*Border=D.Decoded+D.DecodedSize; while (Item<Border) { UnpPtr&=MaxWinMask; if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_INC_LZ_MATCH && WriteBorder!=UnpPtr) { UnpWriteBuf(); if (WrittenFileSize>DestUnpSize) return false; } if (Item->Type==UNPDT_LITERAL) { #if defined(LITTLE_ENDIAN) && defined(ALLOW_MISALIGNED) if (Item->Length==3 && UnpPtr<MaxWinSize-4) { *(uint32 *)(Window+UnpPtr)=*(uint32 *)Item->Literal; UnpPtr+=4; } else #endif for (uint I=0;I<=Item->Length;I++) Window[UnpPtr++ & MaxWinMask]=Item->Literal[I]; } else if (Item->Type==UNPDT_MATCH) { InsertOldDist(Item->Distance); LastLength=Item->Length; CopyString(Item->Length,Item->Distance); } else if (Item->Type==UNPDT_REP) { uint Distance=OldDist[Item->Distance]; for (uint I=Item->Distance;I>0;I--) OldDist[I]=OldDist[I-1]; OldDist[0]=Distance; LastLength=Item->Length; CopyString(Item->Length,Distance); } else if (Item->Type==UNPDT_FULLREP) { if (LastLength!=0) CopyString(LastLength,OldDist[0]); } else if (Item->Type==UNPDT_FILTER) { UnpackFilter Filter; Filter.Type=(byte)Item->Length; Filter.BlockStart=Item->Distance; Item++; Filter.Channels=(byte)Item->Length; Filter.BlockLength=Item->Distance; AddFilter(Filter); } Item++; } return true; }
1
CVE-2017-20006
16
vulnerable
CWE-20
label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen, int flags) { size_t plen; uint32_t *p; int rc; size_t tmpl; if (_idn2_ascii_p (src, srclen)) { if (flags & IDN2_ALABEL_ROUNDTRIP) /* FIXME implement this MAY: If the input to this procedure appears to be an A-label (i.e., it starts in "xn--", interpreted case-insensitively), the lookup application MAY attempt to convert it to a U-label, first ensuring that the A-label is entirely in lowercase (converting it to lowercase if necessary), and apply the tests of Section 5.4 and the conversion of Section 5.5 to that form. */ return IDN2_INVALID_FLAGS; if (srclen > IDN2_LABEL_MAX_LENGTH) return IDN2_TOO_BIG_LABEL; if (srclen > *dstlen) return IDN2_TOO_BIG_DOMAIN; memcpy (dst, src, srclen); *dstlen = srclen; return IDN2_OK; } rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT); if (rc != IDN2_OK) return rc; if (!(flags & IDN2_TRANSITIONAL)) { rc = _idn2_label_test( TEST_NFC | TEST_2HYPHEN | TEST_LEADING_COMBINING | TEST_DISALLOWED | TEST_CONTEXTJ_RULE | TEST_CONTEXTO_WITH_RULE | TEST_UNASSIGNED | TEST_BIDI | ((flags & IDN2_NONTRANSITIONAL) ? TEST_NONTRANSITIONAL : 0) | ((flags & IDN2_USE_STD3_ASCII_RULES) ? 0 : TEST_ALLOW_STD3_DISALLOWED), p, plen); if (rc != IDN2_OK) { free(p); return rc; } } dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-'; tmpl = *dstlen - 4; rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4); free (p); if (rc != IDN2_OK) return rc; *dstlen = 4 + tmpl; return IDN2_OK; }
0
CVE-2019-12290
3,061
benign
CWE-20
label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen, int flags) { size_t plen; uint32_t *p; const uint8_t *src_org = NULL; uint8_t *src_allocated = NULL; int rc, check_roundtrip = 0; size_t tmpl, srclen_org = 0; uint32_t label_u32[IDN2_LABEL_MAX_LENGTH]; size_t label32_len = IDN2_LABEL_MAX_LENGTH; if (_idn2_ascii_p (src, srclen)) { if (!(flags & IDN2_NO_ALABEL_ROUNDTRIP) && srclen >= 4 && memcmp (src, "xn--", 4) == 0) { /* If the input to this procedure appears to be an A-label (i.e., it starts in "xn--", interpreted case-insensitively), the lookup application MAY attempt to convert it to a U-label, first ensuring that the A-label is entirely in lowercase (converting it to lowercase if necessary), and apply the tests of Section 5.4 and the conversion of Section 5.5 to that form. */ rc = _idn2_punycode_decode (srclen - 4, (char *) src + 4, &label32_len, label_u32); if (rc) return rc; check_roundtrip = 1; src_org = src; srclen_org = srclen; srclen = IDN2_LABEL_MAX_LENGTH; src = src_allocated = u32_to_u8 (label_u32, label32_len, NULL, &srclen); if (!src) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } } else { if (srclen > IDN2_LABEL_MAX_LENGTH) return IDN2_TOO_BIG_LABEL; if (srclen > *dstlen) return IDN2_TOO_BIG_DOMAIN; memcpy (dst, src, srclen); *dstlen = srclen; return IDN2_OK; } } rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT); if (rc != IDN2_OK) goto out; if (!(flags & IDN2_TRANSITIONAL)) { rc = _idn2_label_test( TEST_NFC | TEST_2HYPHEN | TEST_LEADING_COMBINING | TEST_DISALLOWED | TEST_CONTEXTJ_RULE | TEST_CONTEXTO_WITH_RULE | TEST_UNASSIGNED | TEST_BIDI | ((flags & IDN2_NONTRANSITIONAL) ? TEST_NONTRANSITIONAL : 0) | ((flags & IDN2_USE_STD3_ASCII_RULES) ? 0 : TEST_ALLOW_STD3_DISALLOWED), p, plen); if (rc != IDN2_OK) { free (p); goto out; } } dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-'; tmpl = *dstlen - 4; rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4); free (p); if (rc != IDN2_OK) goto out; *dstlen = 4 + tmpl; if (check_roundtrip) { if (srclen_org != *dstlen || memcmp (src_org, dst, srclen_org)) { rc = IDN2_ALABEL_ROUNDTRIP_FAILED; goto out; } } rc = IDN2_OK; out: free (src_allocated); return rc; }
1
CVE-2019-12290
3,061
vulnerable
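The two CVE-2019-12290 variants above differ in how an all-ASCII label beginning with "xn--" is treated: one rejects it under the roundtrip flag, the other decodes the A-label and later checks that re-encoding reproduces the original bytes. A minimal, self-contained sketch of that roundtrip comparison (hypothetical names, not the libidn2 API):

    #include <string.h>

    /* Accept a decoded A-label only when re-encoding it yields the original
     * input byte-for-byte; any length or content mismatch fails the check. */
    static int alabel_roundtrip_ok(const unsigned char *orig, size_t orig_len,
                                   const unsigned char *reenc, size_t reenc_len)
    {
        return orig_len == reenc_len && memcmp(orig, reenc, orig_len) == 0;
    }

This mirrors the srclen_org and dstlen comparison performed in the second variant before returning IDN2_ALABEL_ROUNDTRIP_FAILED.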
CWE-20
static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *p; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) return -ENOKEY; p = key->payload.data[0]; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; new_o = trusted_options_alloc(); if (!new_o) { ret = -ENOMEM; goto out; } new_p = trusted_payload_alloc(key); if (!new_p) { ret = -ENOMEM; goto out; } memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; kzfree(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; kzfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ new_p->migratable = p->migratable; new_p->key_len = p->key_len; memcpy(new_p->key, p->key, p->key_len); dump_payload(p); dump_payload(new_p); ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); kzfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); kzfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: kzfree(datablob); kzfree(new_o); return ret; }
0
CVE-2017-15951
503
benign
CWE-20
static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *p; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; if (key_is_negative(key)) return -ENOKEY; p = key->payload.data[0]; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; new_o = trusted_options_alloc(); if (!new_o) { ret = -ENOMEM; goto out; } new_p = trusted_payload_alloc(key); if (!new_p) { ret = -ENOMEM; goto out; } memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; kzfree(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; kzfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ new_p->migratable = p->migratable; new_p->key_len = p->key_len; memcpy(new_p->key, p->key, p->key_len); dump_payload(p); dump_payload(new_p); ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); kzfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); kzfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: kzfree(datablob); kzfree(new_o); return ret; }
1
CVE-2017-15951
503
vulnerable
CWE-125
static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { brush->hatch = brush->index; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; }
0
CVE-2020-11096
2,477
benign
CWE-125
static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; }
1
CVE-2020-11096
2,477
vulnerable
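The CVE-2020-11096 pair above differs in the cached-brush path: one indexes the BMF_BPP table directly with style & 0x07, the other calls a get_bmf_bpp helper that can signal failure. A minimal sketch of a checked table lookup in that spirit (table values here are illustrative, not FreeRDP's):

    #include <stdbool.h>
    #include <stdint.h>

    static const uint8_t kBmfBpp[8] = { 0, 1, 0, 8, 16, 24, 32, 0 }; /* illustrative */

    /* Look up bits-per-pixel for a brush style; report whether the slot is
     * populated instead of silently handing back a bogus entry. */
    static uint8_t get_bmf_bpp_sketch(uint32_t style, bool *ok)
    {
        uint32_t idx = style & 0x07; /* always < 8, in bounds by construction */
        *ok = (kBmfBpp[idx] != 0);
        return kBmfBpp[idx];
    }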
CWE-416
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pmd_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; }
0
CVE-2019-11487
984
benign
CWE-416
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pmd_page(orig), refs); if (!head) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; }
1
CVE-2019-11487
984
vulnerable
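In the CVE-2019-11487 pair, the variants differ in how the compound page's reference count is taken: page_cache_add_speculative versus a try_get_compound_head-style helper that refuses when the count could overflow. A generic userspace sketch of an overflow-refusing reference grab (not the kernel API), assuming n > 0:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /* Try to add n references. Fail if the object is already dead (count
     * at or below zero) or if adding n could overflow the counter. The
     * CAS loop retries on concurrent updates. */
    static bool try_get_ref(atomic_int *refs, int n)
    {
        int cur = atomic_load(refs);
        for (;;) {
            if (cur <= 0 || cur > INT_MAX - n)
                return false;
            if (atomic_compare_exchange_weak(refs, &cur, cur + n))
                return true;
        }
    }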
CWE-476
void SparseFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { // Note that setting this empty lambda as the default parameter value directly // can cause strange compiler/linker errors, so we do it like this instead. if (!done) { done = [] {}; } const int kIndicesInput = 0; const int kValuesInput = 1; const int kDenseShapeInput = 2; const int kDefaultValueInput = 3; const Tensor& indices_t = context->input(kIndicesInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& dense_shape_t = context->input(kDenseShapeInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(dense_shape_t.shape()), errors::InvalidArgument("dense_shape must be a vector, saw: ", dense_shape_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()), errors::InvalidArgument("indices must be a matrix, saw: ", indices_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); // TODO(ebrevdo): add shape checks between values, indices, // dense_shape. Also add check that dense rank > 0. using FunctorType = functor::SparseFillEmptyRows<Device, T, Tindex>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, indices_t, values_t, dense_shape_t, done), done); }
0
CVE-2021-29565
2,975
benign
CWE-476
void SparseFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { // Note that setting this empty lambda as the default parameter value directly // can cause strange compiler/linker errors, so we do it like this instead. if (!done) { done = [] {}; } const int kIndicesInput = 0; const int kValuesInput = 1; const int kDenseShapeInput = 2; const int kDefaultValueInput = 3; const Tensor& indices_t = context->input(kIndicesInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& dense_shape_t = context->input(kDenseShapeInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(dense_shape_t.shape()), errors::InvalidArgument("dense_shape must be a vector, saw: ", dense_shape_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()), errors::InvalidArgument("indices must be a matrix, saw: ", indices_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); // TODO(ebrevdo): add shape checks between values, indices, // Also add check that dense rank > 0. OP_REQUIRES_ASYNC(context, dense_shape_t.NumElements() != 0, errors::InvalidArgument("Dense shape cannot be empty."), done); using FunctorType = functor::SparseFillEmptyRows<Device, T, Tindex>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, indices_t, values_t, dense_shape_t, done), done); }
1
CVE-2021-29565
2,975
vulnerable
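The CVE-2021-29565 pair differs by one validation: a check that the dense_shape tensor is non-empty before downstream code reads its first element. A minimal sketch of the guard, using a plain C stand-in for the tensor type:

    #include <stddef.h>

    /* Return the first dimension only after confirming the shape vector is
     * non-empty; reading element 0 of an empty buffer is exactly the bug
     * class the added OP_REQUIRES_ASYNC check prevents. */
    static long long first_dim_or_neg(const long long *shape, size_t num_elements)
    {
        if (num_elements == 0)
            return -1; /* empty shape: caller must bail out */
        return shape[0];
    }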
CWE-119
PHP_FUNCTION(curl_unescape) { char *str = NULL, *out = NULL; size_t str_len = 0; int out_len; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (str_len > INT_MAX) { RETURN_FALSE; } if ((out = curl_easy_unescape(ch->cp, str, str_len, &out_len))) { RETVAL_STRINGL(out, out_len); curl_free(out); } else { RETURN_FALSE; } }
0
CVE-2016-7134
2,194
benign
CWE-119
PHP_FUNCTION(curl_unescape) { char *str = NULL, *out = NULL; size_t str_len = 0; int out_len; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ZEND_SIZE_T_INT_OVFL(str_len)) { RETURN_FALSE; } if ((out = curl_easy_unescape(ch->cp, str, str_len, &out_len))) { RETVAL_STRINGL(out, out_len); curl_free(out); } else { RETURN_FALSE; } }
1
CVE-2016-7134
2,194
vulnerable
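Both CVE-2016-7134 variants guard the size_t-to-int narrowing before curl_easy_unescape, one by comparing against INT_MAX and one via the ZEND_SIZE_T_INT_OVFL macro; it is the same check expressed two ways. A minimal sketch of the guard:

    #include <limits.h>
    #include <stddef.h>

    /* curl_easy_unescape takes an int length; reject any size_t length that
     * cannot be represented, otherwise the narrowed value may go negative
     * or wrap and defeat later bounds logic. */
    static int fits_in_int(size_t len)
    {
        return len <= (size_t)INT_MAX;
    }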
CWE-119
void luaD_callnoyield (lua_State *L, StkId func, int nResults) { incXCcalls(L); if (getCcalls(L) <= CSTACKERR) /* possible stack overflow? */ luaE_freeCI(L); luaD_call(L, func, nResults); decXCcalls(L); }
0
CVE-2020-24342
1,207
benign
CWE-119
void luaD_callnoyield (lua_State *L, StkId func, int nResults) { incXCcalls(L); if (getCcalls(L) <= CSTACKERR) { /* possible C stack overflow? */ luaE_exitCcall(L); /* to compensate decrement in next call */ luaE_enterCcall(L); /* check properly */ } luaD_call(L, func, nResults); decXCcalls(L); }
1
CVE-2020-24342
1,207
vulnerable
CWE-119
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); /* high 32 bits are known zero. */ regs[insn->dst_reg].var_off = tnum_cast( regs[insn->dst_reg].var_off, 4); __update_reg_bounds(&regs[insn->dst_reg]); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; }
0
CVE-2017-16996
3,163
benign
CWE-119
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); coerce_reg_to_size(&regs[insn->dst_reg], 4); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; }
1
CVE-2017-16996
3,163
vulnerable
CWE-125
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
0
CVE-2020-15211
2,425
benign
CWE-125
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
1
CVE-2020-15211
2,425
vulnerable
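The CVE-2020-15211 pair swaps GetInput/GetOutput for GetInputSafe/GetOutputSafe, converting possibly-NULL tensor pointers into an explicit error status. A small C sketch of that safe-getter shape (hypothetical types, not the TFLite API):

    #include <stddef.h>

    typedef enum { STATUS_OK = 0, STATUS_ERR = 1 } status_t;

    /* Populate *out only on success; the caller never dereferences an
     * unchecked NULL pointer and must handle the status instead. */
    static status_t get_tensor_safe(void *const *tensors, size_t count,
                                    size_t index, void **out)
    {
        if (index >= count || tensors[index] == NULL)
            return STATUS_ERR;
        *out = tensors[index];
        return STATUS_OK;
    }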
CWE-787
static int iw_process_rows_intermediate_to_final(struct iw_context *ctx, int intermed_channel, const struct iw_csdescr *out_csdescr) { int i,j; int z; int k; int retval=0; iw_tmpsample tmpsamp; iw_tmpsample alphasamp = 0.0; iw_tmpsample *inpix_tofree = NULL; // Used if we need a separate temp buffer for input samples iw_tmpsample *outpix_tofree = NULL; // Used if we need a separate temp buffer for output samples // Do any of the output channels use error-diffusion dithering? int using_errdiffdither = 0; int output_channel; int is_alpha_channel; int bkgd_has_transparency; double tmpbkgdalpha=0.0; int alt_bkgd = 0; // Nonzero if we should use bkgd2 for this sample struct iw_resize_settings *rs = NULL; int ditherfamily, dithersubtype; struct iw_channelinfo_intermed *int_ci; struct iw_channelinfo_out *out_ci; iw_tmpsample *in_pix = NULL; iw_tmpsample *out_pix = NULL; int num_in_pix; int num_out_pix; num_in_pix = ctx->intermed_canvas_width; num_out_pix = ctx->img2.width; int_ci = &ctx->intermed_ci[intermed_channel]; output_channel = int_ci->corresponding_output_channel; out_ci = &ctx->img2_ci[output_channel]; is_alpha_channel = (int_ci->channeltype==IW_CHANNELTYPE_ALPHA); bkgd_has_transparency = iw_bkgd_has_transparency(ctx); inpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_in_pix * sizeof(iw_tmpsample)); in_pix = inpix_tofree; // We need an output buffer. outpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_out_pix * sizeof(iw_tmpsample)); if(!outpix_tofree) goto done; out_pix = outpix_tofree; // Decide if the 'nearest color table' optimization can be used if(ctx->nearest_color_table && !is_alpha_channel && out_ci->ditherfamily==IW_DITHERFAMILY_NONE && out_ci->color_count==0) { out_ci->use_nearest_color_table = 1; } else { out_ci->use_nearest_color_table = 0; } // Seed the PRNG, if necessary. ditherfamily = out_ci->ditherfamily; dithersubtype = out_ci->dithersubtype; if(ditherfamily==IW_DITHERFAMILY_RANDOM) { // Decide what random seed to use. The alpha channel always has its own // seed. If using "r" (not "r2") dithering, every channel has its own seed. if(dithersubtype==IW_DITHERSUBTYPE_SAMEPATTERN && out_ci->channeltype!=IW_CHANNELTYPE_ALPHA) { iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed); } else { iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed+out_ci->channeltype); } } // Initialize Floyd-Steinberg dithering. if(output_channel>=0 && out_ci->ditherfamily==IW_DITHERFAMILY_ERRDIFF) { using_errdiffdither = 1; for(i=0;i<ctx->img2.width;i++) { for(k=0;k<IW_DITHER_MAXROWS;k++) { ctx->dither_errors[k][i] = 0.0; } } } rs=&ctx->resize_settings[IW_DIMENSION_H]; // If the resize context for this dimension already exists, we should be // able to reuse it. Otherwise, create a new one. if(!rs->rrctx) { rs->rrctx = iwpvt_resize_rows_init(ctx,rs,int_ci->channeltype, num_in_pix, num_out_pix); if(!rs->rrctx) goto done; } for(j=0;j<ctx->intermed_canvas_height;j++) { // As needed, either copy the input pixels to a temp buffer (inpix, which // ctx->in_pix already points to), or point ctx->in_pix directly to the // intermediate data. if(is_alpha_channel) { for(i=0;i<num_in_pix;i++) { inpix_tofree[i] = ctx->intermediate_alpha32[((size_t)j)*ctx->intermed_canvas_width+i]; } } else { for(i=0;i<num_in_pix;i++) { inpix_tofree[i] = ctx->intermediate32[((size_t)j)*ctx->intermed_canvas_width+i]; } } // Resize ctx->in_pix to ctx->out_pix. iwpvt_resize_row_main(rs->rrctx,in_pix,out_pix); if(ctx->intclamp) clamp_output_samples(ctx,out_pix,num_out_pix); // If necessary, copy the resized samples to the final_alpha image if(is_alpha_channel && outpix_tofree && ctx->final_alpha32) { for(i=0;i<num_out_pix;i++) { ctx->final_alpha32[((size_t)j)*ctx->img2.width+i] = (iw_float32)outpix_tofree[i]; } } // Now convert the out_pix and put them in the final image. if(output_channel == -1) { // No corresponding output channel. // (Presumably because this is an alpha channel that's being // removed because we're applying a background.) goto here; } for(z=0;z<ctx->img2.width;z++) { // For decent Floyd-Steinberg dithering, we need to process alternate // rows in reverse order. if(using_errdiffdither && (j%2)) i=ctx->img2.width-1-z; else i=z; tmpsamp = out_pix[i]; if(ctx->bkgd_checkerboard) { alt_bkgd = (((ctx->bkgd_check_origin[IW_DIMENSION_H]+i)/ctx->bkgd_check_size)%2) != (((ctx->bkgd_check_origin[IW_DIMENSION_V]+j)/ctx->bkgd_check_size)%2); } if(bkgd_has_transparency) { tmpbkgdalpha = alt_bkgd ? ctx->bkgd2alpha : ctx->bkgd1alpha; } if(int_ci->need_unassoc_alpha_processing) { // Convert color samples back to unassociated alpha. alphasamp = ctx->final_alpha32[((size_t)j)*ctx->img2.width + i]; if(alphasamp!=0.0) { tmpsamp /= alphasamp; } if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE) { // Apply a background color (or checkerboard pattern). double bkcolor; bkcolor = alt_bkgd ? out_ci->bkgd2_color_lin : out_ci->bkgd1_color_lin; if(bkgd_has_transparency) { tmpsamp = tmpsamp*alphasamp + bkcolor*tmpbkgdalpha*(1.0-alphasamp); } else { tmpsamp = tmpsamp*alphasamp + bkcolor*(1.0-alphasamp); } } } else if(is_alpha_channel && bkgd_has_transparency) { // Composite the alpha of the foreground over the alpha of the background. tmpsamp = tmpsamp + tmpbkgdalpha*(1.0-tmpsamp); } if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) put_sample_convert_from_linear_flt(ctx,tmpsamp,i,j,output_channel,out_csdescr); else put_sample_convert_from_linear(ctx,tmpsamp,i,j,output_channel,out_csdescr); } if(using_errdiffdither) { // Move "next row" error data to "this row", and clear the "next row". // TODO: Obviously, it would be more efficient to just swap pointers // to the rows. for(i=0;i<ctx->img2.width;i++) { // Move data in all rows but the first row up one row. for(k=0;k<IW_DITHER_MAXROWS-1;k++) { ctx->dither_errors[k][i] = ctx->dither_errors[k+1][i]; } // Clear the last row. ctx->dither_errors[IW_DITHER_MAXROWS-1][i] = 0.0; } } here: ; } retval=1; done: if(rs && rs->disable_rrctx_cache && rs->rrctx) { // In some cases, the channels may need different resize contexts. // Delete the current context, so that it doesn't get reused. iwpvt_resize_rows_done(rs->rrctx); rs->rrctx = NULL; } if(inpix_tofree) iw_free(ctx,inpix_tofree); if(outpix_tofree) iw_free(ctx,outpix_tofree); return retval; }
0
CVE-2017-9203
1,631
benign
CWE-787
static int iw_process_rows_intermediate_to_final(struct iw_context *ctx, int intermed_channel, const struct iw_csdescr *out_csdescr) { int i,j; int z; int k; int retval=0; iw_tmpsample tmpsamp; iw_tmpsample alphasamp = 0.0; iw_tmpsample *inpix_tofree = NULL; // Used if we need a separate temp buffer for input samples iw_tmpsample *outpix_tofree = NULL; // Used if we need a separate temp buffer for output samples // Do any of the output channels use error-diffusion dithering? int using_errdiffdither = 0; int output_channel; int is_alpha_channel; int bkgd_has_transparency; double tmpbkgdalpha=0.0; int alt_bkgd = 0; // Nonzero if we should use bkgd2 for this sample struct iw_resize_settings *rs = NULL; int ditherfamily, dithersubtype; struct iw_channelinfo_intermed *int_ci; struct iw_channelinfo_out *out_ci; iw_tmpsample *in_pix = NULL; iw_tmpsample *out_pix = NULL; int num_in_pix; int num_out_pix; struct iw_channelinfo_out default_ci_out; num_in_pix = ctx->intermed_canvas_width; num_out_pix = ctx->img2.width; int_ci = &ctx->intermed_ci[intermed_channel]; output_channel = int_ci->corresponding_output_channel; if(output_channel>=0) { out_ci = &ctx->img2_ci[output_channel]; } else { // If there is no output channelinfo struct, create a temporary one to // use. // TODO: This is admittedly ugly, but we use these settings for a few // things even when there is no corresponding output channel, and I // don't remember exactly why. iw_zeromem(&default_ci_out, sizeof(struct iw_channelinfo_out)); default_ci_out.channeltype = IW_CHANNELTYPE_NONALPHA; out_ci = &default_ci_out; } is_alpha_channel = (int_ci->channeltype==IW_CHANNELTYPE_ALPHA); bkgd_has_transparency = iw_bkgd_has_transparency(ctx); inpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_in_pix * sizeof(iw_tmpsample)); in_pix = inpix_tofree; // We need an output buffer. outpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_out_pix * sizeof(iw_tmpsample)); if(!outpix_tofree) goto done; out_pix = outpix_tofree; // Decide if the 'nearest color table' optimization can be used if(ctx->nearest_color_table && !is_alpha_channel && out_ci->ditherfamily==IW_DITHERFAMILY_NONE && out_ci->color_count==0) { out_ci->use_nearest_color_table = 1; } else { out_ci->use_nearest_color_table = 0; } // Seed the PRNG, if necessary. ditherfamily = out_ci->ditherfamily; dithersubtype = out_ci->dithersubtype; if(ditherfamily==IW_DITHERFAMILY_RANDOM) { // Decide what random seed to use. The alpha channel always has its own // seed. If using "r" (not "r2") dithering, every channel has its own seed. if(dithersubtype==IW_DITHERSUBTYPE_SAMEPATTERN && out_ci->channeltype!=IW_CHANNELTYPE_ALPHA) { iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed); } else { iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed+out_ci->channeltype); } } // Initialize Floyd-Steinberg dithering. if(output_channel>=0 && out_ci->ditherfamily==IW_DITHERFAMILY_ERRDIFF) { using_errdiffdither = 1; for(i=0;i<ctx->img2.width;i++) { for(k=0;k<IW_DITHER_MAXROWS;k++) { ctx->dither_errors[k][i] = 0.0; } } } rs=&ctx->resize_settings[IW_DIMENSION_H]; // If the resize context for this dimension already exists, we should be // able to reuse it. Otherwise, create a new one. if(!rs->rrctx) { rs->rrctx = iwpvt_resize_rows_init(ctx,rs,int_ci->channeltype, num_in_pix, num_out_pix); if(!rs->rrctx) goto done; } for(j=0;j<ctx->intermed_canvas_height;j++) { // As needed, either copy the input pixels to a temp buffer (inpix, which // ctx->in_pix already points to), or point ctx->in_pix directly to the // intermediate data. if(is_alpha_channel) { for(i=0;i<num_in_pix;i++) { inpix_tofree[i] = ctx->intermediate_alpha32[((size_t)j)*ctx->intermed_canvas_width+i]; } } else { for(i=0;i<num_in_pix;i++) { inpix_tofree[i] = ctx->intermediate32[((size_t)j)*ctx->intermed_canvas_width+i]; } } // Resize ctx->in_pix to ctx->out_pix. iwpvt_resize_row_main(rs->rrctx,in_pix,out_pix); if(ctx->intclamp) clamp_output_samples(ctx,out_pix,num_out_pix); // If necessary, copy the resized samples to the final_alpha image if(is_alpha_channel && outpix_tofree && ctx->final_alpha32) { for(i=0;i<num_out_pix;i++) { ctx->final_alpha32[((size_t)j)*ctx->img2.width+i] = (iw_float32)outpix_tofree[i]; } } // Now convert the out_pix and put them in the final image. if(output_channel == -1) { // No corresponding output channel. // (Presumably because this is an alpha channel that's being // removed because we're applying a background.) goto here; } for(z=0;z<ctx->img2.width;z++) { // For decent Floyd-Steinberg dithering, we need to process alternate // rows in reverse order. if(using_errdiffdither && (j%2)) i=ctx->img2.width-1-z; else i=z; tmpsamp = out_pix[i]; if(ctx->bkgd_checkerboard) { alt_bkgd = (((ctx->bkgd_check_origin[IW_DIMENSION_H]+i)/ctx->bkgd_check_size)%2) != (((ctx->bkgd_check_origin[IW_DIMENSION_V]+j)/ctx->bkgd_check_size)%2); } if(bkgd_has_transparency) { tmpbkgdalpha = alt_bkgd ? ctx->bkgd2alpha : ctx->bkgd1alpha; } if(int_ci->need_unassoc_alpha_processing) { // Convert color samples back to unassociated alpha. alphasamp = ctx->final_alpha32[((size_t)j)*ctx->img2.width + i]; if(alphasamp!=0.0) { tmpsamp /= alphasamp; } if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE) { // Apply a background color (or checkerboard pattern). double bkcolor; bkcolor = alt_bkgd ? out_ci->bkgd2_color_lin : out_ci->bkgd1_color_lin; if(bkgd_has_transparency) { tmpsamp = tmpsamp*alphasamp + bkcolor*tmpbkgdalpha*(1.0-alphasamp); } else { tmpsamp = tmpsamp*alphasamp + bkcolor*(1.0-alphasamp); } } } else if(is_alpha_channel && bkgd_has_transparency) { // Composite the alpha of the foreground over the alpha of the background. tmpsamp = tmpsamp + tmpbkgdalpha*(1.0-tmpsamp); } if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) put_sample_convert_from_linear_flt(ctx,tmpsamp,i,j,output_channel,out_csdescr); else put_sample_convert_from_linear(ctx,tmpsamp,i,j,output_channel,out_csdescr); } if(using_errdiffdither) { // Move "next row" error data to "this row", and clear the "next row". // TODO: Obviously, it would be more efficient to just swap pointers // to the rows. for(i=0;i<ctx->img2.width;i++) { // Move data in all rows but the first row up one row. for(k=0;k<IW_DITHER_MAXROWS-1;k++) { ctx->dither_errors[k][i] = ctx->dither_errors[k+1][i]; } // Clear the last row. ctx->dither_errors[IW_DITHER_MAXROWS-1][i] = 0.0; } } here: ; } retval=1; done: if(rs && rs->disable_rrctx_cache && rs->rrctx) { // In some cases, the channels may need different resize contexts. // Delete the current context, so that it doesn't get reused. iwpvt_resize_rows_done(rs->rrctx); rs->rrctx = NULL; } if(inpix_tofree) iw_free(ctx,inpix_tofree); if(outpix_tofree) iw_free(ctx,outpix_tofree); return retval; }
1
CVE-2017-9203
1,631
vulnerable
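The CVE-2017-9203 pair differs in what happens when corresponding_output_channel is -1: one variant indexes img2_ci with it anyway, the other substitutes a zero-initialized local channelinfo. A minimal sketch of that fallback pattern (struct fields illustrative):

    #include <string.h>

    struct chan_info { int channeltype; double bkgd1_color_lin; double bkgd2_color_lin; };

    /* Never index the array with a negative channel id; hand back a zeroed
     * scratch default instead so later field reads stay in bounds. */
    static const struct chan_info *chan_or_default(const struct chan_info *arr,
                                                   int idx,
                                                   struct chan_info *scratch)
    {
        if (idx >= 0)
            return &arr[idx];
        memset(scratch, 0, sizeof *scratch);
        return scratch;
    }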
CWE-787
static void WritePixel(struct ngiflib_img * i, struct ngiflib_decode_context * context, u8 v) { struct ngiflib_gif * p = i->parent; if(v!=i->gce.transparent_color || !i->gce.transparent_flag) { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ *context->frbuff_p.p8 = v; #ifndef NGIFLIB_INDEXED_ONLY } else *context->frbuff_p.p32 = GifIndexToTrueColor(i->palette, v); #endif /* NGIFLIB_INDEXED_ONLY */ } if(--(context->Xtogo) <= 0) { #ifdef NGIFLIB_ENABLE_CALLBACKS if(p->line_cb) p->line_cb(p, context->line_p, context->curY); #endif /* NGIFLIB_ENABLE_CALLBACKS */ context->Xtogo = i->width; switch(context->pass) { case 0: context->curY++; break; case 1: /* 1st pass : every eighth row starting from 0 */ context->curY += 8; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 4; } break; case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY += 8; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 2; } break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY += 4; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 1; } break; case 4: /* 4th pass : every odd row */ context->curY += 2; break; } #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width; context->frbuff_p.p8 = context->line_p.p8 + i->posX; #else context->frbuff_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #ifndef NGIFLIB_INDEXED_ONLY } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width; context->frbuff_p.p32 = context->line_p.p32 + i->posX; #else context->frbuff_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ } else { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ context->frbuff_p.p8++; #ifndef NGIFLIB_INDEXED_ONLY } else { context->frbuff_p.p32++; } #endif /* NGIFLIB_INDEXED_ONLY */ } }
0
CVE-2019-16346
2,725
benign
CWE-787
static void WritePixel(struct ngiflib_img * i, struct ngiflib_decode_context * context, u8 v) { struct ngiflib_gif * p = i->parent; if(v!=i->gce.transparent_color || !i->gce.transparent_flag) { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ *context->frbuff_p.p8 = v; #ifndef NGIFLIB_INDEXED_ONLY } else *context->frbuff_p.p32 = GifIndexToTrueColor(i->palette, v); #endif /* NGIFLIB_INDEXED_ONLY */ } if(--(context->Xtogo) <= 0) { #ifdef NGIFLIB_ENABLE_CALLBACKS if(p->line_cb) p->line_cb(p, context->line_p, context->curY); #endif /* NGIFLIB_ENABLE_CALLBACKS */ context->Xtogo = i->width; switch(context->pass) { case 0: context->curY++; break; case 1: /* 1st pass : every eighth row starting from 0 */ context->curY += 8; break; case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY += 8; break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY += 4; break; case 4: /* 4th pass : every odd row */ context->curY += 2; break; } while(context->pass > 0 && context->pass < 4 && context->curY >= p->height) { switch(++context->pass) { case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY = i->posY + 4; break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY = i->posY + 2; break; case 4: /* 4th pass : every odd row */ context->curY = i->posY + 1; break; } } #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width; context->frbuff_p.p8 = context->line_p.p8 + i->posX; #else context->frbuff_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #ifndef NGIFLIB_INDEXED_ONLY } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width; context->frbuff_p.p32 = context->line_p.p32 + i->posX; #else context->frbuff_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ } else { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ context->frbuff_p.p8++; #ifndef NGIFLIB_INDEXED_ONLY } else { context->frbuff_p.p32++; } #endif /* NGIFLIB_INDEXED_ONLY */ } }
1
CVE-2019-16346
2,725
vulnerable
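The CVE-2019-16346 variants advance interlaced GIF rows differently: one bumps curY per pass inside the switch, the other additionally loops, promoting the pass until curY falls back inside the image. A compact sketch of clamped interlace advancement, assuming the standard GIF89a pass strides and start offsets and a pass value already in 1..4:

    /* GIF89a interlacing: pass 1 rows 0,8,16..., pass 2 rows 4,12,...,
     * pass 3 rows 2,6,10..., pass 4 odd rows. After stepping, keep
     * promoting passes while curY has run past the image height so a
     * malformed stream cannot leave curY pointing outside the frame. */
    static void advance_interlaced_row(int *pass, int *curY, int posY, int height)
    {
        static const int stride[4] = { 8, 8, 4, 2 };
        static const int start[4]  = { 0, 4, 2, 1 };
        *curY += stride[*pass - 1];
        while (*pass < 4 && *curY >= height) {
            ++*pass;
            *curY = posY + start[*pass - 1];
        }
    }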
CWE-190
int read_filesystem_tables_4() { long long directory_table_end, table_start; if(read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start) == 0) return FALSE; if(read_uids_guids(&table_start) == FALSE) return FALSE; if(parse_exports_table(&table_start) == FALSE) return FALSE; if(read_fragment_table(&directory_table_end) == FALSE) return FALSE; if(read_inode_table(sBlk.s.inode_table_start, sBlk.s.directory_table_start) == FALSE) return FALSE; if(read_directory_table(sBlk.s.directory_table_start, directory_table_end) == FALSE) return FALSE; if(no_xattrs) sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK; return TRUE; }
0
CVE-2015-4645
2,839
benign
CWE-190
int read_filesystem_tables_4() { long long table_start; /* Read xattrs */ if(sBlk.s.xattr_id_table_start != SQUASHFS_INVALID_BLK) { /* sanity check super block contents */ if(sBlk.s.xattr_id_table_start >= sBlk.s.bytes_used) { ERROR("read_filesystem_tables: xattr id table start too large in super block\n"); goto corrupted; } if(read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start) == 0) goto corrupted; } else table_start = sBlk.s.bytes_used; /* Read id lookup table */ /* Sanity check super block contents */ if(sBlk.s.id_table_start >= table_start) { ERROR("read_filesystem_tables: id table start too large in super block\n"); goto corrupted; } /* there should always be at least one id */ if(sBlk.s.no_ids == 0) { ERROR("read_filesystem_tables: Bad id count in super block\n"); goto corrupted; } /* * the number of ids can never be more than double the number of inodes * (the maximum is a unique uid and gid for each inode). */ if(sBlk.s.no_ids > (sBlk.s.inodes * 2L)) { ERROR("read_filesystem_tables: Bad id count in super block\n"); goto corrupted; } if(read_id_table(&table_start) == FALSE) goto corrupted; /* Read exports table */ if(sBlk.s.lookup_table_start != SQUASHFS_INVALID_BLK) { /* sanity check super block contents */ if(sBlk.s.lookup_table_start >= table_start) { ERROR("read_filesystem_tables: lookup table start too large in super block\n"); goto corrupted; } if(parse_exports_table(&table_start) == FALSE) goto corrupted; } /* Read fragment table */ if(sBlk.s.fragments != 0) { /* Sanity check super block contents */ if(sBlk.s.fragment_table_start >= table_start) { ERROR("read_filesystem_tables: fragment table start too large in super block\n"); goto corrupted; } /* The number of fragments should not exceed the number of inodes */ if(sBlk.s.fragments > sBlk.s.inodes) { ERROR("read_filesystem_tables: Bad fragment count in super block\n"); goto corrupted; } if(read_fragment_table(&table_start) == FALSE) goto corrupted; } else { /* * Sanity check super block contents - with 0 fragments, * the fragment table should be empty */ if(sBlk.s.fragment_table_start != table_start) { ERROR("read_filesystem_tables: fragment table start invalid in super block\n"); goto corrupted; } } /* Read directory table */ /* Sanity check super block contents */ if(sBlk.s.directory_table_start >= table_start) { ERROR("read_filesystem_tables: directory table start too large in super block\n"); goto corrupted; } if(read_directory_table(sBlk.s.directory_table_start, table_start) == FALSE) goto corrupted; /* Read inode table */ /* Sanity check super block contents */ if(sBlk.s.inode_table_start >= sBlk.s.directory_table_start) { ERROR("read_filesystem_tables: inode table start too large in super block\n"); goto corrupted; } if(read_inode_table(sBlk.s.inode_table_start, sBlk.s.directory_table_start) == FALSE) goto corrupted; if(no_xattrs) sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK; return TRUE; corrupted: ERROR("File system corruption detected\n"); return FALSE; }
1
CVE-2015-4645
2,839
vulnerable
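The CVE-2015-4645 pair differs in how much the superblock is trusted: one variant reads each table directly, the other first checks that every table offset precedes the previously validated table (the tables are read back-to-front) and that the id and fragment counts are plausible relative to the inode count. A condensed sketch of such ordering and count checks (field names illustrative, not the real squashfs layout):

    #include <stdint.h>

    struct sb_view { uint64_t inode_tbl, dir_tbl, id_tbl, bytes_used;
                     uint32_t inodes, no_ids, fragments; };

    /* Reject superblocks whose offsets or counts cannot be coherent; each
     * check mirrors one of the "too large in super block" style guards. */
    static int sb_sane(const struct sb_view *s)
    {
        if (s->id_tbl >= s->bytes_used) return 0;             /* table past image end */
        if (s->inode_tbl >= s->dir_tbl) return 0;             /* tables out of order */
        if (s->no_ids == 0) return 0;                         /* need at least one id */
        if ((uint64_t)s->no_ids > 2ull * s->inodes) return 0; /* uid+gid upper bound */
        if (s->fragments > s->inodes) return 0;               /* fragment count bound */
        return 1;
    }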
CWE-190
choose_volume(struct archive_read *a, struct iso9660 *iso9660) { struct file_info *file; int64_t skipsize; struct vd *vd; const void *block; char seenJoliet; vd = &(iso9660->primary); if (!iso9660->opt_support_joliet) iso9660->seenJoliet = 0; if (iso9660->seenJoliet && vd->location > iso9660->joliet.location) /* This condition is unlikely; by way of caution. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * vd->location; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } /* * While reading Root Directory, flag seenJoliet must be zero to * avoid converting special name 0x00(Current Directory) and * next byte to UCS2. */ seenJoliet = iso9660->seenJoliet;/* Save flag. */ iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; /* * If the iso image has both RockRidge and Joliet, we preferentially * use RockRidge Extensions rather than Joliet ones. */ if (vd == &(iso9660->primary) && iso9660->seenRockridge && iso9660->seenJoliet) iso9660->seenJoliet = 0; if (vd == &(iso9660->primary) && !iso9660->seenRockridge && iso9660->seenJoliet) { /* Switch reading data from primary to joliet. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * vd->location; skipsize -= iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position += skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; } /* Store the root directory in the pending list. */ if (add_entry(a, iso9660, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); if (iso9660->seenRockridge) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE; a->archive.archive_format_name = "ISO9660 with Rockridge extensions"; } return (ARCHIVE_OK); }
0
CVE-2016-5844
425
benign
CWE-190
choose_volume(struct archive_read *a, struct iso9660 *iso9660) { struct file_info *file; int64_t skipsize; struct vd *vd; const void *block; char seenJoliet; vd = &(iso9660->primary); if (!iso9660->opt_support_joliet) iso9660->seenJoliet = 0; if (iso9660->seenJoliet && vd->location > iso9660->joliet.location) /* This condition is unlikely; by way of caution. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } /* * While reading Root Directory, flag seenJoliet must be zero to * avoid converting special name 0x00(Current Directory) and * next byte to UCS2. */ seenJoliet = iso9660->seenJoliet;/* Save flag. */ iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; /* * If the iso image has both RockRidge and Joliet, we preferentially * use RockRidge Extensions rather than Joliet ones. */ if (vd == &(iso9660->primary) && iso9660->seenRockridge && iso9660->seenJoliet) iso9660->seenJoliet = 0; if (vd == &(iso9660->primary) && !iso9660->seenRockridge && iso9660->seenJoliet) { /* Switch reading data from primary to joliet. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location; skipsize -= iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position += skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; } /* Store the root directory in the pending list. */ if (add_entry(a, iso9660, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); if (iso9660->seenRockridge) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE; a->archive.archive_format_name = "ISO9660 with Rockridge extensions"; } return (ARCHIVE_OK); }
1
CVE-2016-5844
425
vulnerable
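The CVE-2016-5844 variants differ only by a cast: LOGICAL_BLOCK_SIZE * vd->location versus LOGICAL_BLOCK_SIZE * (int64_t)vd->location. Without the cast the multiply happens in 32-bit arithmetic and can overflow before the result is widened into the 64-bit skipsize. A minimal sketch of the widening fix:

    #include <stdint.h>

    #define LOGICAL_BLOCK_SIZE 2048 /* ISO9660 logical block size */

    /* Casting one operand first forces a 64-bit multiply, so a large block
     * location cannot wrap a 32-bit intermediate product. */
    static int64_t block_to_offset(uint32_t location)
    {
        return LOGICAL_BLOCK_SIZE * (int64_t)location;
    }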
CWE-119
static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; }
0
CVE-2017-16357
878
benign
CWE-119
static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; }
1
CVE-2017-16357
878
vulnerable
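The CVE-2017-16357 variants differ in the sh_size validation: one only rejects sh_size < 1, the other also rejects sh_size > SIZE_MAX before the value feeds calloc. The extra bound only bites where the section-size type is wider than size_t (for example, a 64-bit ELF parsed on a 32-bit host). A minimal sketch of validating an untrusted size before narrowing it into an allocation:

    #include <stdint.h>
    #include <stdlib.h>

    /* On an LP64 host the upper check is vacuous; on a 32-bit host it
     * stops the size from wrapping when narrowed to size_t. */
    static void *alloc_section(uint64_t sh_size)
    {
        if (sh_size < 1 || sh_size > (uint64_t)SIZE_MAX)
            return NULL;
        return calloc((size_t)sh_size, 1);
    }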
CWE-787
TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; }
0
CVE-2019-1010297
678
benign
CWE-787
TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), param_count, &alloc_size)) return TEE_ERROR_OVERFLOW; params = malloc(alloc_size); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; }
1
CVE-2019-1010297
678
vulnerable
CWE-125
static u32 read_32(cdk_stream_t s) { byte buf[4]; size_t nread; assert(s != NULL); stream_read(s, buf, 4, &nread); if (nread != 4) return (u32) - 1; return buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]; }
0
CVE-2017-5335
336
benign
CWE-125
static u32 read_32(cdk_stream_t s) { byte buf[4]; size_t nread = 0; assert(s != NULL); stream_read(s, buf, 4, &nread); if (nread != 4) return (u32) -1; return buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]; }
1
CVE-2017-5335
336
vulnerable
CWE-476
void Compute(OpKernelContext* const context) override { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); OP_REQUIRES( context, node_id_range_t->NumElements() == 2, errors::InvalidArgument("node_id_range argument must have shape [2]")); const auto node_id_range = node_id_range_t->vec<int32>(); const int32_t node_id_first = node_id_range(0); // inclusive const int32_t node_id_last = node_id_range(1); // exclusive const Tensor* stats_summary_t; OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t)); OP_REQUIRES( context, stats_summary_t->shape().dims() == 4, errors::InvalidArgument("stats_summary argument must have rank 4")); TTypes<float, 4>::ConstTensor stats_summary = stats_summary_t->tensor<float, 4>(); const int32_t feature_dims = stats_summary_t->dim_size(1); // The last bucket is for default/missing value. const int32_t num_buckets = stats_summary_t->dim_size(2) - 1; const int32_t logits_dim = logits_dim_; const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim; DCHECK_GT(hessian_dim, 0); DCHECK_LE(hessian_dim, logits_dim * logits_dim); const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); OP_REQUIRES(context, l1_t->NumElements() == 1, errors::InvalidArgument("l1 argument must be a scalar")); const auto l1 = l1_t->scalar<float>()(); DCHECK_GE(l1, 0); if (logits_dim_ > 1) { // Multi-class L1 regularization not supported yet. DCHECK_EQ(l1, 0); } const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); OP_REQUIRES(context, l2_t->NumElements() == 1, errors::InvalidArgument("l2 argument must be a scalar")); const auto l2 = l2_t->scalar<float>()(); DCHECK_GE(l2, 0); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); OP_REQUIRES( context, tree_complexity_t->NumElements() == 1, errors::InvalidArgument("tree_complexity argument must be a scalar")); const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); OP_REQUIRES( context, min_node_weight_t->NumElements() == 1, errors::InvalidArgument("min_node_weight argument must be a scalar")); const auto min_node_weight = min_node_weight_t->scalar<float>()(); std::vector<int32> output_node_ids; std::vector<float> output_gains; std::vector<int32> output_feature_dimensions; std::vector<int32> output_thresholds; std::vector<Eigen::VectorXf> output_left_node_contribs; std::vector<Eigen::VectorXf> output_right_node_contribs; std::vector<std::string> output_split_types; // TODO(tanzheny) parallelize the computation. // Iterate each node and find the best gain per node. for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) { float best_gain = std::numeric_limits<float>::lowest(); int32_t best_bucket = 0; int32_t best_f_dim = 0; string best_split_type; Eigen::VectorXf best_contrib_for_left(logits_dim); Eigen::VectorXf best_contrib_for_right(logits_dim); float parent_gain; // Including default bucket. ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0), num_buckets + 1, logits_dim + hessian_dim); const Eigen::VectorXf total_grad = stats_mat.leftCols(logits_dim).colwise().sum(); const Eigen::VectorXf total_hess = stats_mat.rightCols(hessian_dim).colwise().sum(); if (total_hess.norm() < min_node_weight) { continue; } Eigen::VectorXf parent_weight(logits_dim); CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight, &parent_gain); if (split_type_ == "inequality") { CalculateBestInequalitySplit( stats_summary, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } else { CalculateBestEqualitySplit( stats_summary, total_grad, total_hess, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } if (best_gain == std::numeric_limits<float>::lowest()) { // Do not add the node if not split if found. continue; } output_node_ids.push_back(node_id); // Remove the parent gain for the parent node. output_gains.push_back(best_gain - parent_gain); output_feature_dimensions.push_back(best_f_dim); // default direction is fixed for dense splits. // TODO(tanzheny) account for default values. output_split_types.push_back(best_split_type); output_thresholds.push_back(best_bucket); output_left_node_contribs.push_back(best_contrib_for_left); output_right_node_contribs.push_back(best_contrib_for_right); } // for node id const int num_nodes = output_node_ids.size(); // output_node_ids Tensor* output_node_ids_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes}, &output_node_ids_t)); auto output_node_ids_vec = output_node_ids_t->vec<int32>(); // output_gains Tensor* output_gains_t; OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes}, &output_gains_t)); auto output_gains_vec = output_gains_t->vec<float>(); // output_feature_dimensions Tensor* output_feature_dimension_t; OP_REQUIRES_OK(context, context->allocate_output("feature_dimensions", {num_nodes}, &output_feature_dimension_t)); auto output_feature_dimensions_vec = output_feature_dimension_t->vec<int32>(); // output_thresholds Tensor* output_thresholds_t; OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes}, &output_thresholds_t)); auto output_thresholds_vec = output_thresholds_t->vec<int32>(); // output_left_node_contribs Tensor* output_left_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "left_node_contribs", {num_nodes, logits_dim}, &output_left_node_contribs_t)); auto output_left_node_contribs_matrix = output_left_node_contribs_t->matrix<float>(); // output_right_node_contribs Tensor* output_right_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "right_node_contribs", {num_nodes, logits_dim}, &output_right_node_contribs_t)); auto output_right_node_contribs_matrix = output_right_node_contribs_t->matrix<float>(); // split type Tensor* output_split_types_t; OP_REQUIRES_OK( context, context->allocate_output("split_with_default_directions", {num_nodes}, &output_split_types_t)); auto output_split_types_vec = output_split_types_t->vec<tstring>(); // Sets output tensors from vectors. for (int i = 0; i < num_nodes; ++i) { output_node_ids_vec(i) = output_node_ids[i]; // Adjust the gains to penalize by tree complexity. output_gains_vec(i) = output_gains[i] - tree_complexity; output_feature_dimensions_vec(i) = output_feature_dimensions[i]; output_thresholds_vec(i) = output_thresholds[i]; for (int j = 0; j < logits_dim; ++j) { output_left_node_contribs_matrix(i, j) = output_left_node_contribs[i][j]; output_right_node_contribs_matrix(i, j) = output_right_node_contribs[i][j]; } output_split_types_vec(i) = output_split_types[i]; } }
0
CVE-2021-41208
2,994
benign
CWE-476
void Compute(OpKernelContext* const context) override { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); OP_REQUIRES( context, node_id_range_t->NumElements() == 2, errors::InvalidArgument("node_id_range argument must have shape [2]")); const auto node_id_range = node_id_range_t->vec<int32>(); const int32_t node_id_first = node_id_range(0); // inclusive const int32_t node_id_last = node_id_range(1); // exclusive const Tensor* stats_summary_t; OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t)); OP_REQUIRES( context, stats_summary_t->shape().dims() == 4, errors::InvalidArgument("stats_summary argument must have rank 4")); TTypes<float, 4>::ConstTensor stats_summary = stats_summary_t->tensor<float, 4>(); const int32_t feature_dims = stats_summary_t->dim_size(1); // The last bucket is for default/missing value. const int32_t num_buckets = stats_summary_t->dim_size(2) - 1; const int32_t logits_dim = logits_dim_; const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim; OP_REQUIRES(context, hessian_dim > 0, errors::InvalidArgument("hessian dim should be < 0, got ", hessian_dim)); OP_REQUIRES(context, hessian_dim <= logits_dim * logits_dim, errors::InvalidArgument( "hessian dim should be <= ", logits_dim * logits_dim, " but got: ", hessian_dim)); const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); OP_REQUIRES(context, l1_t->NumElements() == 1, errors::InvalidArgument("l1 argument must be a scalar")); const auto l1 = l1_t->scalar<float>()(); DCHECK_GE(l1, 0); if (logits_dim_ > 1) { // Multi-class L1 regularization not supported yet. DCHECK_EQ(l1, 0); } const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); OP_REQUIRES(context, l2_t->NumElements() == 1, errors::InvalidArgument("l2 argument must be a scalar")); const auto l2 = l2_t->scalar<float>()(); DCHECK_GE(l2, 0); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); OP_REQUIRES( context, tree_complexity_t->NumElements() == 1, errors::InvalidArgument("tree_complexity argument must be a scalar")); const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); OP_REQUIRES( context, min_node_weight_t->NumElements() == 1, errors::InvalidArgument("min_node_weight argument must be a scalar")); const auto min_node_weight = min_node_weight_t->scalar<float>()(); std::vector<int32> output_node_ids; std::vector<float> output_gains; std::vector<int32> output_feature_dimensions; std::vector<int32> output_thresholds; std::vector<Eigen::VectorXf> output_left_node_contribs; std::vector<Eigen::VectorXf> output_right_node_contribs; std::vector<std::string> output_split_types; // TODO(tanzheny) parallelize the computation. // Iterate each node and find the best gain per node. for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) { float best_gain = std::numeric_limits<float>::lowest(); int32_t best_bucket = 0; int32_t best_f_dim = 0; string best_split_type; Eigen::VectorXf best_contrib_for_left(logits_dim); Eigen::VectorXf best_contrib_for_right(logits_dim); float parent_gain; // Including default bucket. ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0), num_buckets + 1, logits_dim + hessian_dim); const Eigen::VectorXf total_grad = stats_mat.leftCols(logits_dim).colwise().sum(); const Eigen::VectorXf total_hess = stats_mat.rightCols(hessian_dim).colwise().sum(); if (total_hess.norm() < min_node_weight) { continue; } Eigen::VectorXf parent_weight(logits_dim); CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight, &parent_gain); if (split_type_ == "inequality") { CalculateBestInequalitySplit( stats_summary, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } else { CalculateBestEqualitySplit( stats_summary, total_grad, total_hess, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } if (best_gain == std::numeric_limits<float>::lowest()) { // Do not add the node if not split if found. continue; } output_node_ids.push_back(node_id); // Remove the parent gain for the parent node. output_gains.push_back(best_gain - parent_gain); output_feature_dimensions.push_back(best_f_dim); // default direction is fixed for dense splits. // TODO(tanzheny) account for default values. output_split_types.push_back(best_split_type); output_thresholds.push_back(best_bucket); output_left_node_contribs.push_back(best_contrib_for_left); output_right_node_contribs.push_back(best_contrib_for_right); } // for node id const int num_nodes = output_node_ids.size(); // output_node_ids Tensor* output_node_ids_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes}, &output_node_ids_t)); auto output_node_ids_vec = output_node_ids_t->vec<int32>(); // output_gains Tensor* output_gains_t; OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes}, &output_gains_t)); auto output_gains_vec = output_gains_t->vec<float>(); // output_feature_dimensions Tensor* output_feature_dimension_t; OP_REQUIRES_OK(context, context->allocate_output("feature_dimensions", {num_nodes}, &output_feature_dimension_t)); auto output_feature_dimensions_vec = output_feature_dimension_t->vec<int32>(); // output_thresholds Tensor* output_thresholds_t; OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes}, &output_thresholds_t)); auto output_thresholds_vec = output_thresholds_t->vec<int32>(); // output_left_node_contribs Tensor* output_left_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "left_node_contribs", {num_nodes, logits_dim}, &output_left_node_contribs_t)); auto output_left_node_contribs_matrix = output_left_node_contribs_t->matrix<float>(); // output_right_node_contribs Tensor* output_right_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "right_node_contribs", {num_nodes, logits_dim}, &output_right_node_contribs_t)); auto output_right_node_contribs_matrix = output_right_node_contribs_t->matrix<float>(); // split type Tensor* output_split_types_t; OP_REQUIRES_OK( context, context->allocate_output("split_with_default_directions", {num_nodes}, &output_split_types_t)); auto output_split_types_vec = output_split_types_t->vec<tstring>(); // Sets output tensors from vectors. for (int i = 0; i < num_nodes; ++i) { output_node_ids_vec(i) = output_node_ids[i]; // Adjust the gains to penalize by tree complexity. output_gains_vec(i) = output_gains[i] - tree_complexity; output_feature_dimensions_vec(i) = output_feature_dimensions[i]; output_thresholds_vec(i) = output_thresholds[i]; for (int j = 0; j < logits_dim; ++j) { output_left_node_contribs_matrix(i, j) = output_left_node_contribs[i][j]; output_right_node_contribs_matrix(i, j) = output_right_node_contribs[i][j]; } output_split_types_vec(i) = output_split_types[i]; } }
1
CVE-2021-41208
2,994
vulnerable
CWE-125
static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame, const uint8_t *buf, int buf_size, int first_field) { int i, cid, ret; int old_bit_depth = ctx->bit_depth, bitdepth; uint64_t header_prefix; if (buf_size < 0x280) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < 640).\n", buf_size); return AVERROR_INVALIDDATA; } header_prefix = ff_dnxhd_parse_header_prefix(buf); if (header_prefix == 0) { av_log(ctx->avctx, AV_LOG_ERROR, "unknown header 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X\n", buf[0], buf[1], buf[2], buf[3], buf[4]); return AVERROR_INVALIDDATA; } if (buf[5] & 2) { /* interlaced */ ctx->cur_field = buf[5] & 1; frame->interlaced_frame = 1; frame->top_field_first = first_field ^ ctx->cur_field; av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field); } else { ctx->cur_field = 0; } ctx->mbaff = (buf[0x6] >> 5) & 1; ctx->height = AV_RB16(buf + 0x18); ctx->width = AV_RB16(buf + 0x1a); switch(buf[0x21] >> 5) { case 1: bitdepth = 8; break; case 2: bitdepth = 10; break; case 3: bitdepth = 12; break; default: av_log(ctx->avctx, AV_LOG_ERROR, "Unknown bitdepth indicator (%d)\n", buf[0x21] >> 5); return AVERROR_INVALIDDATA; } cid = AV_RB32(buf + 0x28); ctx->avctx->profile = dnxhd_get_profile(cid); if ((ret = dnxhd_init_vlc(ctx, cid, bitdepth)) < 0) return ret; if (ctx->mbaff && ctx->cid_table->cid != 1260) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive MB interlace flag in an unsupported profile.\n"); ctx->act = buf[0x2C] & 7; if (ctx->act && ctx->cid_table->cid != 1256 && ctx->cid_table->cid != 1270) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive color transform in an unsupported profile.\n"); ctx->is_444 = (buf[0x2C] >> 6) & 1; if (ctx->is_444) { if (bitdepth == 8) { avpriv_request_sample(ctx->avctx, "4:4:4 8 bits"); return AVERROR_INVALIDDATA; } else if (bitdepth == 10) { ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; ctx->pix_fmt = ctx->act ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_GBRP10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_12_444; ctx->pix_fmt = ctx->act ? AV_PIX_FMT_YUV444P12 : AV_PIX_FMT_GBRP12; } } else if (bitdepth == 12) { ctx->decode_dct_block = dnxhd_decode_dct_block_12; ctx->pix_fmt = AV_PIX_FMT_YUV422P12; } else if (bitdepth == 10) { if (ctx->avctx->profile == FF_PROFILE_DNXHR_HQX) ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; else ctx->decode_dct_block = dnxhd_decode_dct_block_10; ctx->pix_fmt = AV_PIX_FMT_YUV422P10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_8; ctx->pix_fmt = AV_PIX_FMT_YUV422P; } ctx->avctx->bits_per_raw_sample = ctx->bit_depth = bitdepth; if (ctx->bit_depth != old_bit_depth) { ff_blockdsp_init(&ctx->bdsp, ctx->avctx); ff_idctdsp_init(&ctx->idsp, ctx->avctx); ff_init_scantable(ctx->idsp.idct_permutation, &ctx->scantable, ff_zigzag_direct); } // make sure profile size constraints are respected // DNx100 allows 1920->1440 and 1280->960 subsampling if (ctx->width != ctx->cid_table->width && ctx->cid_table->width != DNXHD_VARIABLE) { av_reduce(&ctx->avctx->sample_aspect_ratio.num, &ctx->avctx->sample_aspect_ratio.den, ctx->width, ctx->cid_table->width, 255); ctx->width = ctx->cid_table->width; } if (buf_size < ctx->cid_table->coding_unit_size) { av_log(ctx->avctx, AV_LOG_ERROR, "incorrect frame size (%d < %u).\n", buf_size, ctx->cid_table->coding_unit_size); return AVERROR_INVALIDDATA; } ctx->mb_width = (ctx->width + 15)>> 4; ctx->mb_height = AV_RB16(buf + 0x16c); if ((ctx->height + 15) >> 4 == ctx->mb_height && frame->interlaced_frame) ctx->height <<= 1; av_log(ctx->avctx, AV_LOG_VERBOSE, "%dx%d, 4:%s %d bits, MBAFF=%d ACT=%d\n", ctx->width, ctx->height, ctx->is_444 ? "4:4" : "2:2", ctx->bit_depth, ctx->mbaff, ctx->act); // Newer format supports variable mb_scan_index sizes if (ctx->mb_height > 68 && ff_dnxhd_check_header_prefix_hr(header_prefix)) { ctx->data_offset = 0x170 + (ctx->mb_height << 2); } else { if (ctx->mb_height > 68 || (ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } ctx->data_offset = 0x280; } if (buf_size < ctx->data_offset) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < %d).\n", buf_size, ctx->data_offset); return AVERROR_INVALIDDATA; } if (ctx->mb_height > FF_ARRAY_ELEMS(ctx->mb_scan_index)) { av_log(ctx->avctx, AV_LOG_ERROR, "mb_height too big (%d > %"SIZE_SPECIFIER").\n", ctx->mb_height, FF_ARRAY_ELEMS(ctx->mb_scan_index)); return AVERROR_INVALIDDATA; } for (i = 0; i < ctx->mb_height; i++) { ctx->mb_scan_index[i] = AV_RB32(buf + 0x170 + (i << 2)); ff_dlog(ctx->avctx, "mb scan index %d, pos %d: %"PRIu32"\n", i, 0x170 + (i << 2), ctx->mb_scan_index[i]); if (buf_size - ctx->data_offset < ctx->mb_scan_index[i]) { av_log(ctx->avctx, AV_LOG_ERROR, "invalid mb scan index (%"PRIu32" vs %u).\n", ctx->mb_scan_index[i], buf_size - ctx->data_offset); return AVERROR_INVALIDDATA; } } return 0; }
0
CVE-2017-11719
41
benign
CWE-125
static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame, const uint8_t *buf, int buf_size, int first_field) { int i, cid, ret; int old_bit_depth = ctx->bit_depth, bitdepth; uint64_t header_prefix; if (buf_size < 0x280) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < 640).\n", buf_size); return AVERROR_INVALIDDATA; } header_prefix = ff_dnxhd_parse_header_prefix(buf); if (header_prefix == 0) { av_log(ctx->avctx, AV_LOG_ERROR, "unknown header 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X\n", buf[0], buf[1], buf[2], buf[3], buf[4]); return AVERROR_INVALIDDATA; } if (buf[5] & 2) { /* interlaced */ ctx->cur_field = buf[5] & 1; frame->interlaced_frame = 1; frame->top_field_first = first_field ^ ctx->cur_field; av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field); } else { ctx->cur_field = 0; } ctx->mbaff = (buf[0x6] >> 5) & 1; ctx->height = AV_RB16(buf + 0x18); ctx->width = AV_RB16(buf + 0x1a); switch(buf[0x21] >> 5) { case 1: bitdepth = 8; break; case 2: bitdepth = 10; break; case 3: bitdepth = 12; break; default: av_log(ctx->avctx, AV_LOG_ERROR, "Unknown bitdepth indicator (%d)\n", buf[0x21] >> 5); return AVERROR_INVALIDDATA; } cid = AV_RB32(buf + 0x28); ctx->avctx->profile = dnxhd_get_profile(cid); if ((ret = dnxhd_init_vlc(ctx, cid, bitdepth)) < 0) return ret; if (ctx->mbaff && ctx->cid_table->cid != 1260) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive MB interlace flag in an unsupported profile.\n"); ctx->act = buf[0x2C] & 7; if (ctx->act && ctx->cid_table->cid != 1256 && ctx->cid_table->cid != 1270) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive color transform in an unsupported profile.\n"); ctx->is_444 = (buf[0x2C] >> 6) & 1; if (ctx->is_444) { if (bitdepth == 8) { avpriv_request_sample(ctx->avctx, "4:4:4 8 bits"); return AVERROR_INVALIDDATA; } else if (bitdepth == 10) { ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; ctx->pix_fmt = ctx->act ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_GBRP10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_12_444; ctx->pix_fmt = ctx->act ? AV_PIX_FMT_YUV444P12 : AV_PIX_FMT_GBRP12; } } else if (bitdepth == 12) { ctx->decode_dct_block = dnxhd_decode_dct_block_12; ctx->pix_fmt = AV_PIX_FMT_YUV422P12; } else if (bitdepth == 10) { if (ctx->avctx->profile == FF_PROFILE_DNXHR_HQX) ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; else ctx->decode_dct_block = dnxhd_decode_dct_block_10; ctx->pix_fmt = AV_PIX_FMT_YUV422P10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_8; ctx->pix_fmt = AV_PIX_FMT_YUV422P; } ctx->avctx->bits_per_raw_sample = ctx->bit_depth = bitdepth; if (ctx->bit_depth != old_bit_depth) { ff_blockdsp_init(&ctx->bdsp, ctx->avctx); ff_idctdsp_init(&ctx->idsp, ctx->avctx); ff_init_scantable(ctx->idsp.idct_permutation, &ctx->scantable, ff_zigzag_direct); } // make sure profile size constraints are respected // DNx100 allows 1920->1440 and 1280->960 subsampling if (ctx->width != ctx->cid_table->width && ctx->cid_table->width != DNXHD_VARIABLE) { av_reduce(&ctx->avctx->sample_aspect_ratio.num, &ctx->avctx->sample_aspect_ratio.den, ctx->width, ctx->cid_table->width, 255); ctx->width = ctx->cid_table->width; } if (buf_size < ctx->cid_table->coding_unit_size) { av_log(ctx->avctx, AV_LOG_ERROR, "incorrect frame size (%d < %u).\n", buf_size, ctx->cid_table->coding_unit_size); return AVERROR_INVALIDDATA; } ctx->mb_width = (ctx->width + 15)>> 4; ctx->mb_height = AV_RB16(buf + 0x16c); if ((ctx->height + 15) >> 4 == ctx->mb_height && frame->interlaced_frame) ctx->height <<= 1; av_log(ctx->avctx, AV_LOG_VERBOSE, "%dx%d, 4:%s %d bits, MBAFF=%d ACT=%d\n", ctx->width, ctx->height, ctx->is_444 ? "4:4" : "2:2", ctx->bit_depth, ctx->mbaff, ctx->act); // Newer format supports variable mb_scan_index sizes if (ctx->mb_height > 68 && ff_dnxhd_check_header_prefix_hr(header_prefix)) { ctx->data_offset = 0x170 + (ctx->mb_height << 2); } else { if (ctx->mb_height > 68) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } ctx->data_offset = 0x280; } if ((ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } if (buf_size < ctx->data_offset) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < %d).\n", buf_size, ctx->data_offset); return AVERROR_INVALIDDATA; } if (ctx->mb_height > FF_ARRAY_ELEMS(ctx->mb_scan_index)) { av_log(ctx->avctx, AV_LOG_ERROR, "mb_height too big (%d > %"SIZE_SPECIFIER").\n", ctx->mb_height, FF_ARRAY_ELEMS(ctx->mb_scan_index)); return AVERROR_INVALIDDATA; } for (i = 0; i < ctx->mb_height; i++) { ctx->mb_scan_index[i] = AV_RB32(buf + 0x170 + (i << 2)); ff_dlog(ctx->avctx, "mb scan index %d, pos %d: %"PRIu32"\n", i, 0x170 + (i << 2), ctx->mb_scan_index[i]); if (buf_size - ctx->data_offset < ctx->mb_scan_index[i]) { av_log(ctx->avctx, AV_LOG_ERROR, "invalid mb scan index (%"PRIu32" vs %u).\n", ctx->mb_scan_index[i], buf_size - ctx->data_offset); return AVERROR_INVALIDDATA; } } return 0; }
1
CVE-2017-11719
41
vulnerable
CWE-787
webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len) { char *buf, *payload; uint32_t *payload32; int ret = -1, result = -1; int total = 0; ws_mask_t mask; ws_header_t *header; int i; unsigned char opcode; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; int flength, fhlen; /* int fin; */ /* not used atm */ /* rfbLog(" <== %s[%d]: %d cl: %p, wsctx: %p-%p (%d)\n", __func__, gettid(), len, cl, wsctx, (char *)wsctx + sizeof(ws_ctx_t), sizeof(ws_ctx_t)); */ if (wsctx->readbuflen) { /* simply return what we have */ if (wsctx->readbuflen > len) { memcpy(dst, wsctx->readbuf + wsctx->readbufstart, len); result = len; wsctx->readbuflen -= len; wsctx->readbufstart += len; } else { memcpy(dst, wsctx->readbuf + wsctx->readbufstart, wsctx->readbuflen); result = wsctx->readbuflen; wsctx->readbuflen = 0; wsctx->readbufstart = 0; } goto spor; } buf = wsctx->codeBufDecode; header = (ws_header_t *)wsctx->codeBufDecode; ret = ws_peek(cl, buf, B64LEN(len) + WSHLENMAX); if (ret < 2) { /* save errno because rfbErr() will tamper it */ if (-1 == ret) { int olderrno = errno; rfbErr("%s: peek; %m\n", __func__); errno = olderrno; } else if (0 == ret) { result = 0; } else { errno = EAGAIN; } goto spor; } opcode = header->b0 & 0x0f; /* fin = (header->b0 & 0x80) >> 7; */ /* not used atm */ flength = header->b1 & 0x7f; /* * 4.3. Client-to-Server Masking * * The client MUST mask all frames sent to the server. A server MUST * close the connection upon receiving a frame with the MASK bit set to 0. **/ if (!(header->b1 & 0x80)) { rfbErr("%s: got frame without mask\n", __func__, ret); errno = EIO; goto spor; } if (flength < 126) { fhlen = 2; mask = header->u.m; } else if (flength == 126 && 4 <= ret) { flength = WS_NTOH16(header->u.s16.l16); fhlen = 4; mask = header->u.s16.m16; } else if (flength == 127 && 10 <= ret) { flength = WS_NTOH64(header->u.s64.l64); fhlen = 10; mask = header->u.s64.m64; } else { /* Incomplete frame header */ rfbErr("%s: incomplete frame header\n", __func__, ret); errno = EIO; goto spor; } /* absolute length of frame */ total = fhlen + flength + 4; payload = buf + fhlen + 4; /* header length + mask */ if (-1 == (ret = ws_read(cl, buf, total))) { int olderrno = errno; rfbErr("%s: read; %m", __func__); errno = olderrno; return ret; } else if (ret < total) { /* GT TODO: hmm? */ rfbLog("%s: read; got partial data\n", __func__); } else { buf[ret] = '\0'; } /* process 1 frame (32 bit op) */ payload32 = (uint32_t *)payload; for (i = 0; i < flength / 4; i++) { payload32[i] ^= mask.u; } /* process the remaining bytes (if any) */ for (i*=4; i < flength; i++) { payload[i] ^= mask.c[i % 4]; } switch (opcode) { case WS_OPCODE_CLOSE: rfbLog("got closure, reason %d\n", WS_NTOH16(((uint16_t *)payload)[0])); errno = ECONNRESET; break; case WS_OPCODE_TEXT_FRAME: if (-1 == (flength = b64_pton(payload, (unsigned char *)wsctx->codeBufDecode, sizeof(wsctx->codeBufDecode)))) { rfbErr("%s: Base64 decode error; %m\n", __func__); break; } payload = wsctx->codeBufDecode; /* fall through */ case WS_OPCODE_BINARY_FRAME: if (flength > len) { memcpy(wsctx->readbuf, payload + len, flength - len); wsctx->readbufstart = 0; wsctx->readbuflen = flength - len; flength = len; } memcpy(dst, payload, flength); result = flength; break; default: rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\n", __func__, (int)opcode, header->b0, header->b1); } /* single point of return, if someone has questions :-) */ spor: /* rfbLog("%s: ret: %d/%d\n", __func__, result, len); */ return result; }
0
CVE-2017-18922
1,752
benign
CWE-787
webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len) { int result = -1; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; /* int fin; */ /* not used atm */ /* rfbLog(" <== %s[%d]: %d cl: %p, wsctx: %p-%p (%d)\n", __func__, gettid(), len, cl, wsctx, (char *)wsctx + sizeof(ws_ctx_t), sizeof(ws_ctx_t)); */ rfbLog("%s_enter: len=%d; " "CTX: readlen=%d readPos=%p " "writeTo=%p " "state=%d toRead=%d remaining=%d " " nReadRaw=%d carrylen=%d carryBuf=%p\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->nToRead, hybiRemaining(wsctx), wsctx->nReadRaw, wsctx->carrylen, wsctx->carryBuf); switch (wsctx->hybiDecodeState){ case WS_HYBI_STATE_HEADER_PENDING: wsctx->hybiDecodeState = hybiReadHeader(cl, &result); if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { goto spor; } if (wsctx->hybiDecodeState != WS_HYBI_STATE_HEADER_PENDING) { /* when header is complete, try to read some more data */ wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result); } break; case WS_HYBI_STATE_DATA_AVAILABLE: wsctx->hybiDecodeState = hybiReturnData(dst, len, wsctx, &result); break; case WS_HYBI_STATE_DATA_NEEDED: wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result); break; case WS_HYBI_STATE_CLOSE_REASON_PENDING: wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result); break; default: /* invalid state */ rfbErr("%s: called with invalid state %d\n", wsctx->hybiDecodeState); result = -1; errno = EIO; wsctx->hybiDecodeState = WS_HYBI_STATE_ERR; } /* single point of return, if someone has questions :-) */ spor: /* rfbLog("%s: ret: %d/%d\n", __func__, result, len); */ if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { rfbLog("frame received successfully, cleaning up: read=%d hlen=%d plen=%d\n", wsctx->header.nRead, wsctx->header.headerLen, wsctx->header.payloadLen); /* frame finished, cleanup state */ hybiDecodeCleanup(wsctx); } else if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { hybiDecodeCleanup(wsctx); } rfbLog("%s_exit: len=%d; " "CTX: readlen=%d readPos=%p " "writePos=%p " "state=%d toRead=%d remaining=%d " "nRead=%d carrylen=%d carryBuf=%p " "result=%d\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->nToRead, hybiRemaining(wsctx), wsctx->nReadRaw, wsctx->carrylen, wsctx->carryBuf, result); return result; }
1
CVE-2017-18922
1,752
vulnerable
CWE-20
sds genRedisInfoString(void) { sds info; time_t uptime = time(NULL)-server.stat_starttime; int j; char hmem[64]; struct rusage self_ru, c_ru; getrusage(RUSAGE_SELF, &self_ru); getrusage(RUSAGE_CHILDREN, &c_ru); bytesToHuman(hmem,zmalloc_used_memory()); info = sdscatprintf(sdsempty(), "redis_version:%s\r\n" "redis_git_sha1:%s\r\n" "redis_git_dirty:%d\r\n" "arch_bits:%s\r\n" "multiplexing_api:%s\r\n" "process_id:%ld\r\n" "uptime_in_seconds:%ld\r\n" "uptime_in_days:%ld\r\n" "lru_clock:%ld\r\n" "used_cpu_sys:%.2f\r\n" "used_cpu_user:%.2f\r\n" "used_cpu_sys_childrens:%.2f\r\n" "used_cpu_user_childrens:%.2f\r\n" "connected_clients:%d\r\n" "connected_slaves:%d\r\n" "blocked_clients:%d\r\n" "used_memory:%zu\r\n" "used_memory_human:%s\r\n" "used_memory_rss:%zu\r\n" "mem_fragmentation_ratio:%.2f\r\n" "use_tcmalloc:%d\r\n" "loading:%d\r\n" "aof_enabled:%d\r\n" "changes_since_last_save:%lld\r\n" "bgsave_in_progress:%d\r\n" "last_save_time:%ld\r\n" "bgrewriteaof_in_progress:%d\r\n" "total_connections_received:%lld\r\n" "total_commands_processed:%lld\r\n" "expired_keys:%lld\r\n" "evicted_keys:%lld\r\n" "keyspace_hits:%lld\r\n" "keyspace_misses:%lld\r\n" "hash_max_zipmap_entries:%zu\r\n" "hash_max_zipmap_value:%zu\r\n" "pubsub_channels:%ld\r\n" "pubsub_patterns:%u\r\n" "vm_enabled:%d\r\n" "role:%s\r\n" ,REDIS_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, (sizeof(long) == 8) ? "64" : "32", aeGetApiName(), (long) getpid(), uptime, uptime/(3600*24), (unsigned long) server.lruclock, (float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000, (float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000, (float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000, (float)c_ru.ru_stime.tv_sec+(float)c_ru.ru_stime.tv_usec/1000000, listLength(server.clients)-listLength(server.slaves), listLength(server.slaves), server.bpop_blocked_clients, zmalloc_used_memory(), hmem, zmalloc_get_rss(), zmalloc_get_fragmentation_ratio(), #ifdef USE_TCMALLOC 1, #else 0, #endif server.loading, server.appendonly, server.dirty, server.bgsavechildpid != -1, server.lastsave, server.bgrewritechildpid != -1, server.stat_numconnections, server.stat_numcommands, server.stat_expiredkeys, server.stat_evictedkeys, server.stat_keyspace_hits, server.stat_keyspace_misses, server.hash_max_zipmap_entries, server.hash_max_zipmap_value, dictSize(server.pubsub_channels), listLength(server.pubsub_patterns), server.vm_enabled != 0, server.masterhost == NULL ? "master" : "slave" ); if (server.masterhost) { info = sdscatprintf(info, "master_host:%s\r\n" "master_port:%d\r\n" "master_link_status:%s\r\n" "master_last_io_seconds_ago:%d\r\n" "master_sync_in_progress:%d\r\n" ,server.masterhost, server.masterport, (server.replstate == REDIS_REPL_CONNECTED) ? "up" : "down", server.master ? ((int)(time(NULL)-server.master->lastinteraction)) : -1, server.replstate == REDIS_REPL_TRANSFER ); if (server.replstate == REDIS_REPL_TRANSFER) { info = sdscatprintf(info, "master_sync_left_bytes:%ld\r\n" "master_sync_last_io_seconds_ago:%d\r\n" ,(long)server.repl_transfer_left, (int)(time(NULL)-server.repl_transfer_lastio) ); } } if (server.vm_enabled) { lockThreadedIO(); info = sdscatprintf(info, "vm_conf_max_memory:%llu\r\n" "vm_conf_page_size:%llu\r\n" "vm_conf_pages:%llu\r\n" "vm_stats_used_pages:%llu\r\n" "vm_stats_swapped_objects:%llu\r\n" "vm_stats_swappin_count:%llu\r\n" "vm_stats_swappout_count:%llu\r\n" "vm_stats_io_newjobs_len:%lu\r\n" "vm_stats_io_processing_len:%lu\r\n" "vm_stats_io_processed_len:%lu\r\n" "vm_stats_io_active_threads:%lu\r\n" "vm_stats_blocked_clients:%lu\r\n" ,(unsigned long long) server.vm_max_memory, (unsigned long long) server.vm_page_size, (unsigned long long) server.vm_pages, (unsigned long long) server.vm_stats_used_pages, (unsigned long long) server.vm_stats_swapped_objects, (unsigned long long) server.vm_stats_swapins, (unsigned long long) server.vm_stats_swapouts, (unsigned long) listLength(server.io_newjobs), (unsigned long) listLength(server.io_processing), (unsigned long) listLength(server.io_processed), (unsigned long) server.io_active_threads, (unsigned long) server.vm_blocked_clients ); unlockThreadedIO(); } if (server.loading) { double perc; time_t eta, elapsed; off_t remaining_bytes = server.loading_total_bytes- server.loading_loaded_bytes; perc = ((double)server.loading_loaded_bytes / server.loading_total_bytes) * 100; elapsed = time(NULL)-server.loading_start_time; if (elapsed == 0) { eta = 1; /* A fake 1 second figure if we don't have enough info */ } else { eta = (elapsed*remaining_bytes)/server.loading_loaded_bytes; } info = sdscatprintf(info, "loading_start_time:%ld\r\n" "loading_total_bytes:%llu\r\n" "loading_loaded_bytes:%llu\r\n" "loading_loaded_perc:%.2f\r\n" "loading_eta_seconds:%ld\r\n" ,(unsigned long) server.loading_start_time, (unsigned long long) server.loading_total_bytes, (unsigned long long) server.loading_loaded_bytes, perc, eta ); } for (j = 0; j < server.dbnum; j++) { long long keys, vkeys; keys = dictSize(server.db[j].dict); vkeys = dictSize(server.db[j].expires); if (keys || vkeys) { info = sdscatprintf(info, "db%d:keys=%lld,expires=%lld\r\n", j, keys, vkeys); } } return info; }
0
CVE-2013-0178
721
benign
CWE-20
sds genRedisInfoString(void) { sds info; time_t uptime = time(NULL)-server.stat_starttime; int j; char hmem[64]; struct rusage self_ru, c_ru; getrusage(RUSAGE_SELF, &self_ru); getrusage(RUSAGE_CHILDREN, &c_ru); bytesToHuman(hmem,zmalloc_used_memory()); info = sdscatprintf(sdsempty(), "redis_version:%s\r\n" "redis_git_sha1:%s\r\n" "redis_git_dirty:%d\r\n" "arch_bits:%s\r\n" "multiplexing_api:%s\r\n" "process_id:%ld\r\n" "uptime_in_seconds:%ld\r\n" "uptime_in_days:%ld\r\n" "lru_clock:%ld\r\n" "used_cpu_sys:%.2f\r\n" "used_cpu_user:%.2f\r\n" "used_cpu_sys_childrens:%.2f\r\n" "used_cpu_user_childrens:%.2f\r\n" "connected_clients:%d\r\n" "connected_slaves:%d\r\n" "blocked_clients:%d\r\n" "used_memory:%zu\r\n" "used_memory_human:%s\r\n" "used_memory_rss:%zu\r\n" "mem_fragmentation_ratio:%.2f\r\n" "use_tcmalloc:%d\r\n" "loading:%d\r\n" "aof_enabled:%d\r\n" "changes_since_last_save:%lld\r\n" "bgsave_in_progress:%d\r\n" "last_save_time:%ld\r\n" "bgrewriteaof_in_progress:%d\r\n" "total_connections_received:%lld\r\n" "total_commands_processed:%lld\r\n" "expired_keys:%lld\r\n" "evicted_keys:%lld\r\n" "keyspace_hits:%lld\r\n" "keyspace_misses:%lld\r\n" "hash_max_zipmap_entries:%zu\r\n" "hash_max_zipmap_value:%zu\r\n" "pubsub_channels:%ld\r\n" "pubsub_patterns:%u\r\n" "ds_enabled:%d\r\n" "role:%s\r\n" ,REDIS_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, (sizeof(long) == 8) ? "64" : "32", aeGetApiName(), (long) getpid(), uptime, uptime/(3600*24), (unsigned long) server.lruclock, (float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000, (float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000, (float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000, (float)c_ru.ru_stime.tv_sec+(float)c_ru.ru_stime.tv_usec/1000000, listLength(server.clients)-listLength(server.slaves), listLength(server.slaves), server.bpop_blocked_clients, zmalloc_used_memory(), hmem, zmalloc_get_rss(), zmalloc_get_fragmentation_ratio(), #ifdef USE_TCMALLOC 1, #else 0, #endif server.loading, server.appendonly, server.dirty, server.bgsavechildpid != -1, server.lastsave, server.bgrewritechildpid != -1, server.stat_numconnections, server.stat_numcommands, server.stat_expiredkeys, server.stat_evictedkeys, server.stat_keyspace_hits, server.stat_keyspace_misses, server.hash_max_zipmap_entries, server.hash_max_zipmap_value, dictSize(server.pubsub_channels), listLength(server.pubsub_patterns), server.ds_enabled != 0, server.masterhost == NULL ? "master" : "slave" ); if (server.masterhost) { info = sdscatprintf(info, "master_host:%s\r\n" "master_port:%d\r\n" "master_link_status:%s\r\n" "master_last_io_seconds_ago:%d\r\n" "master_sync_in_progress:%d\r\n" ,server.masterhost, server.masterport, (server.replstate == REDIS_REPL_CONNECTED) ? "up" : "down", server.master ? ((int)(time(NULL)-server.master->lastinteraction)) : -1, server.replstate == REDIS_REPL_TRANSFER ); if (server.replstate == REDIS_REPL_TRANSFER) { info = sdscatprintf(info, "master_sync_left_bytes:%ld\r\n" "master_sync_last_io_seconds_ago:%d\r\n" ,(long)server.repl_transfer_left, (int)(time(NULL)-server.repl_transfer_lastio) ); } } if (server.ds_enabled) { lockThreadedIO(); info = sdscatprintf(info, "cache_max_memory:%llu\r\n" "cache_blocked_clients:%lu\r\n" ,(unsigned long long) server.cache_max_memory, (unsigned long) server.cache_blocked_clients ); unlockThreadedIO(); } if (server.loading) { double perc; time_t eta, elapsed; off_t remaining_bytes = server.loading_total_bytes- server.loading_loaded_bytes; perc = ((double)server.loading_loaded_bytes / server.loading_total_bytes) * 100; elapsed = time(NULL)-server.loading_start_time; if (elapsed == 0) { eta = 1; /* A fake 1 second figure if we don't have enough info */ } else { eta = (elapsed*remaining_bytes)/server.loading_loaded_bytes; } info = sdscatprintf(info, "loading_start_time:%ld\r\n" "loading_total_bytes:%llu\r\n" "loading_loaded_bytes:%llu\r\n" "loading_loaded_perc:%.2f\r\n" "loading_eta_seconds:%ld\r\n" ,(unsigned long) server.loading_start_time, (unsigned long long) server.loading_total_bytes, (unsigned long long) server.loading_loaded_bytes, perc, eta ); } for (j = 0; j < server.dbnum; j++) { long long keys, vkeys; keys = dictSize(server.db[j].dict); vkeys = dictSize(server.db[j].expires); if (keys || vkeys) { info = sdscatprintf(info, "db%d:keys=%lld,expires=%lld\r\n", j, keys, vkeys); } } return info; }
1
CVE-2013-0178
721
vulnerable
CWE-125
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { __mark_reg_unknown(dst_reg); return 0; } switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. If the value in dst_reg might * be negative, then either: * 1) src_reg might be zero, so the sign bit of the result is * unknown, so we lose our signed bounds * 2) it's known negative, thus the unsigned bounds capture the * signed bounds * 3) the signed bounds cross zero, so they tell us nothing * about the result * If the value in dst_reg is known nonnegative, then again the * unsigned bounts capture the signed bounds. * Thus, in all cases it suffices to blow away our signed bounds * and rely on inferring new ones from the unsigned bounds and * var_off of the result. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_ARSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* Upon reaching here, src_known is true and * umax_val is equal to umin_val. */ dst_reg->smin_value >>= umin_val; dst_reg->smax_value >>= umin_val; dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val); /* blow away the dst_reg umin_value/umax_value and rely on * dst_reg var_off to refine the result. */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; }
0
CVE-2018-18445
2,555
benign
CWE-125
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; if (insn_bitness == 32) { /* Relevant for 32-bit RSH: Information can propagate towards * LSB, so it isn't sufficient to only truncate the output to * 32 bits. */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { __mark_reg_unknown(dst_reg); return 0; } switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. If the value in dst_reg might * be negative, then either: * 1) src_reg might be zero, so the sign bit of the result is * unknown, so we lose our signed bounds * 2) it's known negative, thus the unsigned bounds capture the * signed bounds * 3) the signed bounds cross zero, so they tell us nothing * about the result * If the value in dst_reg is known nonnegative, then again the * unsigned bounts capture the signed bounds. * Thus, in all cases it suffices to blow away our signed bounds * and rely on inferring new ones from the unsigned bounds and * var_off of the result. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_ARSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* Upon reaching here, src_known is true and * umax_val is equal to umin_val. */ dst_reg->smin_value >>= umin_val; dst_reg->smax_value >>= umin_val; dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val); /* blow away the dst_reg umin_value/umax_value and rely on * dst_reg var_off to refine the result. */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; }
1
CVE-2018-18445
2,555
vulnerable
CWE-476
void AV1_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *av1, GF_MediaBox *mdia) { GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)av1, GF_FALSE); if (av1->emul_esd) gf_odf_desc_del((GF_Descriptor *)av1->emul_esd); av1->emul_esd = gf_odf_desc_esd_new(2); av1->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL; av1->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_AV1; if (btrt) { av1->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB; av1->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate; av1->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate; } if (av1->av1_config) { GF_AV1Config *av1_cfg = AV1_DuplicateConfig(av1->av1_config->config); if (av1_cfg) { gf_odf_av1_cfg_write(av1_cfg, &av1->emul_esd->decoderConfig->decoderSpecificInfo->data, &av1->emul_esd->decoderConfig->decoderSpecificInfo->dataLength); gf_odf_av1_cfg_del(av1_cfg); } } }
0
CVE-2021-31262
2,055
benign
CWE-476
void AV1_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *av1, GF_MediaBox *mdia) { GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)av1, GF_FALSE); if (av1->emul_esd) gf_odf_desc_del((GF_Descriptor *)av1->emul_esd); av1->emul_esd = gf_odf_desc_esd_new(2); av1->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL; av1->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_AV1; if (btrt) { av1->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB; av1->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate; av1->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate; } if (av1->av1_config && av1->av1_config->config) { GF_AV1Config *av1_cfg = AV1_DuplicateConfig(av1->av1_config->config); if (av1_cfg) { gf_odf_av1_cfg_write(av1_cfg, &av1->emul_esd->decoderConfig->decoderSpecificInfo->data, &av1->emul_esd->decoderConfig->decoderSpecificInfo->dataLength); gf_odf_av1_cfg_del(av1_cfg); } } }
1
CVE-2021-31262
2,055
vulnerable