Dataset schema (one row per function):
- cwe_id: string, 8 distinct CWE classes
- func: string, lengths 40 to 61.2k characters (the function source code)
- label: int64, values 0 or 1
- cve_id: string, lengths 13 to 16 characters
- id: int64, values 0 to 3.29k
- text_label: string, 2 distinct values ("benign", "vulnerable")
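To make the schema concrete, below is a minimal sketch of how rows like the ones that follow could be loaded and filtered with the Hugging Face `datasets` library. The dataset path and split name are placeholders, not the real identifiers of this dataset; the column names and the 0/"benign", 1/"vulnerable" pairing are taken from the rows themselves, where the two versions of a function share the same cve_id and id.

```python
from datasets import load_dataset

# Placeholder identifier and split -- substitute the actual dataset path.
ds = load_dataset("example/cwe-function-pairs", split="train")

# label 0 / text_label "benign" and label 1 / text_label "vulnerable" mark the
# two versions of the same function; paired rows share cve_id and id.
vulnerable = ds.filter(lambda row: row["label"] == 1)

# Inspect a few rows: CVE, CWE class, textual label, and function length.
for row in vulnerable.select(range(3)):
    print(row["cve_id"], row["cwe_id"], row["text_label"], len(row["func"]))
```

Grouping rows on cve_id (or id) recovers each benign/vulnerable pair for side-by-side comparison.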
CWE-787
static RList *create_cache_bins(RBinFile *bf, RDyldCache *cache) { RList *bins = r_list_newf ((RListFree)free_bin); if (!bins) { return NULL; } char *target_libs = NULL; RList *target_lib_names = NULL; int *deps = NULL; target_libs = r_sys_getenv ("R_DYLDCACHE_FILTER"); if (target_libs) { target_lib_names = r_str_split_list (target_libs, ":", 0); if (!target_lib_names) { r_list_free (bins); return NULL; } deps = R_NEWS0 (int, cache->hdr->imagesCount); if (!deps) { r_list_free (bins); r_list_free (target_lib_names); return NULL; } } ut32 i; for (i = 0; i < cache->n_hdr; i++) { cache_hdr_t *hdr = &cache->hdr[i]; ut64 hdr_offset = cache->hdr_offset[i]; ut32 maps_index = cache->maps_index[i]; cache_img_t *img = read_cache_images (cache->buf, hdr, hdr_offset); if (!img) { goto next; } ut32 j; ut16 *depArray = NULL; cache_imgxtr_t *extras = NULL; if (target_libs) { HtPU *path_to_idx = NULL; if (cache->accel) { depArray = R_NEWS0 (ut16, cache->accel->depListCount); if (!depArray) { goto next; } if (r_buf_fread_at (cache->buf, cache->accel->depListOffset, (ut8*) depArray, "s", cache->accel->depListCount) != cache->accel->depListCount * 2) { goto next; } extras = read_cache_imgextra (cache->buf, hdr, cache->accel); if (!extras) { goto next; } } else { path_to_idx = create_path_to_index (cache->buf, img, hdr); } for (j = 0; j < hdr->imagesCount; j++) { bool printing = !deps[j]; char *lib_name = get_lib_name (cache->buf, &img[j]); if (!lib_name) { break; } if (strstr (lib_name, "libobjc.A.dylib")) { deps[j]++; } if (!r_list_find (target_lib_names, lib_name, string_contains)) { R_FREE (lib_name); continue; } if (printing) { eprintf ("FILTER: %s\n", lib_name); } R_FREE (lib_name); deps[j]++; if (extras && depArray) { ut32 k; for (k = extras[j].dependentsStartArrayIndex; depArray[k] != 0xffff; k++) { ut16 dep_index = depArray[k] & 0x7fff; deps[dep_index]++; char *dep_name = get_lib_name (cache->buf, &img[dep_index]); if (!dep_name) { break; } if (printing) { eprintf ("-> %s\n", dep_name); } free (dep_name); } } else if (path_to_idx) { carve_deps_at_address (cache, img, path_to_idx, img[j].address, deps, printing); } } ht_pu_free (path_to_idx); R_FREE (depArray); R_FREE (extras); } for (j = 0; j < hdr->imagesCount; j++) { if (deps && !deps[j]) { continue; } ut64 pa = va2pa (img[j].address, hdr->mappingCount, &cache->maps[maps_index], cache->buf, 0, NULL, NULL); if (pa == UT64_MAX) { continue; } ut8 magicbytes[4]; r_buf_read_at (cache->buf, pa, magicbytes, 4); int magic = r_read_le32 (magicbytes); switch (magic) { case MH_MAGIC_64: { char file[256]; RDyldBinImage *bin = R_NEW0 (RDyldBinImage); if (!bin) { goto next; } bin->header_at = pa; bin->hdr_offset = hdr_offset; bin->symbols_off = resolve_symbols_off (cache, pa); bin->va = img[j].address; if (r_buf_read_at (cache->buf, img[j].pathFileOffset, (ut8*) &file, sizeof (file)) == sizeof (file)) { file[255] = 0; char *last_slash = strrchr (file, '/'); if (last_slash && *last_slash) { if (last_slash > file) { char *scan = last_slash - 1; while (scan > file && *scan != '/') { scan--; } if (*scan == '/') { bin->file = strdup (scan + 1); } else { bin->file = strdup (last_slash + 1); } } else { bin->file = strdup (last_slash + 1); } } else { bin->file = strdup (file); } } r_list_append (bins, bin); break; } default: eprintf ("Unknown sub-bin\n"); break; } } next: R_FREE (depArray); R_FREE (extras); R_FREE (img); } if (r_list_empty (bins)) { r_list_free (bins); bins = NULL; } R_FREE (deps); R_FREE (target_libs); r_list_free (target_lib_names); return bins; }
0
CVE-2022-0676
130
benign
CWE-787
static RList *create_cache_bins(RBinFile *bf, RDyldCache *cache) { RList *bins = r_list_newf ((RListFree)free_bin); ut16 *depArray = NULL; cache_imgxtr_t *extras = NULL; if (!bins) { return NULL; } char *target_libs = NULL; RList *target_lib_names = NULL; int *deps = NULL; target_libs = r_sys_getenv ("R_DYLDCACHE_FILTER"); if (target_libs) { target_lib_names = r_str_split_list (target_libs, ":", 0); if (!target_lib_names) { r_list_free (bins); return NULL; } deps = R_NEWS0 (int, cache->hdr->imagesCount); if (!deps) { r_list_free (bins); r_list_free (target_lib_names); return NULL; } } ut32 i; for (i = 0; i < cache->n_hdr; i++) { cache_hdr_t *hdr = &cache->hdr[i]; ut64 hdr_offset = cache->hdr_offset[i]; ut32 maps_index = cache->maps_index[i]; cache_img_t *img = read_cache_images (cache->buf, hdr, hdr_offset); if (!img) { goto next; } ut32 j; if (target_libs) { HtPU *path_to_idx = NULL; if (cache->accel) { depArray = R_NEWS0 (ut16, cache->accel->depListCount); if (!depArray) { goto next; } if (r_buf_fread_at (cache->buf, cache->accel->depListOffset, (ut8*) depArray, "s", cache->accel->depListCount) != cache->accel->depListCount * 2) { goto next; } extras = read_cache_imgextra (cache->buf, hdr, cache->accel); if (!extras) { goto next; } } else { path_to_idx = create_path_to_index (cache->buf, img, hdr); } for (j = 0; j < hdr->imagesCount; j++) { bool printing = !deps[j]; char *lib_name = get_lib_name (cache->buf, &img[j]); if (!lib_name) { break; } if (strstr (lib_name, "libobjc.A.dylib")) { deps[j]++; } if (!r_list_find (target_lib_names, lib_name, string_contains)) { R_FREE (lib_name); continue; } if (printing) { eprintf ("FILTER: %s\n", lib_name); } R_FREE (lib_name); deps[j]++; if (extras && depArray) { ut32 k; for (k = extras[j].dependentsStartArrayIndex; depArray[k] != 0xffff; k++) { ut16 dep_index = depArray[k] & 0x7fff; deps[dep_index]++; char *dep_name = get_lib_name (cache->buf, &img[dep_index]); if (!dep_name) { break; } if (printing) { eprintf ("-> %s\n", dep_name); } free (dep_name); } } else if (path_to_idx) { carve_deps_at_address (cache, img, path_to_idx, img[j].address, deps, printing); } } ht_pu_free (path_to_idx); R_FREE (depArray); R_FREE (extras); } for (j = 0; j < hdr->imagesCount; j++) { if (deps && !deps[j]) { continue; } ut64 pa = va2pa (img[j].address, hdr->mappingCount, &cache->maps[maps_index], cache->buf, 0, NULL, NULL); if (pa == UT64_MAX) { continue; } ut8 magicbytes[4]; r_buf_read_at (cache->buf, pa, magicbytes, 4); int magic = r_read_le32 (magicbytes); switch (magic) { case MH_MAGIC_64: { char file[256]; RDyldBinImage *bin = R_NEW0 (RDyldBinImage); if (!bin) { goto next; } bin->header_at = pa; bin->hdr_offset = hdr_offset; bin->symbols_off = resolve_symbols_off (cache, pa); bin->va = img[j].address; if (r_buf_read_at (cache->buf, img[j].pathFileOffset, (ut8*) &file, sizeof (file)) == sizeof (file)) { file[255] = 0; char *last_slash = strrchr (file, '/'); if (last_slash && *last_slash) { if (last_slash > file) { char *scan = last_slash - 1; while (scan > file && *scan != '/') { scan--; } if (*scan == '/') { bin->file = strdup (scan + 1); } else { bin->file = strdup (last_slash + 1); } } else { bin->file = strdup (last_slash + 1); } } else { bin->file = strdup (file); } } r_list_append (bins, bin); break; } default: eprintf ("Unknown sub-bin\n"); break; } } next: R_FREE (depArray); R_FREE (extras); R_FREE (img); } if (r_list_empty (bins)) { r_list_free (bins); bins = NULL; } R_FREE (deps); R_FREE (target_libs); r_list_free (target_lib_names); return bins; }
1
CVE-2022-0676
130
vulnerable
CWE-125
l2tp_ppp_discon_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(ptr))); ptr++; /* Disconnect Code */ ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(ptr))); ptr++; /* Control Protocol Number */ ND_PRINT((ndo, "%s", tok2str(l2tp_cc_direction2str, "Direction-#%u", *((const u_char *)ptr++)))); if (length > 5) { ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length-5); } }
0
CVE-2017-13006
1,257
benign
CWE-125
l2tp_ppp_discon_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; if (length < 5) { ND_PRINT((ndo, "AVP too short")); return; } /* Disconnect Code */ ND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(dat))); dat += 2; length -= 2; /* Control Protocol Number */ ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(dat))); dat += 2; length -= 2; /* Direction */ ND_PRINT((ndo, "%s", tok2str(l2tp_cc_direction2str, "Direction-#%u", EXTRACT_8BITS(ptr)))); ptr++; length--; if (length != 0) { ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length); } }
1
CVE-2017-13006
1,257
vulnerable
CWE-20
void CZNC::ForceEncoding() { m_uiForceEncoding++; #ifdef HAVE_ICU for (Csock* pSock : GetManager()) { if (pSock->GetEncoding().empty()) { pSock->SetEncoding("UTF-8"); } } #endif }
0
CVE-2019-9917
2,526
benign
CWE-20
void CZNC::ForceEncoding() { m_uiForceEncoding++; #ifdef HAVE_ICU for (Csock* pSock : GetManager()) { pSock->SetEncoding(FixupEncoding(pSock->GetEncoding())); } #endif }
1
CVE-2019-9917
2,526
vulnerable
CWE-125
TfLiteStatus ReverseSequenceImpl(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* seq_lengths_tensor = GetInput(context, node, kSeqLengthsTensor); const TS* seq_lengths = GetTensorData<TS>(seq_lengths_tensor); auto* params = reinterpret_cast<TfLiteReverseSequenceParams*>(node->builtin_data); int seq_dim = params->seq_dim; int batch_dim = params->batch_dim; TF_LITE_ENSURE(context, seq_dim >= 0); TF_LITE_ENSURE(context, batch_dim >= 0); TF_LITE_ENSURE(context, seq_dim != batch_dim); TF_LITE_ENSURE(context, seq_dim < NumDimensions(input)); TF_LITE_ENSURE(context, batch_dim < NumDimensions(input)); TF_LITE_ENSURE_EQ(context, SizeOfDimension(seq_lengths_tensor, 0), SizeOfDimension(input, batch_dim)); for (int i = 0; i < NumDimensions(seq_lengths_tensor); ++i) { TF_LITE_ENSURE(context, seq_lengths[i] <= SizeOfDimension(input, seq_dim)); } TfLiteTensor* output = GetOutput(context, node, kOutputTensor); reference_ops::ReverseSequence<T, TS>( seq_lengths, seq_dim, batch_dim, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); return kTfLiteOk; }
0
CVE-2020-15211
2,336
benign
CWE-125
TfLiteStatus ReverseSequenceImpl(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* seq_lengths_tensor; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSeqLengthsTensor, &seq_lengths_tensor)); const TS* seq_lengths = GetTensorData<TS>(seq_lengths_tensor); auto* params = reinterpret_cast<TfLiteReverseSequenceParams*>(node->builtin_data); int seq_dim = params->seq_dim; int batch_dim = params->batch_dim; TF_LITE_ENSURE(context, seq_dim >= 0); TF_LITE_ENSURE(context, batch_dim >= 0); TF_LITE_ENSURE(context, seq_dim != batch_dim); TF_LITE_ENSURE(context, seq_dim < NumDimensions(input)); TF_LITE_ENSURE(context, batch_dim < NumDimensions(input)); TF_LITE_ENSURE_EQ(context, SizeOfDimension(seq_lengths_tensor, 0), SizeOfDimension(input, batch_dim)); for (int i = 0; i < NumDimensions(seq_lengths_tensor); ++i) { TF_LITE_ENSURE(context, seq_lengths[i] <= SizeOfDimension(input, seq_dim)); } TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); reference_ops::ReverseSequence<T, TS>( seq_lengths, seq_dim, batch_dim, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); return kTfLiteOk; }
1
CVE-2020-15211
2,336
vulnerable
CWE-20
error_t ssiProcessIncludeCommand(HttpConnection *connection, const char_t *tag, size_t length, const char_t *uri, uint_t level) { error_t error; char_t *separator; char_t *attribute; char_t *value; char_t *path; char_t *p; //Discard invalid SSI directives if(length < 7 || length >= HTTP_SERVER_BUFFER_SIZE) return ERROR_INVALID_TAG; //Skip the SSI include command (7 bytes) osMemcpy(connection->buffer, tag + 7, length - 7); //Ensure the resulting string is NULL-terminated connection->buffer[length - 7] = '\0'; //Check whether a separator is present separator = strchr(connection->buffer, '='); //Separator not found? if(!separator) return ERROR_INVALID_TAG; //Split the tag *separator = '\0'; //Get attribute name and value attribute = strTrimWhitespace(connection->buffer); value = strTrimWhitespace(separator + 1); //Remove leading simple or double quote if(value[0] == '\'' || value[0] == '\"') value++; //Get the length of the attribute value length = osStrlen(value); //Remove trailing simple or double quote if(length > 0) { if(value[length - 1] == '\'' || value[length - 1] == '\"') value[length - 1] = '\0'; } //Check the length of the filename if(osStrlen(value) > HTTP_SERVER_URI_MAX_LEN) return ERROR_INVALID_TAG; //The file parameter defines the included file as relative to the document path if(!osStrcasecmp(attribute, "file")) { //Allocate a buffer to hold the path to the file to be included path = osAllocMem(osStrlen(uri) + osStrlen(value) + 1); //Failed to allocate memory? if(path == NULL) return ERROR_OUT_OF_MEMORY; //Copy the path identifying the script file being processed osStrcpy(path, uri); //Search for the last slash character p = strrchr(path, '/'); //Remove the filename from the path if applicable if(p) osStrcpy(p + 1, value); else osStrcpy(path, value); } //The virtual parameter defines the included file as relative to the document root else if(!osStrcasecmp(attribute, "virtual")) { //Copy the absolute path path = strDuplicate(value); //Failed to duplicate the string? if(path == NULL) return ERROR_OUT_OF_MEMORY; } //Unknown parameter... else { //Report an error return ERROR_INVALID_TAG; } //Use server-side scripting to dynamically generate HTML code? if(httpCompExtension(value, ".stm") || httpCompExtension(value, ".shtm") || httpCompExtension(value, ".shtml")) { //SSI processing (Server Side Includes) error = ssiExecuteScript(connection, path, level + 1); } else { #if (HTTP_SERVER_FS_SUPPORT == ENABLED) FsFile *file; //Retrieve the full pathname httpGetAbsolutePath(connection, path, connection->buffer, HTTP_SERVER_BUFFER_SIZE); //Open the file for reading file = fsOpenFile(connection->buffer, FS_FILE_MODE_READ); //Successful operation? if(file) { //Send the contents of the requested file while(1) { //Read data from the specified file error = fsReadFile(file, connection->buffer, HTTP_SERVER_BUFFER_SIZE, &length); //End of input stream? if(error) break; //Send data to the client error = httpWriteStream(connection, connection->buffer, length); //Any error to report? if(error) break; } //Close the file fsCloseFile(file); //Successful file transfer? if(error == ERROR_END_OF_FILE) error = NO_ERROR; } else { //The specified URI cannot be found error = ERROR_NOT_FOUND; } #else const uint8_t *data; //Retrieve the full pathname httpGetAbsolutePath(connection, path, connection->buffer, HTTP_SERVER_BUFFER_SIZE); //Get the resource data associated with the file error = resGetData(connection->buffer, &data, &length); //Send the contents of the requested file if(!error) error = httpWriteStream(connection, data, length); #endif } //Cannot found the specified resource? if(error == ERROR_NOT_FOUND) error = ERROR_INVALID_TAG; //Release previously allocated memory osFreeMem(path); //return status code return error; }
0
CVE-2021-26788
1,602
benign
CWE-20
error_t ssiProcessIncludeCommand(HttpConnection *connection, const char_t *tag, size_t length, const char_t *uri, uint_t level) { error_t error; char_t *separator; char_t *attribute; char_t *value; char_t *path; char_t *p; //Discard invalid SSI directives if(length < 7 || length >= HTTP_SERVER_BUFFER_SIZE) return ERROR_INVALID_TAG; //Skip the SSI include command (7 bytes) osMemcpy(connection->buffer, tag + 7, length - 7); //Ensure the resulting string is NULL-terminated connection->buffer[length - 7] = '\0'; //Check whether a separator is present separator = osStrchr(connection->buffer, '='); //Separator not found? if(!separator) return ERROR_INVALID_TAG; //Split the tag *separator = '\0'; //Get attribute name and value attribute = strTrimWhitespace(connection->buffer); value = strTrimWhitespace(separator + 1); //Remove leading simple or double quote if(value[0] == '\'' || value[0] == '\"') value++; //Get the length of the attribute value length = osStrlen(value); //Remove trailing simple or double quote if(length > 0) { if(value[length - 1] == '\'' || value[length - 1] == '\"') value[length - 1] = '\0'; } //Check the length of the filename if(osStrlen(value) > HTTP_SERVER_URI_MAX_LEN) return ERROR_INVALID_TAG; //The file parameter defines the included file as relative to the document path if(!osStrcasecmp(attribute, "file")) { //Allocate a buffer to hold the path to the file to be included path = osAllocMem(osStrlen(uri) + osStrlen(value) + 1); //Failed to allocate memory? if(path == NULL) return ERROR_OUT_OF_MEMORY; //Copy the path identifying the script file being processed osStrcpy(path, uri); //Search for the last slash character p = strrchr(path, '/'); //Remove the filename from the path if applicable if(p) osStrcpy(p + 1, value); else osStrcpy(path, value); } //The virtual parameter defines the included file as relative to the document root else if(!osStrcasecmp(attribute, "virtual")) { //Copy the absolute path path = strDuplicate(value); //Failed to duplicate the string? if(path == NULL) return ERROR_OUT_OF_MEMORY; } //Unknown parameter... else { //Report an error return ERROR_INVALID_TAG; } //Use server-side scripting to dynamically generate HTML code? if(httpCompExtension(value, ".stm") || httpCompExtension(value, ".shtm") || httpCompExtension(value, ".shtml")) { //SSI processing (Server Side Includes) error = ssiExecuteScript(connection, path, level + 1); } else { #if (HTTP_SERVER_FS_SUPPORT == ENABLED) FsFile *file; //Retrieve the full pathname httpGetAbsolutePath(connection, path, connection->buffer, HTTP_SERVER_BUFFER_SIZE); //Open the file for reading file = fsOpenFile(connection->buffer, FS_FILE_MODE_READ); //Successful operation? if(file) { //Send the contents of the requested file while(1) { //Read data from the specified file error = fsReadFile(file, connection->buffer, HTTP_SERVER_BUFFER_SIZE, &length); //End of input stream? if(error) break; //Send data to the client error = httpWriteStream(connection, connection->buffer, length); //Any error to report? if(error) break; } //Close the file fsCloseFile(file); //Successful file transfer? if(error == ERROR_END_OF_FILE) error = NO_ERROR; } else { //The specified URI cannot be found error = ERROR_NOT_FOUND; } #else const uint8_t *data; //Retrieve the full pathname httpGetAbsolutePath(connection, path, connection->buffer, HTTP_SERVER_BUFFER_SIZE); //Get the resource data associated with the file error = resGetData(connection->buffer, &data, &length); //Send the contents of the requested file if(!error) error = httpWriteStream(connection, data, length); #endif } //Cannot found the specified resource? if(error == ERROR_NOT_FOUND) error = ERROR_INVALID_TAG; //Release previously allocated memory osFreeMem(path); //return status code return error; }
1
CVE-2021-26788
1,602
vulnerable
CWE-119
static char *fstrndup(const char *ptr, unsigned long len) { char *result; if (len <= 0) return NULL; result = ALLOC_N(char, len); memccpy(result, ptr, 0, len); return result; }
0
CVE-2017-14064
657
benign
CWE-119
static char *fstrndup(const char *ptr, unsigned long len) { char *result; if (len <= 0) return NULL; result = ALLOC_N(char, len); memcpy(result, ptr, len); return result; }
1
CVE-2017-14064
657
vulnerable
CWE-119
void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) { struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1); struct oz_usb_ctx *usb_ctx; spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB]; if (usb_ctx) oz_usb_get(usb_ctx); spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); if (usb_ctx == NULL) return; /* Context has gone so nothing to do. */ if (usb_ctx->stopped) goto done; /* If sequence number is non-zero then check it is not a duplicate. * Zero sequence numbers are always accepted. */ if (usb_hdr->elt_seq_num != 0) { if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0) /* Reject duplicate element. */ goto done; } usb_ctx->rx_seq_num = usb_hdr->elt_seq_num; switch (usb_hdr->type) { case OZ_GET_DESC_RSP: { struct oz_get_desc_rsp *body = (struct oz_get_desc_rsp *)usb_hdr; int data_len = elt->length - sizeof(struct oz_get_desc_rsp) + 1; u16 offs = le16_to_cpu(get_unaligned(&body->offset)); u16 total_size = le16_to_cpu(get_unaligned(&body->total_size)); oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, data_len, offs, total_size); } break; case OZ_SET_CONFIG_RSP: { struct oz_set_config_rsp *body = (struct oz_set_config_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_SET_INTERFACE_RSP: { struct oz_set_interface_rsp *body = (struct oz_set_interface_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_VENDOR_CLASS_RSP: { struct oz_vendor_class_rsp *body = (struct oz_vendor_class_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, elt->length- sizeof(struct oz_vendor_class_rsp)+1); } break; case OZ_USB_ENDPOINT_DATA: oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length); break; } done: oz_usb_put(usb_ctx); }
0
CVE-2015-4002
1,999
benign
CWE-119
void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) { struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1); struct oz_usb_ctx *usb_ctx; spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB]; if (usb_ctx) oz_usb_get(usb_ctx); spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); if (usb_ctx == NULL) return; /* Context has gone so nothing to do. */ if (usb_ctx->stopped) goto done; /* If sequence number is non-zero then check it is not a duplicate. * Zero sequence numbers are always accepted. */ if (usb_hdr->elt_seq_num != 0) { if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0) /* Reject duplicate element. */ goto done; } usb_ctx->rx_seq_num = usb_hdr->elt_seq_num; switch (usb_hdr->type) { case OZ_GET_DESC_RSP: { struct oz_get_desc_rsp *body = (struct oz_get_desc_rsp *)usb_hdr; u16 offs, total_size; u8 data_len; if (elt->length < sizeof(struct oz_get_desc_rsp) - 1) break; data_len = elt->length - (sizeof(struct oz_get_desc_rsp) - 1); offs = le16_to_cpu(get_unaligned(&body->offset)); total_size = le16_to_cpu(get_unaligned(&body->total_size)); oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, data_len, offs, total_size); } break; case OZ_SET_CONFIG_RSP: { struct oz_set_config_rsp *body = (struct oz_set_config_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_SET_INTERFACE_RSP: { struct oz_set_interface_rsp *body = (struct oz_set_interface_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_VENDOR_CLASS_RSP: { struct oz_vendor_class_rsp *body = (struct oz_vendor_class_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, elt->length- sizeof(struct oz_vendor_class_rsp)+1); } break; case OZ_USB_ENDPOINT_DATA: oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length); break; } done: oz_usb_put(usb_ctx); }
1
CVE-2015-4002
1,999
vulnerable
CWE-125
int ff_mms_asf_header_parser(MMSContext *mms) { uint8_t *p = mms->asf_header; uint8_t *end; int flags, stream_id; mms->stream_num = 0; if (mms->asf_header_size < sizeof(ff_asf_guid) * 2 + 22 || memcmp(p, ff_asf_header, sizeof(ff_asf_guid))) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (invalid ASF header, size=%d)\n", mms->asf_header_size); return AVERROR_INVALIDDATA; } end = mms->asf_header + mms->asf_header_size; p += sizeof(ff_asf_guid) + 14; while(end - p >= sizeof(ff_asf_guid) + 8) { uint64_t chunksize; if (!memcmp(p, ff_asf_data_header, sizeof(ff_asf_guid))) { chunksize = 50; // see Reference [2] section 5.1 } else { chunksize = AV_RL64(p + sizeof(ff_asf_guid)); } if (!chunksize || chunksize > end - p) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (header chunksize %"PRId64" is invalid)\n", chunksize); return AVERROR_INVALIDDATA; } if (!memcmp(p, ff_asf_file_header, sizeof(ff_asf_guid))) { /* read packet size */ if (end - p > sizeof(ff_asf_guid) * 2 + 68) { mms->asf_packet_len = AV_RL32(p + sizeof(ff_asf_guid) * 2 + 64); if (mms->asf_packet_len <= 0 || mms->asf_packet_len > sizeof(mms->in_buffer)) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (too large pkt_len %d)\n", mms->asf_packet_len); return AVERROR_INVALIDDATA; } } } else if (!memcmp(p, ff_asf_stream_header, sizeof(ff_asf_guid))) { flags = AV_RL16(p + sizeof(ff_asf_guid)*3 + 24); stream_id = flags & 0x7F; //The second condition is for checking CS_PKT_STREAM_ID_REQUEST packet size, //we can calculate the packet size by stream_num. //Please see function send_stream_selection_request(). if (mms->stream_num < MMS_MAX_STREAMS && 46 + mms->stream_num * 6 < sizeof(mms->out_buffer)) { mms->streams = av_fast_realloc(mms->streams, &mms->nb_streams_allocated, (mms->stream_num + 1) * sizeof(MMSStream)); if (!mms->streams) return AVERROR(ENOMEM); mms->streams[mms->stream_num].id = stream_id; mms->stream_num++; } else { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (too many A/V streams)\n"); return AVERROR_INVALIDDATA; } } else if (!memcmp(p, ff_asf_ext_stream_header, sizeof(ff_asf_guid))) { if (end - p >= 88) { int stream_count = AV_RL16(p + 84), ext_len_count = AV_RL16(p + 86); uint64_t skip_bytes = 88; while (stream_count--) { if (end - p < skip_bytes + 4) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (next stream name length is not in the buffer)\n"); return AVERROR_INVALIDDATA; } skip_bytes += 4 + AV_RL16(p + skip_bytes + 2); } while (ext_len_count--) { if (end - p < skip_bytes + 22) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (next extension system info length is not in the buffer)\n"); return AVERROR_INVALIDDATA; } skip_bytes += 22 + AV_RL32(p + skip_bytes + 18); } if (end - p < skip_bytes) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (the last extension system info length is invalid)\n"); return AVERROR_INVALIDDATA; } if (chunksize - skip_bytes > 24) chunksize = skip_bytes; } } else if (!memcmp(p, ff_asf_head1_guid, sizeof(ff_asf_guid))) { chunksize = 46; // see references [2] section 3.4. This should be set 46. } p += chunksize; } return 0; }
0
CVE-2018-1999010
81
benign
CWE-125
int ff_mms_asf_header_parser(MMSContext *mms) { uint8_t *p = mms->asf_header; uint8_t *end; int flags, stream_id; mms->stream_num = 0; if (mms->asf_header_size < sizeof(ff_asf_guid) * 2 + 22 || memcmp(p, ff_asf_header, sizeof(ff_asf_guid))) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (invalid ASF header, size=%d)\n", mms->asf_header_size); return AVERROR_INVALIDDATA; } end = mms->asf_header + mms->asf_header_size; p += sizeof(ff_asf_guid) + 14; while(end - p >= sizeof(ff_asf_guid) + 8) { uint64_t chunksize; if (!memcmp(p, ff_asf_data_header, sizeof(ff_asf_guid))) { chunksize = 50; // see Reference [2] section 5.1 } else { chunksize = AV_RL64(p + sizeof(ff_asf_guid)); } if (!chunksize || chunksize > end - p) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (header chunksize %"PRId64" is invalid)\n", chunksize); return AVERROR_INVALIDDATA; } if (!memcmp(p, ff_asf_file_header, sizeof(ff_asf_guid))) { /* read packet size */ if (end - p > sizeof(ff_asf_guid) * 2 + 68) { mms->asf_packet_len = AV_RL32(p + sizeof(ff_asf_guid) * 2 + 64); if (mms->asf_packet_len <= 0 || mms->asf_packet_len > sizeof(mms->in_buffer)) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (too large pkt_len %d)\n", mms->asf_packet_len); return AVERROR_INVALIDDATA; } } } else if (!memcmp(p, ff_asf_stream_header, sizeof(ff_asf_guid))) { if (end - p >= (sizeof(ff_asf_guid) * 3 + 26)) { flags = AV_RL16(p + sizeof(ff_asf_guid)*3 + 24); stream_id = flags & 0x7F; //The second condition is for checking CS_PKT_STREAM_ID_REQUEST packet size, //we can calculate the packet size by stream_num. //Please see function send_stream_selection_request(). if (mms->stream_num < MMS_MAX_STREAMS && 46 + mms->stream_num * 6 < sizeof(mms->out_buffer)) { mms->streams = av_fast_realloc(mms->streams, &mms->nb_streams_allocated, (mms->stream_num + 1) * sizeof(MMSStream)); if (!mms->streams) return AVERROR(ENOMEM); mms->streams[mms->stream_num].id = stream_id; mms->stream_num++; } else { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (too many A/V streams)\n"); return AVERROR_INVALIDDATA; } } } else if (!memcmp(p, ff_asf_ext_stream_header, sizeof(ff_asf_guid))) { if (end - p >= 88) { int stream_count = AV_RL16(p + 84), ext_len_count = AV_RL16(p + 86); uint64_t skip_bytes = 88; while (stream_count--) { if (end - p < skip_bytes + 4) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (next stream name length is not in the buffer)\n"); return AVERROR_INVALIDDATA; } skip_bytes += 4 + AV_RL16(p + skip_bytes + 2); } while (ext_len_count--) { if (end - p < skip_bytes + 22) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (next extension system info length is not in the buffer)\n"); return AVERROR_INVALIDDATA; } skip_bytes += 22 + AV_RL32(p + skip_bytes + 18); } if (end - p < skip_bytes) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (the last extension system info length is invalid)\n"); return AVERROR_INVALIDDATA; } if (chunksize - skip_bytes > 24) chunksize = skip_bytes; } } else if (!memcmp(p, ff_asf_head1_guid, sizeof(ff_asf_guid))) { chunksize = 46; // see references [2] section 3.4. This should be set 46. if (chunksize > end - p) { av_log(NULL, AV_LOG_ERROR, "Corrupt stream (header chunksize %"PRId64" is invalid)\n", chunksize); return AVERROR_INVALIDDATA; } } p += chunksize; } return 0; }
1
CVE-2018-1999010
81
vulnerable
CWE-787
inline void skip(int bytes) { while (bytes > 0) { int n = check(1, bytes); ptr += n; bytes -= n; } }
0
CVE-2019-15694
1,848
benign
CWE-787
inline void skip(size_t bytes) { while (bytes > 0) { size_t n = check(1, bytes); ptr += n; bytes -= n; } }
1
CVE-2019-15694
1,848
vulnerable
CWE-119
static void tokenadd(struct jv_parser* p, char c) { assert(p->tokenpos <= p->tokenlen); if (p->tokenpos == p->tokenlen) { p->tokenlen = p->tokenlen*2 + 256; p->tokenbuf = jv_mem_realloc(p->tokenbuf, p->tokenlen); } assert(p->tokenpos < p->tokenlen); p->tokenbuf[p->tokenpos++] = c; }
0
CVE-2015-8863
1,647
benign
CWE-119
static void tokenadd(struct jv_parser* p, char c) { assert(p->tokenpos <= p->tokenlen); if (p->tokenpos >= (p->tokenlen - 1)) { p->tokenlen = p->tokenlen*2 + 256; p->tokenbuf = jv_mem_realloc(p->tokenbuf, p->tokenlen); } assert(p->tokenpos < p->tokenlen); p->tokenbuf[p->tokenpos++] = c; }
1
CVE-2015-8863
1,647
vulnerable
CWE-119
header_seek (SF_PRIVATE *psf, sf_count_t position, int whence) { switch (whence) { case SEEK_SET : if (position > SIGNED_SIZEOF (psf->header)) { /* Too much header to cache so just seek instead. */ psf_fseek (psf, position, whence) ; return ; } ; if (position > psf->headend) psf->headend += psf_fread (psf->header + psf->headend, 1, position - psf->headend, psf) ; psf->headindex = position ; break ; case SEEK_CUR : if (psf->headindex + position < 0) break ; if (psf->headindex >= SIGNED_SIZEOF (psf->header)) { psf_fseek (psf, position, whence) ; return ; } ; if (psf->headindex + position <= psf->headend) { psf->headindex += position ; break ; } ; if (psf->headindex + position > SIGNED_SIZEOF (psf->header)) { /* Need to jump this without caching it. */ psf->headindex = psf->headend ; psf_fseek (psf, position, SEEK_CUR) ; break ; } ; psf->headend += psf_fread (psf->header + psf->headend, 1, position - (psf->headend - psf->headindex), psf) ; psf->headindex = psf->headend ; break ; case SEEK_END : default : psf_log_printf (psf, "Bad whence param in header_seek().\n") ; break ; } ; return ; } /* header_seek */
0
CVE-2017-7586
172
benign
CWE-119
header_seek (SF_PRIVATE *psf, sf_count_t position, int whence) { switch (whence) { case SEEK_SET : if (psf->header.indx + position >= psf->header.len) psf_bump_header_allocation (psf, position) ; if (position > psf->header.len) { /* Too much header to cache so just seek instead. */ psf_fseek (psf, position, whence) ; return ; } ; if (position > psf->header.end) psf->header.end += psf_fread (psf->header.ptr + psf->header.end, 1, position - psf->header.end, psf) ; psf->header.indx = position ; break ; case SEEK_CUR : if (psf->header.indx + position >= psf->header.len) psf_bump_header_allocation (psf, position) ; if (psf->header.indx + position < 0) break ; if (psf->header.indx >= psf->header.len) { psf_fseek (psf, position, whence) ; return ; } ; if (psf->header.indx + position <= psf->header.end) { psf->header.indx += position ; break ; } ; if (psf->header.indx + position > psf->header.len) { /* Need to jump this without caching it. */ psf->header.indx = psf->header.end ; psf_fseek (psf, position, SEEK_CUR) ; break ; } ; psf->header.end += psf_fread (psf->header.ptr + psf->header.end, 1, position - (psf->header.end - psf->header.indx), psf) ; psf->header.indx = psf->header.end ; break ; case SEEK_END : default : psf_log_printf (psf, "Bad whence param in header_seek().\n") ; break ; } ; return ; } /* header_seek */
1
CVE-2017-7586
172
vulnerable
CWE-190
MONGO_EXPORT bson_bool_t mongo_cmd_authenticate( mongo *conn, const char *db, const char *user, const char *pass ) { bson from_db; bson cmd; const char *nonce; int result; mongo_md5_state_t st; mongo_md5_byte_t digest[16]; char hex_digest[33]; if( mongo_simple_int_command( conn, db, "getnonce", 1, &from_db ) == MONGO_OK ) { bson_iterator it; bson_find( &it, &from_db, "nonce" ); nonce = bson_iterator_string( &it ); } else { return MONGO_ERROR; } mongo_pass_digest( user, pass, hex_digest ); mongo_md5_init( &st ); mongo_md5_append( &st, ( const mongo_md5_byte_t * )nonce, strlen( nonce ) ); mongo_md5_append( &st, ( const mongo_md5_byte_t * )user, strlen( user ) ); mongo_md5_append( &st, ( const mongo_md5_byte_t * )hex_digest, 32 ); mongo_md5_finish( &st, digest ); digest2hex( digest, hex_digest ); bson_init( &cmd ); bson_append_int( &cmd, "authenticate", 1 ); bson_append_string( &cmd, "user", user ); bson_append_string( &cmd, "nonce", nonce ); bson_append_string( &cmd, "key", hex_digest ); bson_finish( &cmd ); bson_destroy( &from_db ); result = mongo_run_command( conn, db, &cmd, NULL ); bson_destroy( &cmd ); return result; }
0
CVE-2020-12135
62
benign
CWE-190
MONGO_EXPORT bson_bool_t mongo_cmd_authenticate( mongo *conn, const char *db, const char *user, const char *pass ) { bson from_db; bson cmd; const char *nonce; int result; mongo_md5_state_t st; mongo_md5_byte_t digest[16]; char hex_digest[33]; if( mongo_simple_int_command( conn, db, "getnonce", 1, &from_db ) == MONGO_OK ) { bson_iterator it; bson_find( &it, &from_db, "nonce" ); nonce = bson_iterator_string( &it ); } else { return MONGO_ERROR; } mongo_pass_digest( user, pass, hex_digest ); mongo_md5_init( &st ); mongo_md5_append( &st, ( const mongo_md5_byte_t * )nonce, ( int )strlen( nonce ) ); mongo_md5_append( &st, ( const mongo_md5_byte_t * )user, ( int )strlen( user ) ); mongo_md5_append( &st, ( const mongo_md5_byte_t * )hex_digest, 32 ); mongo_md5_finish( &st, digest ); digest2hex( digest, hex_digest ); bson_init( &cmd ); bson_append_int( &cmd, "authenticate", 1 ); bson_append_string( &cmd, "user", user ); bson_append_string( &cmd, "nonce", nonce ); bson_append_string( &cmd, "key", hex_digest ); bson_finish( &cmd ); bson_destroy( &from_db ); result = mongo_run_command( conn, db, &cmd, NULL ); bson_destroy( &cmd ); return result; }
1
CVE-2020-12135
62
vulnerable
CWE-125
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSequenceRNNParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor); const TfLiteTensor* recurrent_weights = GetInput(context, node, kRecurrentWeightsTensor); const TfLiteTensor* bias = GetInput(context, node, kBiasTensor); // The hidden_state is a variable input tensor that can be modified. TfLiteTensor* hidden_state = const_cast<TfLiteTensor*>(GetInput(context, node, kHiddenStateTensor)); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (input_weights->type) { case kTfLiteFloat32: return EvalFloat(input, input_weights, recurrent_weights, bias, params, hidden_state, output); case kTfLiteUInt8: case kTfLiteInt8: { // TODO(mirkov): implement eval with quantized inputs as well. auto* op_data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* input_quantized = GetTemporary(context, node, 0); TfLiteTensor* hidden_state_quantized = GetTemporary(context, node, 1); TfLiteTensor* scaling_factors = GetTemporary(context, node, 2); TfLiteTensor* accum_scratch = GetTemporary(context, node, 3); TfLiteTensor* zero_points = GetTemporary(context, node, 4); TfLiteTensor* row_sums = GetTemporary(context, node, 5); return EvalHybrid(input, input_weights, recurrent_weights, bias, params, input_quantized, hidden_state_quantized, scaling_factors, hidden_state, output, zero_points, accum_scratch, row_sums, &op_data->compute_row_sums); } default: TF_LITE_KERNEL_LOG(context, "Type %d not currently supported.", TfLiteTypeGetName(input_weights->type)); return kTfLiteError; } return kTfLiteOk; }
0
CVE-2020-15211
2,923
benign
CWE-125
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSequenceRNNParams*>(node->builtin_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* input_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, kWeightsTensor, &input_weights)); const TfLiteTensor* recurrent_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, kRecurrentWeightsTensor, &recurrent_weights)); const TfLiteTensor* bias; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBiasTensor, &bias)); // The hidden_state is a variable input tensor that can be modified. TfLiteTensor* hidden_state = GetVariableInput(context, node, kHiddenStateTensor); TF_LITE_ENSURE(context, hidden_state != nullptr); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); switch (input_weights->type) { case kTfLiteFloat32: return EvalFloat(input, input_weights, recurrent_weights, bias, params, hidden_state, output); case kTfLiteUInt8: case kTfLiteInt8: { // TODO(mirkov): implement eval with quantized inputs as well. auto* op_data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* input_quantized; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 0, &input_quantized)); TfLiteTensor* hidden_state_quantized; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, 1, &hidden_state_quantized)); TfLiteTensor* scaling_factors; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2, &scaling_factors)); TfLiteTensor* accum_scratch; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3, &accum_scratch)); TfLiteTensor* zero_points; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 4, &zero_points)); TfLiteTensor* row_sums; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 5, &row_sums)); return EvalHybrid(input, input_weights, recurrent_weights, bias, params, input_quantized, hidden_state_quantized, scaling_factors, hidden_state, output, zero_points, accum_scratch, row_sums, &op_data->compute_row_sums); } default: TF_LITE_KERNEL_LOG(context, "Type %d not currently supported.", TfLiteTypeGetName(input_weights->type)); return kTfLiteError; } return kTfLiteOk; }
1
CVE-2020-15211
2,923
vulnerable
CWE-119
header_put_be_short (SF_PRIVATE *psf, int x) { if (psf->headindex < SIGNED_SIZEOF (psf->header) - 2) { psf->header [psf->headindex++] = (x >> 8) ; psf->header [psf->headindex++] = x ; } ; } /* header_put_be_short */
0
CVE-2017-7586
164
benign
CWE-119
header_put_be_short (SF_PRIVATE *psf, int x) { psf->header.ptr [psf->header.indx++] = (x >> 8) ; psf->header.ptr [psf->header.indx++] = x ; } /* header_put_be_short */
1
CVE-2017-7586
164
vulnerable
CWE-362
static int packet_do_bind(struct sock *sk, const char *name, int ifindex, __be16 proto) { struct packet_sock *po = pkt_sk(sk); struct net_device *dev_curr; __be16 proto_curr; bool need_rehook; struct net_device *dev = NULL; int ret = 0; bool unlisted = false; if (po->fanout) return -EINVAL; lock_sock(sk); spin_lock(&po->bind_lock); rcu_read_lock(); if (name) { dev = dev_get_by_name_rcu(sock_net(sk), name); if (!dev) { ret = -ENODEV; goto out_unlock; } } else if (ifindex) { dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (!dev) { ret = -ENODEV; goto out_unlock; } } if (dev) dev_hold(dev); proto_curr = po->prot_hook.type; dev_curr = po->prot_hook.dev; need_rehook = proto_curr != proto || dev_curr != dev; if (need_rehook) { if (po->running) { rcu_read_unlock(); __unregister_prot_hook(sk, true); rcu_read_lock(); dev_curr = po->prot_hook.dev; if (dev) unlisted = !dev_get_by_index_rcu(sock_net(sk), dev->ifindex); } po->num = proto; po->prot_hook.type = proto; if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; po->ifindex = -1; packet_cached_dev_reset(po); } else { po->prot_hook.dev = dev; po->ifindex = dev ? dev->ifindex : 0; packet_cached_dev_assign(po, dev); } } if (dev_curr) dev_put(dev_curr); if (proto == 0 || !need_rehook) goto out_unlock; if (!unlisted && (!dev || (dev->flags & IFF_UP))) { register_prot_hook(sk); } else { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } out_unlock: rcu_read_unlock(); spin_unlock(&po->bind_lock); release_sock(sk); return ret; }
0
CVE-2017-15649
3,090
benign
CWE-362
static int packet_do_bind(struct sock *sk, const char *name, int ifindex, __be16 proto) { struct packet_sock *po = pkt_sk(sk); struct net_device *dev_curr; __be16 proto_curr; bool need_rehook; struct net_device *dev = NULL; int ret = 0; bool unlisted = false; lock_sock(sk); spin_lock(&po->bind_lock); rcu_read_lock(); if (po->fanout) { ret = -EINVAL; goto out_unlock; } if (name) { dev = dev_get_by_name_rcu(sock_net(sk), name); if (!dev) { ret = -ENODEV; goto out_unlock; } } else if (ifindex) { dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (!dev) { ret = -ENODEV; goto out_unlock; } } if (dev) dev_hold(dev); proto_curr = po->prot_hook.type; dev_curr = po->prot_hook.dev; need_rehook = proto_curr != proto || dev_curr != dev; if (need_rehook) { if (po->running) { rcu_read_unlock(); __unregister_prot_hook(sk, true); rcu_read_lock(); dev_curr = po->prot_hook.dev; if (dev) unlisted = !dev_get_by_index_rcu(sock_net(sk), dev->ifindex); } po->num = proto; po->prot_hook.type = proto; if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; po->ifindex = -1; packet_cached_dev_reset(po); } else { po->prot_hook.dev = dev; po->ifindex = dev ? dev->ifindex : 0; packet_cached_dev_assign(po, dev); } } if (dev_curr) dev_put(dev_curr); if (proto == 0 || !need_rehook) goto out_unlock; if (!unlisted && (!dev || (dev->flags & IFF_UP))) { register_prot_hook(sk); } else { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } out_unlock: rcu_read_unlock(); spin_unlock(&po->bind_lock); release_sock(sk); return ret; }
1
CVE-2017-15649
3,090
vulnerable
CWE-190
unserialize_uep(bufinfo_T *bi, int *error, char_u *file_name) { int i; u_entry_T *uep; char_u **array; char_u *line; int line_len; uep = (u_entry_T *)U_ALLOC_LINE(sizeof(u_entry_T)); if (uep == NULL) return NULL; vim_memset(uep, 0, sizeof(u_entry_T)); #ifdef U_DEBUG uep->ue_magic = UE_MAGIC; #endif uep->ue_top = undo_read_4c(bi); uep->ue_bot = undo_read_4c(bi); uep->ue_lcount = undo_read_4c(bi); uep->ue_size = undo_read_4c(bi); if (uep->ue_size > 0) { array = (char_u **)U_ALLOC_LINE(sizeof(char_u *) * uep->ue_size); if (array == NULL) { *error = TRUE; return uep; } vim_memset(array, 0, sizeof(char_u *) * uep->ue_size); } else array = NULL; uep->ue_array = array; for (i = 0; i < uep->ue_size; ++i) { line_len = undo_read_4c(bi); if (line_len >= 0) line = read_string_decrypt(bi, line_len); else { line = NULL; corruption_error("line length", file_name); } if (line == NULL) { *error = TRUE; return uep; } array[i] = line; } return uep; }
0
CVE-2017-6350
3,064
benign
CWE-190
unserialize_uep(bufinfo_T *bi, int *error, char_u *file_name) { int i; u_entry_T *uep; char_u **array = NULL; char_u *line; int line_len; uep = (u_entry_T *)U_ALLOC_LINE(sizeof(u_entry_T)); if (uep == NULL) return NULL; vim_memset(uep, 0, sizeof(u_entry_T)); #ifdef U_DEBUG uep->ue_magic = UE_MAGIC; #endif uep->ue_top = undo_read_4c(bi); uep->ue_bot = undo_read_4c(bi); uep->ue_lcount = undo_read_4c(bi); uep->ue_size = undo_read_4c(bi); if (uep->ue_size > 0) { if (uep->ue_size < LONG_MAX / (int)sizeof(char_u *)) array = (char_u **)U_ALLOC_LINE(sizeof(char_u *) * uep->ue_size); if (array == NULL) { *error = TRUE; return uep; } vim_memset(array, 0, sizeof(char_u *) * uep->ue_size); } uep->ue_array = array; for (i = 0; i < uep->ue_size; ++i) { line_len = undo_read_4c(bi); if (line_len >= 0) line = read_string_decrypt(bi, line_len); else { line = NULL; corruption_error("line length", file_name); } if (line == NULL) { *error = TRUE; return uep; } array[i] = line; } return uep; }
1
CVE-2017-6350
3,064
vulnerable
CWE-787
hb_set_set (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->set (*other); }
0
CVE-2021-45931
3,196
benign
CWE-787
hb_set_set (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->set (*other); }
1
CVE-2021-45931
3,196
vulnerable
CWE-190
sg_start_req(Sg_request *srp, unsigned char *cmd) { int res; struct request *rq; Sg_fd *sfp = srp->parentfp; sg_io_hdr_t *hp = &srp->header; int dxfer_len = (int) hp->dxfer_len; int dxfer_dir = hp->dxfer_direction; unsigned int iov_count = hp->iovec_count; Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; struct request_queue *q = sfp->parentdp->device->request_queue; struct rq_map_data *md, map_data; int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ; unsigned char *long_cmdp = NULL; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_start_req: dxfer_len=%d\n", dxfer_len)); if (hp->cmd_len > BLK_MAX_CDB) { long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL); if (!long_cmdp) return -ENOMEM; } /* * NOTE * * With scsi-mq enabled, there are a fixed number of preallocated * requests equal in number to shost->can_queue. If all of the * preallocated requests are already in use, then using GFP_ATOMIC with * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL * will cause blk_get_request() to sleep until an active command * completes, freeing up a request. Neither option is ideal, but * GFP_KERNEL is the better choice to prevent userspace from getting an * unexpected EWOULDBLOCK. * * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually * does not sleep except under memory pressure. */ rq = blk_get_request(q, rw, GFP_KERNEL); if (IS_ERR(rq)) { kfree(long_cmdp); return PTR_ERR(rq); } blk_rq_set_block_pc(rq); if (hp->cmd_len > BLK_MAX_CDB) rq->cmd = long_cmdp; memcpy(rq->cmd, cmd, hp->cmd_len); rq->cmd_len = hp->cmd_len; srp->rq = rq; rq->end_io_data = srp; rq->sense = srp->sense_b; rq->retries = SG_DEFAULT_RETRIES; if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) return 0; if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && !sfp->parentdp->device->host->unchecked_isa_dma && blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) md = NULL; else md = &map_data; if (md) { if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) sg_link_reserve(sfp, srp, dxfer_len); else { res = sg_build_indirect(req_schp, sfp, dxfer_len); if (res) return res; } md->pages = req_schp->pages; md->page_order = req_schp->page_order; md->nr_entries = req_schp->k_use_sg; md->offset = 0; md->null_mapped = hp->dxferp ? 0 : 1; if (dxfer_dir == SG_DXFER_TO_FROM_DEV) md->from_user = 1; else md->from_user = 0; } if (unlikely(iov_count > MAX_UIOVEC)) return -EINVAL; if (iov_count) { int size = sizeof(struct iovec) * iov_count; struct iovec *iov; struct iov_iter i; iov = memdup_user(hp->dxferp, size); if (IS_ERR(iov)) return PTR_ERR(iov); iov_iter_init(&i, rw, iov, iov_count, min_t(size_t, hp->dxfer_len, iov_length(iov, iov_count))); res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC); kfree(iov); } else res = blk_rq_map_user(q, rq, md, hp->dxferp, hp->dxfer_len, GFP_ATOMIC); if (!res) { srp->bio = rq->bio; if (!md) { req_schp->dio_in_use = 1; hp->info |= SG_INFO_DIRECT_IO; } } return res; }
0
CVE-2015-5707
2,821
benign
CWE-190
sg_start_req(Sg_request *srp, unsigned char *cmd) { int res; struct request *rq; Sg_fd *sfp = srp->parentfp; sg_io_hdr_t *hp = &srp->header; int dxfer_len = (int) hp->dxfer_len; int dxfer_dir = hp->dxfer_direction; unsigned int iov_count = hp->iovec_count; Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; struct request_queue *q = sfp->parentdp->device->request_queue; struct rq_map_data *md, map_data; int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ; unsigned char *long_cmdp = NULL; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_start_req: dxfer_len=%d\n", dxfer_len)); if (hp->cmd_len > BLK_MAX_CDB) { long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL); if (!long_cmdp) return -ENOMEM; } /* * NOTE * * With scsi-mq enabled, there are a fixed number of preallocated * requests equal in number to shost->can_queue. If all of the * preallocated requests are already in use, then using GFP_ATOMIC with * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL * will cause blk_get_request() to sleep until an active command * completes, freeing up a request. Neither option is ideal, but * GFP_KERNEL is the better choice to prevent userspace from getting an * unexpected EWOULDBLOCK. * * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually * does not sleep except under memory pressure. */ rq = blk_get_request(q, rw, GFP_KERNEL); if (IS_ERR(rq)) { kfree(long_cmdp); return PTR_ERR(rq); } blk_rq_set_block_pc(rq); if (hp->cmd_len > BLK_MAX_CDB) rq->cmd = long_cmdp; memcpy(rq->cmd, cmd, hp->cmd_len); rq->cmd_len = hp->cmd_len; srp->rq = rq; rq->end_io_data = srp; rq->sense = srp->sense_b; rq->retries = SG_DEFAULT_RETRIES; if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) return 0; if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && !sfp->parentdp->device->host->unchecked_isa_dma && blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) md = NULL; else md = &map_data; if (md) { if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) sg_link_reserve(sfp, srp, dxfer_len); else { res = sg_build_indirect(req_schp, sfp, dxfer_len); if (res) return res; } md->pages = req_schp->pages; md->page_order = req_schp->page_order; md->nr_entries = req_schp->k_use_sg; md->offset = 0; md->null_mapped = hp->dxferp ? 0 : 1; if (dxfer_dir == SG_DXFER_TO_FROM_DEV) md->from_user = 1; else md->from_user = 0; } if (iov_count) { struct iovec *iov = NULL; struct iov_iter i; res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i); if (res < 0) return res; iov_iter_truncate(&i, hp->dxfer_len); res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC); kfree(iov); } else res = blk_rq_map_user(q, rq, md, hp->dxferp, hp->dxfer_len, GFP_ATOMIC); if (!res) { srp->bio = rq->bio; if (!md) { req_schp->dio_in_use = 1; hp->info |= SG_INFO_DIRECT_IO; } } return res; }
1
CVE-2015-5707
2,821
vulnerable
CWE-362
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); __be16 orig_sport, orig_dport; __be32 daddr, nexthop; struct flowi4 fl4; struct rtable *rt; int err; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; if (inet->opt && inet->opt->srr) { if (!daddr) return -EINVAL; nexthop = inet->opt->faddr; } orig_sport = inet->inet_sport; orig_dport = usin->sin_port; rt = ip_route_connect(&fl4, nexthop, inet->inet_saddr, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport, orig_dport, sk, true); if (IS_ERR(rt)) { err = PTR_ERR(rt); if (err == -ENETUNREACH) IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); return err; } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (!inet->opt || !inet->opt->srr) daddr = rt->rt_dst; if (!inet->inet_saddr) inet->inet_saddr = rt->rt_src; inet->inet_rcv_saddr = inet->inet_saddr; if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { /* Reset inherited state */ tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; tp->write_seq = 0; } if (tcp_death_row.sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { struct inet_peer *peer = rt_get_peer(rt); /* * VJ's idea. We save last timestamp seen from * the destination in peer table, when entering state * TIME-WAIT * and initialize rx_opt.ts_recent from it, * when trying new connection. */ if (peer) { inet_peer_refcheck(peer); if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; tp->rx_opt.ts_recent = peer->tcp_ts; } } } inet->inet_dport = usin->sin_port; inet->inet_daddr = daddr; inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet->opt) inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; /* Socket identity is still unknown (sport may be zero). * However we set state to SYN-SENT and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. */ tcp_set_state(sk, TCP_SYN_SENT); err = inet_hash_connect(&tcp_death_row, sk); if (err) goto failure; rt = ip_route_newports(&fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto failure; } /* OK, now commit destination to socket. */ sk->sk_gso_type = SKB_GSO_TCPV4; sk_setup_caps(sk, &rt->dst); if (!tp->write_seq) tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, usin->sin_port); inet->inet_id = tp->write_seq ^ jiffies; err = tcp_connect(sk); rt = NULL; if (err) goto failure; return 0; failure: /* * This unhashes the socket and releases the local port, * if necessary. */ tcp_set_state(sk, TCP_CLOSE); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; return err; }
0
CVE-2012-3552
963
benign
CWE-362
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); __be16 orig_sport, orig_dport; __be32 daddr, nexthop; struct flowi4 fl4; struct rtable *rt; int err; struct ip_options_rcu *inet_opt; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; inet_opt = rcu_dereference_protected(inet->inet_opt, sock_owned_by_user(sk)); if (inet_opt && inet_opt->opt.srr) { if (!daddr) return -EINVAL; nexthop = inet_opt->opt.faddr; } orig_sport = inet->inet_sport; orig_dport = usin->sin_port; rt = ip_route_connect(&fl4, nexthop, inet->inet_saddr, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport, orig_dport, sk, true); if (IS_ERR(rt)) { err = PTR_ERR(rt); if (err == -ENETUNREACH) IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); return err; } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (!inet_opt || !inet_opt->opt.srr) daddr = rt->rt_dst; if (!inet->inet_saddr) inet->inet_saddr = rt->rt_src; inet->inet_rcv_saddr = inet->inet_saddr; if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { /* Reset inherited state */ tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; tp->write_seq = 0; } if (tcp_death_row.sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { struct inet_peer *peer = rt_get_peer(rt); /* * VJ's idea. We save last timestamp seen from * the destination in peer table, when entering state * TIME-WAIT * and initialize rx_opt.ts_recent from it, * when trying new connection. */ if (peer) { inet_peer_refcheck(peer); if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; tp->rx_opt.ts_recent = peer->tcp_ts; } } } inet->inet_dport = usin->sin_port; inet->inet_daddr = daddr; inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; /* Socket identity is still unknown (sport may be zero). * However we set state to SYN-SENT and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. */ tcp_set_state(sk, TCP_SYN_SENT); err = inet_hash_connect(&tcp_death_row, sk); if (err) goto failure; rt = ip_route_newports(&fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto failure; } /* OK, now commit destination to socket. */ sk->sk_gso_type = SKB_GSO_TCPV4; sk_setup_caps(sk, &rt->dst); if (!tp->write_seq) tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, usin->sin_port); inet->inet_id = tp->write_seq ^ jiffies; err = tcp_connect(sk); rt = NULL; if (err) goto failure; return 0; failure: /* * This unhashes the socket and releases the local port, * if necessary. */ tcp_set_state(sk, TCP_CLOSE); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; return err; }
1
CVE-2012-3552
963
vulnerable
CWE-787
std::string& attrf(int ncid, int varId, const char * attrName, std::string& alloc) { alloc = ""; size_t len = 0; nc_inq_attlen(ncid, varId, attrName, &len); if(len < 1) { return alloc; } char attr_vals[NC_MAX_NAME + 1]; memset(attr_vals, 0, NC_MAX_NAME + 1); // Now look through this variable for the attribute if(nc_get_att_text(ncid, varId, attrName, attr_vals) != NC_NOERR) { return alloc; } alloc = std::string(attr_vals); return alloc; }
0
CVE-2019-25050
651
benign
CWE-787
std::string& attrf(int ncid, int varId, const char * attrName, std::string& alloc) { size_t len = 0; nc_inq_attlen(ncid, varId, attrName, &len); if(len < 1) { alloc.clear(); return alloc; } alloc.resize(len); memset(&alloc[0], 0, len); // Now look through this variable for the attribute nc_get_att_text(ncid, varId, attrName, &alloc[0]); return alloc; }
1
CVE-2019-25050
651
vulnerable
CWE-476
CopyKeyAliasesToKeymap(struct xkb_keymap *keymap, KeyNamesInfo *info) { AliasInfo *alias; unsigned i, num_key_aliases; struct xkb_key_alias *key_aliases; /* * Do some sanity checking on the aliases. We can't do it before * because keys and their aliases may be added out-of-order. */ num_key_aliases = 0; darray_foreach(alias, info->aliases) { /* Check that ->real is a key. */ if (!XkbKeyByName(keymap, alias->real, false)) { log_vrb(info->ctx, 5, "Attempt to alias %s to non-existent key %s; Ignored\n", KeyNameText(info->ctx, alias->alias), KeyNameText(info->ctx, alias->real)); alias->real = XKB_ATOM_NONE; continue; } /* Check that ->alias is not a key. */ if (XkbKeyByName(keymap, alias->alias, false)) { log_vrb(info->ctx, 5, "Attempt to create alias with the name of a real key; " "Alias \"%s = %s\" ignored\n", KeyNameText(info->ctx, alias->alias), KeyNameText(info->ctx, alias->real)); alias->real = XKB_ATOM_NONE; continue; } num_key_aliases++; } /* Copy key aliases. */ key_aliases = NULL; if (num_key_aliases > 0) { key_aliases = calloc(num_key_aliases, sizeof(*key_aliases)); if (!key_aliases) return false; } i = 0; darray_foreach(alias, info->aliases) { if (alias->real != XKB_ATOM_NONE) { key_aliases[i].alias = alias->alias; key_aliases[i].real = alias->real; i++; } } keymap->num_key_aliases = num_key_aliases; keymap->key_aliases = key_aliases; return true; }
0
CVE-2018-15858
831
benign
CWE-476
CopyKeyAliasesToKeymap(struct xkb_keymap *keymap, KeyNamesInfo *info) { AliasInfo *alias; unsigned i, num_key_aliases; struct xkb_key_alias *key_aliases; /* * Do some sanity checking on the aliases. We can't do it before * because keys and their aliases may be added out-of-order. */ num_key_aliases = 0; darray_foreach(alias, info->aliases) { /* Check that ->real is a key. */ if (!XkbKeyByName(keymap, alias->real, false)) { log_vrb(info->ctx, 5, "Attempt to alias %s to non-existent key %s; Ignored\n", KeyNameText(info->ctx, alias->alias), KeyNameText(info->ctx, alias->real)); alias->real = XKB_ATOM_NONE; continue; } /* Check that ->alias is not a key. */ if (XkbKeyByName(keymap, alias->alias, false)) { log_vrb(info->ctx, 5, "Attempt to create alias with the name of a real key; " "Alias \"%s = %s\" ignored\n", KeyNameText(info->ctx, alias->alias), KeyNameText(info->ctx, alias->real)); alias->real = XKB_ATOM_NONE; continue; } num_key_aliases++; } /* Copy key aliases. */ key_aliases = NULL; if (num_key_aliases > 0) { key_aliases = calloc(num_key_aliases, sizeof(*key_aliases)); if (!key_aliases) return false; i = 0; darray_foreach(alias, info->aliases) { if (alias->real != XKB_ATOM_NONE) { key_aliases[i].alias = alias->alias; key_aliases[i].real = alias->real; i++; } } } keymap->num_key_aliases = num_key_aliases; keymap->key_aliases = key_aliases; return true; }
1
CVE-2018-15858
831
vulnerable
CWE-119
horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; unsigned char* cp = (unsigned char*) cp0; assert((cc%stride)==0); if (cc > stride) { /* * Pipeline the most common cases. */ if (stride == 3) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; cc -= 3; cp += 3; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cc -= 3; cp += 3; } } else if (stride == 4) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; unsigned int ca = cp[3]; cc -= 4; cp += 4; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cp[3] = (unsigned char) ((ca += cp[3]) & 0xff); cc -= 4; cp += 4; } } else { cc -= stride; do { REPEAT4(stride, cp[stride] = (unsigned char) ((cp[stride] + *cp) & 0xff); cp++) cc -= stride; } while (cc>0); } } }
0
CVE-2016-9535
955
benign
CWE-119
horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; unsigned char* cp = (unsigned char*) cp0; if((cc%stride)!=0) { TIFFErrorExt(tif->tif_clientdata, "horAcc8", "%s", "(cc%stride)!=0"); return 0; } if (cc > stride) { /* * Pipeline the most common cases. */ if (stride == 3) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; cc -= 3; cp += 3; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cc -= 3; cp += 3; } } else if (stride == 4) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; unsigned int ca = cp[3]; cc -= 4; cp += 4; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cp[3] = (unsigned char) ((ca += cp[3]) & 0xff); cc -= 4; cp += 4; } } else { cc -= stride; do { REPEAT4(stride, cp[stride] = (unsigned char) ((cp[stride] + *cp) & 0xff); cp++) cc -= stride; } while (cc>0); } } return 1; }
1
CVE-2016-9535
955
vulnerable
CWE-125
get_html_data (MAPI_Attr *a) { VarLenData **body = XCALLOC(VarLenData*, a->num_values + 1); int j; for (j = 0; j < a->num_values; j++) { body[j] = XMALLOC(VarLenData, 1); body[j]->len = a->values[j].len; body[j]->data = CHECKED_XCALLOC(unsigned char, a->values[j].len); memmove (body[j]->data, a->values[j].data.buf, body[j]->len); } return body; }
0
CVE-2017-6309
1438
benign
CWE-125
get_html_data (MAPI_Attr *a) { VarLenData **body = XCALLOC(VarLenData*, a->num_values + 1); int j; for (j = 0; j < a->num_values; j++) { if (a->type == szMAPI_BINARY) { body[j] = XMALLOC(VarLenData, 1); body[j]->len = a->values[j].len; body[j]->data = CHECKED_XCALLOC(unsigned char, a->values[j].len); memmove (body[j]->data, a->values[j].data.buf, body[j]->len); } } return body; }
1
CVE-2017-6309
1438
vulnerable
CWE-125
int yr_re_fast_exec( uint8_t* code, uint8_t* input_data, size_t input_size, int flags, RE_MATCH_CALLBACK_FUNC callback, void* callback_args) { RE_REPEAT_ANY_ARGS* repeat_any_args; uint8_t* code_stack[MAX_FAST_RE_STACK]; uint8_t* input_stack[MAX_FAST_RE_STACK]; int matches_stack[MAX_FAST_RE_STACK]; uint8_t* ip = code; uint8_t* input = input_data; uint8_t* next_input; uint8_t* next_opcode; uint8_t mask; uint8_t value; int i; int stop; int input_incr; int sp = 0; int bytes_matched; int max_bytes_matched = input_size; input_incr = flags & RE_FLAGS_BACKWARDS ? -1 : 1; if (flags & RE_FLAGS_BACKWARDS) input--; code_stack[sp] = code; input_stack[sp] = input; matches_stack[sp] = 0; sp++; while (sp > 0) { sp--; ip = code_stack[sp]; input = input_stack[sp]; bytes_matched = matches_stack[sp]; stop = FALSE; while(!stop) { if (*ip == RE_OPCODE_MATCH) { if (flags & RE_FLAGS_EXHAUSTIVE) { int cb_result = callback( flags & RE_FLAGS_BACKWARDS ? input + 1 : input_data, bytes_matched, flags, callback_args); switch(cb_result) { case ERROR_INSUFFICIENT_MEMORY: return -2; case ERROR_TOO_MANY_MATCHES: return -3; default: if (cb_result != ERROR_SUCCESS) return -4; } break; } else { return bytes_matched; } } if (bytes_matched >= max_bytes_matched) break; switch(*ip) { case RE_OPCODE_LITERAL: if (*input == *(ip + 1)) { bytes_matched++; input += input_incr; ip += 2; } else { stop = TRUE; } break; case RE_OPCODE_MASKED_LITERAL: value = *(int16_t*)(ip + 1) & 0xFF; mask = *(int16_t*)(ip + 1) >> 8; if ((*input & mask) == value) { bytes_matched++; input += input_incr; ip += 3; } else { stop = TRUE; } break; case RE_OPCODE_ANY: bytes_matched++; input += input_incr; ip += 1; break; case RE_OPCODE_REPEAT_ANY_UNGREEDY: repeat_any_args = (RE_REPEAT_ANY_ARGS*)(ip + 1); next_opcode = ip + 1 + sizeof(RE_REPEAT_ANY_ARGS); for (i = repeat_any_args->min + 1; i <= repeat_any_args->max; i++) { next_input = input + i * input_incr; if (bytes_matched + i >= max_bytes_matched) break; if ( *(next_opcode) != RE_OPCODE_LITERAL || (*(next_opcode) == RE_OPCODE_LITERAL && *(next_opcode + 1) == *next_input)) { if (sp >= MAX_FAST_RE_STACK) return -4; code_stack[sp] = next_opcode; input_stack[sp] = next_input; matches_stack[sp] = bytes_matched + i; sp++; } } input += input_incr * repeat_any_args->min; bytes_matched += repeat_any_args->min; ip = next_opcode; break; default: assert(FALSE); } } } return -1; }
0
CVE-2017-8294
3124
benign
CWE-125
int yr_re_fast_exec( uint8_t* code, uint8_t* input_data, size_t input_forwards_size, size_t input_backwards_size, int flags, RE_MATCH_CALLBACK_FUNC callback, void* callback_args) { RE_REPEAT_ANY_ARGS* repeat_any_args; uint8_t* code_stack[MAX_FAST_RE_STACK]; uint8_t* input_stack[MAX_FAST_RE_STACK]; int matches_stack[MAX_FAST_RE_STACK]; uint8_t* ip = code; uint8_t* input = input_data; uint8_t* next_input; uint8_t* next_opcode; uint8_t mask; uint8_t value; int i; int stop; int input_incr; int sp = 0; int bytes_matched; int max_bytes_matched; max_bytes_matched = flags & RE_FLAGS_BACKWARDS ? input_backwards_size : input_forwards_size; input_incr = flags & RE_FLAGS_BACKWARDS ? -1 : 1; if (flags & RE_FLAGS_BACKWARDS) input--; code_stack[sp] = code; input_stack[sp] = input; matches_stack[sp] = 0; sp++; while (sp > 0) { sp--; ip = code_stack[sp]; input = input_stack[sp]; bytes_matched = matches_stack[sp]; stop = FALSE; while(!stop) { if (*ip == RE_OPCODE_MATCH) { if (flags & RE_FLAGS_EXHAUSTIVE) { int cb_result = callback( flags & RE_FLAGS_BACKWARDS ? input + 1 : input_data, bytes_matched, flags, callback_args); switch(cb_result) { case ERROR_INSUFFICIENT_MEMORY: return -2; case ERROR_TOO_MANY_MATCHES: return -3; default: if (cb_result != ERROR_SUCCESS) return -4; } break; } else { return bytes_matched; } } if (bytes_matched >= max_bytes_matched) break; switch(*ip) { case RE_OPCODE_LITERAL: if (*input == *(ip + 1)) { bytes_matched++; input += input_incr; ip += 2; } else { stop = TRUE; } break; case RE_OPCODE_MASKED_LITERAL: value = *(int16_t*)(ip + 1) & 0xFF; mask = *(int16_t*)(ip + 1) >> 8; if ((*input & mask) == value) { bytes_matched++; input += input_incr; ip += 3; } else { stop = TRUE; } break; case RE_OPCODE_ANY: bytes_matched++; input += input_incr; ip += 1; break; case RE_OPCODE_REPEAT_ANY_UNGREEDY: repeat_any_args = (RE_REPEAT_ANY_ARGS*)(ip + 1); next_opcode = ip + 1 + sizeof(RE_REPEAT_ANY_ARGS); for (i = repeat_any_args->min + 1; i <= repeat_any_args->max; i++) { next_input = input + i * input_incr; if (bytes_matched + i >= max_bytes_matched) break; if ( *(next_opcode) != RE_OPCODE_LITERAL || (*(next_opcode) == RE_OPCODE_LITERAL && *(next_opcode + 1) == *next_input)) { if (sp >= MAX_FAST_RE_STACK) return -4; code_stack[sp] = next_opcode; input_stack[sp] = next_input; matches_stack[sp] = bytes_matched + i; sp++; } } input += input_incr * repeat_any_args->min; bytes_matched += repeat_any_args->min; ip = next_opcode; break; default: assert(FALSE); } } } return -1; }
1
CVE-2017-8294
3124
vulnerable
CWE-119
smbhash(unsigned char *out, const unsigned char *in, unsigned char *key) { int rc; unsigned char key2[8]; struct crypto_skcipher *tfm_des; struct scatterlist sgin, sgout; struct skcipher_request *req; str_to_key(key, key2); tfm_des = crypto_alloc_skcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm_des)) { rc = PTR_ERR(tfm_des); cifs_dbg(VFS, "could not allocate des crypto API\n"); goto smbhash_err; } req = skcipher_request_alloc(tfm_des, GFP_KERNEL); if (!req) { rc = -ENOMEM; cifs_dbg(VFS, "could not allocate des crypto API\n"); goto smbhash_free_skcipher; } crypto_skcipher_setkey(tfm_des, key2, 8); sg_init_one(&sgin, in, 8); sg_init_one(&sgout, out, 8); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, &sgin, &sgout, 8, NULL); rc = crypto_skcipher_encrypt(req); if (rc) cifs_dbg(VFS, "could not encrypt crypt key rc: %d\n", rc); skcipher_request_free(req); smbhash_free_skcipher: crypto_free_skcipher(tfm_des); smbhash_err: return rc; }
0
CVE-2016-10154
3070
benign
CWE-119
smbhash(unsigned char *out, const unsigned char *in, unsigned char *key) { unsigned char key2[8]; struct crypto_cipher *tfm_des; str_to_key(key, key2); tfm_des = crypto_alloc_cipher("des", 0, 0); if (IS_ERR(tfm_des)) { cifs_dbg(VFS, "could not allocate des crypto API\n"); return PTR_ERR(tfm_des); } crypto_cipher_setkey(tfm_des, key2, 8); crypto_cipher_encrypt_one(tfm_des, out, in); crypto_free_cipher(tfm_des); return 0; }
1
CVE-2016-10154
3070
vulnerable
CWE-476
bool load_face(Face & face, unsigned int options) { #ifdef GRAPHITE2_TELEMETRY telemetry::category _misc_cat(face.tele.misc); #endif Face::Table silf(face, Tag::Silf, 0x00050000); if (silf) options &= ~gr_face_dumbRendering; else if (!(options & gr_face_dumbRendering)) return false; if (!face.readGlyphs(options)) return false; if (silf) { if (!face.readFeatures() || !face.readGraphite(silf)) { #if !defined GRAPHITE2_NTRACING if (global_log) { *global_log << json::object << "type" << "fontload" << "failure" << face.error() << "context" << face.error_context() << json::close; } #endif return false; } else return true; } else return options & gr_face_dumbRendering; }
0
CVE-2018-7999
2967
benign
CWE-476
bool load_face(Face & face, unsigned int options) { #ifdef GRAPHITE2_TELEMETRY telemetry::category _misc_cat(face.tele.misc); #endif Face::Table silf(face, Tag::Silf, 0x00050000); if (!silf) return false; if (!face.readGlyphs(options)) return false; if (silf) { if (!face.readFeatures() || !face.readGraphite(silf)) { #if !defined GRAPHITE2_NTRACING if (global_log) { *global_log << json::object << "type" << "fontload" << "failure" << face.error() << "context" << face.error_context() << json::close; } #endif return false; } else return true; } else return false; }
1
CVE-2018-7999
2967
vulnerable
CWE-190
PyMemoTable_Copy(PyMemoTable *self) { Py_ssize_t i; PyMemoTable *new = PyMemoTable_New(); if (new == NULL) return NULL; new->mt_used = self->mt_used; new->mt_allocated = self->mt_allocated; new->mt_mask = self->mt_mask; /* The table we get from _New() is probably smaller than we wanted. Free it and allocate one that's the right size. */ PyMem_FREE(new->mt_table); new->mt_table = PyMem_NEW(PyMemoEntry, self->mt_allocated); if (new->mt_table == NULL) { PyMem_FREE(new); PyErr_NoMemory(); return NULL; } for (i = 0; i < self->mt_allocated; i++) { Py_XINCREF(self->mt_table[i].me_key); } memcpy(new->mt_table, self->mt_table, sizeof(PyMemoEntry) * self->mt_allocated); return new; }
0
CVE-2018-20406
2869
benign
CWE-190
PyMemoTable_Copy(PyMemoTable *self) { PyMemoTable *new = PyMemoTable_New(); if (new == NULL) return NULL; new->mt_used = self->mt_used; new->mt_allocated = self->mt_allocated; new->mt_mask = self->mt_mask; /* The table we get from _New() is probably smaller than we wanted. Free it and allocate one that's the right size. */ PyMem_FREE(new->mt_table); new->mt_table = PyMem_NEW(PyMemoEntry, self->mt_allocated); if (new->mt_table == NULL) { PyMem_FREE(new); PyErr_NoMemory(); return NULL; } for (size_t i = 0; i < self->mt_allocated; i++) { Py_XINCREF(self->mt_table[i].me_key); } memcpy(new->mt_table, self->mt_table, sizeof(PyMemoEntry) * self->mt_allocated); return new; }
1
CVE-2018-20406
2869
vulnerable
CWE-125
GF_Err hdlr_dump(GF_Box *a, FILE * trace) { GF_HandlerBox *p = (GF_HandlerBox *)a; gf_isom_box_dump_start(a, "HandlerBox", trace); if (p->nameUTF8 && (u32) p->nameUTF8[0] == strlen(p->nameUTF8+1)) { fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", gf_4cc_to_str(p->handlerType), p->nameUTF8+1); } else { fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", gf_4cc_to_str(p->handlerType), p->nameUTF8); } fprintf(trace, "reserved1=\"%d\" reserved2=\"", p->reserved1); dump_data(trace, (char *) p->reserved2, 12); fprintf(trace, "\""); fprintf(trace, ">\n"); gf_isom_box_dump_done("HandlerBox", a, trace); return GF_OK; }
0
CVE-2018-13006
2111
benign
CWE-125
GF_Err hdlr_dump(GF_Box *a, FILE * trace) { GF_HandlerBox *p = (GF_HandlerBox *)a; gf_isom_box_dump_start(a, "HandlerBox", trace); if (p->nameUTF8 && (u32) p->nameUTF8[0] == strlen(p->nameUTF8)-1) { fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", gf_4cc_to_str(p->handlerType), p->nameUTF8+1); } else { fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", gf_4cc_to_str(p->handlerType), p->nameUTF8); } fprintf(trace, "reserved1=\"%d\" reserved2=\"", p->reserved1); dump_data(trace, (char *) p->reserved2, 12); fprintf(trace, "\""); fprintf(trace, ">\n"); gf_isom_box_dump_done("HandlerBox", a, trace); return GF_OK; }
1
CVE-2018-13006
2111
vulnerable
CWE-125
ast_type_init(PyObject *self, PyObject *args, PyObject *kw) { _Py_IDENTIFIER(_fields); Py_ssize_t i, numfields = 0; int res = -1; PyObject *key, *value, *fields; fields = _PyObject_GetAttrId((PyObject*)Py_TYPE(self), &PyId__fields); if (!fields) PyErr_Clear(); if (fields) { numfields = PySequence_Size(fields); if (numfields == -1) goto cleanup; } res = 0; /* if no error occurs, this stays 0 to the end */ if (PyTuple_GET_SIZE(args) > 0) { if (numfields != PyTuple_GET_SIZE(args)) { PyErr_Format(PyExc_TypeError, "%.400s constructor takes %s" "%zd positional argument%s", Py_TYPE(self)->tp_name, numfields == 0 ? "" : "either 0 or ", numfields, numfields == 1 ? "" : "s"); res = -1; goto cleanup; } for (i = 0; i < PyTuple_GET_SIZE(args); i++) { /* cannot be reached when fields is NULL */ PyObject *name = PySequence_GetItem(fields, i); if (!name) { res = -1; goto cleanup; } res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i)); Py_DECREF(name); if (res < 0) goto cleanup; } } if (kw) { i = 0; /* needed by PyDict_Next */ while (PyDict_Next(kw, &i, &key, &value)) { res = PyObject_SetAttr(self, key, value); if (res < 0) goto cleanup; } } cleanup: Py_XDECREF(fields); return res; }
0
CVE-2019-19274
1471
benign
CWE-125
ast_type_init(PyObject *self, PyObject *args, PyObject *kw) { Py_ssize_t i, numfields = 0; int res = -1; PyObject *key, *value, *fields; if (lookup_attr_id((PyObject*)Py_TYPE(self), &PyId__fields, &fields) < 0) { goto cleanup; } if (fields) { numfields = PySequence_Size(fields); if (numfields == -1) goto cleanup; } res = 0; /* if no error occurs, this stays 0 to the end */ if (numfields < PyTuple_GET_SIZE(args)) { PyErr_Format(PyExc_TypeError, "%.400s constructor takes at most " "%zd positional argument%s", Py_TYPE(self)->tp_name, numfields, numfields == 1 ? "" : "s"); res = -1; goto cleanup; } for (i = 0; i < PyTuple_GET_SIZE(args); i++) { /* cannot be reached when fields is NULL */ PyObject *name = PySequence_GetItem(fields, i); if (!name) { res = -1; goto cleanup; } res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i)); Py_DECREF(name); if (res < 0) goto cleanup; } if (kw) { i = 0; /* needed by PyDict_Next */ while (PyDict_Next(kw, &i, &key, &value)) { res = PyObject_SetAttr(self, key, value); if (res < 0) goto cleanup; } } cleanup: Py_XDECREF(fields); return res; }
1
CVE-2019-19274
1471
vulnerable
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TfLitePackParams* data = reinterpret_cast<TfLitePackParams*>(node->builtin_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), data->values_count); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input0 = GetInput(context, node, 0); const int dimension_size = NumDimensions(input0) + 1; if (data->axis < 0) { data->axis += dimension_size; } TF_LITE_ENSURE(context, NumDimensions(input0) >= data->axis); TF_LITE_ENSURE(context, data->axis >= 0); if (input0->type != kTfLiteInt32 && input0->type != kTfLiteFloat32 && input0->type != kTfLiteUInt8 && input0->type != kTfLiteInt8 && input0->type != kTfLiteInt16 && input0->type != kTfLiteInt64) { context->ReportError(context, "Type '%s' is not supported by pack.", TfLiteTypeGetName(input0->type)); return kTfLiteError; } // Make sure all inputs have the same shape and type. for (int i = 1; i < data->values_count; ++i) { const TfLiteTensor* input = GetInput(context, node, i); TF_LITE_ENSURE(context, HaveSameShapes(input0, input)); TF_LITE_ENSURE_TYPES_EQ(context, input0->type, input->type); } // Resize output. rank R will become rank R + 1 const TfLiteIntArray* input_shape = input0->dims; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dimension_size); int i = 0; for (int index = 0; index < dimension_size; ++index) { if (index == data->axis) { output_shape->data[index] = data->values_count; } else { output_shape->data[index] = input_shape->data[i++]; } } TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input0->type); // Guarantee input/output quantization params match as we do not support // packing quantized tensors. for (int i = 0; i < data->values_count; i++) { const TfLiteTensor* input = GetInput(context, node, i); TF_LITE_ENSURE_EQ(context, input->params.zero_point, output->params.zero_point); TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale); } return context->ResizeTensor(context, output, output_shape); }
0
CVE-2020-15211
1683
benign
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TfLitePackParams* data = reinterpret_cast<TfLitePackParams*>(node->builtin_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), data->values_count); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input0; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input0)); const int dimension_size = NumDimensions(input0) + 1; if (data->axis < 0) { data->axis += dimension_size; } TF_LITE_ENSURE(context, NumDimensions(input0) >= data->axis); TF_LITE_ENSURE(context, data->axis >= 0); if (input0->type != kTfLiteInt32 && input0->type != kTfLiteFloat32 && input0->type != kTfLiteUInt8 && input0->type != kTfLiteInt8 && input0->type != kTfLiteInt16 && input0->type != kTfLiteInt64) { context->ReportError(context, "Type '%s' is not supported by pack.", TfLiteTypeGetName(input0->type)); return kTfLiteError; } // Make sure all inputs have the same shape and type. for (int i = 1; i < data->values_count; ++i) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input)); TF_LITE_ENSURE(context, HaveSameShapes(input0, input)); TF_LITE_ENSURE_TYPES_EQ(context, input0->type, input->type); } // Resize output. rank R will become rank R + 1 const TfLiteIntArray* input_shape = input0->dims; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dimension_size); int i = 0; for (int index = 0; index < dimension_size; ++index) { if (index == data->axis) { output_shape->data[index] = data->values_count; } else { output_shape->data[index] = input_shape->data[i++]; } } TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input0->type); // Guarantee input/output quantization params match as we do not support // packing quantized tensors. for (int i = 0; i < data->values_count; i++) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input)); TF_LITE_ENSURE_EQ(context, input->params.zero_point, output->params.zero_point); TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale); } return context->ResizeTensor(context, output, output_shape); }
1
CVE-2020-15211
1683
vulnerable
CWE-125
TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
0
CVE-2020-15211
3054
benign
CWE-125
TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
1
CVE-2020-15211
3054
vulnerable
CWE-119
bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const { auto* region = region_from_vaddr(process, vaddr); return region && region->is_readable(); }
0
CVE-2019-20172
1120
benign
CWE-119
bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const { auto* region = user_region_from_vaddr(const_cast<Process&>(process), vaddr); return region && region->is_user_accessible() && region->is_readable(); }
1
CVE-2019-20172
1120
vulnerable
CWE-119
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) { if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u " "addr %pM\n", sta_id, priv->stations[sta_id].sta.sta.addr); if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { IWL_DEBUG_ASSOC(priv, "STA id %u addr %pM already present in uCode " "(according to driver)\n", sta_id, priv->stations[sta_id].sta.sta.addr); } else { priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", sta_id, priv->stations[sta_id].sta.sta.addr); } }
0
CVE-2012-6712
1373
benign
CWE-119
static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) { if (sta_id >= IWLAGN_STATION_COUNT) { IWL_ERR(priv, "invalid sta_id %u", sta_id); return -EINVAL; } if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u " "addr %pM\n", sta_id, priv->stations[sta_id].sta.sta.addr); if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { IWL_DEBUG_ASSOC(priv, "STA id %u addr %pM already present in uCode " "(according to driver)\n", sta_id, priv->stations[sta_id].sta.sta.addr); } else { priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", sta_id, priv->stations[sta_id].sta.sta.addr); } return 0; }
1
CVE-2012-6712
1373
vulnerable
CWE-476
static VTermScreen *screen_new(VTerm *vt) { VTermState *state = vterm_obtain_state(vt); VTermScreen *screen; int rows, cols; if(!state) return NULL; screen = vterm_allocator_malloc(vt, sizeof(VTermScreen)); vterm_get_size(vt, &rows, &cols); screen->vt = vt; screen->state = state; screen->damage_merge = VTERM_DAMAGE_CELL; screen->damaged.start_row = -1; screen->pending_scrollrect.start_row = -1; screen->rows = rows; screen->cols = cols; screen->callbacks = NULL; screen->cbdata = NULL; screen->buffers[0] = realloc_buffer(screen, NULL, rows, cols); screen->buffer = screen->buffers[0]; screen->sb_buffer = vterm_allocator_malloc(screen->vt, sizeof(VTermScreenCell) * cols); vterm_state_set_callbacks(screen->state, &state_cbs, screen); return screen; }
0
CVE-2018-20786
1964
benign
CWE-476
static VTermScreen *screen_new(VTerm *vt) { VTermState *state = vterm_obtain_state(vt); VTermScreen *screen; int rows, cols; if (state == NULL) return NULL; screen = vterm_allocator_malloc(vt, sizeof(VTermScreen)); if (screen == NULL) return NULL; vterm_get_size(vt, &rows, &cols); screen->vt = vt; screen->state = state; screen->damage_merge = VTERM_DAMAGE_CELL; screen->damaged.start_row = -1; screen->pending_scrollrect.start_row = -1; screen->rows = rows; screen->cols = cols; screen->callbacks = NULL; screen->cbdata = NULL; screen->buffers[0] = realloc_buffer(screen, NULL, rows, cols); screen->buffer = screen->buffers[0]; screen->sb_buffer = vterm_allocator_malloc(screen->vt, sizeof(VTermScreenCell) * cols); if (screen->buffer == NULL || screen->sb_buffer == NULL) { vterm_screen_free(screen); return NULL; } vterm_state_set_callbacks(screen->state, &state_cbs, screen); return screen; }
1
CVE-2018-20786
1964
vulnerable
CWE-476
static int i8042_start(struct serio *serio) { struct i8042_port *port = serio->port_data; port->exists = true; mb(); return 0; }
0
CVE-2017-18079
1995
benign
CWE-476
static int i8042_start(struct serio *serio) { struct i8042_port *port = serio->port_data; spin_lock_irq(&i8042_lock); port->exists = true; spin_unlock_irq(&i8042_lock); return 0; }
1
CVE-2017-18079
1995
vulnerable
CWE-476
static int on_part_data( multipart_parser *parser, const char *at, size_t length) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (at && length) { SWITCH(data->part[data->num_of_part].content_type) CASE(OGS_SBI_CONTENT_JSON_TYPE) CASE(OGS_SBI_CONTENT_5GNAS_TYPE) CASE(OGS_SBI_CONTENT_NGAP_TYPE) size_t offset = 0; if (data->part[data->num_of_part].content == NULL) { data->part[data->num_of_part].content_length = length; data->part[data->num_of_part].content = (char *)ogs_malloc(length + 1); ogs_assert(data->part[data->num_of_part].content); } else { offset = data->part[data->num_of_part].content_length; if ((data->part[data->num_of_part].content_length + length) > OGS_HUGE_LEN) { ogs_error("Overflow length [%d:%d]", (int)data->part[data->num_of_part].content_length, (int)length); ogs_assert_if_reached(); return 0; } data->part[data->num_of_part].content_length += length; data->part[data->num_of_part].content = (char *)ogs_realloc( data->part[data->num_of_part].content, data->part[data->num_of_part].content_length + 1); ogs_assert(data->part[data->num_of_part].content); } memcpy(data->part[data->num_of_part].content + offset, at, length); data->part[data->num_of_part].content[ data->part[data->num_of_part].content_length] = 0; break; DEFAULT ogs_log_hexdump(OGS_LOG_FATAL, (unsigned char *)at, length); ogs_error("Unknown content_type [%s]", data->part[data->num_of_part].content_type); END } return 0; }
0
CVE-2021-44108
1856
benign
CWE-476
static int on_part_data( multipart_parser *parser, const char *at, size_t length) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (data->num_of_part < OGS_SBI_MAX_NUM_OF_PART && at && length) { SWITCH(data->part[data->num_of_part].content_type) CASE(OGS_SBI_CONTENT_JSON_TYPE) CASE(OGS_SBI_CONTENT_5GNAS_TYPE) CASE(OGS_SBI_CONTENT_NGAP_TYPE) size_t offset = 0; if (data->part[data->num_of_part].content == NULL) { data->part[data->num_of_part].content_length = length; data->part[data->num_of_part].content = (char *)ogs_malloc(length + 1); ogs_assert(data->part[data->num_of_part].content); } else { offset = data->part[data->num_of_part].content_length; if ((data->part[data->num_of_part].content_length + length) > OGS_HUGE_LEN) { ogs_error("Overflow length [%d:%d]", (int)data->part[data->num_of_part].content_length, (int)length); ogs_assert_if_reached(); return 0; } data->part[data->num_of_part].content_length += length; data->part[data->num_of_part].content = (char *)ogs_realloc( data->part[data->num_of_part].content, data->part[data->num_of_part].content_length + 1); ogs_assert(data->part[data->num_of_part].content); } memcpy(data->part[data->num_of_part].content + offset, at, length); data->part[data->num_of_part].content[ data->part[data->num_of_part].content_length] = 0; break; DEFAULT ogs_error("Unknown content_type [%s]", data->part[data->num_of_part].content_type); ogs_log_hexdump(OGS_LOG_ERROR, (unsigned char *)at, length); END } return 0; }
1
CVE-2021-44108
1856
vulnerable
CWE-125
ieee802_15_4_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int caplen = h->caplen; u_int hdrlen; uint16_t fc; uint8_t seq; uint16_t panid = 0; if (caplen < 3) { ND_PRINT((ndo, "[|802.15.4]")); return caplen; } hdrlen = 3; fc = EXTRACT_LE_16BITS(p); seq = EXTRACT_LE_8BITS(p + 2); p += 3; caplen -= 3; ND_PRINT((ndo,"IEEE 802.15.4 %s packet ", ftypes[FC_FRAME_TYPE(fc)])); if (ndo->ndo_vflag) ND_PRINT((ndo,"seq %02x ", seq)); /* * Destination address and PAN ID, if present. */ switch (FC_DEST_ADDRESSING_MODE(fc)) { case FC_ADDRESSING_MODE_NONE: if (fc & FC_PAN_ID_COMPRESSION) { /* * PAN ID compression; this requires that both * the source and destination addresses be present, * but the destination address is missing. */ ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"none ")); break; case FC_ADDRESSING_MODE_RESERVED: if (ndo->ndo_vflag) ND_PRINT((ndo,"reserved destination addressing mode")); return hdrlen; case FC_ADDRESSING_MODE_SHORT: if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } panid = EXTRACT_LE_16BITS(p); p += 2; caplen -= 2; hdrlen += 2; if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p + 2))); p += 2; caplen -= 2; hdrlen += 2; break; case FC_ADDRESSING_MODE_LONG: if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } panid = EXTRACT_LE_16BITS(p); p += 2; caplen -= 2; hdrlen += 2; if (caplen < 8) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p))); p += 8; caplen -= 8; hdrlen += 8; break; } if (ndo->ndo_vflag) ND_PRINT((ndo,"< ")); /* * Source address and PAN ID, if present. */ switch (FC_SRC_ADDRESSING_MODE(fc)) { case FC_ADDRESSING_MODE_NONE: if (ndo->ndo_vflag) ND_PRINT((ndo,"none ")); break; case FC_ADDRESSING_MODE_RESERVED: if (ndo->ndo_vflag) ND_PRINT((ndo,"reserved source addressing mode")); return 0; case FC_ADDRESSING_MODE_SHORT: if (!(fc & FC_PAN_ID_COMPRESSION)) { /* * The source PAN ID is not compressed out, so * fetch it. (Otherwise, we'll use the destination * PAN ID, fetched above.) */ if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } panid = EXTRACT_LE_16BITS(p); p += 2; caplen -= 2; hdrlen += 2; } if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p))); p += 2; caplen -= 2; hdrlen += 2; break; case FC_ADDRESSING_MODE_LONG: if (!(fc & FC_PAN_ID_COMPRESSION)) { /* * The source PAN ID is not compressed out, so * fetch it. (Otherwise, we'll use the destination * PAN ID, fetched above.) */ if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } panid = EXTRACT_LE_16BITS(p); p += 2; caplen -= 2; hdrlen += 2; } if (caplen < 8) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p))); p += 8; caplen -= 8; hdrlen += 8; break; } if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); return hdrlen; }
0
CVE-2017-13000
2509
benign
CWE-125
ieee802_15_4_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int caplen = h->caplen; u_int hdrlen; uint16_t fc; uint8_t seq; uint16_t panid = 0; if (caplen < 3) { ND_PRINT((ndo, "[|802.15.4]")); return caplen; } hdrlen = 3; fc = EXTRACT_LE_16BITS(p); seq = EXTRACT_LE_8BITS(p + 2); p += 3; caplen -= 3; ND_PRINT((ndo,"IEEE 802.15.4 %s packet ", ftypes[FC_FRAME_TYPE(fc)])); if (ndo->ndo_vflag) ND_PRINT((ndo,"seq %02x ", seq)); /* * Destination address and PAN ID, if present. */ switch (FC_DEST_ADDRESSING_MODE(fc)) { case FC_ADDRESSING_MODE_NONE: if (fc & FC_PAN_ID_COMPRESSION) { /* * PAN ID compression; this requires that both * the source and destination addresses be present, * but the destination address is missing. */ ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"none ")); break; case FC_ADDRESSING_MODE_RESERVED: if (ndo->ndo_vflag) ND_PRINT((ndo,"reserved destination addressing mode")); return hdrlen; case FC_ADDRESSING_MODE_SHORT: if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } panid = EXTRACT_LE_16BITS(p); p += 2; caplen -= 2; hdrlen += 2; if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p))); p += 2; caplen -= 2; hdrlen += 2; break; case FC_ADDRESSING_MODE_LONG: if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } panid = EXTRACT_LE_16BITS(p); p += 2; caplen -= 2; hdrlen += 2; if (caplen < 8) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p))); p += 8; caplen -= 8; hdrlen += 8; break; } if (ndo->ndo_vflag) ND_PRINT((ndo,"< ")); /* * Source address and PAN ID, if present. */ switch (FC_SRC_ADDRESSING_MODE(fc)) { case FC_ADDRESSING_MODE_NONE: if (ndo->ndo_vflag) ND_PRINT((ndo,"none ")); break; case FC_ADDRESSING_MODE_RESERVED: if (ndo->ndo_vflag) ND_PRINT((ndo,"reserved source addressing mode")); return 0; case FC_ADDRESSING_MODE_SHORT: if (!(fc & FC_PAN_ID_COMPRESSION)) { /* * The source PAN ID is not compressed out, so * fetch it. (Otherwise, we'll use the destination * PAN ID, fetched above.) */ if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } panid = EXTRACT_LE_16BITS(p); p += 2; caplen -= 2; hdrlen += 2; } if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p))); p += 2; caplen -= 2; hdrlen += 2; break; case FC_ADDRESSING_MODE_LONG: if (!(fc & FC_PAN_ID_COMPRESSION)) { /* * The source PAN ID is not compressed out, so * fetch it. (Otherwise, we'll use the destination * PAN ID, fetched above.) */ if (caplen < 2) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } panid = EXTRACT_LE_16BITS(p); p += 2; caplen -= 2; hdrlen += 2; } if (caplen < 8) { ND_PRINT((ndo, "[|802.15.4]")); return hdrlen; } if (ndo->ndo_vflag) ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p))); p += 8; caplen -= 8; hdrlen += 8; break; } if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); return hdrlen; }
1
CVE-2017-13000
2509
vulnerable
CWE-416
void usb_serial_console_disconnect(struct usb_serial *serial) { if (serial->port[0] == usbcons_info.port) { usb_serial_console_exit(); usb_serial_put(serial); } }
0
CVE-2017-16525
2184
benign
CWE-416
void usb_serial_console_disconnect(struct usb_serial *serial) { if (serial->port[0] && serial->port[0] == usbcons_info.port) { usb_serial_console_exit(); usb_serial_put(serial); } }
1
CVE-2017-16525
2184
vulnerable
CWE-119
main_get_appheader (xd3_stream *stream, main_file *ifile, main_file *output, main_file *sfile) { uint8_t *apphead; usize_t appheadsz; int ret; /* The user may disable the application header. Once the appheader * is set, this disables setting it again. */ if (! option_use_appheader) { return; } ret = xd3_get_appheader (stream, & apphead, & appheadsz); /* Ignore failure, it only means we haven't received a header yet. */ if (ret != 0) { return; } if (appheadsz > 0) { char *start = (char*)apphead; char *slash; int place = 0; char *parsed[4]; memset (parsed, 0, sizeof (parsed)); while ((slash = strchr (start, '/')) != NULL) { *slash = 0; parsed[place++] = start; start = slash + 1; } parsed[place++] = start; /* First take the output parameters. */ if (place == 2 || place == 4) { main_get_appheader_params (output, parsed, 1, "output", ifile); } /* Then take the source parameters. */ if (place == 4) { main_get_appheader_params (sfile, parsed+2, 0, "source", ifile); } } option_use_appheader = 0; return; }
0
CVE-2014-9765
827
benign
CWE-119
main_get_appheader (xd3_stream *stream, main_file *ifile, main_file *output, main_file *sfile) { uint8_t *apphead; usize_t appheadsz; int ret; /* The user may disable the application header. Once the appheader * is set, this disables setting it again. */ if (! option_use_appheader) { return; } ret = xd3_get_appheader (stream, & apphead, & appheadsz); /* Ignore failure, it only means we haven't received a header yet. */ if (ret != 0) { return; } if (appheadsz > 0) { const int kMaxArgs = 4; char *start = (char*)apphead; char *slash; int place = 0; char *parsed[kMaxArgs]; memset (parsed, 0, sizeof (parsed)); while ((slash = strchr (start, '/')) != NULL && place < (kMaxArgs-1)) { *slash = 0; parsed[place++] = start; start = slash + 1; } parsed[place++] = start; /* First take the output parameters. */ if (place == 2 || place == 4) { main_get_appheader_params (output, parsed, 1, "output", ifile); } /* Then take the source parameters. */ if (place == 4) { main_get_appheader_params (sfile, parsed+2, 0, "source", ifile); } } option_use_appheader = 0; return; }
1
CVE-2014-9765
827
vulnerable
CWE-787
test_function (char * (*my_asnprintf) (char *, size_t *, const char *, ...)) { char buf[8]; int size; for (size = 0; size <= 8; size++) { size_t length = size; char *result = my_asnprintf (NULL, &length, "%d", 12345); ASSERT (result != NULL); ASSERT (strcmp (result, "12345") == 0); ASSERT (length == 5); free (result); } for (size = 0; size <= 8; size++) { size_t length; char *result; memcpy (buf, "DEADBEEF", 8); length = size; result = my_asnprintf (buf, &length, "%d", 12345); ASSERT (result != NULL); ASSERT (strcmp (result, "12345") == 0); ASSERT (length == 5); if (size < 6) ASSERT (result != buf); ASSERT (memcmp (buf + size, &"DEADBEEF"[size], 8 - size) == 0); if (result != buf) free (result); } }
0
CVE-2018-17942
1997
benign
CWE-787
test_function (char * (*my_asnprintf) (char *, size_t *, const char *, ...)) { char buf[8]; int size; for (size = 0; size <= 8; size++) { size_t length = size; char *result = my_asnprintf (NULL, &length, "%d", 12345); ASSERT (result != NULL); ASSERT (strcmp (result, "12345") == 0); ASSERT (length == 5); free (result); } for (size = 0; size <= 8; size++) { size_t length; char *result; memcpy (buf, "DEADBEEF", 8); length = size; result = my_asnprintf (buf, &length, "%d", 12345); ASSERT (result != NULL); ASSERT (strcmp (result, "12345") == 0); ASSERT (length == 5); if (size < 5 + 1) ASSERT (result != buf); ASSERT (memcmp (buf + size, &"DEADBEEF"[size], 8 - size) == 0); if (result != buf) free (result); } /* Note: This test assumes IEEE 754 representation of 'double' floats. */ for (size = 0; size <= 8; size++) { size_t length; char *result; memcpy (buf, "DEADBEEF", 8); length = size; result = my_asnprintf (buf, &length, "%2.0f", 1.6314159265358979e+125); ASSERT (result != NULL); ASSERT (strcmp (result, "163141592653589790215729350939528493057529598899734151772468186268423257777068536614838678161083520756952076273094236944990208") == 0); ASSERT (length == 126); if (size < 126 + 1) ASSERT (result != buf); ASSERT (memcmp (buf + size, &"DEADBEEF"[size], 8 - size) == 0); if (result != buf) free (result); } }
1
CVE-2018-17942
1997
vulnerable
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 5); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* ids = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(ids), 1); TF_LITE_ENSURE_EQ(context, ids->type, kTfLiteInt32); const TfLiteTensor* indices = GetInput(context, node, 1); TF_LITE_ENSURE_EQ(context, NumDimensions(indices), 2); TF_LITE_ENSURE_EQ(context, indices->type, kTfLiteInt32); const TfLiteTensor* shape = GetInput(context, node, 2); TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1); TF_LITE_ENSURE_EQ(context, shape->type, kTfLiteInt32); const TfLiteTensor* weights = GetInput(context, node, 3); TF_LITE_ENSURE_EQ(context, NumDimensions(weights), 1); TF_LITE_ENSURE_EQ(context, weights->type, kTfLiteFloat32); TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0), SizeOfDimension(ids, 0)); TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0), SizeOfDimension(weights, 0)); const TfLiteTensor* value = GetInput(context, node, 4); TF_LITE_ENSURE(context, NumDimensions(value) >= 2); // Mark the output as a dynamic tensor. TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); output->allocation_type = kTfLiteDynamic; return kTfLiteOk; }
0
CVE-2020-15211
2746
benign
CWE-125
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 5); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* ids; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &ids)); TF_LITE_ENSURE_EQ(context, NumDimensions(ids), 1); TF_LITE_ENSURE_EQ(context, ids->type, kTfLiteInt32); const TfLiteTensor* indices; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &indices)); TF_LITE_ENSURE_EQ(context, NumDimensions(indices), 2); TF_LITE_ENSURE_EQ(context, indices->type, kTfLiteInt32); const TfLiteTensor* shape; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &shape)); TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1); TF_LITE_ENSURE_EQ(context, shape->type, kTfLiteInt32); const TfLiteTensor* weights; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 3, &weights)); TF_LITE_ENSURE_EQ(context, NumDimensions(weights), 1); TF_LITE_ENSURE_EQ(context, weights->type, kTfLiteFloat32); TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0), SizeOfDimension(ids, 0)); TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0), SizeOfDimension(weights, 0)); const TfLiteTensor* value; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 4, &value)); TF_LITE_ENSURE(context, NumDimensions(value) >= 2); // Mark the output as a dynamic tensor. TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); output->allocation_type = kTfLiteDynamic; return kTfLiteOk; }
1
CVE-2020-15211
2746
vulnerable
CWE-787
void FdInStream::readBytes(void* data, int length) { if (length < MIN_BULK_SIZE) { InStream::readBytes(data, length); return; } U8* dataPtr = (U8*)data; int n = end - ptr; if (n > length) n = length; memcpy(dataPtr, ptr, n); dataPtr += n; length -= n; ptr += n; while (length > 0) { n = readWithTimeoutOrCallback(dataPtr, length); dataPtr += n; length -= n; offset += n; } }
0
CVE-2019-15694
1110
benign
CWE-787
void FdInStream::readBytes(void* data, size_t length) { if (length < MIN_BULK_SIZE) { InStream::readBytes(data, length); return; } U8* dataPtr = (U8*)data; size_t n = end - ptr; if (n > length) n = length; memcpy(dataPtr, ptr, n); dataPtr += n; length -= n; ptr += n; while (length > 0) { n = readWithTimeoutOrCallback(dataPtr, length); dataPtr += n; length -= n; offset += n; } }
1
CVE-2019-15694
1110
vulnerable
CWE-476
#ifndef GPAC_DISABLE_ISOM_HINTING void dump_isom_sdp(GF_ISOFile *file, char *inName, Bool is_final_name) { const char *sdp; u32 size, i; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { char *ext = strchr(szBuf, '.'); if (ext) ext[0] = 0; strcat(szBuf, "_sdp.txt"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { fprintf(stderr, "Failed to open %s for dumping\n", szBuf); return; } } else { dump = stdout; fprintf(dump, "* File SDP content *\n\n"); } //get the movie SDP gf_isom_sdp_get(file, &sdp, &size); fprintf(dump, "%s", sdp); fprintf(dump, "\r\n"); //then tracks for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "%s", sdp); } fprintf(dump, "\n\n");
0
CVE-2020-23932
6
benign
CWE-476
#ifndef GPAC_DISABLE_ISOM_HINTING void dump_isom_sdp(GF_ISOFile *file, char *inName, Bool is_final_name) { const char *sdp; u32 size, i; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { char *ext = strchr(szBuf, '.'); if (ext) ext[0] = 0; strcat(szBuf, "_sdp.txt"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { fprintf(stderr, "Failed to open %s for dumping\n", szBuf); return; } } else { dump = stdout; fprintf(dump, "* File SDP content *\n\n"); } //get the movie SDP gf_isom_sdp_get(file, &sdp, &size); if (sdp && size) fprintf(dump, "%s", sdp); fprintf(dump, "\r\n"); //then tracks for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "%s", sdp); } fprintf(dump, "\n\n");
1
CVE-2020-23932
6
vulnerable
CWE-20
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err) { char *colon; char *host = NULL; #ifdef HAVE_IPV6 char *p; if (*(str) == '[' && str_len > 1) { /* IPV6 notation to specify raw address with port (i.e. [fe80::1]:80) */ p = memchr(str + 1, ']', str_len - 2); if (!p || *(p + 1) != ':') { if (get_err) { *err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str); } return NULL; } *portno = atoi(p + 2); return estrndup(str + 1, p - str - 1); } #endif if (str_len) { colon = memchr(str, ':', str_len - 1); } else { colon = NULL; } if (colon) { *portno = atoi(colon + 1); host = estrndup(str, colon - str); } else { if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; } return host; }
0
CVE-2017-7189
839
benign
CWE-20
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err) { char *colon; char *host = NULL; #ifdef HAVE_IPV6 if (*(str) == '[' && str_len > 1) { /* IPV6 notation to specify raw address with port (i.e. [fe80::1]:80) */ char *p = memchr(str + 1, ']', str_len - 2), *e = NULL; if (!p || *(p + 1) != ':') { if (get_err) { *err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str); } return NULL; } *portno = strtol(p + 2, &e, 10); if (e && *e) { if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; } return estrndup(str + 1, p - str - 1); } #endif if (str_len) { colon = memchr(str, ':', str_len - 1); } else { colon = NULL; } if (colon) { char *e = NULL; *portno = strtol(colon + 1, &e, 10); if (!e || !*e) { return estrndup(str, colon - str); } } if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; }
1
CVE-2017-7189
839
vulnerable
CWE-20
int pnm_validate(jas_stream_t *in) { uchar buf[2]; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= 2); /* Read the first two characters that constitute the signature. */ if ((n = jas_stream_read(in, buf, 2)) < 0) { return -1; } /* Put these characters back to the stream. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Did we read enough data? */ if (n < 2) { return -1; } /* Is this the correct signature for a PNM file? */ if (buf[0] == 'P' && isdigit(buf[1])) { return 0; } return -1; }
0
CVE-2016-9395
136
benign
CWE-20
int pnm_validate(jas_stream_t *in) { jas_uchar buf[2]; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= 2); /* Read the first two characters that constitute the signature. */ if ((n = jas_stream_read(in, buf, 2)) < 0) { return -1; } /* Put these characters back to the stream. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Did we read enough data? */ if (n < 2) { return -1; } /* Is this the correct signature for a PNM file? */ if (buf[0] == 'P' && isdigit(buf[1])) { return 0; } return -1; }
1
CVE-2016-9395
136
vulnerable
CWE-476
void jp2_box_dump(jp2_box_t *box, FILE *out) { jp2_boxinfo_t *boxinfo; boxinfo = jp2_boxinfolookup(box->type); assert(boxinfo); fprintf(out, "JP2 box: "); fprintf(out, "type=%c%s%c (0x%08"PRIxFAST32"); length=%"PRIuFAST32"\n", '"', boxinfo->name, '"', box->type, box->len); if (box->ops->dumpdata) { (*box->ops->dumpdata)(box, out); } }
0
CVE-2016-10250
482
benign
CWE-476
void jp2_box_dump(jp2_box_t *box, FILE *out) { jp2_boxinfo_t *boxinfo; boxinfo = jp2_boxinfolookup(box->type); assert(boxinfo); fprintf(out, "JP2 box: "); fprintf(out, "type=%c%s%c (0x%08"PRIxFAST32"); length=%"PRIuFAST32"\n", '"', boxinfo->name, '"', box->type, box->len); if (box->ops->dumpdata) { (*box->ops->dumpdata)(box, out); } }
1
CVE-2016-10250
482
vulnerable
CWE-787
WRITE_JSON_ELEMENT(ArrStart) { /* increase depth, save: before first array entry no comma needed. */ ctx->commaNeeded[++ctx->depth] = false; return writeChar(ctx, '['); }
0
CVE-2020-36429
2694
benign
CWE-787
WRITE_JSON_ELEMENT(ArrStart) { /* increase depth, save: before first array entry no comma needed. */ if(ctx->depth >= UA_JSON_ENCODING_MAX_RECURSION) return UA_STATUSCODE_BADENCODINGERROR; ctx->depth++; ctx->commaNeeded[ctx->depth] = false; return writeChar(ctx, '['); }
1
CVE-2020-36429
2694
vulnerable
CWE-787
exif_data_load_data_thumbnail (ExifData *data, const unsigned char *d, unsigned int ds, ExifLong o, ExifLong s) { /* Sanity checks */ if ((o + s < o) || (o + s < s) || (o + s > ds) || (o > ds)) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Bogus thumbnail offset (%u) or size (%u).", o, s); return; } if (data->data) exif_mem_free (data->priv->mem, data->data); if (!(data->data = exif_data_alloc (data, s))) { EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", s); data->size = 0; return; } data->size = s; memcpy (data->data, d + o, s); }
0
CVE-2019-9278
2253
benign
CWE-787
exif_data_load_data_thumbnail (ExifData *data, const unsigned char *d, unsigned int ds, ExifLong o, ExifLong s) { /* Sanity checks */ if (o >= ds) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Bogus thumbnail offset (%u).", o); return; } if (s > ds - o) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Bogus thumbnail size (%u), max would be %u.", s, ds-o); return; } if (data->data) exif_mem_free (data->priv->mem, data->data); if (!(data->data = exif_data_alloc (data, s))) { EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", s); data->size = 0; return; } data->size = s; memcpy (data->data, d + o, s); }
1
CVE-2019-9278
2253
vulnerable
CWE-119
static int iwl_process_add_sta_resp(struct iwl_priv *priv, struct iwl_addsta_cmd *addsta, struct iwl_rx_packet *pkt) { u8 sta_id = addsta->sta.sta_id; unsigned long flags; int ret = -EIO; if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", pkt->hdr.flags); return ret; } IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", sta_id); spin_lock_irqsave(&priv->shrd->sta_lock, flags); switch (pkt->u.add_sta.status) { case ADD_STA_SUCCESS_MSK: IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); iwl_sta_ucode_activate(priv, sta_id); ret = 0; break; case ADD_STA_NO_ROOM_IN_TABLE: IWL_ERR(priv, "Adding station %d failed, no room in table.\n", sta_id); break; case ADD_STA_NO_BLOCK_ACK_RESOURCE: IWL_ERR(priv, "Adding station %d failed, no block ack " "resource.\n", sta_id); break; case ADD_STA_MODIFY_NON_EXIST_STA: IWL_ERR(priv, "Attempting to modify non-existing station %d\n", sta_id); break; default: IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status); break; } IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", priv->stations[sta_id].sta.mode == STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id, priv->stations[sta_id].sta.sta.addr); /* * XXX: The MAC address in the command buffer is often changed from * the original sent to the device. That is, the MAC address * written to the command buffer often is not the same MAC address * read from the command buffer when the command returns. This * issue has not yet been resolved and this debugging is left to * observe the problem. */ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", priv->stations[sta_id].sta.mode == STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return ret; }
0
CVE-2012-6712
1372
benign
CWE-119
static int iwl_process_add_sta_resp(struct iwl_priv *priv, struct iwl_addsta_cmd *addsta, struct iwl_rx_packet *pkt) { u8 sta_id = addsta->sta.sta_id; unsigned long flags; int ret = -EIO; if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", pkt->hdr.flags); return ret; } IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", sta_id); spin_lock_irqsave(&priv->shrd->sta_lock, flags); switch (pkt->u.add_sta.status) { case ADD_STA_SUCCESS_MSK: IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); ret = iwl_sta_ucode_activate(priv, sta_id); break; case ADD_STA_NO_ROOM_IN_TABLE: IWL_ERR(priv, "Adding station %d failed, no room in table.\n", sta_id); break; case ADD_STA_NO_BLOCK_ACK_RESOURCE: IWL_ERR(priv, "Adding station %d failed, no block ack " "resource.\n", sta_id); break; case ADD_STA_MODIFY_NON_EXIST_STA: IWL_ERR(priv, "Attempting to modify non-existing station %d\n", sta_id); break; default: IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status); break; } IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", priv->stations[sta_id].sta.mode == STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id, priv->stations[sta_id].sta.sta.addr); /* * XXX: The MAC address in the command buffer is often changed from * the original sent to the device. That is, the MAC address * written to the command buffer often is not the same MAC address * read from the command buffer when the command returns. This * issue has not yet been resolved and this debugging is left to * observe the problem. */ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", priv->stations[sta_id].sta.mode == STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return ret; }
1
CVE-2012-6712
1372
vulnerable
CWE-125
int ntlm_read_message_header(wStream* s, NTLM_MESSAGE_HEADER* header) { if (Stream_GetRemainingLength(s) < 12) return -1; Stream_Read(s, header->Signature, 8); Stream_Read_UINT32(s, header->MessageType); if (strncmp((char*) header->Signature, NTLM_SIGNATURE, 8) != 0) return -1; return 1; }
0
CVE-2018-8789
1499
benign
CWE-125
static int ntlm_read_message_header(wStream* s, NTLM_MESSAGE_HEADER* header) { if (Stream_GetRemainingLength(s) < 12) return -1; Stream_Read(s, header->Signature, 8); Stream_Read_UINT32(s, header->MessageType); if (strncmp((char*) header->Signature, NTLM_SIGNATURE, 8) != 0) return -1; return 1; }
1
CVE-2018-8789
1499
vulnerable