Dataset columns:

cwe_id: stringclasses (8 values)
func: stringlengths (40 to 61.2k characters)
label: int64 (0 to 1)
cve_id: stringlengths (13 to 16 characters)
id: int64 (0 to 3.29k)
text_label: stringclasses (2 values)
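For orientation, the sketch below shows one way rows with this schema could be loaded and grouped into benign/vulnerable pairs. It is a minimal illustration only: the JSON Lines file name (pairs.jsonl) and the use of pandas are assumptions, since the source does not say how the rows are distributed.

import pandas as pd  # assumption: rows available as a local JSON Lines export

# Hypothetical export with the columns listed above.
df = pd.read_json("pairs.jsonl", lines=True)

# In the rows shown here, label (0/1) mirrors text_label ("benign"/"vulnerable").
print(df["cwe_id"].value_counts())  # distribution over the CWE classes

# In this preview, each (cve_id, id) pair holds the two versions of one function.
for (cve, rec_id), pair in df.groupby(["cve_id", "id"]):
    benign = pair.loc[pair["label"] == 0, "func"]
    vulnerable = pair.loc[pair["label"] == 1, "func"]
    print(cve, rec_id, len(benign), len(vulnerable))

Each record below follows the column order given above.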
cwe_id: CWE-190
int read_xattrs_from_disk(int fd, struct squashfs_super_block *sBlk, int flag, long long *table_start) { int res, bytes, i, indexes, index_bytes, ids; long long *index, start, end; struct squashfs_xattr_table id_table; TRACE("read_xattrs_from_disk\n"); if(sBlk->xattr_id_table_start == SQUASHFS_INVALID_BLK) return SQUASHFS_INVALID_BLK; /* * Read xattr id table, containing start of xattr metadata and the * number of xattrs in the file system */ res = read_fs_bytes(fd, sBlk->xattr_id_table_start, sizeof(id_table), &id_table); if(res == 0) return 0; SQUASHFS_INSWAP_XATTR_TABLE(&id_table); if(flag) { /* * id_table.xattr_table_start stores the start of the compressed xattr * * metadata blocks. This by definition is also the end of the previous * filesystem table - the id lookup table. */ *table_start = id_table.xattr_table_start; return id_table.xattr_ids; } /* * Allocate and read the index to the xattr id table metadata * blocks */ ids = id_table.xattr_ids; xattr_table_start = id_table.xattr_table_start; index_bytes = SQUASHFS_XATTR_BLOCK_BYTES(ids); indexes = SQUASHFS_XATTR_BLOCKS(ids); index = malloc(index_bytes); if(index == NULL) MEM_ERROR(); res = read_fs_bytes(fd, sBlk->xattr_id_table_start + sizeof(id_table), index_bytes, index); if(res ==0) goto failed1; SQUASHFS_INSWAP_LONG_LONGS(index, indexes); /* * Allocate enough space for the uncompressed xattr id table, and * read and decompress it */ bytes = SQUASHFS_XATTR_BYTES(ids); xattr_ids = malloc(bytes); if(xattr_ids == NULL) MEM_ERROR(); for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); int length = read_block(fd, index[i], NULL, expected, ((unsigned char *) xattr_ids) + (i * SQUASHFS_METADATA_SIZE)); TRACE("Read xattr id table block %d, from 0x%llx, length " "%d\n", i, index[i], length); if(length == 0) { ERROR("Failed to read xattr id table block %d, " "from 0x%llx, length %d\n", i, index[i], length); goto failed2; } } /* * Read and decompress the xattr metadata * * Note the first xattr id table metadata block is immediately after * the last xattr metadata block, so we can use index[0] to work out * the end of the xattr metadata */ start = xattr_table_start; end = index[0]; for(i = 0; start < end; i++) { int length; xattrs = realloc(xattrs, (i + 1) * SQUASHFS_METADATA_SIZE); if(xattrs == NULL) MEM_ERROR(); /* store mapping from location of compressed block in fs -> * location of uncompressed block in memory */ save_xattr_block(start, i * SQUASHFS_METADATA_SIZE); length = read_block(fd, start, &start, 0, ((unsigned char *) xattrs) + (i * SQUASHFS_METADATA_SIZE)); TRACE("Read xattr block %d, length %d\n", i, length); if(length == 0) { ERROR("Failed to read xattr block %d\n", i); goto failed3; } /* * If this is not the last metadata block in the xattr metadata * then it should be SQUASHFS_METADATA_SIZE in size. * Note, we can't use expected in read_block() above for this * because we don't know if this is the last block until * after reading. */ if(start != end && length != SQUASHFS_METADATA_SIZE) { ERROR("Xattr block %d should be %d bytes in length, " "it is %d bytes\n", i, SQUASHFS_METADATA_SIZE, length); goto failed3; } } /* swap if necessary the xattr id entries */ for(i = 0; i < ids; i++) SQUASHFS_INSWAP_XATTR_ID(&xattr_ids[i]); free(index); return ids; failed3: free(xattrs); failed2: free(xattr_ids); failed1: free(index); return 0; }
label: 0
cve_id: CVE-2015-4645
id: 3,100
text_label: benign

cwe_id: CWE-190
int read_xattrs_from_disk(int fd, struct squashfs_super_block *sBlk, int flag, long long *table_start) { /* * Note on overflow limits: * Size of ids (id_table.xattr_ids) is 2^32 (unsigned int) * Max size of bytes is 2^32*16 or 2^36 * Max indexes is (2^32*16)/8K or 2^23 * Max index_bytes is ((2^32*16)/8K)*8 or 2^26 or 64M */ int res, i, indexes, index_bytes; unsigned int ids; long long bytes; long long *index, start, end; struct squashfs_xattr_table id_table; TRACE("read_xattrs_from_disk\n"); if(sBlk->xattr_id_table_start == SQUASHFS_INVALID_BLK) return SQUASHFS_INVALID_BLK; /* * Read xattr id table, containing start of xattr metadata and the * number of xattrs in the file system */ res = read_fs_bytes(fd, sBlk->xattr_id_table_start, sizeof(id_table), &id_table); if(res == 0) return 0; SQUASHFS_INSWAP_XATTR_TABLE(&id_table); /* * Compute index table values */ ids = id_table.xattr_ids; xattr_table_start = id_table.xattr_table_start; index_bytes = SQUASHFS_XATTR_BLOCK_BYTES((long long) ids); indexes = SQUASHFS_XATTR_BLOCKS((long long) ids); /* * The size of the index table (index_bytes) should match the * table start and end points */ if(index_bytes != (sBlk->bytes_used - (sBlk->xattr_id_table_start + sizeof(id_table)))) { ERROR("read_xattrs_from_disk: Bad xattr_ids count in super block\n"); return 0; } /* * id_table.xattr_table_start stores the start of the compressed xattr * metadata blocks. This by definition is also the end of the previous * filesystem table - the id lookup table. */ if(table_start != NULL) *table_start = id_table.xattr_table_start; /* * If flag is set then return once we've read the above * table_start. That value is necessary for sanity checking, * but we don't actually want to extract the xattrs, and so * stop here. */ if(flag) return id_table.xattr_ids; /* * Allocate and read the index to the xattr id table metadata * blocks */ index = malloc(index_bytes); if(index == NULL) MEM_ERROR(); res = read_fs_bytes(fd, sBlk->xattr_id_table_start + sizeof(id_table), index_bytes, index); if(res ==0) goto failed1; SQUASHFS_INSWAP_LONG_LONGS(index, indexes); /* * Allocate enough space for the uncompressed xattr id table, and * read and decompress it */ bytes = SQUASHFS_XATTR_BYTES((long long) ids); xattr_ids = malloc(bytes); if(xattr_ids == NULL) MEM_ERROR(); for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? 
SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); int length = read_block(fd, index[i], NULL, expected, ((unsigned char *) xattr_ids) + ((long long) i * SQUASHFS_METADATA_SIZE)); TRACE("Read xattr id table block %d, from 0x%llx, length " "%d\n", i, index[i], length); if(length == 0) { ERROR("Failed to read xattr id table block %d, " "from 0x%llx, length %d\n", i, index[i], length); goto failed2; } } /* * Read and decompress the xattr metadata * * Note the first xattr id table metadata block is immediately after * the last xattr metadata block, so we can use index[0] to work out * the end of the xattr metadata */ start = xattr_table_start; end = index[0]; for(i = 0; start < end; i++) { int length; xattrs = realloc(xattrs, (i + 1) * SQUASHFS_METADATA_SIZE); if(xattrs == NULL) MEM_ERROR(); /* store mapping from location of compressed block in fs -> * location of uncompressed block in memory */ save_xattr_block(start, i * SQUASHFS_METADATA_SIZE); length = read_block(fd, start, &start, 0, ((unsigned char *) xattrs) + (i * SQUASHFS_METADATA_SIZE)); TRACE("Read xattr block %d, length %d\n", i, length); if(length == 0) { ERROR("Failed to read xattr block %d\n", i); goto failed3; } /* * If this is not the last metadata block in the xattr metadata * then it should be SQUASHFS_METADATA_SIZE in size. * Note, we can't use expected in read_block() above for this * because we don't know if this is the last block until * after reading. */ if(start != end && length != SQUASHFS_METADATA_SIZE) { ERROR("Xattr block %d should be %d bytes in length, " "it is %d bytes\n", i, SQUASHFS_METADATA_SIZE, length); goto failed3; } } /* swap if necessary the xattr id entries */ for(i = 0; i < ids; i++) SQUASHFS_INSWAP_XATTR_ID(&xattr_ids[i]); free(index); return ids; failed3: free(xattrs); failed2: free(xattr_ids); failed1: free(index); return 0; }
label: 1
cve_id: CVE-2015-4645
id: 3,100
text_label: vulnerable

cwe_id: CWE-476
smb2_flush(smb_request_t *sr) { smb_ofile_t *of = NULL; uint16_t StructSize; uint16_t reserved1; uint32_t reserved2; smb2fid_t smb2fid; uint32_t status; int rc = 0; /* * SMB2 Flush request */ rc = smb_mbc_decodef( &sr->smb_data, "wwlqq", &StructSize, /* w */ &reserved1, /* w */ &reserved2, /* l */ &smb2fid.persistent, /* q */ &smb2fid.temporal); /* q */ if (rc) return (SDRC_ERROR); if (StructSize != 24) return (SDRC_ERROR); status = smb2sr_lookup_fid(sr, &smb2fid); if (status) { smb2sr_put_error(sr, status); return (SDRC_SUCCESS); } of = sr->fid_ofile; /* * XXX - todo: * Flush named pipe should drain writes. */ if ((of->f_node->flags & NODE_FLAGS_WRITE_THROUGH) == 0) (void) smb_fsop_commit(sr, of->f_cr, of->f_node); /* * SMB2 Flush reply */ (void) smb_mbc_encodef( &sr->reply, "wwl", 4, /* StructSize */ /* w */ 0); /* reserved */ /* w */ return (SDRC_SUCCESS); }
label: 0
cve_id: CVE-2016-6561
id: 1,455
text_label: benign

cwe_id: CWE-476
smb2_flush(smb_request_t *sr) { uint16_t StructSize; uint16_t reserved1; uint32_t reserved2; smb2fid_t smb2fid; uint32_t status; int rc = 0; /* * SMB2 Flush request */ rc = smb_mbc_decodef( &sr->smb_data, "wwlqq", &StructSize, /* w */ &reserved1, /* w */ &reserved2, /* l */ &smb2fid.persistent, /* q */ &smb2fid.temporal); /* q */ if (rc) return (SDRC_ERROR); if (StructSize != 24) return (SDRC_ERROR); status = smb2sr_lookup_fid(sr, &smb2fid); if (status) { smb2sr_put_error(sr, status); return (SDRC_SUCCESS); } smb_ofile_flush(sr, sr->fid_ofile); /* * SMB2 Flush reply */ (void) smb_mbc_encodef( &sr->reply, "wwl", 4, /* StructSize */ /* w */ 0); /* reserved */ /* w */ return (SDRC_SUCCESS); }
label: 1
cve_id: CVE-2016-6561
id: 1,455
text_label: vulnerable

cwe_id: CWE-20
error_t enc28j60SendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { size_t length; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > 1536) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the link is up before transmitting the frame if(!interface->linkState) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Drop current packet return NO_ERROR; } //It is recommended to reset the transmit logic before //attempting to transmit a packet enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_TXRST); enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_TXRST); //Interrupt flags should be cleared after the reset is completed enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_TXIF | EIR_TXERIF); //Set transmit buffer location enc28j60WriteReg(interface, ENC28J60_REG_ETXSTL, LSB(ENC28J60_TX_BUFFER_START)); enc28j60WriteReg(interface, ENC28J60_REG_ETXSTH, MSB(ENC28J60_TX_BUFFER_START)); //Point to start of transmit buffer enc28j60WriteReg(interface, ENC28J60_REG_EWRPTL, LSB(ENC28J60_TX_BUFFER_START)); enc28j60WriteReg(interface, ENC28J60_REG_EWRPTH, MSB(ENC28J60_TX_BUFFER_START)); //Copy the data to the transmit buffer enc28j60WriteBuffer(interface, buffer, offset); //ETXND should point to the last byte in the data payload enc28j60WriteReg(interface, ENC28J60_REG_ETXNDL, LSB(ENC28J60_TX_BUFFER_START + length)); enc28j60WriteReg(interface, ENC28J60_REG_ETXNDH, MSB(ENC28J60_TX_BUFFER_START + length)); //Start transmission enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_TXRTS); //Successful processing return NO_ERROR; }
label: 0
cve_id: CVE-2021-26788
id: 1,957
text_label: benign

cwe_id: CWE-20
error_t enc28j60SendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { size_t length; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > 1536) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the link is up before transmitting the frame if(!interface->linkState) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Drop current packet return NO_ERROR; } //It is recommended to reset the transmit logic before //attempting to transmit a packet enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_TXRST); enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_TXRST); //Interrupt flags should be cleared after the reset is completed enc28j60ClearBit(interface, ENC28J60_EIR, ENC28J60_EIR_TXIF | ENC28J60_EIR_TXERIF); //Set transmit buffer location enc28j60WriteReg(interface, ENC28J60_ETXSTL, LSB(ENC28J60_TX_BUFFER_START)); enc28j60WriteReg(interface, ENC28J60_ETXSTH, MSB(ENC28J60_TX_BUFFER_START)); //Point to start of transmit buffer enc28j60WriteReg(interface, ENC28J60_EWRPTL, LSB(ENC28J60_TX_BUFFER_START)); enc28j60WriteReg(interface, ENC28J60_EWRPTH, MSB(ENC28J60_TX_BUFFER_START)); //Copy the data to the transmit buffer enc28j60WriteBuffer(interface, buffer, offset); //ETXND should point to the last byte in the data payload enc28j60WriteReg(interface, ENC28J60_ETXNDL, LSB(ENC28J60_TX_BUFFER_START + length)); enc28j60WriteReg(interface, ENC28J60_ETXNDH, MSB(ENC28J60_TX_BUFFER_START + length)); //Start transmission enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_TXRTS); //Successful processing return NO_ERROR; }
label: 1
cve_id: CVE-2021-26788
id: 1,957
text_label: vulnerable

cwe_id: CWE-20
check_symlinks(struct archive_write_disk *a) { #if !defined(HAVE_LSTAT) /* Platform doesn't have lstat, so we can't look for symlinks. */ (void)a; /* UNUSED */ return (ARCHIVE_OK); #else char *pn; char c; int r; struct stat st; /* * Guard against symlink tricks. Reject any archive entry whose * destination would be altered by a symlink. */ /* Whatever we checked last time doesn't need to be re-checked. */ pn = a->name; if (archive_strlen(&(a->path_safe)) > 0) { char *p = a->path_safe.s; while ((*pn != '\0') && (*p == *pn)) ++p, ++pn; } /* Skip the root directory if the path is absolute. */ if(pn == a->name && pn[0] == '/') ++pn; c = pn[0]; /* Keep going until we've checked the entire name. */ while (pn[0] != '\0' && (pn[0] != '/' || pn[1] != '\0')) { /* Skip the next path element. */ while (*pn != '\0' && *pn != '/') ++pn; c = pn[0]; pn[0] = '\0'; /* Check that we haven't hit a symlink. */ r = lstat(a->name, &st); if (r != 0) { /* We've hit a dir that doesn't exist; stop now. */ if (errno == ENOENT) { break; } else { /* Note: This effectively disables deep directory * support when security checks are enabled. * Otherwise, very long pathnames that trigger * an error here could evade the sandbox. * TODO: We could do better, but it would probably * require merging the symlink checks with the * deep-directory editing. */ return (ARCHIVE_FAILED); } } else if (S_ISLNK(st.st_mode)) { if (c == '\0') { /* * Last element is symlink; remove it * so we can overwrite it with the * item being extracted. */ if (unlink(a->name)) { archive_set_error(&a->archive, errno, "Could not remove symlink %s", a->name); pn[0] = c; return (ARCHIVE_FAILED); } a->pst = NULL; /* * Even if we did remove it, a warning * is in order. The warning is silly, * though, if we're just replacing one * symlink with another symlink. */ if (!S_ISLNK(a->mode)) { archive_set_error(&a->archive, 0, "Removing symlink %s", a->name); } /* Symlink gone. No more problem! */ pn[0] = c; return (0); } else if (a->flags & ARCHIVE_EXTRACT_UNLINK) { /* User asked us to remove problems. */ if (unlink(a->name) != 0) { archive_set_error(&a->archive, 0, "Cannot remove intervening symlink %s", a->name); pn[0] = c; return (ARCHIVE_FAILED); } a->pst = NULL; } else { archive_set_error(&a->archive, 0, "Cannot extract through symlink %s", a->name); pn[0] = c; return (ARCHIVE_FAILED); } } pn[0] = c; if (pn[0] != '\0') pn++; /* Advance to the next segment. */ } pn[0] = c; /* We've checked and/or cleaned the whole path, so remember it. */ archive_strcpy(&a->path_safe, a->name); return (ARCHIVE_OK); #endif }
label: 0
cve_id: CVE-2016-5418
id: 121
text_label: benign

cwe_id: CWE-20
check_symlinks(struct archive_write_disk *a) { struct archive_string error_string; int error_number; int rc; archive_string_init(&error_string); rc = check_symlinks_fsobj(a->name, &error_number, &error_string, a->flags); if (rc != ARCHIVE_OK) { archive_set_error(&a->archive, error_number, "%s", error_string.s); } archive_string_free(&error_string); a->pst = NULL; /* to be safe */ return rc; }
label: 1
cve_id: CVE-2016-5418
id: 121
text_label: vulnerable

cwe_id: CWE-119
static bool parse_reconnect(struct pool *pool, json_t *val) { char *sockaddr_url, *stratum_port, *tmp; char *url, *port, address[256]; if (opt_disable_client_reconnect) { applog(LOG_WARNING, "Stratum client.reconnect forbidden, aborting."); return false; } memset(address, 0, 255); url = (char *)json_string_value(json_array_get(val, 0)); if (!url) url = pool->sockaddr_url; port = (char *)json_string_value(json_array_get(val, 1)); if (!port) port = pool->stratum_port; sprintf(address, "%s:%s", url, port); if (!extract_sockaddr(address, &sockaddr_url, &stratum_port)) return false; applog(LOG_NOTICE, "Reconnect requested from %s to %s", get_pool_name(pool), address); clear_pool_work(pool); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); tmp = pool->sockaddr_url; pool->sockaddr_url = sockaddr_url; pool->stratum_url = pool->sockaddr_url; free(tmp); tmp = pool->stratum_port; pool->stratum_port = stratum_port; free(tmp); mutex_unlock(&pool->stratum_lock); if (!restart_stratum(pool)) { pool_failed(pool); return false; } return true; }
label: 0
cve_id: CVE-2014-4501
id: 1,609
text_label: benign

cwe_id: CWE-119
static bool parse_reconnect(struct pool *pool, json_t *val) { if (opt_disable_client_reconnect) { applog(LOG_WARNING, "Stratum client.reconnect received but is disabled, not reconnecting."); return false; } char *url, *port, address[256]; char *sockaddr_url, *stratum_port, *tmp; /* Tempvars. */ url = (char *)json_string_value(json_array_get(val, 0)); if (!url) url = pool->sockaddr_url; port = (char *)json_string_value(json_array_get(val, 1)); if (!port) port = pool->stratum_port; snprintf(address, sizeof(address), "%s:%s", url, port); if (!extract_sockaddr(address, &sockaddr_url, &stratum_port)) return false; applog(LOG_NOTICE, "Reconnect requested from %s to %s", get_pool_name(pool), address); clear_pool_work(pool); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); tmp = pool->sockaddr_url; pool->sockaddr_url = sockaddr_url; pool->stratum_url = pool->sockaddr_url; free(tmp); tmp = pool->stratum_port; pool->stratum_port = stratum_port; free(tmp); mutex_unlock(&pool->stratum_lock); if (!restart_stratum(pool)) { pool_failed(pool); return false; } return true; }
label: 1
cve_id: CVE-2014-4501
id: 1,609
text_label: vulnerable

cwe_id: CWE-787
void ImplPolygon::ImplSplit( sal_uInt16 nPos, sal_uInt16 nSpace, ImplPolygon const * pInitPoly ) { //Can't fit this in :-(, throw ? if (mnPoints + nSpace > USHRT_MAX) return; const sal_uInt16 nNewSize = mnPoints + nSpace; const std::size_t nSpaceSize = static_cast<std::size_t>(nSpace) * sizeof(Point); if( nPos >= mnPoints ) { // Append at the back nPos = mnPoints; ImplSetSize( nNewSize ); if( pInitPoly ) { memcpy( mpPointAry + nPos, pInitPoly->mpPointAry, nSpaceSize ); if( pInitPoly->mpFlagAry ) memcpy( mpFlagAry + nPos, pInitPoly->mpFlagAry, nSpace ); } } else { const sal_uInt16 nSecPos = nPos + nSpace; const sal_uInt16 nRest = mnPoints - nPos; Point* pNewAry = reinterpret_cast<Point*>(new char[ static_cast<std::size_t>(nNewSize) * sizeof(Point) ]); memcpy( pNewAry, mpPointAry, nPos * sizeof( Point ) ); if( pInitPoly ) memcpy( pNewAry + nPos, pInitPoly->mpPointAry, nSpaceSize ); else memset( pNewAry + nPos, 0, nSpaceSize ); memcpy( pNewAry + nSecPos, mpPointAry + nPos, nRest * sizeof( Point ) ); delete[] reinterpret_cast<char*>(mpPointAry); // consider FlagArray if( mpFlagAry ) { PolyFlags* pNewFlagAry = new PolyFlags[ nNewSize ]; memcpy( pNewFlagAry, mpFlagAry, nPos ); if( pInitPoly && pInitPoly->mpFlagAry ) memcpy( pNewFlagAry + nPos, pInitPoly->mpFlagAry, nSpace ); else memset( pNewFlagAry + nPos, 0, nSpace ); memcpy( pNewFlagAry + nSecPos, mpFlagAry + nPos, nRest ); delete[] mpFlagAry; mpFlagAry = pNewFlagAry; } mpPointAry = pNewAry; mnPoints = nNewSize; } }
label: 0
cve_id: CVE-2017-7870
id: 551
text_label: benign

cwe_id: CWE-787
bool ImplPolygon::ImplSplit( sal_uInt16 nPos, sal_uInt16 nSpace, ImplPolygon const * pInitPoly ) { //Can't fit this in :-(, throw ? if (mnPoints + nSpace > USHRT_MAX) { SAL_WARN("tools", "Polygon needs " << mnPoints + nSpace << " points, but only " << USHRT_MAX << " possible"); return false; } const sal_uInt16 nNewSize = mnPoints + nSpace; const std::size_t nSpaceSize = static_cast<std::size_t>(nSpace) * sizeof(Point); if( nPos >= mnPoints ) { // Append at the back nPos = mnPoints; ImplSetSize( nNewSize ); if( pInitPoly ) { memcpy( mpPointAry + nPos, pInitPoly->mpPointAry, nSpaceSize ); if( pInitPoly->mpFlagAry ) memcpy( mpFlagAry + nPos, pInitPoly->mpFlagAry, nSpace ); } } else { const sal_uInt16 nSecPos = nPos + nSpace; const sal_uInt16 nRest = mnPoints - nPos; Point* pNewAry = reinterpret_cast<Point*>(new char[ static_cast<std::size_t>(nNewSize) * sizeof(Point) ]); memcpy( pNewAry, mpPointAry, nPos * sizeof( Point ) ); if( pInitPoly ) memcpy( pNewAry + nPos, pInitPoly->mpPointAry, nSpaceSize ); else memset( pNewAry + nPos, 0, nSpaceSize ); memcpy( pNewAry + nSecPos, mpPointAry + nPos, nRest * sizeof( Point ) ); delete[] reinterpret_cast<char*>(mpPointAry); // consider FlagArray if( mpFlagAry ) { PolyFlags* pNewFlagAry = new PolyFlags[ nNewSize ]; memcpy( pNewFlagAry, mpFlagAry, nPos ); if( pInitPoly && pInitPoly->mpFlagAry ) memcpy( pNewFlagAry + nPos, pInitPoly->mpFlagAry, nSpace ); else memset( pNewFlagAry + nPos, 0, nSpace ); memcpy( pNewFlagAry + nSecPos, mpFlagAry + nPos, nRest ); delete[] mpFlagAry; mpFlagAry = pNewFlagAry; } mpPointAry = pNewAry; mnPoints = nNewSize; } return true; }
label: 1
cve_id: CVE-2017-7870
id: 551
text_label: vulnerable

cwe_id: CWE-125
void nego_process_negotiation_failure(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; UINT32 failureCode; WLog_DBG(TAG, "RDP_NEG_FAILURE"); Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, failureCode); switch (failureCode) { case SSL_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_REQUIRED_BY_SERVER"); break; case SSL_NOT_ALLOWED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_NOT_ALLOWED_BY_SERVER"); nego->sendNegoData = TRUE; break; case SSL_CERT_NOT_ON_SERVER: WLog_ERR(TAG, "Error: SSL_CERT_NOT_ON_SERVER"); nego->sendNegoData = TRUE; break; case INCONSISTENT_FLAGS: WLog_ERR(TAG, "Error: INCONSISTENT_FLAGS"); break; case HYBRID_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: HYBRID_REQUIRED_BY_SERVER"); break; default: WLog_ERR(TAG, "Error: Unknown protocol security error %" PRIu32 "", failureCode); break; } nego->state = NEGO_STATE_FAIL; }
label: 0
cve_id: CVE-2020-11089
id: 1,147
text_label: benign

cwe_id: CWE-125
BOOL nego_process_negotiation_failure(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; UINT32 failureCode; WLog_DBG(TAG, "RDP_NEG_FAILURE"); if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, failureCode); switch (failureCode) { case SSL_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_REQUIRED_BY_SERVER"); break; case SSL_NOT_ALLOWED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_NOT_ALLOWED_BY_SERVER"); nego->sendNegoData = TRUE; break; case SSL_CERT_NOT_ON_SERVER: WLog_ERR(TAG, "Error: SSL_CERT_NOT_ON_SERVER"); nego->sendNegoData = TRUE; break; case INCONSISTENT_FLAGS: WLog_ERR(TAG, "Error: INCONSISTENT_FLAGS"); break; case HYBRID_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: HYBRID_REQUIRED_BY_SERVER"); break; default: WLog_ERR(TAG, "Error: Unknown protocol security error %" PRIu32 "", failureCode); break; } nego->state = NEGO_STATE_FAIL; return TRUE; }
label: 1
cve_id: CVE-2020-11089
id: 1,147
text_label: vulnerable

cwe_id: CWE-125
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) { struct trace_array *tr = data; struct ftrace_event_file *ftrace_file; struct syscall_trace_enter *entry; struct syscall_metadata *sys_data; struct ring_buffer_event *event; struct ring_buffer *buffer; unsigned long irq_flags; int pc; int syscall_nr; int size; syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0) return; /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]); if (!ftrace_file) return; if (ftrace_trigger_soft_disabled(ftrace_file)) return; sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return; size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; local_save_flags(irq_flags); pc = preempt_count(); buffer = tr->trace_buffer.buffer; event = trace_buffer_lock_reserve(buffer, sys_data->enter_event->event.type, size, irq_flags, pc); if (!event) return; entry = ring_buffer_event_data(event); entry->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); event_trigger_unlock_commit(ftrace_file, buffer, event, entry, irq_flags, pc); }
label: 0
cve_id: CVE-2014-7825
id: 1,122
text_label: benign

cwe_id: CWE-125
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) { struct trace_array *tr = data; struct ftrace_event_file *ftrace_file; struct syscall_trace_enter *entry; struct syscall_metadata *sys_data; struct ring_buffer_event *event; struct ring_buffer *buffer; unsigned long irq_flags; int pc; int syscall_nr; int size; syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]); if (!ftrace_file) return; if (ftrace_trigger_soft_disabled(ftrace_file)) return; sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return; size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; local_save_flags(irq_flags); pc = preempt_count(); buffer = tr->trace_buffer.buffer; event = trace_buffer_lock_reserve(buffer, sys_data->enter_event->event.type, size, irq_flags, pc); if (!event) return; entry = ring_buffer_event_data(event); entry->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); event_trigger_unlock_commit(ftrace_file, buffer, event, entry, irq_flags, pc); }
label: 1
cve_id: CVE-2014-7825
id: 1,122
text_label: vulnerable

cwe_id: CWE-190
static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *) &skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) return (void *) nla - (void *) skb->data; return 0; }
label: 0
cve_id: CVE-2014-3144
id: 266
text_label: benign

cwe_id: CWE-190
static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (skb->len < sizeof(struct nlattr)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *) &skb->data[A]; if (nla->nla_len > skb->len - A) return 0; nla = nla_find_nested(nla, X); if (nla) return (void *) nla - (void *) skb->data; return 0; }
label: 1
cve_id: CVE-2014-3144
id: 266
text_label: vulnerable

cwe_id: CWE-787
int main(int argc, char **argv) { int i, n_valid, do_write, do_scrub; char *c, *dname, *name; DIR *dir; FILE *fp; pdf_t *pdf; pdf_flag_t flags; if (argc < 2) usage(); /* Args */ do_write = do_scrub = flags = 0; name = NULL; for (i=1; i<argc; i++) { if (strncmp(argv[i], "-w", 2) == 0) do_write = 1; else if (strncmp(argv[i], "-i", 2) == 0) flags |= PDF_FLAG_DISP_CREATOR; else if (strncmp(argv[i], "-q", 2) == 0) flags |= PDF_FLAG_QUIET; else if (strncmp(argv[i], "-s", 2) == 0) do_scrub = 1; else if (argv[i][0] != '-') name = argv[i]; else if (argv[i][0] == '-') usage(); } if (!name) usage(); if (!(fp = fopen(name, "r"))) { ERR("Could not open file '%s'\n", argv[1]); return -1; } else if (!pdf_is_pdf(fp)) { ERR("'%s' specified is not a valid PDF\n", name); fclose(fp); return -1; } /* Load PDF */ if (!(pdf = init_pdf(fp, name))) { fclose(fp); return -1; } /* Count valid xrefs */ for (i=0, n_valid=0; i<pdf->n_xrefs; i++) if (pdf->xrefs[i].version) ++n_valid; /* Bail if we only have 1 valid */ if (n_valid < 2) { if (!(flags & (PDF_FLAG_QUIET | PDF_FLAG_DISP_CREATOR))) printf("%s: There is only one version of this PDF\n", pdf->name); if (do_write) { fclose(fp); pdf_delete(pdf); return 0; } } dname = NULL; if (do_write) { /* Create directory to place the various versions in */ if ((c = strrchr(name, '/'))) name = c + 1; if ((c = strrchr(name, '.'))) *c = '\0'; dname = malloc(strlen(name) + 16); sprintf(dname, "%s-versions", name); if (!(dir = opendir(dname))) mkdir(dname, S_IRWXU); else { ERR("This directory already exists, PDF version extraction will " "not occur.\n"); fclose(fp); closedir(dir); free(dname); pdf_delete(pdf); return -1; } /* Write the pdf as a pervious version */ for (i=0; i<pdf->n_xrefs; i++) if (pdf->xrefs[i].version) write_version(fp, name, dname, &pdf->xrefs[i]); } /* Generate a per-object summary */ pdf_summarize(fp, pdf, dname, flags); /* Have we been summoned to scrub history from this PDF */ if (do_scrub) scrub_document(fp, pdf); /* Display extra information */ if (flags & PDF_FLAG_DISP_CREATOR) display_creator(fp, pdf); fclose(fp); free(dname); pdf_delete(pdf); return 0; }
label: 0
cve_id: CVE-2019-14934
id: 433
text_label: benign

cwe_id: CWE-787
int main(int argc, char **argv) { int i, n_valid, do_write, do_scrub; char *c, *dname, *name; DIR *dir; FILE *fp; pdf_t *pdf; pdf_flag_t flags; if (argc < 2) usage(); /* Args */ do_write = do_scrub = flags = 0; name = NULL; for (i=1; i<argc; i++) { if (strncmp(argv[i], "-w", 2) == 0) do_write = 1; else if (strncmp(argv[i], "-i", 2) == 0) flags |= PDF_FLAG_DISP_CREATOR; else if (strncmp(argv[i], "-q", 2) == 0) flags |= PDF_FLAG_QUIET; else if (strncmp(argv[i], "-s", 2) == 0) do_scrub = 1; else if (argv[i][0] != '-') name = argv[i]; else if (argv[i][0] == '-') usage(); } if (!name) usage(); if (!(fp = fopen(name, "r"))) { ERR("Could not open file '%s'\n", argv[1]); return -1; } else if (!pdf_is_pdf(fp)) { ERR("'%s' specified is not a valid PDF\n", name); fclose(fp); return -1; } /* Load PDF */ if (!(pdf = init_pdf(fp, name))) { fclose(fp); return -1; } /* Count valid xrefs */ for (i=0, n_valid=0; i<pdf->n_xrefs; i++) if (pdf->xrefs[i].version) ++n_valid; /* Bail if we only have 1 valid */ if (n_valid < 2) { if (!(flags & (PDF_FLAG_QUIET | PDF_FLAG_DISP_CREATOR))) printf("%s: There is only one version of this PDF\n", pdf->name); if (do_write) { fclose(fp); pdf_delete(pdf); return 0; } } dname = NULL; if (do_write) { /* Create directory to place the various versions in */ if ((c = strrchr(name, '/'))) name = c + 1; if ((c = strrchr(name, '.'))) *c = '\0'; dname = safe_calloc(strlen(name) + 16); sprintf(dname, "%s-versions", name); if (!(dir = opendir(dname))) mkdir(dname, S_IRWXU); else { ERR("This directory already exists, PDF version extraction will " "not occur.\n"); fclose(fp); closedir(dir); free(dname); pdf_delete(pdf); return -1; } /* Write the pdf as a pervious version */ for (i=0; i<pdf->n_xrefs; i++) if (pdf->xrefs[i].version) write_version(fp, name, dname, &pdf->xrefs[i]); } /* Generate a per-object summary */ pdf_summarize(fp, pdf, dname, flags); /* Have we been summoned to scrub history from this PDF */ if (do_scrub) scrub_document(fp, pdf); /* Display extra information */ if (flags & PDF_FLAG_DISP_CREATOR) display_creator(fp, pdf); fclose(fp); free(dname); pdf_delete(pdf); return 0; }
label: 1
cve_id: CVE-2019-14934
id: 433
text_label: vulnerable

cwe_id: CWE-787
grub_ext2_iterate_dir (grub_fshelp_node_t dir, int (*hook) (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure), void *closure) { unsigned int fpos = 0; struct grub_fshelp_node *diro = (struct grub_fshelp_node *) dir; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } /* Search the file. */ if (hook) while (fpos < grub_le_to_cpu32 (diro->inode.size)) { struct ext2_dirent dirent; grub_ext2_read_file (diro, NULL, NULL, 0, fpos, sizeof (dirent), (char *) &dirent); if (grub_errno) return 0; if (dirent.direntlen == 0) return 0; if (dirent.namelen != 0) { #ifndef _MSC_VER char filename[dirent.namelen + 1]; #else char * filename = grub_malloc (dirent.namelen + 1); #endif struct grub_fshelp_node *fdiro; enum grub_fshelp_filetype type = GRUB_FSHELP_UNKNOWN; grub_ext2_read_file (diro, 0, 0, 0, fpos + sizeof (struct ext2_dirent), dirent.namelen, filename); if (grub_errno) return 0; fdiro = grub_malloc (sizeof (struct grub_fshelp_node)); if (! fdiro) return 0; fdiro->data = diro->data; fdiro->ino = grub_le_to_cpu32 (dirent.inode); filename[dirent.namelen] = '\0'; if (dirent.filetype != FILETYPE_UNKNOWN) { fdiro->inode_read = 0; if (dirent.filetype == FILETYPE_DIRECTORY) type = GRUB_FSHELP_DIR; else if (dirent.filetype == FILETYPE_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if (dirent.filetype == FILETYPE_REG) type = GRUB_FSHELP_REG; } else { /* The filetype can not be read from the dirent, read the inode to get more information. */ grub_ext2_read_inode (diro->data, grub_le_to_cpu32 (dirent.inode), &fdiro->inode); if (grub_errno) { grub_free (fdiro); return 0; } fdiro->inode_read = 1; if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_DIRECTORY) type = GRUB_FSHELP_DIR; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_REG) type = GRUB_FSHELP_REG; } if (hook (filename, type, fdiro, closure)) return 1; } fpos += grub_le_to_cpu16 (dirent.direntlen); } return 0; }
label: 0
cve_id: CVE-2017-9949
id: 2,158
text_label: benign

cwe_id: CWE-787
grub_ext2_iterate_dir (grub_fshelp_node_t dir, int (*hook) (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure), void *closure) { unsigned int fpos = 0; struct grub_fshelp_node *diro = (struct grub_fshelp_node *) dir; if (! diro->inode_read) { grub_ext2_read_inode (diro->data, diro->ino, &diro->inode); if (grub_errno) return 0; } /* Search the file. */ if (hook) while (fpos < grub_le_to_cpu32 (diro->inode.size)) { struct ext2_dirent dirent; grub_ext2_read_file (diro, NULL, NULL, 0, fpos, sizeof (dirent), (char *) &dirent); if (grub_errno) return 0; if (dirent.direntlen == 0) return 0; if (dirent.namelen != 0) { char * filename = grub_malloc (dirent.namelen + 1); struct grub_fshelp_node *fdiro; enum grub_fshelp_filetype type = GRUB_FSHELP_UNKNOWN; if (!filename) { break; } grub_ext2_read_file (diro, 0, 0, 0, fpos + sizeof (struct ext2_dirent), dirent.namelen, filename); if (grub_errno) { free (filename); return 0; } fdiro = grub_malloc (sizeof (struct grub_fshelp_node)); if (! fdiro) { free (filename); return 0; } fdiro->data = diro->data; fdiro->ino = grub_le_to_cpu32 (dirent.inode); filename[dirent.namelen] = '\0'; if (dirent.filetype != FILETYPE_UNKNOWN) { fdiro->inode_read = 0; if (dirent.filetype == FILETYPE_DIRECTORY) type = GRUB_FSHELP_DIR; else if (dirent.filetype == FILETYPE_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if (dirent.filetype == FILETYPE_REG) type = GRUB_FSHELP_REG; } else { /* The filetype can not be read from the dirent, read the inode to get more information. */ grub_ext2_read_inode (diro->data, grub_le_to_cpu32 (dirent.inode), &fdiro->inode); if (grub_errno) { free (filename); grub_free (fdiro); return 0; } fdiro->inode_read = 1; if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_DIRECTORY) type = GRUB_FSHELP_DIR; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_SYMLINK) type = GRUB_FSHELP_SYMLINK; else if ((grub_le_to_cpu16 (fdiro->inode.mode) & FILETYPE_INO_MASK) == FILETYPE_INO_REG) type = GRUB_FSHELP_REG; } if (hook (filename, type, fdiro, closure)) { free (filename); return 1; } free (filename); } fpos += grub_le_to_cpu16 (dirent.direntlen); } return 0; }
label: 1
cve_id: CVE-2017-9949
id: 2,158
text_label: vulnerable

cwe_id: CWE-125
ast_for_expr_stmt(struct compiling *c, const node *n) { REQ(n, expr_stmt); /* expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*) annassign: ':' test ['=' test] testlist_star_expr: (test|star_expr) (',' test|star_expr)* [','] augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=' test: ... here starts the operator precedence dance */ if (NCH(n) == 1) { expr_ty e = ast_for_testlist(c, CHILD(n, 0)); if (!e) return NULL; return Expr(e, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == augassign) { expr_ty expr1, expr2; operator_ty newoperator; node *ch = CHILD(n, 0); expr1 = ast_for_testlist(c, ch); if (!expr1) return NULL; if(!set_context(c, expr1, Store, ch)) return NULL; /* set_context checks that most expressions are not the left side. Augmented assignments can only have a name, a subscript, or an attribute on the left, though, so we have to explicitly check for those. */ switch (expr1->kind) { case Name_kind: case Attribute_kind: case Subscript_kind: break; default: ast_error(c, ch, "illegal expression for augmented assignment"); return NULL; } ch = CHILD(n, 2); if (TYPE(ch) == testlist) expr2 = ast_for_testlist(c, ch); else expr2 = ast_for_expr(c, ch); if (!expr2) return NULL; newoperator = ast_for_augassign(c, CHILD(n, 1)); if (!newoperator) return NULL; return AugAssign(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == annassign) { expr_ty expr1, expr2, expr3; node *ch = CHILD(n, 0); node *deep, *ann = CHILD(n, 1); int simple = 1; /* we keep track of parens to qualify (x) as expression not name */ deep = ch; while (NCH(deep) == 1) { deep = CHILD(deep, 0); } if (NCH(deep) > 0 && TYPE(CHILD(deep, 0)) == LPAR) { simple = 0; } expr1 = ast_for_testlist(c, ch); if (!expr1) { return NULL; } switch (expr1->kind) { case Name_kind: if (forbidden_name(c, expr1->v.Name.id, n, 0)) { return NULL; } expr1->v.Name.ctx = Store; break; case Attribute_kind: if (forbidden_name(c, expr1->v.Attribute.attr, n, 1)) { return NULL; } expr1->v.Attribute.ctx = Store; break; case Subscript_kind: expr1->v.Subscript.ctx = Store; break; case List_kind: ast_error(c, ch, "only single target (not list) can be annotated"); return NULL; case Tuple_kind: ast_error(c, ch, "only single target (not tuple) can be annotated"); return NULL; default: ast_error(c, ch, "illegal target for annotation"); return NULL; } if (expr1->kind != Name_kind) { simple = 0; } ch = CHILD(ann, 1); expr2 = ast_for_expr(c, ch); if (!expr2) { return NULL; } if (NCH(ann) == 2) { return AnnAssign(expr1, expr2, NULL, simple, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { ch = CHILD(ann, 3); if (TYPE(ch) == testlist) { expr3 = ast_for_testlist(c, ch); } else { expr3 = ast_for_expr(c, ch); } if (!expr3) { return NULL; } return AnnAssign(expr1, expr2, expr3, simple, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } } else { int i; asdl_seq *targets; node *value; expr_ty expression; /* a normal assignment */ REQ(CHILD(n, 1), EQUAL); targets = _Py_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!targets) return NULL; for (i = 0; i < NCH(n) - 2; i += 2) { expr_ty e; node *ch = CHILD(n, i); if (TYPE(ch) == yield_expr) { ast_error(c, ch, "assignment to yield expression not possible"); return NULL; } e = ast_for_testlist(c, ch); if (!e) 
return NULL; /* set context to assign */ if (!set_context(c, e, Store, CHILD(n, i))) return NULL; asdl_seq_SET(targets, i / 2, e); } value = CHILD(n, NCH(n) - 1); if (TYPE(value) == testlist_star_expr) expression = ast_for_testlist(c, value); else expression = ast_for_expr(c, value); if (!expression) return NULL; return Assign(targets, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } }
label: 0
cve_id: CVE-2019-19274
id: 1,801
text_label: benign

cwe_id: CWE-125
ast_for_expr_stmt(struct compiling *c, const node *n) { REQ(n, expr_stmt); /* expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | [('=' (yield_expr|testlist_star_expr))+ [TYPE_COMMENT]] ) annassign: ':' test ['=' (yield_expr|testlist)] testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=') test: ... here starts the operator precedence dance */ int num = NCH(n); if (num == 1) { expr_ty e = ast_for_testlist(c, CHILD(n, 0)); if (!e) return NULL; return Expr(e, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == augassign) { expr_ty expr1, expr2; operator_ty newoperator; node *ch = CHILD(n, 0); expr1 = ast_for_testlist(c, ch); if (!expr1) return NULL; if(!set_context(c, expr1, Store, ch)) return NULL; /* set_context checks that most expressions are not the left side. Augmented assignments can only have a name, a subscript, or an attribute on the left, though, so we have to explicitly check for those. */ switch (expr1->kind) { case Name_kind: case Attribute_kind: case Subscript_kind: break; default: ast_error(c, ch, "illegal expression for augmented assignment"); return NULL; } ch = CHILD(n, 2); if (TYPE(ch) == testlist) expr2 = ast_for_testlist(c, ch); else expr2 = ast_for_expr(c, ch); if (!expr2) return NULL; newoperator = ast_for_augassign(c, CHILD(n, 1)); if (!newoperator) return NULL; return AugAssign(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == annassign) { expr_ty expr1, expr2, expr3; node *ch = CHILD(n, 0); node *deep, *ann = CHILD(n, 1); int simple = 1; /* we keep track of parens to qualify (x) as expression not name */ deep = ch; while (NCH(deep) == 1) { deep = CHILD(deep, 0); } if (NCH(deep) > 0 && TYPE(CHILD(deep, 0)) == LPAR) { simple = 0; } expr1 = ast_for_testlist(c, ch); if (!expr1) { return NULL; } switch (expr1->kind) { case Name_kind: if (forbidden_name(c, expr1->v.Name.id, n, 0)) { return NULL; } expr1->v.Name.ctx = Store; break; case Attribute_kind: if (forbidden_name(c, expr1->v.Attribute.attr, n, 1)) { return NULL; } expr1->v.Attribute.ctx = Store; break; case Subscript_kind: expr1->v.Subscript.ctx = Store; break; case List_kind: ast_error(c, ch, "only single target (not list) can be annotated"); return NULL; case Tuple_kind: ast_error(c, ch, "only single target (not tuple) can be annotated"); return NULL; default: ast_error(c, ch, "illegal target for annotation"); return NULL; } if (expr1->kind != Name_kind) { simple = 0; } ch = CHILD(ann, 1); expr2 = ast_for_expr(c, ch); if (!expr2) { return NULL; } if (NCH(ann) == 2) { return AnnAssign(expr1, expr2, NULL, simple, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { ch = CHILD(ann, 3); if (TYPE(ch) == testlist) { expr3 = ast_for_testlist(c, ch); } else { expr3 = ast_for_expr(c, ch); } if (!expr3) { return NULL; } return AnnAssign(expr1, expr2, expr3, simple, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } } else { int i, nch_minus_type, has_type_comment; asdl_seq *targets; node *value; expr_ty expression; string type_comment; /* a normal assignment */ REQ(CHILD(n, 1), EQUAL); has_type_comment = TYPE(CHILD(n, num - 1)) == TYPE_COMMENT; nch_minus_type = num - has_type_comment; targets = _Py_asdl_seq_new(nch_minus_type / 2, c->c_arena); if (!targets) return NULL; for 
(i = 0; i < nch_minus_type - 2; i += 2) { expr_ty e; node *ch = CHILD(n, i); if (TYPE(ch) == yield_expr) { ast_error(c, ch, "assignment to yield expression not possible"); return NULL; } e = ast_for_testlist(c, ch); if (!e) return NULL; /* set context to assign */ if (!set_context(c, e, Store, CHILD(n, i))) return NULL; asdl_seq_SET(targets, i / 2, e); } value = CHILD(n, nch_minus_type - 1); if (TYPE(value) == testlist_star_expr) expression = ast_for_testlist(c, value); else expression = ast_for_expr(c, value); if (!expression) return NULL; if (has_type_comment) { type_comment = NEW_TYPE_COMMENT(CHILD(n, nch_minus_type)); if (!type_comment) return NULL; } else type_comment = NULL; return Assign(targets, expression, type_comment, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } }
label: 1
cve_id: CVE-2019-19274
id: 1,801
text_label: vulnerable

cwe_id: CWE-787
static void libxsmm_sparse_csr_reader( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, REALTYPE** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (REALTYPE*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; REALTYPE l_value; /* read a line of content */ #if defined(__EDGE_EXECUTE_F32__) if ( sscanf(l_line, "%u %u %f", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } #else if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } #endif /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } }
label: 0
cve_id: CVE-2018-20541
id: 226
text_label: benign

cwe_id: CWE-787
static void libxsmm_sparse_csr_reader( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, REALTYPE** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) && 0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (REALTYPE*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; REALTYPE l_value; /* read a line of content */ #if defined(__EDGE_EXECUTE_F32__) if ( sscanf(l_line, "%u %u %f", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } #else if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } #endif /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } }
label: 1
cve_id: CVE-2018-20541
id: 226
text_label: vulnerable

cwe_id: CWE-119
static int cdxl_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt) { CDXLVideoContext *c = avctx->priv_data; AVFrame * const p = data; int ret, w, h, encoding, aligned_width, buf_size = pkt->size; const uint8_t *buf = pkt->data; if (buf_size < 32) return AVERROR_INVALIDDATA; encoding = buf[1] & 7; c->format = buf[1] & 0xE0; w = AV_RB16(&buf[14]); h = AV_RB16(&buf[16]); c->bpp = buf[19]; c->palette_size = AV_RB16(&buf[20]); c->palette = buf + 32; c->video = c->palette + c->palette_size; c->video_size = buf_size - c->palette_size - 32; if (c->palette_size > 512) return AVERROR_INVALIDDATA; if (buf_size < c->palette_size + 32) return AVERROR_INVALIDDATA; if (c->bpp < 1) return AVERROR_INVALIDDATA; if (c->format != BIT_PLANAR && c->format != BIT_LINE && c->format != CHUNKY) { avpriv_request_sample(avctx, "Pixel format 0x%0x", c->format); return AVERROR_PATCHWELCOME; } if ((ret = ff_set_dimensions(avctx, w, h)) < 0) return ret; if (c->format == CHUNKY) aligned_width = avctx->width; else aligned_width = FFALIGN(c->avctx->width, 16); c->padded_bits = aligned_width - c->avctx->width; if (c->video_size < aligned_width * avctx->height * (int64_t)c->bpp / 8) return AVERROR_INVALIDDATA; if (!encoding && c->palette_size && c->bpp <= 8) { avctx->pix_fmt = AV_PIX_FMT_PAL8; } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) { if (c->palette_size != (1 << (c->bpp - 1))) return AVERROR_INVALIDDATA; avctx->pix_fmt = AV_PIX_FMT_BGR24; } else if (!encoding && c->bpp == 24 && c->format == CHUNKY && !c->palette_size) { avctx->pix_fmt = AV_PIX_FMT_RGB24; } else { avpriv_request_sample(avctx, "Encoding %d, bpp %d and format 0x%x", encoding, c->bpp, c->format); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->pict_type = AV_PICTURE_TYPE_I; if (encoding) { av_fast_padded_malloc(&c->new_video, &c->new_video_size, h * w + AV_INPUT_BUFFER_PADDING_SIZE); if (!c->new_video) return AVERROR(ENOMEM); if (c->bpp == 8) cdxl_decode_ham8(c, p); else cdxl_decode_ham6(c, p); } else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { cdxl_decode_rgb(c, p); } else { cdxl_decode_raw(c, p); } *got_frame = 1; return buf_size; }
label: 0
cve_id: CVE-2017-9996
id: 2,094
text_label: benign

cwe_id: CWE-119
static int cdxl_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt) { CDXLVideoContext *c = avctx->priv_data; AVFrame * const p = data; int ret, w, h, encoding, aligned_width, buf_size = pkt->size; const uint8_t *buf = pkt->data; if (buf_size < 32) return AVERROR_INVALIDDATA; encoding = buf[1] & 7; c->format = buf[1] & 0xE0; w = AV_RB16(&buf[14]); h = AV_RB16(&buf[16]); c->bpp = buf[19]; c->palette_size = AV_RB16(&buf[20]); c->palette = buf + 32; c->video = c->palette + c->palette_size; c->video_size = buf_size - c->palette_size - 32; if (c->palette_size > 512) return AVERROR_INVALIDDATA; if (buf_size < c->palette_size + 32) return AVERROR_INVALIDDATA; if (c->bpp < 1) return AVERROR_INVALIDDATA; if (c->format != BIT_PLANAR && c->format != BIT_LINE && c->format != CHUNKY) { avpriv_request_sample(avctx, "Pixel format 0x%0x", c->format); return AVERROR_PATCHWELCOME; } if ((ret = ff_set_dimensions(avctx, w, h)) < 0) return ret; if (c->format == CHUNKY) aligned_width = avctx->width; else aligned_width = FFALIGN(c->avctx->width, 16); c->padded_bits = aligned_width - c->avctx->width; if (c->video_size < aligned_width * avctx->height * (int64_t)c->bpp / 8) return AVERROR_INVALIDDATA; if (!encoding && c->palette_size && c->bpp <= 8 && c->format != CHUNKY) { avctx->pix_fmt = AV_PIX_FMT_PAL8; } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) { if (c->palette_size != (1 << (c->bpp - 1))) return AVERROR_INVALIDDATA; avctx->pix_fmt = AV_PIX_FMT_BGR24; } else if (!encoding && c->bpp == 24 && c->format == CHUNKY && !c->palette_size) { avctx->pix_fmt = AV_PIX_FMT_RGB24; } else { avpriv_request_sample(avctx, "Encoding %d, bpp %d and format 0x%x", encoding, c->bpp, c->format); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->pict_type = AV_PICTURE_TYPE_I; if (encoding) { av_fast_padded_malloc(&c->new_video, &c->new_video_size, h * w + AV_INPUT_BUFFER_PADDING_SIZE); if (!c->new_video) return AVERROR(ENOMEM); if (c->bpp == 8) cdxl_decode_ham8(c, p); else cdxl_decode_ham6(c, p); } else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { cdxl_decode_rgb(c, p); } else { cdxl_decode_raw(c, p); } *got_frame = 1; return buf_size; }
label: 1
cve_id: CVE-2017-9996
id: 2,094
text_label: vulnerable

cwe_id: CWE-362
static int cipso_v4_delopt(struct ip_options **opt_ptr) { int hdr_delta = 0; struct ip_options *opt = *opt_ptr; if (opt->srr || opt->rr || opt->ts || opt->router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->cipso - sizeof(struct iphdr); cipso_ptr = &opt->__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->srr > opt->cipso) opt->srr -= cipso_len; if (opt->rr > opt->cipso) opt->rr -= cipso_len; if (opt->ts > opt->cipso) opt->ts -= cipso_len; if (opt->router_alert > opt->cipso) opt->router_alert -= cipso_len; opt->cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->optlen) if (opt->__data[iter] != IPOPT_NOP) { iter += opt->__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->optlen; opt->optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->optlen; kfree(opt); } return hdr_delta; }
0
CVE-2012-3552
3,022
benign
CWE-362
static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; call_rcu(&opt->rcu, opt_kfree_rcu); } return hdr_delta; }
1
CVE-2012-3552
3,022
vulnerable
CWE-20
TEST_F(QuantizedConv2DTest, OddPaddingBatch) { const int stride = 2; TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 4; const int image_batch_count = 3; AddInputFromArray<quint8>( TensorShape({image_batch_count, image_height, image_width, depth}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); const int filter_size = 3; const int filter_count = 1; AddInputFromArray<quint8>( TensorShape({filter_size, filter_size, depth, filter_count}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width / stride; const int expected_height = (image_height * filter_count) / stride; Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<qint32>(&expected, {348, 252, 274, 175, // 348, 252, 274, 175, // 348, 252, 274, 175}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); }
0
CVE-2022-29201
3,244
benign
CWE-20
TEST_F(QuantizedConv2DTest, OddPaddingBatch) { const int stride = 2; TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 4; const int image_batch_count = 3; AddInputFromArray<quint8>( TensorShape({image_batch_count, image_height, image_width, depth}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); const int filter_size = 3; const int filter_count = 1; AddInputFromArray<quint8>( TensorShape({filter_size, filter_size, depth, filter_count}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width / stride; const int expected_height = (image_height * filter_count) / stride; Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<qint32>(&expected, {348, 252, 274, 175, // 348, 252, 274, 175, // 348, 252, 274, 175}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); }
1
CVE-2022-29201
3,244
vulnerable
CWE-787
inline void skip(int bytes) { while (bytes > 0) { int n = check(1, bytes); ptr += n; bytes -= n; } }
0
CVE-2019-15694
2,350
benign
CWE-787
inline void skip(size_t bytes) { while (bytes > 0) { size_t n = check(1, bytes); ptr += n; bytes -= n; } }
1
CVE-2019-15694
2,350
vulnerable
CWE-476
int PackLinuxElf32::canUnpack() { if (super::canUnpack()) { return true; } if (Elf32_Ehdr::ET_DYN==get_te16(&ehdri.e_type)) { PackLinuxElf32help1(fi); } return false; }
0
CVE-2021-30500
3,212
benign
CWE-476
int PackLinuxElf32::canUnpack() { if (checkEhdr(&ehdri)) { return false; } // FIXME: ET_DYN same as 64-bit canUnpack ?? if (Elf32_Ehdr::ET_DYN==get_te16(&ehdri.e_type)) { PackLinuxElf32help1(fi); } if (super::canUnpack()) { return true; } return false; }
1
CVE-2021-30500
3,212
vulnerable
CWE-416
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { get_page(buf->page); }
0
CVE-2019-11487
1,691
benign
CWE-416
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return try_get_page(buf->page); }
1
CVE-2019-11487
1,691
vulnerable
CWE-119
header_put_le_int (SF_PRIVATE *psf, int x) { if (psf->headindex < SIGNED_SIZEOF (psf->header) - 4) { psf->header [psf->headindex++] = x ; psf->header [psf->headindex++] = (x >> 8) ; psf->header [psf->headindex++] = (x >> 16) ; psf->header [psf->headindex++] = (x >> 24) ; } ; } /* header_put_le_int */
0
CVE-2017-7586
168
benign
CWE-119
header_put_le_int (SF_PRIVATE *psf, int x) { psf->header.ptr [psf->header.indx++] = x ; psf->header.ptr [psf->header.indx++] = (x >> 8) ; psf->header.ptr [psf->header.indx++] = (x >> 16) ; psf->header.ptr [psf->header.indx++] = (x >> 24) ; } /* header_put_le_int */
1
CVE-2017-7586
168
vulnerable
CWE-125
int read_image_tga( gdIOCtx *ctx, oTga *tga ) { int pixel_block_size = (tga->bits / 8); int image_block_size = (tga->width * tga->height) * pixel_block_size; int* decompression_buffer = NULL; unsigned char* conversion_buffer = NULL; int buffer_caret = 0; int bitmap_caret = 0; int i = 0; int encoded_pixels; int rle_size; if(overflow2(tga->width, tga->height)) { return -1; } if(overflow2(tga->width * tga->height, pixel_block_size)) { return -1; } if(overflow2(image_block_size, sizeof(int))) { return -1; } /*! \todo Add more image type support. */ if (tga->imagetype != TGA_TYPE_RGB && tga->imagetype != TGA_TYPE_RGB_RLE) return -1; /*! \brief Allocate memmory for image block * Allocate a chunk of memory for the image block to be passed into. */ tga->bitmap = (int *) gdMalloc(image_block_size * sizeof(int)); if (tga->bitmap == NULL) return -1; switch (tga->imagetype) { case TGA_TYPE_RGB: /*! \brief Read in uncompressed RGB TGA * Chunk load the pixel data from an uncompressed RGB type TGA. */ conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { return -1; } if (gdGetBuf(conversion_buffer, image_block_size, ctx) != image_block_size) { gd_error("gd-tga: premature end of image data\n"); gdFree(conversion_buffer); return -1; } while (buffer_caret < image_block_size) { tga->bitmap[buffer_caret] = (int) conversion_buffer[buffer_caret]; buffer_caret++; } gdFree(conversion_buffer); break; case TGA_TYPE_RGB_RLE: /*! \brief Read in RLE compressed RGB TGA * Chunk load the pixel data from an RLE compressed RGB type TGA. */ decompression_buffer = (int*) gdMalloc(image_block_size * sizeof(int)); if (decompression_buffer == NULL) { return -1; } conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { gd_error("gd-tga: premature end of image data\n"); gdFree( decompression_buffer ); return -1; } rle_size = gdGetBuf(conversion_buffer, image_block_size, ctx); if (rle_size <= 0) { gdFree(conversion_buffer); gdFree(decompression_buffer); return -1; } buffer_caret = 0; while( buffer_caret < rle_size) { decompression_buffer[buffer_caret] = (int)conversion_buffer[buffer_caret]; buffer_caret++; } buffer_caret = 0; while( bitmap_caret < image_block_size ) { if ((decompression_buffer[buffer_caret] & TGA_RLE_FLAG) == TGA_RLE_FLAG) { encoded_pixels = ( ( decompression_buffer[ buffer_caret ] & ~TGA_RLE_FLAG ) + 1 ); buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } for (i = 0; i < encoded_pixels; i++) { memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, pixel_block_size * sizeof(int)); bitmap_caret += pixel_block_size; } buffer_caret += pixel_block_size; } else { encoded_pixels = decompression_buffer[ buffer_caret ] + 1; buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, encoded_pixels * pixel_block_size * sizeof(int)); bitmap_caret += (encoded_pixels * pixel_block_size); buffer_caret += (encoded_pixels * pixel_block_size); } } gdFree( decompression_buffer ); gdFree( conversion_buffer ); break; } return 1; }
0
CVE-2016-6906
2,638
benign
CWE-125
int read_image_tga( gdIOCtx *ctx, oTga *tga ) { int pixel_block_size = (tga->bits / 8); int image_block_size = (tga->width * tga->height) * pixel_block_size; int* decompression_buffer = NULL; unsigned char* conversion_buffer = NULL; int buffer_caret = 0; int bitmap_caret = 0; int i = 0; int encoded_pixels; int rle_size; if(overflow2(tga->width, tga->height)) { return -1; } if(overflow2(tga->width * tga->height, pixel_block_size)) { return -1; } if(overflow2(image_block_size, sizeof(int))) { return -1; } /*! \todo Add more image type support. */ if (tga->imagetype != TGA_TYPE_RGB && tga->imagetype != TGA_TYPE_RGB_RLE) return -1; /*! \brief Allocate memmory for image block * Allocate a chunk of memory for the image block to be passed into. */ tga->bitmap = (int *) gdMalloc(image_block_size * sizeof(int)); if (tga->bitmap == NULL) return -1; switch (tga->imagetype) { case TGA_TYPE_RGB: /*! \brief Read in uncompressed RGB TGA * Chunk load the pixel data from an uncompressed RGB type TGA. */ conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { return -1; } if (gdGetBuf(conversion_buffer, image_block_size, ctx) != image_block_size) { gd_error("gd-tga: premature end of image data\n"); gdFree(conversion_buffer); return -1; } while (buffer_caret < image_block_size) { tga->bitmap[buffer_caret] = (int) conversion_buffer[buffer_caret]; buffer_caret++; } gdFree(conversion_buffer); break; case TGA_TYPE_RGB_RLE: /*! \brief Read in RLE compressed RGB TGA * Chunk load the pixel data from an RLE compressed RGB type TGA. */ decompression_buffer = (int*) gdMalloc(image_block_size * sizeof(int)); if (decompression_buffer == NULL) { return -1; } conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { gd_error("gd-tga: premature end of image data\n"); gdFree( decompression_buffer ); return -1; } rle_size = gdGetBuf(conversion_buffer, image_block_size, ctx); if (rle_size <= 0) { gdFree(conversion_buffer); gdFree(decompression_buffer); return -1; } buffer_caret = 0; while( buffer_caret < rle_size) { decompression_buffer[buffer_caret] = (int)conversion_buffer[buffer_caret]; buffer_caret++; } buffer_caret = 0; while( bitmap_caret < image_block_size ) { if ((decompression_buffer[buffer_caret] & TGA_RLE_FLAG) == TGA_RLE_FLAG) { encoded_pixels = ( ( decompression_buffer[ buffer_caret ] & ~TGA_RLE_FLAG ) + 1 ); buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + pixel_block_size > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } for (i = 0; i < encoded_pixels; i++) { memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, pixel_block_size * sizeof(int)); bitmap_caret += pixel_block_size; } buffer_caret += pixel_block_size; } else { encoded_pixels = decompression_buffer[ buffer_caret ] + 1; buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + (encoded_pixels * pixel_block_size) > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, encoded_pixels * pixel_block_size * sizeof(int)); bitmap_caret += (encoded_pixels * pixel_block_size); buffer_caret += (encoded_pixels * pixel_block_size); } } gdFree( decompression_buffer ); gdFree( conversion_buffer ); break; } return 1; }
1
CVE-2016-6906
2,638
vulnerable
CWE-787
char *string_crypt(const char *key, const char *salt) { assertx(key); assertx(salt); char random_salt[12]; if (!*salt) { memcpy(random_salt,"$1$",3); ito64(random_salt+3,rand(),8); random_salt[11] = '\0'; return string_crypt(key, random_salt); } auto const saltLen = strlen(salt); if ((saltLen > sizeof("$2X$00$")) && (salt[0] == '$') && (salt[1] == '2') && (salt[2] >= 'a') && (salt[2] <= 'z') && (salt[3] == '$') && (salt[4] >= '0') && (salt[4] <= '3') && (salt[5] >= '0') && (salt[5] <= '9') && (salt[6] == '$')) { // Bundled blowfish crypt() char output[61]; static constexpr size_t maxSaltLength = 123; char paddedSalt[maxSaltLength + 1]; paddedSalt[0] = paddedSalt[maxSaltLength] = '\0'; memset(&paddedSalt[1], '$', maxSaltLength - 1); memcpy(paddedSalt, salt, std::min(maxSaltLength, saltLen)); paddedSalt[saltLen] = '\0'; if (php_crypt_blowfish_rn(key, paddedSalt, output, sizeof(output))) { return strdup(output); } } else { // System crypt() function #ifdef USE_PHP_CRYPT_R return php_crypt_r(key, salt); #else static Mutex mutex; Lock lock(mutex); char *crypt_res = crypt(key,salt); if (crypt_res) { return strdup(crypt_res); } #endif } return ((salt[0] == '*') && (salt[1] == '0')) ? strdup("*1") : strdup("*0"); }
0
CVE-2020-1917
306
benign
CWE-787
char *string_crypt(const char *key, const char *salt) { assertx(key); assertx(salt); char random_salt[12]; if (!*salt) { memcpy(random_salt,"$1$",3); ito64(random_salt+3,rand(),8); random_salt[11] = '\0'; return string_crypt(key, random_salt); } auto const saltLen = strlen(salt); if ((saltLen > sizeof("$2X$00$")) && (salt[0] == '$') && (salt[1] == '2') && (salt[2] >= 'a') && (salt[2] <= 'z') && (salt[3] == '$') && (salt[4] >= '0') && (salt[4] <= '3') && (salt[5] >= '0') && (salt[5] <= '9') && (salt[6] == '$')) { // Bundled blowfish crypt() char output[61]; static constexpr size_t maxSaltLength = 123; char paddedSalt[maxSaltLength + 1]; paddedSalt[0] = paddedSalt[maxSaltLength] = '\0'; memset(&paddedSalt[1], '$', maxSaltLength - 1); memcpy(paddedSalt, salt, std::min(maxSaltLength, saltLen)); paddedSalt[std::min(maxSaltLength, saltLen)] = '\0'; if (php_crypt_blowfish_rn(key, paddedSalt, output, sizeof(output))) { return strdup(output); } } else { // System crypt() function #ifdef USE_PHP_CRYPT_R return php_crypt_r(key, salt); #else static Mutex mutex; Lock lock(mutex); char *crypt_res = crypt(key,salt); if (crypt_res) { return strdup(crypt_res); } #endif } return ((salt[0] == '*') && (salt[1] == '0')) ? strdup("*1") : strdup("*0"); }
1
CVE-2020-1917
306
vulnerable
CWE-787
CharArray(int len) { buf = new char[len](); }
0
CVE-2019-15694
818
benign
CWE-787
CharArray(size_t len) { buf = new char[len](); }
1
CVE-2019-15694
818
vulnerable
CWE-476
GF_Err gf_isom_get_text_description(GF_ISOFile *movie, u32 trackNumber, u32 descriptionIndex, GF_TextSampleDescriptor **out_desc) { GF_TrackBox *trak; u32 i; Bool is_qt_text = GF_FALSE; GF_Tx3gSampleEntryBox *txt; if (!descriptionIndex || !out_desc) return GF_BAD_PARAM; trak = gf_isom_get_track_from_file(movie, trackNumber); if (!trak || !trak->Media) return GF_BAD_PARAM; switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: return GF_BAD_PARAM; } txt = (GF_Tx3gSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, descriptionIndex - 1); if (!txt) return GF_BAD_PARAM; switch (txt->type) { case GF_ISOM_BOX_TYPE_TX3G: break; case GF_ISOM_BOX_TYPE_TEXT: is_qt_text = GF_TRUE; break; default: return GF_BAD_PARAM; } (*out_desc) = (GF_TextSampleDescriptor *) gf_odf_desc_new(GF_ODF_TX3G_TAG); if (! (*out_desc) ) return GF_OUT_OF_MEM; (*out_desc)->back_color = txt->back_color; (*out_desc)->default_pos = txt->default_box; (*out_desc)->default_style = txt->default_style; (*out_desc)->displayFlags = txt->displayFlags; (*out_desc)->vert_justif = txt->vertical_justification; (*out_desc)->horiz_justif = txt->horizontal_justification; if (is_qt_text) { GF_TextSampleEntryBox *qt_txt = (GF_TextSampleEntryBox *) txt; if (qt_txt->textName) { (*out_desc)->font_count = 1; (*out_desc)->fonts = (GF_FontRecord *) gf_malloc(sizeof(GF_FontRecord)); (*out_desc)->fonts[0].fontName = gf_strdup(qt_txt->textName); } } else { (*out_desc)->font_count = txt->font_table->entry_count; (*out_desc)->fonts = (GF_FontRecord *) gf_malloc(sizeof(GF_FontRecord) * txt->font_table->entry_count); for (i=0; i<txt->font_table->entry_count; i++) { (*out_desc)->fonts[i].fontID = txt->font_table->fonts[i].fontID; if (txt->font_table->fonts[i].fontName) (*out_desc)->fonts[i].fontName = gf_strdup(txt->font_table->fonts[i].fontName); } } return GF_OK; }
0
CVE-2021-32139
1,938
benign
CWE-476
GF_Err gf_isom_get_text_description(GF_ISOFile *movie, u32 trackNumber, u32 descriptionIndex, GF_TextSampleDescriptor **out_desc) { GF_TrackBox *trak; u32 i; GF_Tx3gSampleEntryBox *txt = NULL; GF_TextSampleEntryBox *qt_txt = NULL; if (!descriptionIndex || !out_desc) return GF_BAD_PARAM; trak = gf_isom_get_track_from_file(movie, trackNumber); if (!trak || !trak->Media) return GF_BAD_PARAM; switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: return GF_BAD_PARAM; } txt = (GF_Tx3gSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, descriptionIndex - 1); if (!txt) return GF_BAD_PARAM; switch (txt->type) { case GF_ISOM_BOX_TYPE_TX3G: break; case GF_ISOM_BOX_TYPE_TEXT: qt_txt = (GF_TextSampleEntryBox *)txt; txt = NULL; break; default: return GF_BAD_PARAM; } (*out_desc) = (GF_TextSampleDescriptor *) gf_odf_desc_new(GF_ODF_TX3G_TAG); if (! (*out_desc) ) return GF_OUT_OF_MEM; if (qt_txt) { (*out_desc)->back_color = rgb_48_to_32(qt_txt->background_color); (*out_desc)->default_pos = qt_txt->default_box; (*out_desc)->default_style.style_flags = qt_txt->fontFace; (*out_desc)->default_style.text_color = rgb_48_to_32(qt_txt->foreground_color); (*out_desc)->displayFlags = qt_txt->displayFlags; (*out_desc)->vert_justif = -1; (*out_desc)->horiz_justif = qt_txt->textJustification; if (qt_txt->textName) { (*out_desc)->font_count = 1; (*out_desc)->fonts = (GF_FontRecord *) gf_malloc(sizeof(GF_FontRecord)); (*out_desc)->fonts[0].fontName = gf_strdup(qt_txt->textName); } } else { (*out_desc)->back_color = txt->back_color; (*out_desc)->default_pos = txt->default_box; (*out_desc)->default_style = txt->default_style; (*out_desc)->displayFlags = txt->displayFlags; (*out_desc)->vert_justif = txt->vertical_justification; (*out_desc)->horiz_justif = txt->horizontal_justification; (*out_desc)->font_count = txt->font_table->entry_count; (*out_desc)->fonts = (GF_FontRecord *) gf_malloc(sizeof(GF_FontRecord) * txt->font_table->entry_count); for (i=0; i<txt->font_table->entry_count; i++) { (*out_desc)->fonts[i].fontID = txt->font_table->fonts[i].fontID; if (txt->font_table->fonts[i].fontName) (*out_desc)->fonts[i].fontName = gf_strdup(txt->font_table->fonts[i].fontName); } } return GF_OK; }
1
CVE-2021-32139
1,938
vulnerable
CWE-362
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); int ret; if (level != SOL_PACKET) return -ENOPROTOOPT; switch (optname) { case PACKET_ADD_MEMBERSHIP: case PACKET_DROP_MEMBERSHIP: { struct packet_mreq_max mreq; int len = optlen; memset(&mreq, 0, sizeof(mreq)); if (len < sizeof(struct packet_mreq)) return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); if (copy_from_user(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; if (optname == PACKET_ADD_MEMBERSHIP) ret = packet_mc_add(sk, &mreq); else ret = packet_mc_drop(sk, &mreq); return ret; } case PACKET_RX_RING: case PACKET_TX_RING: { union tpacket_req_u req_u; int len; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: len = sizeof(req_u.req); break; case TPACKET_V3: default: len = sizeof(req_u.req3); break; } if (optlen < len) return -EINVAL; if (copy_from_user(&req_u.req, optval, len)) return -EFAULT; return packet_set_ring(sk, &req_u, 0, optname == PACKET_TX_RING); } case PACKET_COPY_THRESH: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; pkt_sk(sk)->copy_thresh = val; return 0; } case PACKET_VERSION: { int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: case TPACKET_V3: po->tp_version = val; return 0; default: return -EINVAL; } } case PACKET_RESERVE: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_reserve = val; return 0; } case PACKET_LOSS: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_loss = !!val; return 0; } case PACKET_AUXDATA: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->auxdata = !!val; return 0; } case PACKET_ORIGDEV: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->origdev = !!val; return 0; } case PACKET_VNET_HDR: { int val; if (sock->type != SOCK_RAW) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->has_vnet_hdr = !!val; return 0; } case PACKET_TIMESTAMP: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; return 0; } case PACKET_FANOUT: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; return fanout_add(sk, val & 0xffff, val >> 16); } case PACKET_FANOUT_DATA: { if (!po->fanout) return -EINVAL; return fanout_set_data(po, optval, optlen); } case PACKET_TX_HAS_OFF: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tx_has_off = !!val; return 0; } case PACKET_QDISC_BYPASS: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->xmit = val ? packet_direct_xmit : dev_queue_xmit; return 0; } default: return -ENOPROTOOPT; } }
0
CVE-2016-8655
245
benign
CWE-362
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); int ret; if (level != SOL_PACKET) return -ENOPROTOOPT; switch (optname) { case PACKET_ADD_MEMBERSHIP: case PACKET_DROP_MEMBERSHIP: { struct packet_mreq_max mreq; int len = optlen; memset(&mreq, 0, sizeof(mreq)); if (len < sizeof(struct packet_mreq)) return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); if (copy_from_user(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; if (optname == PACKET_ADD_MEMBERSHIP) ret = packet_mc_add(sk, &mreq); else ret = packet_mc_drop(sk, &mreq); return ret; } case PACKET_RX_RING: case PACKET_TX_RING: { union tpacket_req_u req_u; int len; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: len = sizeof(req_u.req); break; case TPACKET_V3: default: len = sizeof(req_u.req3); break; } if (optlen < len) return -EINVAL; if (copy_from_user(&req_u.req, optval, len)) return -EFAULT; return packet_set_ring(sk, &req_u, 0, optname == PACKET_TX_RING); } case PACKET_COPY_THRESH: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; pkt_sk(sk)->copy_thresh = val; return 0; } case PACKET_VERSION: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: case TPACKET_V3: break; default: return -EINVAL; } lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { po->tp_version = val; ret = 0; } release_sock(sk); return ret; } case PACKET_RESERVE: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_reserve = val; return 0; } case PACKET_LOSS: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_loss = !!val; return 0; } case PACKET_AUXDATA: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->auxdata = !!val; return 0; } case PACKET_ORIGDEV: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->origdev = !!val; return 0; } case PACKET_VNET_HDR: { int val; if (sock->type != SOCK_RAW) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->has_vnet_hdr = !!val; return 0; } case PACKET_TIMESTAMP: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; return 0; } case PACKET_FANOUT: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; return fanout_add(sk, val & 0xffff, val >> 16); } case PACKET_FANOUT_DATA: { if (!po->fanout) return -EINVAL; return fanout_set_data(po, optval, optlen); } case PACKET_TX_HAS_OFF: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tx_has_off = !!val; return 0; } case PACKET_QDISC_BYPASS: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->xmit = val ? packet_direct_xmit : dev_queue_xmit; return 0; } default: return -ENOPROTOOPT; } }
1
CVE-2016-8655
245
vulnerable
CWE-362
static void smp_task_done(struct sas_task *task) { if (!del_timer(&task->slow_task->timer)) return; complete(&task->slow_task->completion); }
0
CVE-2018-20836
2,342
benign
CWE-362
static void smp_task_done(struct sas_task *task) { del_timer(&task->slow_task->timer); complete(&task->slow_task->completion); }
1
CVE-2018-20836
2,342
vulnerable
CWE-416
may_get_cmd_block(exarg_T *eap, char_u *p, char_u **tofree, int *flags) { char_u *retp = p; if (*p == '{' && ends_excmd2(eap->arg, skipwhite(p + 1)) && eap->getline != NULL) { garray_T ga; char_u *line = NULL; ga_init2(&ga, sizeof(char_u *), 10); if (ga_add_string(&ga, p) == FAIL) return retp; // If the argument ends in "}" it must have been concatenated already // for ISN_EXEC. if (p[STRLEN(p) - 1] != '}') // Read lines between '{' and '}'. Does not support nesting or // here-doc constructs. for (;;) { vim_free(line); if ((line = eap->getline(':', eap->cookie, 0, GETLINE_CONCAT_CONTBAR)) == NULL) { emsg(_(e_missing_rcurly)); break; } if (ga_add_string(&ga, line) == FAIL) break; if (*skipwhite(line) == '}') break; } vim_free(line); retp = *tofree = ga_concat_strings(&ga, "\n"); ga_clear_strings(&ga); *flags |= UC_VIM9; } return retp; }
0
CVE-2022-0156
1,163
benign
CWE-416
may_get_cmd_block(exarg_T *eap, char_u *p, char_u **tofree, int *flags) { char_u *retp = p; if (*p == '{' && ends_excmd2(eap->arg, skipwhite(p + 1)) && eap->getline != NULL) { garray_T ga; char_u *line = NULL; ga_init2(&ga, sizeof(char_u *), 10); if (ga_copy_string(&ga, p) == FAIL) return retp; // If the argument ends in "}" it must have been concatenated already // for ISN_EXEC. if (p[STRLEN(p) - 1] != '}') // Read lines between '{' and '}'. Does not support nesting or // here-doc constructs. for (;;) { vim_free(line); if ((line = eap->getline(':', eap->cookie, 0, GETLINE_CONCAT_CONTBAR)) == NULL) { emsg(_(e_missing_rcurly)); break; } if (ga_copy_string(&ga, line) == FAIL) break; if (*skipwhite(line) == '}') break; } vim_free(line); retp = *tofree = ga_concat_strings(&ga, "\n"); ga_clear_strings(&ga); *flags |= UC_VIM9; } return retp; }
1
CVE-2022-0156
1,163
vulnerable
CWE-416
net_bind(short unsigned *port, int type, const char *log_service_name) { struct addrinfo hints = { 0 }; struct addrinfo *servinfo; struct addrinfo *ptr; const char *cfgaddr; char addr[INET6_ADDRSTRLEN]; char strport[8]; int yes = 1; int no = 0; int fd; int ret; cfgaddr = cfg_getstr(cfg_getsec(cfg, "general"), "bind_address"); hints.ai_socktype = (type & (SOCK_STREAM | SOCK_DGRAM)); // filter since type can be SOCK_STREAM | SOCK_NONBLOCK hints.ai_family = (cfg_getbool(cfg_getsec(cfg, "general"), "ipv6")) ? AF_INET6 : AF_INET; hints.ai_flags = cfgaddr ? 0 : AI_PASSIVE; snprintf(strport, sizeof(strport), "%hu", *port); ret = getaddrinfo(cfgaddr, strport, &hints, &servinfo); if (ret < 0) { DPRINTF(E_LOG, L_MISC, "Failure creating '%s' service, could not resolve '%s' (port %s): %s\n", log_service_name, cfgaddr ? cfgaddr : "(ANY)", strport, gai_strerror(ret)); return -1; } for (ptr = servinfo, fd = -1; ptr != NULL; ptr = ptr->ai_next) { if (fd >= 0) close(fd); fd = socket(ptr->ai_family, type | SOCK_CLOEXEC, ptr->ai_protocol); if (fd < 0) continue; // TODO libevent sets this, we do the same? ret = setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes)); if (ret < 0) continue; ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)); if (ret < 0) continue; if (ptr->ai_family == AF_INET6) { // We want to be sure the service is dual stack ret = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &no, sizeof(no)); if (ret < 0) continue; } ret = bind(fd, ptr->ai_addr, ptr->ai_addrlen); if (ret < 0) continue; break; } freeaddrinfo(servinfo); if (!ptr) { DPRINTF(E_LOG, L_MISC, "Could not create service '%s' with address %s, port %hu: %s\n", log_service_name, cfgaddr ? cfgaddr : "(ANY)", *port, strerror(errno)); goto error; } // Get the port that was assigned ret = getsockname(fd, ptr->ai_addr, &ptr->ai_addrlen); if (ret < 0) { DPRINTF(E_LOG, L_MISC, "Could not find address of service '%s': %s\n", log_service_name, strerror(errno)); goto error; } net_port_get(port, (union net_sockaddr *)ptr->ai_addr); net_address_get(addr, sizeof(addr), (union net_sockaddr *)ptr->ai_addr); DPRINTF(E_DBG, L_MISC, "Service '%s' bound to %s, port %hu, socket %d\n", log_service_name, addr, *port, fd); return fd; error: close(fd); return -1; }
0
CVE-2021-38383
2,739
benign
CWE-416
net_bind(short unsigned *port, int type, const char *log_service_name) { struct addrinfo hints = { 0 }; struct addrinfo *servinfo; struct addrinfo *ptr; union net_sockaddr naddr = { 0 }; socklen_t naddr_len = sizeof(naddr); const char *cfgaddr; char addr[INET6_ADDRSTRLEN]; char strport[8]; int yes = 1; int no = 0; int fd; int ret; cfgaddr = cfg_getstr(cfg_getsec(cfg, "general"), "bind_address"); hints.ai_socktype = (type & (SOCK_STREAM | SOCK_DGRAM)); // filter since type can be SOCK_STREAM | SOCK_NONBLOCK hints.ai_family = (cfg_getbool(cfg_getsec(cfg, "general"), "ipv6")) ? AF_INET6 : AF_INET; hints.ai_flags = cfgaddr ? 0 : AI_PASSIVE; snprintf(strport, sizeof(strport), "%hu", *port); ret = getaddrinfo(cfgaddr, strport, &hints, &servinfo); if (ret < 0) { DPRINTF(E_LOG, L_MISC, "Failure creating '%s' service, could not resolve '%s' (port %s): %s\n", log_service_name, cfgaddr ? cfgaddr : "(ANY)", strport, gai_strerror(ret)); return -1; } for (ptr = servinfo, fd = -1; ptr != NULL; ptr = ptr->ai_next) { if (fd >= 0) close(fd); fd = socket(ptr->ai_family, type | SOCK_CLOEXEC, ptr->ai_protocol); if (fd < 0) continue; // TODO libevent sets this, we do the same? ret = setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes)); if (ret < 0) continue; ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)); if (ret < 0) continue; if (ptr->ai_family == AF_INET6) { // We want to be sure the service is dual stack ret = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &no, sizeof(no)); if (ret < 0) continue; } ret = bind(fd, ptr->ai_addr, ptr->ai_addrlen); if (ret < 0) continue; break; } freeaddrinfo(servinfo); if (!ptr) { DPRINTF(E_LOG, L_MISC, "Could not create service '%s' with address %s, port %hu: %s\n", log_service_name, cfgaddr ? cfgaddr : "(ANY)", *port, strerror(errno)); goto error; } // Get our address (as string) and the port that was assigned (necessary when // caller didn't specify a port) ret = getsockname(fd, &naddr.sa, &naddr_len); if (ret < 0) { DPRINTF(E_LOG, L_MISC, "Error finding address of service '%s': %s\n", log_service_name, strerror(errno)); goto error; } else if (naddr_len > sizeof(naddr)) { DPRINTF(E_LOG, L_MISC, "Unexpected address length of service '%s'\n", log_service_name); goto error; } net_port_get(port, &naddr); net_address_get(addr, sizeof(addr), &naddr); DPRINTF(E_DBG, L_MISC, "Service '%s' bound to %s, port %hu, socket %d\n", log_service_name, addr, *port, fd); return fd; error: close(fd); return -1; }
1
CVE-2021-38383
2,739
vulnerable
CWE-20
int mk_request_error(int http_status, struct client_session *cs, struct session_request *sr) { int ret, fd; mk_ptr_t message, *page = 0; struct error_page *entry; struct mk_list *head; struct file_info finfo; mk_header_set_http_status(sr, http_status); /* * We are nice sending error pages for clients who at least respect * the especification */ if (http_status != MK_CLIENT_LENGTH_REQUIRED && http_status != MK_CLIENT_BAD_REQUEST && http_status != MK_CLIENT_REQUEST_ENTITY_TOO_LARGE) { /* Lookup a customized error page */ mk_list_foreach(head, &sr->host_conf->error_pages) { entry = mk_list_entry(head, struct error_page, _head); if (entry->status != http_status) { continue; } /* validate error file */ ret = mk_file_get_info(entry->real_path, &finfo); if (ret == -1) { break; } /* open file */ fd = open(entry->real_path, config->open_flags); if (fd == -1) { break; } sr->fd_file = fd; sr->bytes_to_send = finfo.size; sr->headers.content_length = finfo.size; sr->headers.real_length = finfo.size; memcpy(&sr->file_info, &finfo, sizeof(struct file_info)); mk_header_send(cs->socket, cs, sr); return mk_http_send_file(cs, sr); } } mk_ptr_reset(&message); switch (http_status) { case MK_CLIENT_BAD_REQUEST: page = mk_request_set_default_page("Bad Request", sr->uri, sr->host_conf->host_signature); break; case MK_CLIENT_FORBIDDEN: page = mk_request_set_default_page("Forbidden", sr->uri, sr->host_conf->host_signature); break; case MK_CLIENT_NOT_FOUND: mk_string_build(&message.data, &message.len, "The requested URL was not found on this server."); page = mk_request_set_default_page("Not Found", message, sr->host_conf->host_signature); mk_ptr_free(&message); break; case MK_CLIENT_REQUEST_ENTITY_TOO_LARGE: mk_string_build(&message.data, &message.len, "The request entity is too large."); page = mk_request_set_default_page("Entity too large", message, sr->host_conf->host_signature); mk_ptr_free(&message); break; case MK_CLIENT_METHOD_NOT_ALLOWED: page = mk_request_set_default_page("Method Not Allowed", sr->uri, sr->host_conf->host_signature); break; case MK_CLIENT_REQUEST_TIMEOUT: case MK_CLIENT_LENGTH_REQUIRED: break; case MK_SERVER_NOT_IMPLEMENTED: page = mk_request_set_default_page("Method Not Implemented", sr->uri, sr->host_conf->host_signature); break; case MK_SERVER_INTERNAL_ERROR: page = mk_request_set_default_page("Internal Server Error", sr->uri, sr->host_conf->host_signature); break; case MK_SERVER_HTTP_VERSION_UNSUP: mk_ptr_reset(&message); page = mk_request_set_default_page("HTTP Version Not Supported", message, sr->host_conf->host_signature); break; } if (page) { sr->headers.content_length = page->len; } sr->headers.location = NULL; sr->headers.cgi = SH_NOCGI; sr->headers.pconnections_left = 0; sr->headers.last_modified = -1; if (!page) { mk_ptr_reset(&sr->headers.content_type); } else { mk_ptr_set(&sr->headers.content_type, "text/html\r\n"); } mk_header_send(cs->socket, cs, sr); if (page) { if (sr->method != MK_HTTP_METHOD_HEAD) mk_socket_send(cs->socket, page->data, page->len); mk_ptr_free(page); mk_mem_free(page); } /* Turn off TCP_CORK */ mk_server_cork_flag(cs->socket, TCP_CORK_OFF); return EXIT_ERROR; }
0
CVE-2014-5336
2,311
benign
CWE-20
int mk_request_error(int http_status, struct client_session *cs, struct session_request *sr) { int ret, fd; mk_ptr_t message, *page = 0; struct error_page *entry; struct mk_list *head; struct file_info finfo; mk_header_set_http_status(sr, http_status); /* * We are nice sending error pages for clients who at least respect * the especification */ if (http_status != MK_CLIENT_LENGTH_REQUIRED && http_status != MK_CLIENT_BAD_REQUEST && http_status != MK_CLIENT_REQUEST_ENTITY_TOO_LARGE) { /* Lookup a customized error page */ mk_list_foreach(head, &sr->host_conf->error_pages) { entry = mk_list_entry(head, struct error_page, _head); if (entry->status != http_status) { continue; } /* validate error file */ ret = mk_file_get_info(entry->real_path, &finfo); if (ret == -1) { break; } /* open file */ fd = open(entry->real_path, config->open_flags); if (fd == -1) { break; } sr->fd_file = fd; sr->fd_is_fdt = MK_FALSE; sr->bytes_to_send = finfo.size; sr->headers.content_length = finfo.size; sr->headers.real_length = finfo.size; memcpy(&sr->file_info, &finfo, sizeof(struct file_info)); mk_header_send(cs->socket, cs, sr); return mk_http_send_file(cs, sr); } } mk_ptr_reset(&message); switch (http_status) { case MK_CLIENT_BAD_REQUEST: page = mk_request_set_default_page("Bad Request", sr->uri, sr->host_conf->host_signature); break; case MK_CLIENT_FORBIDDEN: page = mk_request_set_default_page("Forbidden", sr->uri, sr->host_conf->host_signature); break; case MK_CLIENT_NOT_FOUND: mk_string_build(&message.data, &message.len, "The requested URL was not found on this server."); page = mk_request_set_default_page("Not Found", message, sr->host_conf->host_signature); mk_ptr_free(&message); break; case MK_CLIENT_REQUEST_ENTITY_TOO_LARGE: mk_string_build(&message.data, &message.len, "The request entity is too large."); page = mk_request_set_default_page("Entity too large", message, sr->host_conf->host_signature); mk_ptr_free(&message); break; case MK_CLIENT_METHOD_NOT_ALLOWED: page = mk_request_set_default_page("Method Not Allowed", sr->uri, sr->host_conf->host_signature); break; case MK_CLIENT_REQUEST_TIMEOUT: case MK_CLIENT_LENGTH_REQUIRED: break; case MK_SERVER_NOT_IMPLEMENTED: page = mk_request_set_default_page("Method Not Implemented", sr->uri, sr->host_conf->host_signature); break; case MK_SERVER_INTERNAL_ERROR: page = mk_request_set_default_page("Internal Server Error", sr->uri, sr->host_conf->host_signature); break; case MK_SERVER_HTTP_VERSION_UNSUP: mk_ptr_reset(&message); page = mk_request_set_default_page("HTTP Version Not Supported", message, sr->host_conf->host_signature); break; } if (page) { sr->headers.content_length = page->len; } sr->headers.location = NULL; sr->headers.cgi = SH_NOCGI; sr->headers.pconnections_left = 0; sr->headers.last_modified = -1; if (!page) { mk_ptr_reset(&sr->headers.content_type); } else { mk_ptr_set(&sr->headers.content_type, "text/html\r\n"); } mk_header_send(cs->socket, cs, sr); if (page) { if (sr->method != MK_HTTP_METHOD_HEAD) mk_socket_send(cs->socket, page->data, page->len); mk_ptr_free(page); mk_mem_free(page); } /* Turn off TCP_CORK */ mk_server_cork_flag(cs->socket, TCP_CORK_OFF); return EXIT_ERROR; }
1
CVE-2014-5336
2,311
vulnerable
CWE-787
static void _imap_quote_string (char *dest, size_t dlen, const char *src, const char *to_quote) { char *pt; const char *s; pt = dest; s = src; *pt++ = '"'; /* save room for trailing quote-char */ dlen -= 2; for (; *s && dlen; s++) { if (strchr (to_quote, *s)) { dlen -= 2; if (!dlen) break; *pt++ = '\\'; *pt++ = *s; } else { *pt++ = *s; dlen--; } } *pt++ = '"'; *pt = 0; }
0
CVE-2018-14352
3,093
benign
CWE-787
static void _imap_quote_string (char *dest, size_t dlen, const char *src, const char *to_quote) { char *pt; const char *s; if (!(dest && dlen && src && to_quote)) return; if (dlen < 3) { *dest = 0; return; } pt = dest; s = src; /* save room for pre/post quote-char and trailing null */ dlen -= 3; *pt++ = '"'; for (; *s && dlen; s++) { if (strchr (to_quote, *s)) { if (dlen < 2) break; dlen -= 2; *pt++ = '\\'; *pt++ = *s; } else { *pt++ = *s; dlen--; } } *pt++ = '"'; *pt = 0; }
1
CVE-2018-14352
3,093
vulnerable
CWE-787
void RegKey::getBinary(const TCHAR* valname, void** data, int* length, void* def, int deflen) const { try { getBinary(valname, data, length); } catch(rdr::Exception&) { if (deflen) { *data = new char[deflen]; memcpy(*data, def, deflen); } else *data = 0; *length = deflen; } }
0
CVE-2019-15694
829
benign
CWE-787
void RegKey::getBinary(const TCHAR* valname, void** data, size_t* length, void* def, size_t deflen) const { try { getBinary(valname, data, length); } catch(rdr::Exception&) { if (deflen) { *data = new char[deflen]; memcpy(*data, def, deflen); } else *data = 0; *length = deflen; } }
1
CVE-2019-15694
829
vulnerable
CWE-119
void Huff_transmit (huff_t *huff, int ch, byte *fout) { int i; if (huff->loc[ch] == NULL) { /* node_t hasn't been transmitted, send a NYT, then the symbol */ Huff_transmit(huff, NYT, fout); for (i = 7; i >= 0; i--) { add_bit((char)((ch >> i) & 0x1), fout); } } else { send(huff->loc[ch], NULL, fout); } }
0
CVE-2017-11721
540
benign
CWE-119
void Huff_transmit (huff_t *huff, int ch, byte *fout, int maxoffset) { int i; if (huff->loc[ch] == NULL) { /* node_t hasn't been transmitted, send a NYT, then the symbol */ Huff_transmit(huff, NYT, fout, maxoffset); for (i = 7; i >= 0; i--) { add_bit((char)((ch >> i) & 0x1), fout); } } else { send(huff->loc[ch], NULL, fout, maxoffset); } }
1
CVE-2017-11721
540
vulnerable
CWE-119
static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; }
0
CVE-2016-4998
1,669
benign
CWE-119
static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; }
1
CVE-2016-4998
1,669
vulnerable
CWE-476
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *tmp_sdata, *sdata; struct cfg80211_chan_def *chandef; u16 len_rthdr; int hdrlen; memset(info, 0, sizeof(*info)); info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_CTL_INJECTED; /* Sanity-check and process the injection radiotap header */ if (!ieee80211_parse_tx_radiotap(skb, dev)) goto fail; /* we now know there is a radiotap header with a length we can use */ len_rthdr = ieee80211_get_radiotap_len(skb->data); /* * fix up the pointers accounting for the radiotap * header still being in there. We are being given * a precooked IEEE80211 header so no need for * normal processing */ skb_set_mac_header(skb, len_rthdr); /* * these are just fixed to the end of the rt area since we * don't have any better information and at this point, nobody cares */ skb_set_network_header(skb, len_rthdr); skb_set_transport_header(skb, len_rthdr); if (skb->len < len_rthdr + 2) goto fail; hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < len_rthdr + hdrlen) goto fail; /* * Initialize skb->protocol if the injected frame is a data frame * carrying a rfc1042 header */ if (ieee80211_is_data(hdr->frame_control) && skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) { u8 *payload = (u8 *)hdr + hdrlen; if (ether_addr_equal(payload, rfc1042_header)) skb->protocol = cpu_to_be16((payload[6] << 8) | payload[7]); } rcu_read_lock(); /* * We process outgoing injected frames that have a local address * we handle as though they are non-injected frames. * This code here isn't entirely correct, the local MAC address * isn't always enough to find the interface to use; for proper * VLAN support we have an nl80211-based mechanism. * * This is necessary, for example, for old hostapd versions that * don't use nl80211-based management TX/RX. */ sdata = IEEE80211_DEV_TO_SUB_IF(dev); list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(tmp_sdata)) continue; if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR || tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN) continue; if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) { sdata = tmp_sdata; break; } } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { tmp_sdata = rcu_dereference(local->monitor_sdata); if (tmp_sdata) chanctx_conf = rcu_dereference(tmp_sdata->vif.chanctx_conf); } if (chanctx_conf) chandef = &chanctx_conf->def; else if (!local->use_chanctx) chandef = &local->_oper_chandef; else goto fail_rcu; /* * Frame injection is not allowed if beaconing is not allowed * or if we need radar detection. Beaconing is usually not allowed when * the mode or operation (Adhoc, AP, Mesh) does not support DFS. * Passive scan is also used in world regulatory domains where * your country is not known and as such it should be treated as * NO TX unless the channel is explicitly allowed in which case * your current regulatory domain would not have the passive scan * flag. * * Since AP mode uses monitor interfaces to inject/TX management * frames we can make AP mode the exception to this rule once it * supports radar detection as its implementation can deal with * radar detection by itself. We can do that later by adding a * monitor flag interfaces used for AP support. */ if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef, sdata->vif.type)) goto fail_rcu; info->band = chandef->chan->band; /* Initialize skb->priority according to frame type and TID class, * with respect to the sub interface that the frame will actually * be transmitted on. If the DONT_REORDER flag is set, the original * skb-priority is preserved to assure frames injected with this * flag are not reordered relative to each other. */ ieee80211_select_queue_80211(sdata, skb, hdr); skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority)); /* remove the injection radiotap header */ skb_pull(skb, len_rthdr); ieee80211_xmit(sdata, NULL, skb); rcu_read_unlock(); return NETDEV_TX_OK; fail_rcu: rcu_read_unlock(); fail: dev_kfree_skb(skb); return NETDEV_TX_OK; /* meaning, we dealt with the skb */ }
0
CVE-2021-38206
2,176
benign
CWE-476
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *tmp_sdata, *sdata; struct cfg80211_chan_def *chandef; u16 len_rthdr; int hdrlen; memset(info, 0, sizeof(*info)); info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_CTL_INJECTED; /* Sanity-check the length of the radiotap header */ if (!ieee80211_validate_radiotap_len(skb)) goto fail; /* we now know there is a radiotap header with a length we can use */ len_rthdr = ieee80211_get_radiotap_len(skb->data); /* * fix up the pointers accounting for the radiotap * header still being in there. We are being given * a precooked IEEE80211 header so no need for * normal processing */ skb_set_mac_header(skb, len_rthdr); /* * these are just fixed to the end of the rt area since we * don't have any better information and at this point, nobody cares */ skb_set_network_header(skb, len_rthdr); skb_set_transport_header(skb, len_rthdr); if (skb->len < len_rthdr + 2) goto fail; hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < len_rthdr + hdrlen) goto fail; /* * Initialize skb->protocol if the injected frame is a data frame * carrying a rfc1042 header */ if (ieee80211_is_data(hdr->frame_control) && skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) { u8 *payload = (u8 *)hdr + hdrlen; if (ether_addr_equal(payload, rfc1042_header)) skb->protocol = cpu_to_be16((payload[6] << 8) | payload[7]); } rcu_read_lock(); /* * We process outgoing injected frames that have a local address * we handle as though they are non-injected frames. * This code here isn't entirely correct, the local MAC address * isn't always enough to find the interface to use; for proper * VLAN support we have an nl80211-based mechanism. * * This is necessary, for example, for old hostapd versions that * don't use nl80211-based management TX/RX. */ sdata = IEEE80211_DEV_TO_SUB_IF(dev); list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(tmp_sdata)) continue; if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR || tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN) continue; if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) { sdata = tmp_sdata; break; } } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { tmp_sdata = rcu_dereference(local->monitor_sdata); if (tmp_sdata) chanctx_conf = rcu_dereference(tmp_sdata->vif.chanctx_conf); } if (chanctx_conf) chandef = &chanctx_conf->def; else if (!local->use_chanctx) chandef = &local->_oper_chandef; else goto fail_rcu; /* * Frame injection is not allowed if beaconing is not allowed * or if we need radar detection. Beaconing is usually not allowed when * the mode or operation (Adhoc, AP, Mesh) does not support DFS. * Passive scan is also used in world regulatory domains where * your country is not known and as such it should be treated as * NO TX unless the channel is explicitly allowed in which case * your current regulatory domain would not have the passive scan * flag. * * Since AP mode uses monitor interfaces to inject/TX management * frames we can make AP mode the exception to this rule once it * supports radar detection as its implementation can deal with * radar detection by itself. We can do that later by adding a * monitor flag interfaces used for AP support. */ if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef, sdata->vif.type)) goto fail_rcu; info->band = chandef->chan->band; /* Initialize skb->priority according to frame type and TID class, * with respect to the sub interface that the frame will actually * be transmitted on. If the DONT_REORDER flag is set, the original * skb-priority is preserved to assure frames injected with this * flag are not reordered relative to each other. */ ieee80211_select_queue_80211(sdata, skb, hdr); skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority)); /* * Process the radiotap header. This will now take into account the * selected chandef above to accurately set injection rates and * retransmissions. */ if (!ieee80211_parse_tx_radiotap(skb, dev)) goto fail_rcu; /* remove the injection radiotap header */ skb_pull(skb, len_rthdr); ieee80211_xmit(sdata, NULL, skb); rcu_read_unlock(); return NETDEV_TX_OK; fail_rcu: rcu_read_unlock(); fail: dev_kfree_skb(skb); return NETDEV_TX_OK; /* meaning, we dealt with the skb */ }
1
CVE-2021-38206
2,176
vulnerable
CWE-125
explicit DataFormatDimMapOp(OpKernelConstruction* context) : OpKernel(context) { string src_format; OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); string dst_format; OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, errors::InvalidArgument(strings::StrCat( "Source format must of length 4 or 5, received " "src_format = ", src_format))); OP_REQUIRES( context, dst_format.size() == 4 || dst_format.size() == 5, errors::InvalidArgument(strings::StrCat( "Destination format must of length 4 or 5, received dst_format = ", dst_format))); dst_idx_ = Tensor(DT_INT32, {static_cast<int64>(src_format.size())}); for (int i = 0; i < src_format.size(); ++i) { for (int j = 0; j < dst_format.size(); ++j) { if (dst_format[j] == src_format[i]) { dst_idx_.vec<int>()(i) = j; break; } } } }
0
CVE-2020-26267
1,094
benign
CWE-125
explicit DataFormatDimMapOp(OpKernelConstruction* context) : OpKernel(context) { string src_format; OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); string dst_format; OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, errors::InvalidArgument( "Source format must be of length 4 or 5, received " "src_format = ", src_format)); OP_REQUIRES(context, dst_format.size() == 4 || dst_format.size() == 5, errors::InvalidArgument("Destination format must be of length " "4 or 5, received dst_format = ", dst_format)); OP_REQUIRES( context, IsValidPermutation(src_format, dst_format), errors::InvalidArgument( "Destination and source format must determine a permutation, got ", src_format, " and ", dst_format)); dst_idx_ = Tensor(DT_INT32, {static_cast<int64>(src_format.size())}); for (int i = 0; i < src_format.size(); ++i) { for (int j = 0; j < dst_format.size(); ++j) { if (dst_format[j] == src_format[i]) { dst_idx_.vec<int>()(i) = j; break; } } } }
1
CVE-2020-26267
1,094
vulnerable
CWE-20
static int FNAME(walk_addr_generic)(struct guest_walker *walker, struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gva_t addr, u32 access) { pt_element_t pte; pt_element_t __user *ptep_user; gfn_t table_gfn; unsigned index, pt_access, uninitialized_var(pte_access); gpa_t pte_gpa; bool eperm, present, rsvd_fault; int offset, write_fault, user_fault, fetch_fault; write_fault = access & PFERR_WRITE_MASK; user_fault = access & PFERR_USER_MASK; fetch_fault = access & PFERR_FETCH_MASK; trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault, fetch_fault); walk: present = true; eperm = rsvd_fault = false; walker->level = mmu->root_level; pte = mmu->get_cr3(vcpu); #if PTTYPE == 64 if (walker->level == PT32E_ROOT_LEVEL) { pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3); trace_kvm_mmu_paging_element(pte, walker->level); if (!is_present_gpte(pte)) { present = false; goto error; } --walker->level; } #endif ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0); pt_access = ACC_ALL; for (;;) { gfn_t real_gfn; unsigned long host_addr; index = PT_INDEX(addr, walker->level); table_gfn = gpte_to_gfn(pte); offset = index * sizeof(pt_element_t); pte_gpa = gfn_to_gpa(table_gfn) + offset; walker->table_gfn[walker->level - 1] = table_gfn; walker->pte_gpa[walker->level - 1] = pte_gpa; real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), PFERR_USER_MASK|PFERR_WRITE_MASK); if (unlikely(real_gfn == UNMAPPED_GVA)) { present = false; break; } real_gfn = gpa_to_gfn(real_gfn); host_addr = gfn_to_hva(vcpu->kvm, real_gfn); if (unlikely(kvm_is_error_hva(host_addr))) { present = false; break; } ptep_user = (pt_element_t __user *)((void *)host_addr + offset); if (unlikely(copy_from_user(&pte, ptep_user, sizeof(pte)))) { present = false; break; } trace_kvm_mmu_paging_element(pte, walker->level); if (unlikely(!is_present_gpte(pte))) { present = false; break; } if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level))) { rsvd_fault = true; break; } if (unlikely(write_fault && !is_writable_pte(pte) && (user_fault || is_write_protection(vcpu)))) eperm = true; if (unlikely(user_fault && !(pte & PT_USER_MASK))) eperm = true; #if PTTYPE == 64 if (unlikely(fetch_fault && (pte & PT64_NX_MASK))) eperm = true; #endif if (!eperm && !rsvd_fault && unlikely(!(pte & PT_ACCESSED_MASK))) { int ret; trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte)); ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte, pte|PT_ACCESSED_MASK); if (ret < 0) { present = false; break; } else if (ret) goto walk; mark_page_dirty(vcpu->kvm, table_gfn); pte |= PT_ACCESSED_MASK; } pte_access = pt_access & FNAME(gpte_access)(vcpu, pte); walker->ptes[walker->level - 1] = pte; if ((walker->level == PT_PAGE_TABLE_LEVEL) || ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(pte) && (PTTYPE == 64 || is_pse(vcpu))) || ((walker->level == PT_PDPE_LEVEL) && is_large_pte(pte) && mmu->root_level == PT64_ROOT_LEVEL)) { int lvl = walker->level; gpa_t real_gpa; gfn_t gfn; u32 ac; gfn = gpte_to_gfn_lvl(pte, lvl); gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT; if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36()) gfn += pse36_gfn_delta(pte); ac = write_fault | fetch_fault | user_fault; real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), ac); if (real_gpa == UNMAPPED_GVA) return 0; walker->gfn = real_gpa >> PAGE_SHIFT; break; } pt_access = pte_access; --walker->level; } if (unlikely(!present || eperm || rsvd_fault)) goto error; if (write_fault && 
unlikely(!is_dirty_gpte(pte))) { int ret; trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte, pte|PT_DIRTY_MASK); if (ret < 0) { present = false; goto error; } else if (ret) goto walk; mark_page_dirty(vcpu->kvm, table_gfn); pte |= PT_DIRTY_MASK; walker->ptes[walker->level - 1] = pte; } walker->pt_access = pt_access; walker->pte_access = pte_access; pgprintk("%s: pte %llx pte_access %x pt_access %x\n", __func__, (u64)pte, pte_access, pt_access); return 1; error: walker->fault.vector = PF_VECTOR; walker->fault.error_code_valid = true; walker->fault.error_code = 0; if (present) walker->fault.error_code |= PFERR_PRESENT_MASK; walker->fault.error_code |= write_fault | user_fault; if (fetch_fault && mmu->nx) walker->fault.error_code |= PFERR_FETCH_MASK; if (rsvd_fault) walker->fault.error_code |= PFERR_RSVD_MASK; walker->fault.address = addr; walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; trace_kvm_mmu_walker_error(walker->fault.error_code); return 0; }
0
CVE-2013-1943
1,709
benign
CWE-20
static int FNAME(walk_addr_generic)(struct guest_walker *walker, struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gva_t addr, u32 access) { pt_element_t pte; pt_element_t __user *ptep_user; gfn_t table_gfn; unsigned index, pt_access, uninitialized_var(pte_access); gpa_t pte_gpa; bool eperm, present, rsvd_fault; int offset, write_fault, user_fault, fetch_fault; write_fault = access & PFERR_WRITE_MASK; user_fault = access & PFERR_USER_MASK; fetch_fault = access & PFERR_FETCH_MASK; trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault, fetch_fault); walk: present = true; eperm = rsvd_fault = false; walker->level = mmu->root_level; pte = mmu->get_cr3(vcpu); #if PTTYPE == 64 if (walker->level == PT32E_ROOT_LEVEL) { pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3); trace_kvm_mmu_paging_element(pte, walker->level); if (!is_present_gpte(pte)) { present = false; goto error; } --walker->level; } #endif ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0); pt_access = ACC_ALL; for (;;) { gfn_t real_gfn; unsigned long host_addr; index = PT_INDEX(addr, walker->level); table_gfn = gpte_to_gfn(pte); offset = index * sizeof(pt_element_t); pte_gpa = gfn_to_gpa(table_gfn) + offset; walker->table_gfn[walker->level - 1] = table_gfn; walker->pte_gpa[walker->level - 1] = pte_gpa; real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), PFERR_USER_MASK|PFERR_WRITE_MASK); if (unlikely(real_gfn == UNMAPPED_GVA)) { present = false; break; } real_gfn = gpa_to_gfn(real_gfn); host_addr = gfn_to_hva(vcpu->kvm, real_gfn); if (unlikely(kvm_is_error_hva(host_addr))) { present = false; break; } ptep_user = (pt_element_t __user *)((void *)host_addr + offset); if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) { present = false; break; } trace_kvm_mmu_paging_element(pte, walker->level); if (unlikely(!is_present_gpte(pte))) { present = false; break; } if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level))) { rsvd_fault = true; break; } if (unlikely(write_fault && !is_writable_pte(pte) && (user_fault || is_write_protection(vcpu)))) eperm = true; if (unlikely(user_fault && !(pte & PT_USER_MASK))) eperm = true; #if PTTYPE == 64 if (unlikely(fetch_fault && (pte & PT64_NX_MASK))) eperm = true; #endif if (!eperm && !rsvd_fault && unlikely(!(pte & PT_ACCESSED_MASK))) { int ret; trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte)); ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte, pte|PT_ACCESSED_MASK); if (ret < 0) { present = false; break; } else if (ret) goto walk; mark_page_dirty(vcpu->kvm, table_gfn); pte |= PT_ACCESSED_MASK; } pte_access = pt_access & FNAME(gpte_access)(vcpu, pte); walker->ptes[walker->level - 1] = pte; if ((walker->level == PT_PAGE_TABLE_LEVEL) || ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(pte) && (PTTYPE == 64 || is_pse(vcpu))) || ((walker->level == PT_PDPE_LEVEL) && is_large_pte(pte) && mmu->root_level == PT64_ROOT_LEVEL)) { int lvl = walker->level; gpa_t real_gpa; gfn_t gfn; u32 ac; gfn = gpte_to_gfn_lvl(pte, lvl); gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT; if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36()) gfn += pse36_gfn_delta(pte); ac = write_fault | fetch_fault | user_fault; real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), ac); if (real_gpa == UNMAPPED_GVA) return 0; walker->gfn = real_gpa >> PAGE_SHIFT; break; } pt_access = pte_access; --walker->level; } if (unlikely(!present || eperm || rsvd_fault)) goto error; if (write_fault && 
unlikely(!is_dirty_gpte(pte))) { int ret; trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte, pte|PT_DIRTY_MASK); if (ret < 0) { present = false; goto error; } else if (ret) goto walk; mark_page_dirty(vcpu->kvm, table_gfn); pte |= PT_DIRTY_MASK; walker->ptes[walker->level - 1] = pte; } walker->pt_access = pt_access; walker->pte_access = pte_access; pgprintk("%s: pte %llx pte_access %x pt_access %x\n", __func__, (u64)pte, pte_access, pt_access); return 1; error: walker->fault.vector = PF_VECTOR; walker->fault.error_code_valid = true; walker->fault.error_code = 0; if (present) walker->fault.error_code |= PFERR_PRESENT_MASK; walker->fault.error_code |= write_fault | user_fault; if (fetch_fault && mmu->nx) walker->fault.error_code |= PFERR_FETCH_MASK; if (rsvd_fault) walker->fault.error_code |= PFERR_RSVD_MASK; walker->fault.address = addr; walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; trace_kvm_mmu_walker_error(walker->fault.error_code); return 0; }
1
CVE-2013-1943
1,709
vulnerable
CWE-787
void trustedGenerateSEK(int *errStatus, char *errString,
                        uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) {
    CALL_ONCE
    LOG_INFO(__FUNCTION__);
    INIT_ERROR_STATE

    CHECK_STATE(encrypted_sek);
    CHECK_STATE(sek_hex);

    RANDOM_CHAR_BUF(SEK_raw, SGX_AESGCM_KEY_SIZE);

    carray2Hex((uint8_t*) SEK_raw, SGX_AESGCM_KEY_SIZE, sek_hex);
    memcpy(AES_key, SEK_raw, SGX_AESGCM_KEY_SIZE);
    derive_DH_Key();

    sealHexSEK(errStatus, errString, encrypted_sek, enc_len, sek_hex);

    if (*errStatus != 0) {
        LOG_ERROR("sealHexSEK failed");
        goto clean;
    }

    SET_SUCCESS
    clean:
    LOG_INFO(__FUNCTION__ );
    LOG_INFO("SGX call completed");
}
0
CVE-2021-36218
328
benign
CWE-787
void trustedGenerateSEK(int *errStatus, char *errString,
                        uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) {
    CALL_ONCE
    LOG_INFO(__FUNCTION__);
    INIT_ERROR_STATE

    CHECK_STATE(encrypted_sek);
    CHECK_STATE(sek_hex);

    RANDOM_CHAR_BUF(SEK_raw, SGX_AESGCM_KEY_SIZE);

    carray2Hex((uint8_t*) SEK_raw, SGX_AESGCM_KEY_SIZE, sek_hex);
    memcpy(AES_key, SEK_raw, SGX_AESGCM_KEY_SIZE);
    derive_DH_Key();

    sealHexSEK(errStatus, errString, encrypted_sek, enc_len, sek_hex);

    if (*errStatus != 0) {
        LOG_ERROR("sealHexSEK failed");
        goto clean;
    }

    SET_SUCCESS
    clean:
    LOG_INFO(__FUNCTION__ );
    LOG_INFO("SGX call completed");
}
1
CVE-2021-36218
328
vulnerable
CWE-416
ex_function(exarg_T *eap)
{
    (void)define_function(eap, NULL);
}
0
CVE-2021-4173
1,791
benign
CWE-416
ex_function(exarg_T *eap)
{
    char_u *line_to_free = NULL;

    (void)define_function(eap, NULL, &line_to_free);
    vim_free(line_to_free);
}
1
CVE-2021-4173
1,791
vulnerable
CWE-125
void ntlm_populate_message_header(NTLM_MESSAGE_HEADER* header, UINT32 MessageType)
{
    CopyMemory(header->Signature, NTLM_SIGNATURE, sizeof(NTLM_SIGNATURE));
    header->MessageType = MessageType;
}
0
CVE-2018-8789
1,494
benign
CWE-125
static void ntlm_populate_message_header(NTLM_MESSAGE_HEADER* header, UINT32 MessageType)
{
    CopyMemory(header->Signature, NTLM_SIGNATURE, sizeof(NTLM_SIGNATURE));
    header->MessageType = MessageType;
}
1
CVE-2018-8789
1,494
vulnerable
CWE-125
TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const auto* params =
      reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
  const LeakyReluOpData* data =
      reinterpret_cast<LeakyReluOpData*>(node->user_data);
  LeakyReluParams op_params;
  switch (input->type) {
    case kTfLiteFloat32: {
      op_params.alpha = params->alpha;
      optimized_ops::LeakyRelu(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      QuantizeLeakyRelu<uint8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      QuantizeLeakyRelu<int8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      QuantizeLeakyRelu<int16_t>(input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, int8, int16 and uint8 is supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
0
CVE-2020-15211
2,423
benign
CWE-125
TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const auto* params =
      reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
  const LeakyReluOpData* data =
      reinterpret_cast<LeakyReluOpData*>(node->user_data);
  LeakyReluParams op_params;
  switch (input->type) {
    case kTfLiteFloat32: {
      op_params.alpha = params->alpha;
      optimized_ops::LeakyRelu(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      QuantizeLeakyRelu<uint8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      QuantizeLeakyRelu<int8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      QuantizeLeakyRelu<int16_t>(input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, int8, int16 and uint8 is supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
1
CVE-2020-15211
2,423
vulnerable
CWE-787
grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { #ifndef _MSC_VER char buf[EXT2_BLOCK_SIZE (data)]; #else char * buf = grub_malloc (EXT2_BLOCK_SIZE(data)); #endif struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) return 0; else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); return -1; } } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; }
0
CVE-2017-9949
2,159
benign
CWE-787
grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { #ifndef _MSC_VER char buf[EXT2_BLOCK_SIZE (data)]; #else char * buf = grub_malloc (EXT2_BLOCK_SIZE(data)); #endif struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) return 0; else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); return -1; } } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) { blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ } else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; }
1
CVE-2017-9949
2,159
vulnerable
CWE-119
static void fwnet_receive_broadcast(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { struct fwnet_device *dev; struct fw_iso_packet packet; __be16 *hdr_ptr; __be32 *buf_ptr; int retval; u32 length; u16 source_node_id; u32 specifier_id; u32 ver; unsigned long offset; unsigned long flags; dev = data; hdr_ptr = header; length = be16_to_cpup(hdr_ptr); spin_lock_irqsave(&dev->lock, flags); offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr; buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++]; if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs) dev->broadcast_rcv_next_ptr = 0; spin_unlock_irqrestore(&dev->lock, flags); specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; if (specifier_id == IANA_SPECIFIER_ID && (ver == RFC2734_SW_VERSION #if IS_ENABLED(CONFIG_IPV6) || ver == RFC3146_SW_VERSION #endif )) { buf_ptr += 2; length -= IEEE1394_GASP_HDR_SIZE; fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, context->card->generation, true); } packet.payload_length = dev->rcv_buffer_size; packet.interrupt = 1; packet.skip = 0; packet.tag = 3; packet.sy = 0; packet.header_length = IEEE1394_GASP_HDR_SIZE; spin_lock_irqsave(&dev->lock, flags); retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet, &dev->broadcast_rcv_buffer, offset); spin_unlock_irqrestore(&dev->lock, flags); if (retval >= 0) fw_iso_context_queue_flush(dev->broadcast_rcv_context); else dev_err(&dev->netdev->dev, "requeue failed\n"); }
0
CVE-2016-8633
1,787
benign
CWE-119
static void fwnet_receive_broadcast(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { struct fwnet_device *dev; struct fw_iso_packet packet; __be16 *hdr_ptr; __be32 *buf_ptr; int retval; u32 length; unsigned long offset; unsigned long flags; dev = data; hdr_ptr = header; length = be16_to_cpup(hdr_ptr); spin_lock_irqsave(&dev->lock, flags); offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr; buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++]; if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs) dev->broadcast_rcv_next_ptr = 0; spin_unlock_irqrestore(&dev->lock, flags); if (length > IEEE1394_GASP_HDR_SIZE && gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID && (gasp_version(buf_ptr) == RFC2734_SW_VERSION #if IS_ENABLED(CONFIG_IPV6) || gasp_version(buf_ptr) == RFC3146_SW_VERSION #endif )) fwnet_incoming_packet(dev, buf_ptr + 2, length - IEEE1394_GASP_HDR_SIZE, gasp_source_id(buf_ptr), context->card->generation, true); packet.payload_length = dev->rcv_buffer_size; packet.interrupt = 1; packet.skip = 0; packet.tag = 3; packet.sy = 0; packet.header_length = IEEE1394_GASP_HDR_SIZE; spin_lock_irqsave(&dev->lock, flags); retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet, &dev->broadcast_rcv_buffer, offset); spin_unlock_irqrestore(&dev->lock, flags); if (retval >= 0) fw_iso_context_queue_flush(dev->broadcast_rcv_context); else dev_err(&dev->netdev->dev, "requeue failed\n"); }
1
CVE-2016-8633
1,787
vulnerable
CWE-476
jas_image_t *bmp_decode(jas_stream_t *in, char *optstr) { jas_image_t *image; bmp_hdr_t hdr; bmp_info_t *info; uint_fast16_t cmptno; jas_image_cmptparm_t cmptparms[3]; jas_image_cmptparm_t *cmptparm; uint_fast16_t numcmpts; long n; if (optstr) { jas_eprintf("warning: ignoring BMP decoder options\n"); } jas_eprintf( "THE BMP FORMAT IS NOT FULLY SUPPORTED!\n" "THAT IS, THE JASPER SOFTWARE CANNOT DECODE ALL TYPES OF BMP DATA.\n" "IF YOU HAVE ANY PROBLEMS, PLEASE TRY CONVERTING YOUR IMAGE DATA\n" "TO THE PNM FORMAT, AND USING THIS FORMAT INSTEAD.\n" ); /* Read the bitmap header. */ if (bmp_gethdr(in, &hdr)) { jas_eprintf("cannot get header\n"); return 0; } JAS_DBGLOG(1, ( "BMP header: magic 0x%x; siz %d; res1 %d; res2 %d; off %d\n", hdr.magic, hdr.siz, hdr.reserved1, hdr.reserved2, hdr.off )); /* Read the bitmap information. */ if (!(info = bmp_getinfo(in))) { jas_eprintf("cannot get info\n"); return 0; } JAS_DBGLOG(1, ("BMP information: len %d; width %d; height %d; numplanes %d; " "depth %d; enctype %d; siz %d; hres %d; vres %d; numcolors %d; " "mincolors %d\n", info->len, info->width, info->height, info->numplanes, info->depth, info->enctype, info->siz, info->hres, info->vres, info->numcolors, info->mincolors)); /* Ensure that we support this type of BMP file. */ if (!bmp_issupported(&hdr, info)) { jas_eprintf("error: unsupported BMP encoding\n"); bmp_info_destroy(info); return 0; } /* Skip over any useless data between the end of the palette and start of the bitmap data. */ if ((n = hdr.off - (BMP_HDRLEN + BMP_INFOLEN + BMP_PALLEN(info))) < 0) { jas_eprintf("error: possibly bad bitmap offset?\n"); return 0; } if (n > 0) { jas_eprintf("skipping unknown data in BMP file\n"); if (bmp_gobble(in, n)) { bmp_info_destroy(info); return 0; } } /* Get the number of components. */ numcmpts = bmp_numcmpts(info); for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno, ++cmptparm) { cmptparm->tlx = 0; cmptparm->tly = 0; cmptparm->hstep = 1; cmptparm->vstep = 1; cmptparm->width = info->width; cmptparm->height = info->height; cmptparm->prec = 8; cmptparm->sgnd = false; } /* Create image object. */ if (!(image = jas_image_create(numcmpts, cmptparms, JAS_CLRSPC_UNKNOWN))) { bmp_info_destroy(info); return 0; } if (numcmpts == 3) { jas_image_setclrspc(image, JAS_CLRSPC_SRGB); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R)); jas_image_setcmpttype(image, 1, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G)); jas_image_setcmpttype(image, 2, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)); } else { jas_image_setclrspc(image, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); } /* Read the bitmap data. */ if (bmp_getdata(in, info, image)) { bmp_info_destroy(info); jas_image_destroy(image); return 0; } bmp_info_destroy(info); return image; }
0
CVE-2016-8884
191
benign
CWE-476
jas_image_t *bmp_decode(jas_stream_t *in, char *optstr) { jas_image_t *image; bmp_hdr_t hdr; bmp_info_t *info; uint_fast16_t cmptno; jas_image_cmptparm_t cmptparms[3]; jas_image_cmptparm_t *cmptparm; uint_fast16_t numcmpts; long n; image = 0; info = 0; if (optstr) { jas_eprintf("warning: ignoring BMP decoder options\n"); } jas_eprintf( "THE BMP FORMAT IS NOT FULLY SUPPORTED!\n" "THAT IS, THE JASPER SOFTWARE CANNOT DECODE ALL TYPES OF BMP DATA.\n" "IF YOU HAVE ANY PROBLEMS, PLEASE TRY CONVERTING YOUR IMAGE DATA\n" "TO THE PNM FORMAT, AND USING THIS FORMAT INSTEAD.\n" ); /* Read the bitmap header. */ if (bmp_gethdr(in, &hdr)) { jas_eprintf("cannot get header\n"); goto error; //return 0; } JAS_DBGLOG(1, ( "BMP header: magic 0x%x; siz %d; res1 %d; res2 %d; off %d\n", hdr.magic, hdr.siz, hdr.reserved1, hdr.reserved2, hdr.off )); /* Read the bitmap information. */ if (!(info = bmp_getinfo(in))) { jas_eprintf("cannot get info\n"); //return 0; goto error; } JAS_DBGLOG(1, ("BMP information: len %ld; width %ld; height %ld; numplanes %d; " "depth %d; enctype %ld; siz %ld; hres %ld; vres %ld; numcolors %ld; " "mincolors %ld\n", JAS_CAST(long, info->len), JAS_CAST(long, info->width), JAS_CAST(long, info->height), JAS_CAST(long, info->numplanes), JAS_CAST(long, info->depth), JAS_CAST(long, info->enctype), JAS_CAST(long, info->siz), JAS_CAST(long, info->hres), JAS_CAST(long, info->vres), JAS_CAST(long, info->numcolors), JAS_CAST(long, info->mincolors))); if (info->width < 0 || info->height < 0 || info->numplanes < 0 || info->depth < 0 || info->siz < 0 || info->hres < 0 || info->vres < 0) { jas_eprintf("corrupt bit stream\n"); goto error; } /* Ensure that we support this type of BMP file. */ if (!bmp_issupported(&hdr, info)) { jas_eprintf("error: unsupported BMP encoding\n"); //bmp_info_destroy(info); //return 0; goto error; } /* Skip over any useless data between the end of the palette and start of the bitmap data. */ if ((n = hdr.off - (BMP_HDRLEN + BMP_INFOLEN + BMP_PALLEN(info))) < 0) { jas_eprintf("error: possibly bad bitmap offset?\n"); goto error; //return 0; } if (n > 0) { jas_eprintf("skipping unknown data in BMP file\n"); if (bmp_gobble(in, n)) { //bmp_info_destroy(info); //return 0; goto error; } } /* Get the number of components. */ numcmpts = bmp_numcmpts(info); for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno, ++cmptparm) { cmptparm->tlx = 0; cmptparm->tly = 0; cmptparm->hstep = 1; cmptparm->vstep = 1; cmptparm->width = info->width; cmptparm->height = info->height; cmptparm->prec = 8; cmptparm->sgnd = false; } /* Create image object. */ if (!(image = jas_image_create(numcmpts, cmptparms, JAS_CLRSPC_UNKNOWN))) { //bmp_info_destroy(info); //return 0; goto error; } if (numcmpts == 3) { jas_image_setclrspc(image, JAS_CLRSPC_SRGB); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R)); jas_image_setcmpttype(image, 1, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G)); jas_image_setcmpttype(image, 2, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)); } else { jas_image_setclrspc(image, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); } /* Read the bitmap data. */ if (bmp_getdata(in, info, image)) { //bmp_info_destroy(info); //jas_image_destroy(image); //return 0; goto error; } bmp_info_destroy(info); return image; error: if (info) { bmp_info_destroy(info); } if (image) { jas_image_destroy(image); } return 0; }
1
CVE-2016-8884
191
vulnerable
CWE-125
PixarLogClose(TIFF* tif)
{
    TIFFDirectory *td = &tif->tif_dir;

    /* In a really sneaky (and really incorrect, and untruthful, and
     * troublesome, and error-prone) maneuver that completely goes against
     * the spirit of TIFF, and breaks TIFF, on close, we covertly
     * modify both bitspersample and sampleformat in the directory to
     * indicate 8-bit linear. This way, the decode "just works" even for
     * readers that don't know about PixarLog, or how to set
     * the PIXARLOGDATFMT pseudo-tag.
     */
    td->td_bitspersample = 8;
    td->td_sampleformat = SAMPLEFORMAT_UINT;
}
0
CVE-2016-10269
2,168
benign
CWE-125
PixarLogClose(TIFF* tif)
{
    PixarLogState* sp = (PixarLogState*) tif->tif_data;
    TIFFDirectory *td = &tif->tif_dir;

    assert(sp != 0);
    /* In a really sneaky (and really incorrect, and untruthful, and
     * troublesome, and error-prone) maneuver that completely goes against
     * the spirit of TIFF, and breaks TIFF, on close, we covertly
     * modify both bitspersample and sampleformat in the directory to
     * indicate 8-bit linear. This way, the decode "just works" even for
     * readers that don't know about PixarLog, or how to set
     * the PIXARLOGDATFMT pseudo-tag.
     */
    if (sp->state&PLSTATE_INIT) {
        /* We test the state to avoid an issue such as in
         * http://bugzilla.maptools.org/show_bug.cgi?id=2604
         * What appends in that case is that the bitspersample is 1 and
         * a TransferFunction is set. The size of the TransferFunction
         * depends on 1<<bitspersample. So if we increase it, an access
         * out of the buffer will happen at directory flushing.
         * Another option would be to clear those targs.
         */
        td->td_bitspersample = 8;
        td->td_sampleformat = SAMPLEFORMAT_UINT;
    }
}
1
CVE-2016-10269
2,168
vulnerable
CWE-416
lambda_function_body( char_u **arg, typval_T *rettv, evalarg_T *evalarg, garray_T *newargs, garray_T *argtypes, int varargs, garray_T *default_args, char_u *ret_type) { int evaluate = (evalarg->eval_flags & EVAL_EVALUATE); garray_T *gap = &evalarg->eval_ga; garray_T *freegap = &evalarg->eval_freega; ufunc_T *ufunc = NULL; exarg_T eap; garray_T newlines; char_u *cmdline = NULL; int ret = FAIL; char_u *line_to_free = NULL; partial_T *pt; char_u *name; int lnum_save = -1; linenr_T sourcing_lnum_top = SOURCING_LNUM; if (!ends_excmd2(*arg, skipwhite(*arg + 1))) { semsg(_(e_trailing_arg), *arg + 1); return FAIL; } CLEAR_FIELD(eap); eap.cmdidx = CMD_block; eap.forceit = FALSE; eap.cmdlinep = &cmdline; eap.skip = !evaluate; if (evalarg->eval_cctx != NULL) fill_exarg_from_cctx(&eap, evalarg->eval_cctx); else { eap.getline = evalarg->eval_getline; eap.cookie = evalarg->eval_cookie; } ga_init2(&newlines, (int)sizeof(char_u *), 10); if (get_function_body(&eap, &newlines, NULL, &line_to_free) == FAIL) { vim_free(cmdline); goto erret; } // When inside a lambda must add the function lines to evalarg.eval_ga. evalarg->eval_break_count += newlines.ga_len; if (gap->ga_itemsize > 0) { int idx; char_u *last; size_t plen; char_u *pnl; for (idx = 0; idx < newlines.ga_len; ++idx) { char_u *p = skipwhite(((char_u **)newlines.ga_data)[idx]); if (ga_grow(gap, 1) == FAIL || ga_grow(freegap, 1) == FAIL) goto erret; // Going to concatenate the lines after parsing. For an empty or // comment line use an empty string. // Insert NL characters at the start of each line, the string will // be split again later in .get_lambda_tv(). if (*p == NUL || vim9_comment_start(p)) p = (char_u *)""; plen = STRLEN(p); pnl = vim_strnsave((char_u *)"\n", plen + 1); if (pnl != NULL) mch_memmove(pnl + 1, p, plen + 1); ((char_u **)gap->ga_data)[gap->ga_len++] = pnl; ((char_u **)freegap->ga_data)[freegap->ga_len++] = pnl; } if (ga_grow(gap, 1) == FAIL || ga_grow(freegap, 1) == FAIL) goto erret; if (cmdline != NULL) // more is following after the "}", which was skipped last = cmdline; else // nothing is following the "}" last = (char_u *)"}"; plen = STRLEN(last); pnl = vim_strnsave((char_u *)"\n", plen + 1); if (pnl != NULL) mch_memmove(pnl + 1, last, plen + 1); ((char_u **)gap->ga_data)[gap->ga_len++] = pnl; ((char_u **)freegap->ga_data)[freegap->ga_len++] = pnl; } if (cmdline != NULL) { garray_T *tfgap = &evalarg->eval_tofree_ga; // Something comes after the "}". *arg = eap.nextcmd; // "arg" points into cmdline, need to keep the line and free it later. 
if (ga_grow(tfgap, 1) == OK) { ((char_u **)(tfgap->ga_data))[tfgap->ga_len++] = cmdline; evalarg->eval_using_cmdline = TRUE; } } else *arg = (char_u *)""; if (!evaluate) { ret = OK; goto erret; } name = get_lambda_name(); ufunc = alloc_clear(offsetof(ufunc_T, uf_name) + STRLEN(name) + 1); if (ufunc == NULL) goto erret; set_ufunc_name(ufunc, name); if (hash_add(&func_hashtab, UF2HIKEY(ufunc)) == FAIL) goto erret; ufunc->uf_flags = FC_LAMBDA; ufunc->uf_refcount = 1; ufunc->uf_args = *newargs; newargs->ga_data = NULL; ufunc->uf_def_args = *default_args; default_args->ga_data = NULL; ufunc->uf_func_type = &t_func_any; // error messages are for the first function line lnum_save = SOURCING_LNUM; SOURCING_LNUM = sourcing_lnum_top; // parse argument types if (parse_argument_types(ufunc, argtypes, varargs) == FAIL) { SOURCING_LNUM = lnum_save; goto erret; } // parse the return type, if any if (parse_return_type(ufunc, ret_type) == FAIL) goto erret; pt = ALLOC_CLEAR_ONE(partial_T); if (pt == NULL) goto erret; pt->pt_func = ufunc; pt->pt_refcount = 1; ufunc->uf_lines = newlines; newlines.ga_data = NULL; if (sandbox) ufunc->uf_flags |= FC_SANDBOX; if (!ASCII_ISUPPER(*ufunc->uf_name)) ufunc->uf_flags |= FC_VIM9; ufunc->uf_script_ctx = current_sctx; ufunc->uf_script_ctx_version = current_sctx.sc_version; ufunc->uf_script_ctx.sc_lnum += sourcing_lnum_top; set_function_type(ufunc); function_using_block_scopes(ufunc, evalarg->eval_cstack); rettv->vval.v_partial = pt; rettv->v_type = VAR_PARTIAL; ufunc = NULL; ret = OK; erret: if (lnum_save >= 0) SOURCING_LNUM = lnum_save; vim_free(line_to_free); ga_clear_strings(&newlines); if (newargs != NULL) ga_clear_strings(newargs); ga_clear_strings(default_args); if (ufunc != NULL) { func_clear(ufunc, TRUE); func_free(ufunc, TRUE); } return ret; }
0
CVE-2021-4173
1,793
benign
CWE-416
lambda_function_body( char_u **arg, typval_T *rettv, evalarg_T *evalarg, garray_T *newargs, garray_T *argtypes, int varargs, garray_T *default_args, char_u *ret_type) { int evaluate = (evalarg->eval_flags & EVAL_EVALUATE); garray_T *gap = &evalarg->eval_ga; garray_T *freegap = &evalarg->eval_freega; ufunc_T *ufunc = NULL; exarg_T eap; garray_T newlines; char_u *cmdline = NULL; int ret = FAIL; char_u *line_to_free = NULL; partial_T *pt; char_u *name; int lnum_save = -1; linenr_T sourcing_lnum_top = SOURCING_LNUM; if (!ends_excmd2(*arg, skipwhite(*arg + 1))) { semsg(_(e_trailing_arg), *arg + 1); return FAIL; } CLEAR_FIELD(eap); eap.cmdidx = CMD_block; eap.forceit = FALSE; eap.cmdlinep = &cmdline; eap.skip = !evaluate; if (evalarg->eval_cctx != NULL) fill_exarg_from_cctx(&eap, evalarg->eval_cctx); else { eap.getline = evalarg->eval_getline; eap.cookie = evalarg->eval_cookie; } ga_init2(&newlines, (int)sizeof(char_u *), 10); if (get_function_body(&eap, &newlines, NULL, &line_to_free) == FAIL) { vim_free(cmdline); goto erret; } // When inside a lambda must add the function lines to evalarg.eval_ga. evalarg->eval_break_count += newlines.ga_len; if (gap->ga_itemsize > 0) { int idx; char_u *last; size_t plen; char_u *pnl; for (idx = 0; idx < newlines.ga_len; ++idx) { char_u *p = skipwhite(((char_u **)newlines.ga_data)[idx]); if (ga_grow(gap, 1) == FAIL || ga_grow(freegap, 1) == FAIL) goto erret; // Going to concatenate the lines after parsing. For an empty or // comment line use an empty string. // Insert NL characters at the start of each line, the string will // be split again later in .get_lambda_tv(). if (*p == NUL || vim9_comment_start(p)) p = (char_u *)""; plen = STRLEN(p); pnl = vim_strnsave((char_u *)"\n", plen + 1); if (pnl != NULL) mch_memmove(pnl + 1, p, plen + 1); ((char_u **)gap->ga_data)[gap->ga_len++] = pnl; ((char_u **)freegap->ga_data)[freegap->ga_len++] = pnl; } if (ga_grow(gap, 1) == FAIL || ga_grow(freegap, 1) == FAIL) goto erret; if (eap.nextcmd != NULL) // more is following after the "}", which was skipped last = cmdline; else // nothing is following the "}" last = (char_u *)"}"; plen = STRLEN(last); pnl = vim_strnsave((char_u *)"\n", plen + 1); if (pnl != NULL) mch_memmove(pnl + 1, last, plen + 1); ((char_u **)gap->ga_data)[gap->ga_len++] = pnl; ((char_u **)freegap->ga_data)[freegap->ga_len++] = pnl; } if (eap.nextcmd != NULL) { garray_T *tfgap = &evalarg->eval_tofree_ga; // Something comes after the "}". *arg = eap.nextcmd; // "arg" points into cmdline, need to keep the line and free it later. 
if (ga_grow(tfgap, 1) == OK) { ((char_u **)(tfgap->ga_data))[tfgap->ga_len++] = cmdline; evalarg->eval_using_cmdline = TRUE; if (cmdline == line_to_free) line_to_free = NULL; } } else *arg = (char_u *)""; if (!evaluate) { ret = OK; goto erret; } name = get_lambda_name(); ufunc = alloc_clear(offsetof(ufunc_T, uf_name) + STRLEN(name) + 1); if (ufunc == NULL) goto erret; set_ufunc_name(ufunc, name); if (hash_add(&func_hashtab, UF2HIKEY(ufunc)) == FAIL) goto erret; ufunc->uf_flags = FC_LAMBDA; ufunc->uf_refcount = 1; ufunc->uf_args = *newargs; newargs->ga_data = NULL; ufunc->uf_def_args = *default_args; default_args->ga_data = NULL; ufunc->uf_func_type = &t_func_any; // error messages are for the first function line lnum_save = SOURCING_LNUM; SOURCING_LNUM = sourcing_lnum_top; // parse argument types if (parse_argument_types(ufunc, argtypes, varargs) == FAIL) { SOURCING_LNUM = lnum_save; goto erret; } // parse the return type, if any if (parse_return_type(ufunc, ret_type) == FAIL) goto erret; pt = ALLOC_CLEAR_ONE(partial_T); if (pt == NULL) goto erret; pt->pt_func = ufunc; pt->pt_refcount = 1; ufunc->uf_lines = newlines; newlines.ga_data = NULL; if (sandbox) ufunc->uf_flags |= FC_SANDBOX; if (!ASCII_ISUPPER(*ufunc->uf_name)) ufunc->uf_flags |= FC_VIM9; ufunc->uf_script_ctx = current_sctx; ufunc->uf_script_ctx_version = current_sctx.sc_version; ufunc->uf_script_ctx.sc_lnum += sourcing_lnum_top; set_function_type(ufunc); function_using_block_scopes(ufunc, evalarg->eval_cstack); rettv->vval.v_partial = pt; rettv->v_type = VAR_PARTIAL; ufunc = NULL; ret = OK; erret: if (lnum_save >= 0) SOURCING_LNUM = lnum_save; vim_free(line_to_free); ga_clear_strings(&newlines); if (newargs != NULL) ga_clear_strings(newargs); ga_clear_strings(default_args); if (ufunc != NULL) { func_clear(ufunc, TRUE); func_free(ufunc, TRUE); } return ret; }
1
CVE-2021-4173
1,793
vulnerable
CWE-787
void pdf_get_version(FILE *fp, pdf_t *pdf)
{
    char *header, *c;

    header = get_header(fp);

    /* Locate version string start and make sure we dont go past header */
    if ((c = strstr(header, "%PDF-")) &&
        (c + strlen("%PDF-M.m") + 2))
    {
        pdf->pdf_major_version = atoi(c + strlen("%PDF-"));
        pdf->pdf_minor_version = atoi(c + strlen("%PDF-M."));
    }

    free(header);
}
0
CVE-2020-20740
1,427
benign
CWE-787
void pdf_get_version(FILE *fp, pdf_t *pdf)
{
    char *header = get_header(fp);

    /* Locate version string start and make sure we dont go past header
     * The format is %PDF-M.m, where 'M' is the major number and 'm' minor.
     */
    const char *c;
    if ((c = strstr(header, "%PDF-")) &&
        ((c + 6)[0] == '.')           && // Separator
        isdigit((c + 5)[0])           && // Major number
        isdigit((c + 7)[0]))             // Minor number
    {
        pdf->pdf_major_version = atoi(c + strlen("%PDF-"));
        pdf->pdf_minor_version = atoi(c + strlen("%PDF-M."));
    }

    free(header);
}
1
CVE-2020-20740
1,427
vulnerable
CWE-119
DECLAREwriteFunc(writeBufferToContigTiles) { uint32 imagew = TIFFScanlineSize(out); uint32 tilew = TIFFTileRowSize(out); int iskew = imagew - tilew; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; (void) spp; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); for (row = 0; row < imagelength; row += tilelength) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. */ if (colb + tilew > imagew) { uint32 width = imagew - colb; int oskew = tilew - width; cpStripToTile(obuf, bufp + colb, nrow, width, oskew, oskew + iskew); } else cpStripToTile(obuf, bufp + colb, nrow, tilew, 0, iskew); if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu", (unsigned long) col, (unsigned long) row); _TIFFfree(obuf); return 0; } colb += tilew; } bufp += nrow * imagew; } _TIFFfree(obuf); return 1; }
0
CVE-2016-9540
1,988
benign
CWE-119
DECLAREwriteFunc(writeBufferToContigTiles) { uint32 imagew = TIFFScanlineSize(out); uint32 tilew = TIFFTileRowSize(out); int iskew = imagew - tilew; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; (void) spp; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); for (row = 0; row < imagelength; row += tilelength) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. */ if (colb + tilew > imagew) { uint32 width = imagew - colb; int oskew = tilew - width; cpStripToTile(obuf, bufp + colb, nrow, width, oskew, oskew + iskew); } else cpStripToTile(obuf, bufp + colb, nrow, tilew, 0, iskew); if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu", (unsigned long) col, (unsigned long) row); _TIFFfree(obuf); return 0; } colb += tilew; } bufp += nrow * imagew; } _TIFFfree(obuf); return 1; }
1
CVE-2016-9540
1,988
vulnerable
CWE-125
AP4_HdlrAtom::AP4_HdlrAtom(AP4_UI32 size, AP4_UI08 version, AP4_UI32 flags,
                           AP4_ByteStream& stream) :
    AP4_Atom(AP4_ATOM_TYPE_HDLR, size, version, flags)
{
    AP4_UI32 predefined;
    stream.ReadUI32(predefined);
    stream.ReadUI32(m_HandlerType);
    stream.ReadUI32(m_Reserved[0]);
    stream.ReadUI32(m_Reserved[1]);
    stream.ReadUI32(m_Reserved[2]);

    // read the name unless it is empty
    if (size < AP4_FULL_ATOM_HEADER_SIZE+20) return;
    AP4_UI32 name_size = size-(AP4_FULL_ATOM_HEADER_SIZE+20);
    char* name = new char[name_size+1];
    if (name == NULL) return;
    stream.Read(name, name_size);
    name[name_size] = '\0'; // force a null termination

    // handle a special case: the Quicktime files have a pascal
    // string here, but ISO MP4 files have a C string.
    // we try to detect a pascal encoding and correct it.
    if (name[0] == name_size-1) {
        m_HandlerName = name+1;
    } else {
        m_HandlerName = name;
    }
    delete[] name;
}
0
CVE-2017-14643
2,138
benign
CWE-125
AP4_HdlrAtom::AP4_HdlrAtom(AP4_UI32 size, AP4_UI08 version, AP4_UI32 flags,
                           AP4_ByteStream& stream) :
    AP4_Atom(AP4_ATOM_TYPE_HDLR, size, version, flags)
{
    AP4_UI32 predefined;
    stream.ReadUI32(predefined);
    stream.ReadUI32(m_HandlerType);
    stream.ReadUI32(m_Reserved[0]);
    stream.ReadUI32(m_Reserved[1]);
    stream.ReadUI32(m_Reserved[2]);

    // read the name unless it is empty
    if (size < AP4_FULL_ATOM_HEADER_SIZE+20) return;
    AP4_UI32 name_size = size-(AP4_FULL_ATOM_HEADER_SIZE+20);
    char* name = new char[name_size+1];
    if (name == NULL) return;
    stream.Read(name, name_size);
    name[name_size] = '\0'; // force a null termination

    // handle a special case: the Quicktime files have a pascal
    // string here, but ISO MP4 files have a C string.
    // we try to detect a pascal encoding and correct it.
    if ((AP4_UI08)name[0] == (AP4_UI08)(name_size-1)) {
        m_HandlerName = name+1;
    } else {
        m_HandlerName = name;
    }
    delete[] name;
}
1
CVE-2017-14643
2,138
vulnerable
CWE-119
PredictorDecodeRow(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s)
{
    TIFFPredictorState *sp = PredictorState(tif);

    assert(sp != NULL);
    assert(sp->decoderow != NULL);
    assert(sp->decodepfunc != NULL);

    if ((*sp->decoderow)(tif, op0, occ0, s)) {
        (*sp->decodepfunc)(tif, op0, occ0);
        return 1;
    } else
        return 0;
}
0
CVE-2016-9535
947
benign
CWE-119
PredictorDecodeRow(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s)
{
    TIFFPredictorState *sp = PredictorState(tif);

    assert(sp != NULL);
    assert(sp->decoderow != NULL);
    assert(sp->decodepfunc != NULL);

    if ((*sp->decoderow)(tif, op0, occ0, s)) {
        return (*sp->decodepfunc)(tif, op0, occ0);
    } else
        return 0;
}
1
CVE-2016-9535
947
vulnerable
CWE-20
static int keyring_search_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); unsigned long kflags = key->flags; kenter("{%d}", key->serial); /* ignore keys not of this type */ if (key->type != ctx->index_key.type) { kleave(" = 0 [!type]"); return 0; } /* skip invalidated, revoked and expired keys */ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { if (kflags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { ctx->result = ERR_PTR(-EKEYREVOKED); kleave(" = %d [invrev]", ctx->skipped_ret); goto skipped; } if (key->expiry && ctx->now.tv_sec >= key->expiry) { if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) ctx->result = ERR_PTR(-EKEYEXPIRED); kleave(" = %d [expire]", ctx->skipped_ret); goto skipped; } } /* keys that don't match */ if (!ctx->match_data.cmp(key, &ctx->match_data)) { kleave(" = 0 [!match]"); return 0; } /* key must have search permissions */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) { ctx->result = ERR_PTR(-EACCES); kleave(" = %d [!perm]", ctx->skipped_ret); goto skipped; } if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { /* we set a different error code if we pass a negative key */ if (kflags & (1 << KEY_FLAG_NEGATIVE)) { smp_rmb(); ctx->result = ERR_PTR(key->reject_error); kleave(" = %d [neg]", ctx->skipped_ret); goto skipped; } } /* Found */ ctx->result = make_key_ref(key, ctx->possessed); kleave(" = 1 [found]"); return 1; skipped: return ctx->skipped_ret; }
0
CVE-2017-15951
2,100
benign
CWE-20
static int keyring_search_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); unsigned long kflags = READ_ONCE(key->flags); short state = READ_ONCE(key->state); kenter("{%d}", key->serial); /* ignore keys not of this type */ if (key->type != ctx->index_key.type) { kleave(" = 0 [!type]"); return 0; } /* skip invalidated, revoked and expired keys */ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { if (kflags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { ctx->result = ERR_PTR(-EKEYREVOKED); kleave(" = %d [invrev]", ctx->skipped_ret); goto skipped; } if (key->expiry && ctx->now.tv_sec >= key->expiry) { if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) ctx->result = ERR_PTR(-EKEYEXPIRED); kleave(" = %d [expire]", ctx->skipped_ret); goto skipped; } } /* keys that don't match */ if (!ctx->match_data.cmp(key, &ctx->match_data)) { kleave(" = 0 [!match]"); return 0; } /* key must have search permissions */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) { ctx->result = ERR_PTR(-EACCES); kleave(" = %d [!perm]", ctx->skipped_ret); goto skipped; } if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { /* we set a different error code if we pass a negative key */ if (state < 0) { ctx->result = ERR_PTR(state); kleave(" = %d [neg]", ctx->skipped_ret); goto skipped; } } /* Found */ ctx->result = make_key_ref(key, ctx->possessed); kleave(" = 1 [found]"); return 1; skipped: return ctx->skipped_ret; }
1
CVE-2017-15951
2,100
vulnerable