cwe_id (stringclasses, 8 values) | func (stringlengths, 40-61.2k) | label (int64, 0-1) | cve_id (stringlengths, 13-16) | id (int64, 0-3.29k) | text_label (stringclasses, 2 values) |
---|---|---|---|---|---|
CWE-119 | horizontalDifference8(unsigned char *ip, int n, int stride,
unsigned short *wp, uint16 *From8)
{
register int r1, g1, b1, a1, r2, g2, b2, a2, mask;
#undef CLAMP
#define CLAMP(v) (From8[(v)])
mask = CODE_MASK;
if (n >= stride) {
if (stride == 3) {
r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]);
b2 = wp[2] = CLAMP(ip[2]);
n -= 3;
while (n > 0) {
n -= 3;
r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1;
wp += 3;
ip += 3;
}
} else if (stride == 4) {
r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]);
b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]);
n -= 4;
while (n > 0) {
n -= 4;
r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1;
a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1;
wp += 4;
ip += 4;
}
} else {
wp += n + stride - 1; /* point to last one */
ip += n + stride - 1; /* point to last one */
n -= stride;
while (n > 0) {
REPEAT(stride, wp[0] = CLAMP(ip[0]);
wp[stride] -= wp[0];
wp[stride] &= mask;
wp--; ip--)
n -= stride;
}
REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--)
}
}
} | 0 | CVE-2016-9533 | 2,533 | benign |
CWE-119 | horizontalDifference8(unsigned char *ip, int n, int stride,
unsigned short *wp, uint16 *From8)
{
register int r1, g1, b1, a1, r2, g2, b2, a2, mask;
#undef CLAMP
#define CLAMP(v) (From8[(v)])
mask = CODE_MASK;
if (n >= stride) {
if (stride == 3) {
r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]);
b2 = wp[2] = CLAMP(ip[2]);
n -= 3;
while (n > 0) {
n -= 3;
r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1;
wp += 3;
ip += 3;
}
} else if (stride == 4) {
r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]);
b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]);
n -= 4;
while (n > 0) {
n -= 4;
r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1;
a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1;
wp += 4;
ip += 4;
}
} else {
REPEAT(stride, wp[0] = CLAMP(ip[0]); wp++; ip++)
n -= stride;
while (n > 0) {
REPEAT(stride,
wp[0] = (uint16)((CLAMP(ip[0])-CLAMP(ip[-stride])) & mask);
wp++; ip++)
n -= stride;
}
}
}
} | 1 | CVE-2016-9533 | 2,533 | vulnerable |
CWE-125 | inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
return (shape->dims->size == 1 && shape->type == kTfLiteInt32);
} | 0 | CVE-2020-15211 | 1,118 | benign |
CWE-125 | inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
return (shape != nullptr && shape->dims->size == 1 &&
shape->type == kTfLiteInt32);
} | 1 | CVE-2020-15211 | 1,118 | vulnerable |
CWE-125 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// There are two ways in which the 'output' can be made dynamic: it could be
// a string tensor, or its shape cannot be calculated during Prepare(). In
// either case, we now have all the information to calculate its shape.
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
// Note that string tensors are always "dynamic" in the sense that their size
// is not known until we have all the content. This applies even when their
// shape is known ahead of time. As a result, a string tensor is never given
// any memory by ResizeOutput(), and we need to do it manually here. Since
// reshape doesn't change the data, the output tensor needs exactly as many
// bytes as the input tensor.
if (output->type == kTfLiteString) {
auto bytes_required = input->bytes;
TfLiteTensorRealloc(bytes_required, output);
output->bytes = bytes_required;
}
memcpy(output->data.raw, input->data.raw, input->bytes);
return kTfLiteOk;
} | 0 | CVE-2020-15211 | 1,115 | benign |
CWE-125 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
// There are two ways in which the 'output' can be made dynamic: it could be
// a string tensor, or its shape cannot be calculated during Prepare(). In
// either case, we now have all the information to calculate its shape.
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
// Note that string tensors are always "dynamic" in the sense that their size
// is not known until we have all the content. This applies even when their
// shape is known ahead of time. As a result, a string tensor is never given
// any memory by ResizeOutput(), and we need to do it manually here. Since
// reshape doesn't change the data, the output tensor needs exactly as many
// bytes as the input tensor.
if (output->type == kTfLiteString) {
auto bytes_required = input->bytes;
TfLiteTensorRealloc(bytes_required, output);
output->bytes = bytes_required;
}
memcpy(output->data.raw, input->data.raw, input->bytes);
return kTfLiteOk;
} | 1 | CVE-2020-15211 | 1,115 | vulnerable |
CWE-476 | RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) {
int i;
if (!bin) {
return NULL;
}
RList *segments = r_list_newf (free);
for (i = 0; i < bin->ne_header->SegCount; i++) {
RBinSection *bs = R_NEW0 (RBinSection);
if (!bs) {
return segments;
}
NE_image_segment_entry *se = &bin->segment_entries[i];
bs->size = se->length;
bs->vsize = se->minAllocSz ? se->minAllocSz : 64000;
bs->bits = R_SYS_BITS_16;
bs->is_data = se->flags & IS_DATA;
bs->perm = __translate_perms (se->flags);
bs->paddr = (ut64)se->offset * bin->alignment;
bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr);
bs->is_segment = true;
r_list_append (segments, bs);
}
bin->segments = segments;
return segments;
} | 0 | CVE-2022-1382 | 1,986 | benign |
CWE-476 | RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) {
int i;
if (!bin || !bin->segment_entries) {
return NULL;
}
RList *segments = r_list_newf (free);
for (i = 0; i < bin->ne_header->SegCount; i++) {
RBinSection *bs = R_NEW0 (RBinSection);
if (!bs) {
return segments;
}
NE_image_segment_entry *se = &bin->segment_entries[i];
bs->size = se->length;
bs->vsize = se->minAllocSz ? se->minAllocSz : 64000;
bs->bits = R_SYS_BITS_16;
bs->is_data = se->flags & IS_DATA;
bs->perm = __translate_perms (se->flags);
bs->paddr = (ut64)se->offset * bin->alignment;
bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr);
bs->is_segment = true;
r_list_append (segments, bs);
}
bin->segments = segments;
return segments;
} | 1 | CVE-2022-1382 | 1,986 | vulnerable |
CWE-119 | sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data)
{ SF_PRIVATE *psf ;
/* Make sure we have a valid set ot virtual pointers. */
if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL)
{ sf_errno = SFE_BAD_VIRTUAL_IO ;
snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ;
return NULL ;
} ;
if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL)
{ sf_errno = SFE_BAD_VIRTUAL_IO ;
snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ;
return NULL ;
} ;
if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL)
{ sf_errno = SFE_BAD_VIRTUAL_IO ;
snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ;
return NULL ;
} ;
if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)
{ sf_errno = SFE_MALLOC_FAILED ;
return NULL ;
} ;
psf_init_files (psf) ;
psf->virtual_io = SF_TRUE ;
psf->vio = *sfvirtual ;
psf->vio_user_data = user_data ;
psf->file.mode = mode ;
return psf_open_file (psf, sfinfo) ;
} /* sf_open_virtual */ | 0 | CVE-2017-7586 | 2,323 | benign |
CWE-119 | sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data)
{ SF_PRIVATE *psf ;
/* Make sure we have a valid set ot virtual pointers. */
if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL)
{ sf_errno = SFE_BAD_VIRTUAL_IO ;
snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ;
return NULL ;
} ;
if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL)
{ sf_errno = SFE_BAD_VIRTUAL_IO ;
snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ;
return NULL ;
} ;
if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL)
{ sf_errno = SFE_BAD_VIRTUAL_IO ;
snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ;
return NULL ;
} ;
if ((psf = psf_allocate ()) == NULL)
{ sf_errno = SFE_MALLOC_FAILED ;
return NULL ;
} ;
psf_init_files (psf) ;
psf->virtual_io = SF_TRUE ;
psf->vio = *sfvirtual ;
psf->vio_user_data = user_data ;
psf->file.mode = mode ;
return psf_open_file (psf, sfinfo) ;
} /* sf_open_virtual */ | 1 | CVE-2017-7586 | 2,323 | vulnerable |
CWE-125 | TfLiteStatus StoreAllDecodedSequences(
TfLiteContext* context,
const std::vector<std::vector<std::vector<int>>>& sequences,
TfLiteNode* node, int top_paths) {
const int32_t batch_size = sequences.size();
std::vector<int32_t> num_entries(top_paths, 0);
// Calculate num_entries per path
for (const auto& batch_s : sequences) {
TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths);
for (int p = 0; p < top_paths; ++p) {
num_entries[p] += batch_s[p].size();
}
}
for (int p = 0; p < top_paths; ++p) {
const int32_t p_num = num_entries[p];
// Resize the decoded outputs.
TfLiteTensor* indices = GetOutput(context, node, p);
TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices));
TfLiteTensor* values = GetOutput(context, node, p + top_paths);
TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values));
TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths);
TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape));
int32_t max_decoded = 0;
int32_t offset = 0;
int32_t* indices_data = GetTensorData<int32_t>(indices);
int32_t* values_data = GetTensorData<int32_t>(values);
int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape);
for (int b = 0; b < batch_size; ++b) {
auto& p_batch = sequences[b][p];
int32_t num_decoded = p_batch.size();
max_decoded = std::max(max_decoded, num_decoded);
std::copy_n(p_batch.begin(), num_decoded, values_data + offset);
for (int32_t t = 0; t < num_decoded; ++t, ++offset) {
indices_data[offset * 2] = b;
indices_data[offset * 2 + 1] = t;
}
}
decoded_shape_data[0] = batch_size;
decoded_shape_data[1] = max_decoded;
}
return kTfLiteOk;
} | 0 | CVE-2020-15211 | 1,813 | benign |
CWE-125 | TfLiteStatus StoreAllDecodedSequences(
TfLiteContext* context,
const std::vector<std::vector<std::vector<int>>>& sequences,
TfLiteNode* node, int top_paths) {
const int32_t batch_size = sequences.size();
std::vector<int32_t> num_entries(top_paths, 0);
// Calculate num_entries per path
for (const auto& batch_s : sequences) {
TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths);
for (int p = 0; p < top_paths; ++p) {
num_entries[p] += batch_s[p].size();
}
}
for (int p = 0; p < top_paths; ++p) {
const int32_t p_num = num_entries[p];
// Resize the decoded outputs.
TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p, &indices));
TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices));
TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, p + top_paths, &values));
TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values));
TfLiteTensor* decoded_shape;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + 2 * top_paths,
&decoded_shape));
TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape));
int32_t max_decoded = 0;
int32_t offset = 0;
int32_t* indices_data = GetTensorData<int32_t>(indices);
int32_t* values_data = GetTensorData<int32_t>(values);
int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape);
for (int b = 0; b < batch_size; ++b) {
auto& p_batch = sequences[b][p];
int32_t num_decoded = p_batch.size();
max_decoded = std::max(max_decoded, num_decoded);
std::copy_n(p_batch.begin(), num_decoded, values_data + offset);
for (int32_t t = 0; t < num_decoded; ++t, ++offset) {
indices_data[offset * 2] = b;
indices_data[offset * 2 + 1] = t;
}
}
decoded_shape_data[0] = batch_size;
decoded_shape_data[1] = max_decoded;
}
return kTfLiteOk;
} | 1 | CVE-2020-15211 | 1,813 | vulnerable |
CWE-787 | CFontFileBase(char *sFile, int nLen, bool bFreeFileData)
{
m_sFileData = m_sFile = (unsigned char *)sFile;
m_nLen = nLen;
m_bFreeFileData = bFreeFileData;
m_nPos = 0;
} | 0 | CVE-2022-29777 | 1,927 | benign |
CWE-787 | CFontFileBase(char *sFile, int nLen, bool bFreeFileData)
{
m_sFileData = m_sFile = (unsigned char *)sFile;
m_nLen = (nLen > 0) ? 0 : (unsigned int)nLen;
m_nPos = 0;
m_bFreeFileData = bFreeFileData;
} | 1 | CVE-2022-29777 | 1,927 | vulnerable |
CWE-125 | snmp_ber_encode_integer(unsigned char *out, uint32_t *out_len, uint32_t number)
{
uint32_t original_out_len;
original_out_len = *out_len;
do {
(*out_len)++;
*out-- = (uint8_t)(number & 0xFF);
number >>= 8;
} while(number);
out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF));
out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_INTEGER);
return out;
} | 0 | CVE-2020-12141 | 393 | benign |
CWE-125 | snmp_ber_encode_integer(snmp_packet_t *snmp_packet, uint32_t number)
{
return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, number);
} | 1 | CVE-2020-12141 | 393 | vulnerable |
CWE-125 | lookup_bytestring(netdissect_options *ndo, register const u_char *bs,
const unsigned int nlen)
{
struct enamemem *tp;
register u_int i, j, k;
if (nlen >= 6) {
k = (bs[0] << 8) | bs[1];
j = (bs[2] << 8) | bs[3];
i = (bs[4] << 8) | bs[5];
} else if (nlen >= 4) {
k = (bs[0] << 8) | bs[1];
j = (bs[2] << 8) | bs[3];
i = 0;
} else
i = j = k = 0;
tp = &bytestringtable[(i ^ j) & (HASHNAMESIZE-1)];
while (tp->e_nxt)
if (tp->e_addr0 == i &&
tp->e_addr1 == j &&
tp->e_addr2 == k &&
memcmp((const char *)bs, (const char *)(tp->e_bs), nlen) == 0)
return tp;
else
tp = tp->e_nxt;
tp->e_addr0 = i;
tp->e_addr1 = j;
tp->e_addr2 = k;
tp->e_bs = (u_char *) calloc(1, nlen + 1);
if (tp->e_bs == NULL)
(*ndo->ndo_error)(ndo, "lookup_bytestring: calloc");
memcpy(tp->e_bs, bs, nlen);
tp->e_nxt = (struct enamemem *)calloc(1, sizeof(*tp));
if (tp->e_nxt == NULL)
(*ndo->ndo_error)(ndo, "lookup_bytestring: calloc");
return tp;
} | 0 | CVE-2017-12894 | 432 | benign |
CWE-125 | lookup_bytestring(netdissect_options *ndo, register const u_char *bs,
const unsigned int nlen)
{
struct bsnamemem *tp;
register u_int i, j, k;
if (nlen >= 6) {
k = (bs[0] << 8) | bs[1];
j = (bs[2] << 8) | bs[3];
i = (bs[4] << 8) | bs[5];
} else if (nlen >= 4) {
k = (bs[0] << 8) | bs[1];
j = (bs[2] << 8) | bs[3];
i = 0;
} else
i = j = k = 0;
tp = &bytestringtable[(i ^ j) & (HASHNAMESIZE-1)];
while (tp->bs_nxt)
if (nlen == tp->bs_nbytes &&
tp->bs_addr0 == i &&
tp->bs_addr1 == j &&
tp->bs_addr2 == k &&
memcmp((const char *)bs, (const char *)(tp->bs_bytes), nlen) == 0)
return tp;
else
tp = tp->bs_nxt;
tp->bs_addr0 = i;
tp->bs_addr1 = j;
tp->bs_addr2 = k;
tp->bs_bytes = (u_char *) calloc(1, nlen + 1);
if (tp->bs_bytes == NULL)
(*ndo->ndo_error)(ndo, "lookup_bytestring: calloc");
memcpy(tp->bs_bytes, bs, nlen);
tp->bs_nbytes = nlen;
tp->bs_nxt = (struct bsnamemem *)calloc(1, sizeof(*tp));
if (tp->bs_nxt == NULL)
(*ndo->ndo_error)(ndo, "lookup_bytestring: calloc");
return tp;
} | 1 | CVE-2017-12894 | 432 | vulnerable |
CWE-20 | bool_t ksz8851IrqHandler(NetInterface *interface)
{
bool_t flag;
size_t n;
uint16_t ier;
uint16_t isr;
//This flag will be set if a higher priority task must be woken
flag = FALSE;
//Save IER register value
ier = ksz8851ReadReg(interface, KSZ8851_REG_IER);
//Disable interrupts to release the interrupt line
ksz8851WriteReg(interface, KSZ8851_REG_IER, 0);
//Read interrupt status register
isr = ksz8851ReadReg(interface, KSZ8851_REG_ISR);
//Link status change?
if((isr & ISR_LCIS) != 0)
{
//Disable LCIE interrupt
ier &= ~IER_LCIE;
//Set event flag
interface->nicEvent = TRUE;
//Notify the TCP/IP stack of the event
flag |= osSetEventFromIsr(&netEvent);
}
//Packet transmission complete?
if((isr & ISR_TXIS) != 0)
{
//Clear interrupt flag
ksz8851WriteReg(interface, KSZ8851_REG_ISR, ISR_TXIS);
//Get the amount of free memory available in the TX FIFO
n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK;
//Check whether the TX FIFO is available for writing
if(n >= (ETH_MAX_FRAME_SIZE + 8))
{
//Notify the TCP/IP stack that the transmitter is ready to send
flag |= osSetEventFromIsr(&interface->nicTxEvent);
}
}
//Packet received?
if((isr & ISR_RXIS) != 0)
{
//Disable RXIE interrupt
ier &= ~IER_RXIE;
//Set event flag
interface->nicEvent = TRUE;
//Notify the TCP/IP stack of the event
flag |= osSetEventFromIsr(&netEvent);
}
//Re-enable interrupts once the interrupt has been serviced
ksz8851WriteReg(interface, KSZ8851_REG_IER, ier);
//A higher priority task must be woken?
return flag;
} | 0 | CVE-2021-26788 | 84 | benign |
CWE-20 | bool_t ksz8851IrqHandler(NetInterface *interface)
{
bool_t flag;
size_t n;
uint16_t ier;
uint16_t isr;
//This flag will be set if a higher priority task must be woken
flag = FALSE;
//Save IER register value
ier = ksz8851ReadReg(interface, KSZ8851_IER);
//Disable interrupts to release the interrupt line
ksz8851WriteReg(interface, KSZ8851_IER, 0);
//Read interrupt status register
isr = ksz8851ReadReg(interface, KSZ8851_ISR);
//Link status change?
if((isr & KSZ8851_ISR_LCIS) != 0)
{
//Disable LCIE interrupt
ier &= ~KSZ8851_IER_LCIE;
//Set event flag
interface->nicEvent = TRUE;
//Notify the TCP/IP stack of the event
flag |= osSetEventFromIsr(&netEvent);
}
//Packet transmission complete?
if((isr & KSZ8851_ISR_TXIS) != 0)
{
//Clear interrupt flag
ksz8851WriteReg(interface, KSZ8851_ISR, KSZ8851_ISR_TXIS);
//Get the amount of free memory available in the TX FIFO
n = ksz8851ReadReg(interface, KSZ8851_TXMIR) & KSZ8851_TXMIR_TXMA;
//Check whether the TX FIFO is available for writing
if(n >= (ETH_MAX_FRAME_SIZE + 8))
{
//Notify the TCP/IP stack that the transmitter is ready to send
flag |= osSetEventFromIsr(&interface->nicTxEvent);
}
}
//Packet received?
if((isr & KSZ8851_ISR_RXIS) != 0)
{
//Disable RXIE interrupt
ier &= ~KSZ8851_IER_RXIE;
//Set event flag
interface->nicEvent = TRUE;
//Notify the TCP/IP stack of the event
flag |= osSetEventFromIsr(&netEvent);
}
//Re-enable interrupts once the interrupt has been serviced
ksz8851WriteReg(interface, KSZ8851_IER, ier);
//A higher priority task must be woken?
return flag;
} | 1 | CVE-2021-26788 | 84 | vulnerable |
CWE-787 | PlainPasswd::PlainPasswd(int len) : CharArray(len) {
} | 0 | CVE-2019-15694 | 3,222 | benign |
CWE-787 | PlainPasswd::PlainPasswd(size_t len) : CharArray(len) {
} | 1 | CVE-2019-15694 | 3,222 | vulnerable |
CWE-787 | void Archive::Seek(int64 Offset,int Method)
{
if (!QOpen.Seek(Offset,Method))
File::Seek(Offset,Method);
} | 0 | CVE-2017-20006 | 3,112 | benign |
CWE-787 | void Archive::Seek(int64 Offset,int Method)
{
#ifdef USE_QOPEN
if (QOpen.Seek(Offset,Method))
return;
#endif
#ifdef USE_ARCMEM
if (ArcMem.Seek(Offset,Method))
return;
#endif
File::Seek(Offset,Method);
} | 1 | CVE-2017-20006 | 3,112 | vulnerable |
CWE-787 | CURLcode Curl_smtp_escape_eob(struct connectdata *conn, const ssize_t nread)
{
/* When sending a SMTP payload we must detect CRLF. sequences making sure
they are sent as CRLF.. instead, as a . on the beginning of a line will
be deleted by the server when not part of an EOB terminator and a
genuine CRLF.CRLF which isn't escaped will wrongly be detected as end of
data by the server
*/
ssize_t i;
ssize_t si;
struct Curl_easy *data = conn->data;
struct SMTP *smtp = data->req.protop;
char *scratch = data->state.scratch;
char *newscratch = NULL;
char *oldscratch = NULL;
size_t eob_sent;
/* Do we need to allocate a scratch buffer? */
if(!scratch || data->set.crlf) {
oldscratch = scratch;
scratch = newscratch = malloc(2 * data->set.buffer_size);
if(!newscratch) {
failf(data, "Failed to alloc scratch buffer!");
return CURLE_OUT_OF_MEMORY;
}
}
/* Have we already sent part of the EOB? */
eob_sent = smtp->eob;
/* This loop can be improved by some kind of Boyer-Moore style of
approach but that is saved for later... */
for(i = 0, si = 0; i < nread; i++) {
if(SMTP_EOB[smtp->eob] == data->req.upload_fromhere[i]) {
smtp->eob++;
/* Is the EOB potentially the terminating CRLF? */
if(2 == smtp->eob || SMTP_EOB_LEN == smtp->eob)
smtp->trailing_crlf = TRUE;
else
smtp->trailing_crlf = FALSE;
}
else if(smtp->eob) {
/* A previous substring matched so output that first */
memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent);
si += smtp->eob - eob_sent;
/* Then compare the first byte */
if(SMTP_EOB[0] == data->req.upload_fromhere[i])
smtp->eob = 1;
else
smtp->eob = 0;
eob_sent = 0;
/* Reset the trailing CRLF flag as there was more data */
smtp->trailing_crlf = FALSE;
}
/* Do we have a match for CRLF. as per RFC-5321, sect. 4.5.2 */
if(SMTP_EOB_FIND_LEN == smtp->eob) {
/* Copy the replacement data to the target buffer */
memcpy(&scratch[si], &SMTP_EOB_REPL[eob_sent],
SMTP_EOB_REPL_LEN - eob_sent);
si += SMTP_EOB_REPL_LEN - eob_sent;
smtp->eob = 0;
eob_sent = 0;
}
else if(!smtp->eob)
scratch[si++] = data->req.upload_fromhere[i];
}
if(smtp->eob - eob_sent) {
/* A substring matched before processing ended so output that now */
memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent);
si += smtp->eob - eob_sent;
}
/* Only use the new buffer if we replaced something */
if(si != nread) {
/* Upload from the new (replaced) buffer instead */
data->req.upload_fromhere = scratch;
/* Save the buffer so it can be freed later */
data->state.scratch = scratch;
/* Free the old scratch buffer */
free(oldscratch);
/* Set the new amount too */
data->req.upload_present = si;
}
else
free(newscratch);
return CURLE_OK;
} | 0 | CVE-2018-0500 | 658 | benign |
CWE-787 | CURLcode Curl_smtp_escape_eob(struct connectdata *conn, const ssize_t nread)
{
/* When sending a SMTP payload we must detect CRLF. sequences making sure
they are sent as CRLF.. instead, as a . on the beginning of a line will
be deleted by the server when not part of an EOB terminator and a
genuine CRLF.CRLF which isn't escaped will wrongly be detected as end of
data by the server
*/
ssize_t i;
ssize_t si;
struct Curl_easy *data = conn->data;
struct SMTP *smtp = data->req.protop;
char *scratch = data->state.scratch;
char *newscratch = NULL;
char *oldscratch = NULL;
size_t eob_sent;
/* Do we need to allocate a scratch buffer? */
if(!scratch || data->set.crlf) {
oldscratch = scratch;
scratch = newscratch = malloc(2 * UPLOAD_BUFSIZE);
if(!newscratch) {
failf(data, "Failed to alloc scratch buffer!");
return CURLE_OUT_OF_MEMORY;
}
}
DEBUGASSERT(UPLOAD_BUFSIZE >= nread);
/* Have we already sent part of the EOB? */
eob_sent = smtp->eob;
/* This loop can be improved by some kind of Boyer-Moore style of
approach but that is saved for later... */
for(i = 0, si = 0; i < nread; i++) {
if(SMTP_EOB[smtp->eob] == data->req.upload_fromhere[i]) {
smtp->eob++;
/* Is the EOB potentially the terminating CRLF? */
if(2 == smtp->eob || SMTP_EOB_LEN == smtp->eob)
smtp->trailing_crlf = TRUE;
else
smtp->trailing_crlf = FALSE;
}
else if(smtp->eob) {
/* A previous substring matched so output that first */
memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent);
si += smtp->eob - eob_sent;
/* Then compare the first byte */
if(SMTP_EOB[0] == data->req.upload_fromhere[i])
smtp->eob = 1;
else
smtp->eob = 0;
eob_sent = 0;
/* Reset the trailing CRLF flag as there was more data */
smtp->trailing_crlf = FALSE;
}
/* Do we have a match for CRLF. as per RFC-5321, sect. 4.5.2 */
if(SMTP_EOB_FIND_LEN == smtp->eob) {
/* Copy the replacement data to the target buffer */
memcpy(&scratch[si], &SMTP_EOB_REPL[eob_sent],
SMTP_EOB_REPL_LEN - eob_sent);
si += SMTP_EOB_REPL_LEN - eob_sent;
smtp->eob = 0;
eob_sent = 0;
}
else if(!smtp->eob)
scratch[si++] = data->req.upload_fromhere[i];
}
if(smtp->eob - eob_sent) {
/* A substring matched before processing ended so output that now */
memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent);
si += smtp->eob - eob_sent;
}
/* Only use the new buffer if we replaced something */
if(si != nread) {
/* Upload from the new (replaced) buffer instead */
data->req.upload_fromhere = scratch;
/* Save the buffer so it can be freed later */
data->state.scratch = scratch;
/* Free the old scratch buffer */
free(oldscratch);
/* Set the new amount too */
data->req.upload_present = si;
}
else
free(newscratch);
return CURLE_OK;
} | 1 | CVE-2018-0500 | 658 | vulnerable |
CWE-119 | static pyc_object *get_set_object(RBuffer *buffer) {
pyc_object *ret = NULL;
bool error = false;
ut32 n = get_ut32 (buffer, &error);
if (n > ST32_MAX) {
eprintf ("bad marshal data (set size out of range)\n");
return NULL;
}
if (error) {
return NULL;
}
ret = get_array_object_generic (buffer, n);
if (!ret) {
return NULL;
}
ret->type = TYPE_SET;
return ret;
} | 0 | CVE-2022-0523 | 686 | benign |
CWE-119 | static pyc_object *get_set_object(RBuffer *buffer) {
bool error = false;
ut32 n = get_ut32 (buffer, &error);
if (n > ST32_MAX) {
eprintf ("bad marshal data (set size out of range)\n");
return NULL;
}
if (error) {
return NULL;
}
pyc_object *ret = get_array_object_generic (buffer, n);
if (ret) {
ret->type = TYPE_SET;
}
return ret;
} | 1 | CVE-2022-0523 | 686 | vulnerable |
CWE-190 | MONGO_EXPORT int bson_append_finish_object( bson *b ) {
char *start;
int i;
if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR;
bson_append_byte( b , 0 );
start = b->data + b->stack[ --b->stackPos ];
i = b->cur - start;
bson_little_endian32( start, &i );
return BSON_OK;
} | 0 | CVE-2020-12135 | 2,704 | benign |
CWE-190 | MONGO_EXPORT int bson_append_finish_object( bson *b ) {
char *start;
int i;
if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR;
bson_append_byte( b , 0 );
start = b->data + b->stack[ --b->stackPos ];
i = ( int )( b->cur - start );
bson_little_endian32( start, &i );
return BSON_OK;
} | 1 | CVE-2020-12135 | 2,704 | vulnerable |
CWE-416 | static struct page *follow_pmd_mask(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
unsigned int flags,
struct follow_page_context *ctx)
{
pmd_t *pmd, pmdval;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
pmd = pmd_offset(pudp, address);
/*
* The READ_ONCE() will stabilize the pmdval in a register or
* on the stack so that it will stop changing under the code.
*/
pmdval = READ_ONCE(*pmd);
if (pmd_none(pmdval))
return no_page_table(vma, flags);
if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
page = follow_huge_pmd(mm, address, pmd, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
page = follow_huge_pd(vma, address,
__hugepd(pmd_val(pmdval)), flags,
PMD_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
retry:
if (!pmd_present(pmdval)) {
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
VM_BUG_ON(thp_migration_supported() &&
!is_pmd_migration_entry(pmdval));
if (is_pmd_migration_entry(pmdval))
pmd_migration_entry_wait(mm, pmd);
pmdval = READ_ONCE(*pmd);
/*
* MADV_DONTNEED may convert the pmd to null because
* mmap_sem is held in read mode
*/
if (pmd_none(pmdval))
return no_page_table(vma, flags);
goto retry;
}
if (pmd_devmap(pmdval)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
spin_unlock(ptl);
if (page)
return page;
}
if (likely(!pmd_trans_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
return no_page_table(vma, flags);
retry_locked:
ptl = pmd_lock(mm, pmd);
if (unlikely(pmd_none(*pmd))) {
spin_unlock(ptl);
return no_page_table(vma, flags);
}
if (unlikely(!pmd_present(*pmd))) {
spin_unlock(ptl);
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
pmd_migration_entry_wait(mm, pmd);
goto retry_locked;
}
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(ptl);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
if (flags & FOLL_SPLIT) {
int ret;
page = pmd_page(*pmd);
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
ret = 0;
split_huge_pmd(vma, pmd, address);
if (pmd_trans_unstable(pmd))
ret = -EBUSY;
} else {
get_page(page);
spin_unlock(ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (pmd_none(*pmd))
return no_page_table(vma, flags);
}
return ret ? ERR_PTR(ret) :
follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
page = follow_trans_huge_pmd(vma, address, pmd, flags);
spin_unlock(ptl);
ctx->page_mask = HPAGE_PMD_NR - 1;
return page;
} | 0 | CVE-2019-11487 | 981 | benign |
CWE-416 | static struct page *follow_pmd_mask(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
unsigned int flags,
struct follow_page_context *ctx)
{
pmd_t *pmd, pmdval;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
pmd = pmd_offset(pudp, address);
/*
* The READ_ONCE() will stabilize the pmdval in a register or
* on the stack so that it will stop changing under the code.
*/
pmdval = READ_ONCE(*pmd);
if (pmd_none(pmdval))
return no_page_table(vma, flags);
if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
page = follow_huge_pmd(mm, address, pmd, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
page = follow_huge_pd(vma, address,
__hugepd(pmd_val(pmdval)), flags,
PMD_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
retry:
if (!pmd_present(pmdval)) {
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
VM_BUG_ON(thp_migration_supported() &&
!is_pmd_migration_entry(pmdval));
if (is_pmd_migration_entry(pmdval))
pmd_migration_entry_wait(mm, pmd);
pmdval = READ_ONCE(*pmd);
/*
* MADV_DONTNEED may convert the pmd to null because
* mmap_sem is held in read mode
*/
if (pmd_none(pmdval))
return no_page_table(vma, flags);
goto retry;
}
if (pmd_devmap(pmdval)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
spin_unlock(ptl);
if (page)
return page;
}
if (likely(!pmd_trans_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
return no_page_table(vma, flags);
retry_locked:
ptl = pmd_lock(mm, pmd);
if (unlikely(pmd_none(*pmd))) {
spin_unlock(ptl);
return no_page_table(vma, flags);
}
if (unlikely(!pmd_present(*pmd))) {
spin_unlock(ptl);
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
pmd_migration_entry_wait(mm, pmd);
goto retry_locked;
}
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(ptl);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
if (flags & FOLL_SPLIT) {
int ret;
page = pmd_page(*pmd);
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
ret = 0;
split_huge_pmd(vma, pmd, address);
if (pmd_trans_unstable(pmd))
ret = -EBUSY;
} else {
if (unlikely(!try_get_page(page))) {
spin_unlock(ptl);
return ERR_PTR(-ENOMEM);
}
spin_unlock(ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (pmd_none(*pmd))
return no_page_table(vma, flags);
}
return ret ? ERR_PTR(ret) :
follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
page = follow_trans_huge_pmd(vma, address, pmd, flags);
spin_unlock(ptl);
ctx->page_mask = HPAGE_PMD_NR - 1;
return page;
} | 1 | CVE-2019-11487 | 981 | vulnerable |
CWE-476 | static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
unsigned long tmp;
char *p = (char *)page;
ssize_t ret;
tmp = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
tmp = !!tmp; /* boolean of whether this node wants to be local */
/* setting local turns on networking rx for now so we require having
* set everything else first */
if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
!test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
/* the only failure case is trying to set a new local node
* when a different one is already set */
if (tmp && tmp == cluster->cl_has_local &&
cluster->cl_local_node != node->nd_num)
return -EBUSY;
/* bring up the rx thread if we're setting the new local node. */
if (tmp && !cluster->cl_has_local) {
ret = o2net_start_listening(node);
if (ret)
return ret;
}
if (!tmp && cluster->cl_has_local &&
cluster->cl_local_node == node->nd_num) {
o2net_stop_listening(node);
cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
}
node->nd_local = tmp;
if (node->nd_local) {
cluster->cl_has_local = tmp;
cluster->cl_local_node = node->nd_num;
}
return count;
} | 0 | CVE-2017-18216 | 2,990 | benign |
CWE-476 | static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
ssize_t ret;
tmp = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
tmp = !!tmp; /* boolean of whether this node wants to be local */
/* setting local turns on networking rx for now so we require having
* set everything else first */
if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
!test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
o2nm_lock_subsystem();
cluster = to_o2nm_cluster_from_node(node);
if (!cluster) {
ret = -EINVAL;
goto out;
}
/* the only failure case is trying to set a new local node
* when a different one is already set */
if (tmp && tmp == cluster->cl_has_local &&
cluster->cl_local_node != node->nd_num) {
ret = -EBUSY;
goto out;
}
/* bring up the rx thread if we're setting the new local node. */
if (tmp && !cluster->cl_has_local) {
ret = o2net_start_listening(node);
if (ret)
goto out;
}
if (!tmp && cluster->cl_has_local &&
cluster->cl_local_node == node->nd_num) {
o2net_stop_listening(node);
cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
}
node->nd_local = tmp;
if (node->nd_local) {
cluster->cl_has_local = tmp;
cluster->cl_local_node = node->nd_num;
}
ret = count;
out:
o2nm_unlock_subsystem();
return ret;
} | 1 | CVE-2017-18216 | 2,990 | vulnerable |
CWE-119 | static void update_read_synchronize(rdpUpdate* update, wStream* s)
{
WINPR_UNUSED(update);
Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */
/**
* The Synchronize Update is an artifact from the
* T.128 protocol and should be ignored.
*/
} | 0 | CVE-2020-11046 | 1,553 | benign |
CWE-119 | static BOOL update_read_synchronize(rdpUpdate* update, wStream* s)
{
WINPR_UNUSED(update);
return Stream_SafeSeek(s, 2); /* pad2Octets (2 bytes) */
/**
* The Synchronize Update is an artifact from the
* T.128 protocol and should be ignored.
*/
} | 1 | CVE-2020-11046 | 1,553 | vulnerable |
CWE-476 | int avpriv_ac3_parse_header(AC3HeaderInfo **phdr, const uint8_t *buf,
size_t size)
{
GetBitContext gb;
AC3HeaderInfo *hdr;
int err;
if (!*phdr)
*phdr = av_mallocz(sizeof(AC3HeaderInfo));
if (!*phdr)
return AVERROR(ENOMEM);
hdr = *phdr;
init_get_bits8(&gb, buf, size);
err = ff_ac3_parse_header(&gb, hdr);
if (err < 0)
return AVERROR_INVALIDDATA;
return get_bits_count(&gb);
} | 0 | CVE-2018-13303 | 1,224 | benign |
CWE-476 | int avpriv_ac3_parse_header(AC3HeaderInfo **phdr, const uint8_t *buf,
size_t size)
{
GetBitContext gb;
AC3HeaderInfo *hdr;
int err;
if (!*phdr)
*phdr = av_mallocz(sizeof(AC3HeaderInfo));
if (!*phdr)
return AVERROR(ENOMEM);
hdr = *phdr;
err = init_get_bits8(&gb, buf, size);
if (err < 0)
return AVERROR_INVALIDDATA;
err = ff_ac3_parse_header(&gb, hdr);
if (err < 0)
return AVERROR_INVALIDDATA;
return get_bits_count(&gb);
} | 1 | CVE-2018-13303 | 1,224 | vulnerable |
CWE-119 | ppp_hdlc(netdissect_options *ndo,
const u_char *p, int length)
{
u_char *b, *s, *t, c;
int i, proto;
const void *se;
if (length <= 0)
return;
b = (uint8_t *)malloc(length);
if (b == NULL)
return;
/*
* Unescape all the data into a temporary, private, buffer.
* Do this so that we dont overwrite the original packet
* contents.
*/
for (s = (u_char *)p, t = b, i = length; i > 0; i--) {
c = *s++;
if (c == 0x7d) {
if (i > 1) {
i--;
c = *s++ ^ 0x20;
} else
continue;
}
*t++ = c;
}
se = ndo->ndo_snapend;
ndo->ndo_snapend = t;
length = t - b;
/* now lets guess about the payload codepoint format */
if (length < 1)
goto trunc;
proto = *b; /* start with a one-octet codepoint guess */
switch (proto) {
case PPP_IP:
ip_print(ndo, b + 1, length - 1);
goto cleanup;
case PPP_IPV6:
ip6_print(ndo, b + 1, length - 1);
goto cleanup;
default: /* no luck - try next guess */
break;
}
if (length < 2)
goto trunc;
proto = EXTRACT_16BITS(b); /* next guess - load two octets */
switch (proto) {
case (PPP_ADDRESS << 8 | PPP_CONTROL): /* looks like a PPP frame */
if (length < 4)
goto trunc;
proto = EXTRACT_16BITS(b+2); /* load the PPP proto-id */
handle_ppp(ndo, proto, b + 4, length - 4);
break;
default: /* last guess - proto must be a PPP proto-id */
handle_ppp(ndo, proto, b + 2, length - 2);
break;
}
cleanup:
ndo->ndo_snapend = se;
free(b);
return;
trunc:
ndo->ndo_snapend = se;
free(b);
ND_PRINT((ndo, "[|ppp]"));
} | 0 | CVE-2014-9140 | 342 | benign |
CWE-119 | ppp_hdlc(netdissect_options *ndo,
const u_char *p, int length)
{
u_char *b, *t, c;
const u_char *s;
int i, proto;
const void *se;
if (length <= 0)
return;
b = (u_char *)malloc(length);
if (b == NULL)
return;
/*
* Unescape all the data into a temporary, private, buffer.
* Do this so that we dont overwrite the original packet
* contents.
*/
for (s = p, t = b, i = length; i > 0 && ND_TTEST(*s); i--) {
c = *s++;
if (c == 0x7d) {
if (i <= 1 || !ND_TTEST(*s))
break;
i--;
c = *s++ ^ 0x20;
}
*t++ = c;
}
se = ndo->ndo_snapend;
ndo->ndo_snapend = t;
length = t - b;
/* now lets guess about the payload codepoint format */
if (length < 1)
goto trunc;
proto = *b; /* start with a one-octet codepoint guess */
switch (proto) {
case PPP_IP:
ip_print(ndo, b + 1, length - 1);
goto cleanup;
case PPP_IPV6:
ip6_print(ndo, b + 1, length - 1);
goto cleanup;
default: /* no luck - try next guess */
break;
}
if (length < 2)
goto trunc;
proto = EXTRACT_16BITS(b); /* next guess - load two octets */
switch (proto) {
case (PPP_ADDRESS << 8 | PPP_CONTROL): /* looks like a PPP frame */
if (length < 4)
goto trunc;
proto = EXTRACT_16BITS(b+2); /* load the PPP proto-id */
handle_ppp(ndo, proto, b + 4, length - 4);
break;
default: /* last guess - proto must be a PPP proto-id */
handle_ppp(ndo, proto, b + 2, length - 2);
break;
}
cleanup:
ndo->ndo_snapend = se;
free(b);
return;
trunc:
ndo->ndo_snapend = se;
free(b);
ND_PRINT((ndo, "[|ppp]"));
} | 1 | CVE-2014-9140 | 342 | vulnerable |
CWE-119 | kvp_respond_to_host(char *key, char *value, int error)
{
struct hv_kvp_msg *kvp_msg;
struct hv_kvp_msg_enumerate *kvp_data;
char *key_name;
struct icmsg_hdr *icmsghdrp;
int keylen, valuelen;
u32 buf_len;
struct vmbus_channel *channel;
u64 req_id;
/*
* If a transaction is not active; log and return.
*/
if (!kvp_transaction.active) {
/*
* This is a spurious call!
*/
pr_warn("KVP: Transaction not active\n");
return;
}
/*
* Copy the global state for completing the transaction. Note that
* only one transaction can be active at a time.
*/
buf_len = kvp_transaction.recv_len;
channel = kvp_transaction.recv_channel;
req_id = kvp_transaction.recv_req_id;
kvp_transaction.active = false;
if (channel->onchannel_callback == NULL)
/*
* We have raced with util driver being unloaded;
* silently return.
*/
return;
icmsghdrp = (struct icmsg_hdr *)
&recv_buffer[sizeof(struct vmbuspipe_hdr)];
kvp_msg = (struct hv_kvp_msg *)
&recv_buffer[sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
kvp_data = &kvp_msg->kvp_data;
key_name = key;
/*
* If the error parameter is set, terminate the host's enumeration.
*/
if (error) {
/*
* We don't support this index or the we have timedout;
* terminate the host-side iteration by returning an error.
*/
icmsghdrp->status = HV_E_FAIL;
goto response_done;
}
/*
* The windows host expects the key/value pair to be encoded
* in utf16.
*/
keylen = utf8s_to_utf16s(key_name, strlen(key_name),
(wchar_t *)kvp_data->data.key);
kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
valuelen = utf8s_to_utf16s(value, strlen(value),
(wchar_t *)kvp_data->data.value);
kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
kvp_data->data.value_type = REG_SZ; /* all our values are strings */
icmsghdrp->status = HV_S_OK;
response_done:
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
} | 0 | CVE-2013-1773 | 790 | benign |
CWE-119 | kvp_respond_to_host(char *key, char *value, int error)
{
struct hv_kvp_msg *kvp_msg;
struct hv_kvp_msg_enumerate *kvp_data;
char *key_name;
struct icmsg_hdr *icmsghdrp;
int keylen, valuelen;
u32 buf_len;
struct vmbus_channel *channel;
u64 req_id;
/*
* If a transaction is not active; log and return.
*/
if (!kvp_transaction.active) {
/*
* This is a spurious call!
*/
pr_warn("KVP: Transaction not active\n");
return;
}
/*
* Copy the global state for completing the transaction. Note that
* only one transaction can be active at a time.
*/
buf_len = kvp_transaction.recv_len;
channel = kvp_transaction.recv_channel;
req_id = kvp_transaction.recv_req_id;
kvp_transaction.active = false;
if (channel->onchannel_callback == NULL)
/*
* We have raced with util driver being unloaded;
* silently return.
*/
return;
icmsghdrp = (struct icmsg_hdr *)
&recv_buffer[sizeof(struct vmbuspipe_hdr)];
kvp_msg = (struct hv_kvp_msg *)
&recv_buffer[sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
kvp_data = &kvp_msg->kvp_data;
key_name = key;
/*
* If the error parameter is set, terminate the host's enumeration.
*/
if (error) {
/*
* We don't support this index or the we have timedout;
* terminate the host-side iteration by returning an error.
*/
icmsghdrp->status = HV_E_FAIL;
goto response_done;
}
/*
* The windows host expects the key/value pair to be encoded
* in utf16.
*/
keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
(wchar_t *) kvp_data->data.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
(wchar_t *) kvp_data->data.value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
kvp_data->data.value_type = REG_SZ; /* all our values are strings */
icmsghdrp->status = HV_S_OK;
response_done:
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
} | 1 | CVE-2013-1773 | 790 | vulnerable |
CWE-119 | uint32_t mt_random (mtrand *mt) {
uint32_t y;
unsigned long mag01[2];
mag01[0] = 0;
mag01[1] = MATRIX_A;
if (mt->mt_index_ >= MT_LEN) {
int kk;
for (kk = 0; kk < MT_LEN - MT_IA; kk++) {
y = (mt->mt_buffer_[kk] & UPPER_MASK) |
(mt->mt_buffer_[kk + 1] & LOWER_MASK);
mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^
(y >> 1) ^ mag01[y & 0x1UL];
}
for (;kk < MT_LEN - 1; kk++) {
y = (mt->mt_buffer_[kk] & UPPER_MASK) |
(mt->mt_buffer_[kk + 1] & LOWER_MASK);
mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^
(y >> 1) ^ mag01[y & 0x1UL];
}
y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |
(mt->mt_buffer_[0] & LOWER_MASK);
mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^
(y >> 1) ^ mag01[y & 0x1UL];
mt->mt_index_ = 0;
}
y = mt->mt_buffer_[mt->mt_index_++];
y ^= (y >> 11);
y ^= (y << 7) & 0x9d2c5680UL;
y ^= (y << 15) & 0xefc60000UL;
y ^= (y >> 18);
return y;
} | 0 | CVE-2014-9765 | 1,740 | benign |
CWE-119 | uint32_t mt_random (mtrand *mt) {
uint32_t y;
unsigned long mag01[2];
mag01[0] = 0;
mag01[1] = MATRIX_A;
if (mt->mt_index_ >= MT_LEN) {
int kk;
for (kk = 0; kk < MT_LEN - MT_IA; kk++) {
y = (mt->mt_buffer_[kk] & UPPER_MASK) |
(mt->mt_buffer_[kk + 1] & LOWER_MASK);
mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^
(y >> 1) ^ mag01[y & 0x1UL];
}
for (;kk < MT_LEN - 1; kk++) {
y = (mt->mt_buffer_[kk] & UPPER_MASK) |
(mt->mt_buffer_[kk + 1] & LOWER_MASK);
mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^
(y >> 1) ^ mag01[y & 0x1UL];
}
y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |
(mt->mt_buffer_[0] & LOWER_MASK);
mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^
(y >> 1) ^ mag01[y & 0x1UL];
mt->mt_index_ = 0;
}
y = mt->mt_buffer_[mt->mt_index_++];
y ^= (y >> 11);
y ^= (y << 7) & 0x9d2c5680UL;
y ^= (y << 15) & 0xefc60000UL;
y ^= (y >> 18);
return y;
} | 1 | CVE-2014-9765 | 1,740 | vulnerable |
CWE-119 | int LibRaw::dcraw_process(void)
{
int quality,i;
int iterations=-1, dcb_enhance=1, noiserd=0;
int eeci_refine_fl=0, es_med_passes_fl=0;
float cared=0,cablue=0;
float linenoise=0;
float lclean=0,cclean=0;
float thresh=0;
float preser=0;
float expos=1.0;
CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW);
// CHECK_ORDER_HIGH(LIBRAW_PROGRESS_PRE_INTERPOLATE);
try {
int no_crop = 1;
if (~O.cropbox[2] && ~O.cropbox[3])
no_crop=0;
libraw_decoder_info_t di;
get_decoder_info(&di);
int subtract_inline = !O.bad_pixels && !O.dark_frame && !O.wf_debanding && !(di.decoder_flags & LIBRAW_DECODER_LEGACY) && !IO.zero_is_bad;
raw2image_ex(subtract_inline); // allocate imgdata.image and copy data!
int save_4color = O.four_color_rgb;
if (IO.zero_is_bad)
{
remove_zeroes();
SET_PROC_FLAG(LIBRAW_PROGRESS_REMOVE_ZEROES);
}
if(O.half_size)
O.four_color_rgb = 1;
if(O.bad_pixels && no_crop)
{
bad_pixels(O.bad_pixels);
SET_PROC_FLAG(LIBRAW_PROGRESS_BAD_PIXELS);
}
if (O.dark_frame && no_crop)
{
subtract (O.dark_frame);
SET_PROC_FLAG(LIBRAW_PROGRESS_DARK_FRAME);
}
if (O.wf_debanding)
{
wf_remove_banding();
}
quality = 2 + !IO.fuji_width;
if (O.user_qual >= 0) quality = O.user_qual;
if(!subtract_inline || !C.data_maximum)
{
adjust_bl();
subtract_black();
}
adjust_maximum();
if (O.user_sat > 0) C.maximum = O.user_sat;
if (P1.is_foveon)
{
if(load_raw == &LibRaw::foveon_dp_load_raw)
{
for (int i=0; i < S.height*S.width*4; i++)
if ((short) imgdata.image[0][i] < 0) imgdata.image[0][i] = 0;
}
else
foveon_interpolate();
SET_PROC_FLAG(LIBRAW_PROGRESS_FOVEON_INTERPOLATE);
}
if (O.green_matching && !O.half_size)
{
green_matching();
}
if (!P1.is_foveon)
{
scale_colors();
SET_PROC_FLAG(LIBRAW_PROGRESS_SCALE_COLORS);
}
pre_interpolate();
SET_PROC_FLAG(LIBRAW_PROGRESS_PRE_INTERPOLATE);
if (O.dcb_iterations >= 0) iterations = O.dcb_iterations;
if (O.dcb_enhance_fl >=0 ) dcb_enhance = O.dcb_enhance_fl;
if (O.fbdd_noiserd >=0 ) noiserd = O.fbdd_noiserd;
if (O.eeci_refine >=0 ) eeci_refine_fl = O.eeci_refine;
if (O.es_med_passes >0 ) es_med_passes_fl = O.es_med_passes;
// LIBRAW_DEMOSAIC_PACK_GPL3
if (!O.half_size && O.cfa_green >0) {thresh=O.green_thresh ;green_equilibrate(thresh);}
if (O.exp_correc >0) {expos=O.exp_shift ; preser=O.exp_preser; exp_bef(expos,preser);}
if (O.ca_correc >0 ) {cablue=O.cablue; cared=O.cared; CA_correct_RT(cablue, cared);}
if (O.cfaline >0 ) {linenoise=O.linenoise; cfa_linedn(linenoise);}
if (O.cfa_clean >0 ) {lclean=O.lclean; cclean=O.cclean; cfa_impulse_gauss(lclean,cclean);}
if (P1.filters)
{
if (noiserd>0 && P1.colors==3 && P1.filters) fbdd(noiserd);
if (quality == 0)
lin_interpolate();
else if (quality == 1 || P1.colors > 3 || P1.filters < 1000)
vng_interpolate();
else if (quality == 2)
ppg_interpolate();
else if (quality == 3)
ahd_interpolate(); // really don't need it here due to fallback op
else if (quality == 4)
dcb(iterations, dcb_enhance);
// LIBRAW_DEMOSAIC_PACK_GPL2
else if (quality == 5)
ahd_interpolate_mod();
else if (quality == 6)
afd_interpolate_pl(2,1);
else if (quality == 7)
vcd_interpolate(0);
else if (quality == 8)
vcd_interpolate(12);
else if (quality == 9)
lmmse_interpolate(1);
// LIBRAW_DEMOSAIC_PACK_GPL3
else if (quality == 10)
amaze_demosaic_RT();
// LGPL2
else if (quality == 11)
dht_interpolate();
else if (quality == 12)
aahd_interpolate();
// fallback to AHD
else
ahd_interpolate();
SET_PROC_FLAG(LIBRAW_PROGRESS_INTERPOLATE);
}
if (IO.mix_green)
{
for (P1.colors=3, i=0; i < S.height * S.width; i++)
imgdata.image[i][1] = (imgdata.image[i][1] + imgdata.image[i][3]) >> 1;
SET_PROC_FLAG(LIBRAW_PROGRESS_MIX_GREEN);
}
if(!P1.is_foveon)
{
if (P1.colors == 3)
{
if (quality == 8)
{
if (eeci_refine_fl == 1) refinement();
if (O.med_passes > 0) median_filter_new();
if (es_med_passes_fl > 0) es_median_filter();
}
else {
median_filter();
}
SET_PROC_FLAG(LIBRAW_PROGRESS_MEDIAN_FILTER);
}
}
if (O.highlight == 2)
{
blend_highlights();
SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS);
}
if (O.highlight > 2)
{
recover_highlights();
SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS);
}
if (O.use_fuji_rotate)
{
fuji_rotate();
SET_PROC_FLAG(LIBRAW_PROGRESS_FUJI_ROTATE);
}
if(!libraw_internal_data.output_data.histogram)
{
libraw_internal_data.output_data.histogram = (int (*)[LIBRAW_HISTOGRAM_SIZE]) malloc(sizeof(*libraw_internal_data.output_data.histogram)*4);
merror(libraw_internal_data.output_data.histogram,"LibRaw::dcraw_process()");
}
#ifndef NO_LCMS
if(O.camera_profile)
{
apply_profile(O.camera_profile,O.output_profile);
SET_PROC_FLAG(LIBRAW_PROGRESS_APPLY_PROFILE);
}
#endif
convert_to_rgb();
SET_PROC_FLAG(LIBRAW_PROGRESS_CONVERT_RGB);
if (O.use_fuji_rotate)
{
stretch();
SET_PROC_FLAG(LIBRAW_PROGRESS_STRETCH);
}
O.four_color_rgb = save_4color; // also, restore
return 0;
}
catch ( LibRaw_exceptions err) {
EXCEPTION_HANDLER(err);
}
} | 0 | CVE-2013-2127 | 2,493 | benign |
CWE-119 | int LibRaw::dcraw_process(void)
{
int quality,i;
int iterations=-1, dcb_enhance=1, noiserd=0;
int eeci_refine_fl=0, es_med_passes_fl=0;
float cared=0,cablue=0;
float linenoise=0;
float lclean=0,cclean=0;
float thresh=0;
float preser=0;
float expos=1.0;
CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW);
// CHECK_ORDER_HIGH(LIBRAW_PROGRESS_PRE_INTERPOLATE);
try {
int no_crop = 1;
if (~O.cropbox[2] && ~O.cropbox[3])
no_crop=0;
libraw_decoder_info_t di;
get_decoder_info(&di);
int subtract_inline = !O.bad_pixels && !O.dark_frame && !O.wf_debanding && !(di.decoder_flags & LIBRAW_DECODER_LEGACY) && !IO.zero_is_bad;
raw2image_ex(subtract_inline); // allocate imgdata.image and copy data!
int save_4color = O.four_color_rgb;
if (IO.zero_is_bad)
{
remove_zeroes();
SET_PROC_FLAG(LIBRAW_PROGRESS_REMOVE_ZEROES);
}
if(O.half_size)
O.four_color_rgb = 1;
if(O.bad_pixels && no_crop)
{
bad_pixels(O.bad_pixels);
SET_PROC_FLAG(LIBRAW_PROGRESS_BAD_PIXELS);
}
if (O.dark_frame && no_crop)
{
subtract (O.dark_frame);
SET_PROC_FLAG(LIBRAW_PROGRESS_DARK_FRAME);
}
if (O.wf_debanding)
{
wf_remove_banding();
}
quality = 2 + !IO.fuji_width;
if (O.user_qual >= 0) quality = O.user_qual;
if(!subtract_inline || !C.data_maximum)
{
adjust_bl();
subtract_black();
}
adjust_maximum();
if (O.user_sat > 0) C.maximum = O.user_sat;
if (P1.is_foveon)
{
if(load_raw == &LibRaw::foveon_dp_load_raw)
{
for (int i=0; i < S.height*S.width*4; i++)
if ((short) imgdata.image[0][i] < 0) imgdata.image[0][i] = 0;
}
else
foveon_interpolate();
SET_PROC_FLAG(LIBRAW_PROGRESS_FOVEON_INTERPOLATE);
}
if (O.green_matching && !O.half_size)
{
green_matching();
}
if (!P1.is_foveon)
{
scale_colors();
SET_PROC_FLAG(LIBRAW_PROGRESS_SCALE_COLORS);
}
pre_interpolate();
SET_PROC_FLAG(LIBRAW_PROGRESS_PRE_INTERPOLATE);
if (O.dcb_iterations >= 0) iterations = O.dcb_iterations;
if (O.dcb_enhance_fl >=0 ) dcb_enhance = O.dcb_enhance_fl;
if (O.fbdd_noiserd >=0 ) noiserd = O.fbdd_noiserd;
if (O.eeci_refine >=0 ) eeci_refine_fl = O.eeci_refine;
if (O.es_med_passes >0 ) es_med_passes_fl = O.es_med_passes;
// LIBRAW_DEMOSAIC_PACK_GPL3
if (!O.half_size && O.cfa_green >0) {thresh=O.green_thresh ;green_equilibrate(thresh);}
if (O.exp_correc >0) {expos=O.exp_shift ; preser=O.exp_preser; exp_bef(expos,preser);}
if (O.ca_correc >0 ) {cablue=O.cablue; cared=O.cared; CA_correct_RT(cablue, cared);}
if (O.cfaline >0 ) {linenoise=O.linenoise; cfa_linedn(linenoise);}
if (O.cfa_clean >0 ) {lclean=O.lclean; cclean=O.cclean; cfa_impulse_gauss(lclean,cclean);}
if (P1.filters)
{
if (noiserd>0 && P1.colors==3 && P1.filters) fbdd(noiserd);
if (quality == 0)
lin_interpolate();
else if (quality == 1 || P1.colors > 3 || P1.filters < 1000)
vng_interpolate();
else if (quality == 2)
ppg_interpolate();
else if (quality == 3)
ahd_interpolate(); // really don't need it here due to fallback op
else if (quality == 4)
dcb(iterations, dcb_enhance);
// LIBRAW_DEMOSAIC_PACK_GPL2
else if (quality == 5)
ahd_interpolate_mod();
else if (quality == 6)
afd_interpolate_pl(2,1);
else if (quality == 7)
vcd_interpolate(0);
else if (quality == 8)
vcd_interpolate(12);
else if (quality == 9)
lmmse_interpolate(1);
// LIBRAW_DEMOSAIC_PACK_GPL3
else if (quality == 10)
amaze_demosaic_RT();
// LGPL2
else if (quality == 11)
dht_interpolate();
else if (quality == 12)
aahd_interpolate();
// fallback to AHD
else
ahd_interpolate();
SET_PROC_FLAG(LIBRAW_PROGRESS_INTERPOLATE);
}
if (IO.mix_green)
{
for (P1.colors=3, i=0; i < S.height * S.width; i++)
imgdata.image[i][1] = (imgdata.image[i][1] + imgdata.image[i][3]) >> 1;
SET_PROC_FLAG(LIBRAW_PROGRESS_MIX_GREEN);
}
if(!P1.is_foveon)
{
if (P1.colors == 3)
{
if (quality == 8)
{
if (eeci_refine_fl == 1) refinement();
if (O.med_passes > 0) median_filter_new();
if (es_med_passes_fl > 0) es_median_filter();
}
else {
median_filter();
}
SET_PROC_FLAG(LIBRAW_PROGRESS_MEDIAN_FILTER);
}
}
if (O.highlight == 2)
{
blend_highlights();
SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS);
}
if (O.highlight > 2)
{
recover_highlights();
SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS);
}
if (O.use_fuji_rotate)
{
fuji_rotate();
SET_PROC_FLAG(LIBRAW_PROGRESS_FUJI_ROTATE);
}
if(!libraw_internal_data.output_data.histogram)
{
libraw_internal_data.output_data.histogram = (int (*)[LIBRAW_HISTOGRAM_SIZE]) malloc(sizeof(*libraw_internal_data.output_data.histogram)*4);
merror(libraw_internal_data.output_data.histogram,"LibRaw::dcraw_process()");
}
#ifndef NO_LCMS
if(O.camera_profile)
{
apply_profile(O.camera_profile,O.output_profile);
SET_PROC_FLAG(LIBRAW_PROGRESS_APPLY_PROFILE);
}
#endif
convert_to_rgb();
SET_PROC_FLAG(LIBRAW_PROGRESS_CONVERT_RGB);
if (O.use_fuji_rotate)
{
stretch();
SET_PROC_FLAG(LIBRAW_PROGRESS_STRETCH);
}
O.four_color_rgb = save_4color; // also, restore
return 0;
}
catch ( LibRaw_exceptions err) {
EXCEPTION_HANDLER(err);
}
} | 1 | CVE-2013-2127 | 2,493 | vulnerable |
CWE-787 | TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* data,
const TfLiteTensor* segment_ids,
TfLiteTensor* output) {
int max_index = -1;
const int segment_id_size = segment_ids->dims->data[0];
if (segment_id_size > 0) {
max_index = segment_ids->data.i32[segment_id_size - 1];
}
const int data_rank = NumDimensions(data);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data));
output_shape->data[0] = max_index + 1;
for (int i = 1; i < data_rank; ++i) {
output_shape->data[i] = data->dims->data[i];
}
return context->ResizeTensor(context, output, output_shape);
} | 0 | CVE-2020-15212 | 1,505 | benign |
CWE-787 | TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* data,
const TfLiteTensor* segment_ids,
TfLiteTensor* output) {
// Segment ids should be of same cardinality as first input dimension and they
// should be increasing by at most 1, from 0 (e.g., [0, 0, 1, 2, 3] is valid)
const int segment_id_size = segment_ids->dims->data[0];
TF_LITE_ENSURE_EQ(context, segment_id_size, data->dims->data[0]);
int previous_segment_id = -1;
for (int i = 0; i < segment_id_size; i++) {
const int current_segment_id = GetTensorData<int32_t>(segment_ids)[i];
if (i == 0) {
TF_LITE_ENSURE_EQ(context, current_segment_id, 0);
} else {
int delta = current_segment_id - previous_segment_id;
TF_LITE_ENSURE(context, delta == 0 || delta == 1);
}
previous_segment_id = current_segment_id;
}
const int max_index = previous_segment_id;
const int data_rank = NumDimensions(data);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data));
output_shape->data[0] = max_index + 1;
for (int i = 1; i < data_rank; ++i) {
output_shape->data[i] = data->dims->data[i];
}
return context->ResizeTensor(context, output, output_shape);
} | 1 | CVE-2020-15212 | 1,505 | vulnerable |
CWE-125 | static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
struct dvb_diseqc_master_cmd *d)
{
struct cx24116_state *state = fe->demodulator_priv;
int i, ret;
/* Dump DiSEqC message */
if (debug) {
printk(KERN_INFO "cx24116: %s(", __func__);
for (i = 0 ; i < d->msg_len ;) {
printk(KERN_INFO "0x%02x", d->msg[i]);
if (++i < d->msg_len)
printk(KERN_INFO ", ");
}
printk(") toneburst=%d\n", toneburst);
}
/* Validate length */
if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
return -EINVAL;
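/* Note the ordering: the debug dump above already indexed d->msg[i]
 * for i < msg_len before this bound was applied, and the bound here is
 * CX24116_ARGLEN - CX24116_DISEQC_MSGOFS rather than sizeof(d->msg).
 * The paired version that follows checks msg_len against sizeof(d->msg)
 * before the dump (cf. CVE-2015-9289, CWE-125). */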
/* DiSEqC message */
for (i = 0; i < d->msg_len; i++)
state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
/* DiSEqC message length */
state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len;
/* Command length */
state->dsec_cmd.len = CX24116_DISEQC_MSGOFS +
state->dsec_cmd.args[CX24116_DISEQC_MSGLEN];
/* DiSEqC toneburst */
if (toneburst == CX24116_DISEQC_MESGCACHE)
/* Message is cached */
return 0;
else if (toneburst == CX24116_DISEQC_TONEOFF)
/* Message is sent without burst */
state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0;
else if (toneburst == CX24116_DISEQC_TONECACHE) {
/*
* Message is sent with derived else cached burst
*
* WRITE PORT GROUP COMMAND 38
*
* 0/A/A: E0 10 38 F0..F3
* 1/B/B: E0 10 38 F4..F7
* 2/C/A: E0 10 38 F8..FB
* 3/D/B: E0 10 38 FC..FF
*
* databyte[3]= 8421:8421
* ABCD:WXYZ
* CLR :SET
*
* WX= PORT SELECT 0..3 (X=TONEBURST)
* Y = VOLTAGE (0=13V, 1=18V)
* Z = BAND (0=LOW, 1=HIGH(22K))
*/
if (d->msg_len >= 4 && d->msg[2] == 0x38)
state->dsec_cmd.args[CX24116_DISEQC_BURST] =
((d->msg[3] & 4) >> 2);
if (debug)
dprintk("%s burst=%d\n", __func__,
state->dsec_cmd.args[CX24116_DISEQC_BURST]);
}
/* Wait for LNB ready */
ret = cx24116_wait_for_lnb(fe);
if (ret != 0)
return ret;
/* Wait for voltage/min repeat delay */
msleep(100);
/* Command */
ret = cx24116_cmd_execute(fe, &state->dsec_cmd);
if (ret != 0)
return ret;
/*
* Wait for send
*
* Eutelsat spec:
* >15ms delay + (XXX determine if FW does this, see set_tone)
* 13.5ms per byte +
* >15ms delay +
* 12.5ms burst +
* >15ms delay (XXX determine if FW does this, see set_tone)
*/
msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) +
((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60));
return 0;
} | 0 | CVE-2015-9289 | 1,222 | benign |
CWE-125 | static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
struct dvb_diseqc_master_cmd *d)
{
struct cx24116_state *state = fe->demodulator_priv;
int i, ret;
/* Validate length */
if (d->msg_len > sizeof(d->msg))
return -EINVAL;
/* Dump DiSEqC message */
if (debug) {
printk(KERN_INFO "cx24116: %s(", __func__);
for (i = 0 ; i < d->msg_len ;) {
printk(KERN_INFO "0x%02x", d->msg[i]);
if (++i < d->msg_len)
printk(KERN_INFO ", ");
}
printk(") toneburst=%d\n", toneburst);
}
/* DiSEqC message */
for (i = 0; i < d->msg_len; i++)
state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
/* DiSEqC message length */
state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len;
/* Command length */
state->dsec_cmd.len = CX24116_DISEQC_MSGOFS +
state->dsec_cmd.args[CX24116_DISEQC_MSGLEN];
/* DiSEqC toneburst */
if (toneburst == CX24116_DISEQC_MESGCACHE)
/* Message is cached */
return 0;
else if (toneburst == CX24116_DISEQC_TONEOFF)
/* Message is sent without burst */
state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0;
else if (toneburst == CX24116_DISEQC_TONECACHE) {
/*
* Message is sent with derived else cached burst
*
* WRITE PORT GROUP COMMAND 38
*
* 0/A/A: E0 10 38 F0..F3
* 1/B/B: E0 10 38 F4..F7
* 2/C/A: E0 10 38 F8..FB
* 3/D/B: E0 10 38 FC..FF
*
* databyte[3]= 8421:8421
* ABCD:WXYZ
* CLR :SET
*
* WX= PORT SELECT 0..3 (X=TONEBURST)
* Y = VOLTAGE (0=13V, 1=18V)
* Z = BAND (0=LOW, 1=HIGH(22K))
*/
if (d->msg_len >= 4 && d->msg[2] == 0x38)
state->dsec_cmd.args[CX24116_DISEQC_BURST] =
((d->msg[3] & 4) >> 2);
if (debug)
dprintk("%s burst=%d\n", __func__,
state->dsec_cmd.args[CX24116_DISEQC_BURST]);
}
/* Wait for LNB ready */
ret = cx24116_wait_for_lnb(fe);
if (ret != 0)
return ret;
/* Wait for voltage/min repeat delay */
msleep(100);
/* Command */
ret = cx24116_cmd_execute(fe, &state->dsec_cmd);
if (ret != 0)
return ret;
/*
* Wait for send
*
* Eutelsat spec:
* >15ms delay + (XXX determine if FW does this, see set_tone)
* 13.5ms per byte +
* >15ms delay +
* 12.5ms burst +
* >15ms delay (XXX determine if FW does this, see set_tone)
*/
msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) +
((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60));
return 0;
} | 1 | CVE-2015-9289 | 1,222 | vulnerable |
CWE-476 | Http::FilterDataStatus Context::onRequestBody(int body_buffer_length, bool end_of_stream) {
if (!wasm_->onRequestBody_) {
return Http::FilterDataStatus::Continue;
}
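/* This guard checks only the callback pointer. The paired version that
 * follows also requires in_vm_context_created_, so the body callback is
 * never dispatched before the in-VM context exists (the CWE-476 class
 * this CVE-2020-10739 row is labeled with). */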
switch (wasm_
->onRequestBody_(this, id_, static_cast<uint32_t>(body_buffer_length),
static_cast<uint32_t>(end_of_stream))
.u64_) {
case 0:
return Http::FilterDataStatus::Continue;
case 1:
return Http::FilterDataStatus::StopIterationAndBuffer;
case 2:
return Http::FilterDataStatus::StopIterationAndWatermark;
default:
return Http::FilterDataStatus::StopIterationNoBuffer;
}
} | 0 | CVE-2020-10739 | 1,637 | benign |
CWE-476 | Http::FilterDataStatus Context::onRequestBody(int body_buffer_length, bool end_of_stream) {
if (!in_vm_context_created_ || !wasm_->onRequestBody_) {
return Http::FilterDataStatus::Continue;
}
switch (wasm_
->onRequestBody_(this, id_, static_cast<uint32_t>(body_buffer_length),
static_cast<uint32_t>(end_of_stream))
.u64_) {
case 0:
return Http::FilterDataStatus::Continue;
case 1:
return Http::FilterDataStatus::StopIterationAndBuffer;
case 2:
return Http::FilterDataStatus::StopIterationAndWatermark;
default:
return Http::FilterDataStatus::StopIterationNoBuffer;
}
} | 1 | CVE-2020-10739 | 1,637 | vulnerable |
CWE-416 | static void async_polkit_query_free(AsyncPolkitQuery *q) {
if (!q)
return;
sd_bus_slot_unref(q->slot);
if (q->registry && q->request)
hashmap_remove(q->registry, q->request);
sd_bus_message_unref(q->request);
sd_bus_message_unref(q->reply);
free(q->action);
strv_free(q->details);
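/* The paired version that follows also disables and unrefs
 * q->defer_event_source at this point; without that, a still-armed
 * event source can fire after the query is freed (the use-after-free
 * addressed for CVE-2020-1712). */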
free(q);
} | 0 | CVE-2020-1712 | 3,043 | benign |
CWE-416 | static void async_polkit_query_free(AsyncPolkitQuery *q) {
if (!q)
return;
sd_bus_slot_unref(q->slot);
if (q->registry && q->request)
hashmap_remove(q->registry, q->request);
sd_bus_message_unref(q->request);
sd_bus_message_unref(q->reply);
free(q->action);
strv_free(q->details);
sd_event_source_disable_unref(q->defer_event_source);
free(q);
} | 1 | CVE-2020-1712 | 3,043 | vulnerable |
CWE-119 | static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t *
p_code_block)
{
OPJ_UINT32 l_data_size;
/* The +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */
l_data_size = 1 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) *
(p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32));
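/* Only one spare byte is reserved here; the paired version that follows
 * reserves two, citing uclouvain/openjpeg issue 982, because the
 * encoder can emit one byte more than this bound allows
 * (CWE-119, CVE-2017-14151). */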
if (l_data_size > p_code_block->data_size) {
if (p_code_block->data) {
/* We refer to data - 1 since below we incremented it */
opj_free(p_code_block->data - 1);
}
p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size + 1);
if (! p_code_block->data) {
p_code_block->data_size = 0U;
return OPJ_FALSE;
}
p_code_block->data_size = l_data_size;
/* We reserve the initial byte as a fake byte to a non-FF value */
/* and increment the data pointer, so that opj_mqc_init_enc() */
/* can do bp = data - 1, and opj_mqc_byteout() can safely dereference */
/* it. */
p_code_block->data[0] = 0;
p_code_block->data += 1; /*why +1 ?*/
}
return OPJ_TRUE;
} | 0 | CVE-2017-14151 | 183 | benign |
CWE-119 | static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t *
p_code_block)
{
OPJ_UINT32 l_data_size;
/* +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */
/* and actually +2 required for https://github.com/uclouvain/openjpeg/issues/982 */
/* TODO: is there a theoretical upper-bound for the compressed code */
/* block size ? */
l_data_size = 2 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) *
(p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32));
if (l_data_size > p_code_block->data_size) {
if (p_code_block->data) {
/* We refer to data - 1 since below we incremented it */
opj_free(p_code_block->data - 1);
}
p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size + 1);
if (! p_code_block->data) {
p_code_block->data_size = 0U;
return OPJ_FALSE;
}
p_code_block->data_size = l_data_size;
/* We reserve the initial byte as a fake byte to a non-FF value */
/* and increment the data pointer, so that opj_mqc_init_enc() */
/* can do bp = data - 1, and opj_mqc_byteout() can safely dereference */
/* it. */
p_code_block->data[0] = 0;
p_code_block->data += 1; /*why +1 ?*/
}
return OPJ_TRUE;
} | 1 | CVE-2017-14151 | 183 | vulnerable |
CWE-476 | int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *ci = inode->i_crypt_info;
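/* This wrapper reuses a cached ci_keyring_key and only re-derives keys
 * once the key is flagged invalidated/revoked/dead. The paired version
 * that follows drops the keyring-key caching entirely and rebuilds the
 * crypt info from the on-disk context, closing the revoked-key
 * use-after-free window tracked as CVE-2017-7374. */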
if (!ci ||
(ci->ci_keyring_key &&
(ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED) |
(1 << KEY_FLAG_DEAD)))))
return fscrypt_get_crypt_info(inode);
return 0;
} | 0 | CVE-2017-7374 | 3,073 | benign |
CWE-476 | int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
struct fscrypt_context ctx;
struct crypto_skcipher *ctfm;
const char *cipher_str;
int keysize;
u8 *raw_key = NULL;
int res;
if (inode->i_crypt_info)
return 0;
res = fscrypt_initialize(inode->i_sb->s_cop->flags);
if (res)
return res;
if (!inode->i_sb->s_cop->get_context)
return -EOPNOTSUPP;
res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
if (res < 0) {
if (!fscrypt_dummy_context_enabled(inode) ||
inode->i_sb->s_cop->is_encrypted(inode))
return res;
/* Fake up a context for an unencrypted directory */
memset(&ctx, 0, sizeof(ctx));
ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
} else if (res != sizeof(ctx)) {
return -EINVAL;
}
if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
return -EINVAL;
if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
return -EINVAL;
crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
if (!crypt_info)
return -ENOMEM;
crypt_info->ci_flags = ctx.flags;
crypt_info->ci_data_mode = ctx.contents_encryption_mode;
crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
crypt_info->ci_ctfm = NULL;
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));
res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
if (res)
goto out;
/*
* This cannot be a stack buffer because it is passed to the scatterlist
* crypto API as part of key derivation.
*/
res = -ENOMEM;
raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
if (!raw_key)
goto out;
res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX);
if (res && inode->i_sb->s_cop->key_prefix) {
int res2 = validate_user_key(crypt_info, &ctx, raw_key,
inode->i_sb->s_cop->key_prefix);
if (res2) {
if (res2 == -ENOKEY)
res = -ENOKEY;
goto out;
}
} else if (res) {
goto out;
}
ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
if (!ctfm || IS_ERR(ctfm)) {
res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
printk(KERN_DEBUG
"%s: error %d (inode %u) allocating crypto tfm\n",
__func__, res, (unsigned) inode->i_ino);
goto out;
}
crypt_info->ci_ctfm = ctfm;
crypto_skcipher_clear_flags(ctfm, ~0);
crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
if (res)
goto out;
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
out:
if (res == -ENOKEY)
res = 0;
put_crypt_info(crypt_info);
kzfree(raw_key);
return res;
} | 1 | CVE-2017-7374 | 3,073 | vulnerable |
CWE-125 | static const struct usb_cdc_union_desc *
ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
{
const void *buf = intf->altsetting->extra;
size_t buflen = intf->altsetting->extralen;
struct usb_cdc_union_desc *union_desc;
if (!buf) {
dev_err(&intf->dev, "Missing descriptor data\n");
return NULL;
}
if (!buflen) {
dev_err(&intf->dev, "Zero length descriptor\n");
return NULL;
}
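/* The loop below advances by union_desc->bLength without ever checking
 * it against the remaining buflen, so a descriptor advertising a large
 * bLength walks the parser past the extra[] buffer (CWE-125,
 * CVE-2017-16645). The paired version that follows bounds the loop by
 * sizeof(*union_desc) and rejects oversized bLength values. */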
while (buflen > 0) {
union_desc = (struct usb_cdc_union_desc *)buf;
if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "Found union header\n");
return union_desc;
}
buflen -= union_desc->bLength;
buf += union_desc->bLength;
}
dev_err(&intf->dev, "Missing CDC union descriptor\n");
return NULL; | 0 | CVE-2017-16645 | 2,819 | benign |
CWE-125 | static const struct usb_cdc_union_desc *
ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
{
const void *buf = intf->altsetting->extra;
size_t buflen = intf->altsetting->extralen;
struct usb_cdc_union_desc *union_desc;
if (!buf) {
dev_err(&intf->dev, "Missing descriptor data\n");
return NULL;
}
if (!buflen) {
dev_err(&intf->dev, "Zero length descriptor\n");
return NULL;
}
while (buflen >= sizeof(*union_desc)) {
union_desc = (struct usb_cdc_union_desc *)buf;
if (union_desc->bLength > buflen) {
dev_err(&intf->dev, "Too large descriptor\n");
return NULL;
}
if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "Found union header\n");
if (union_desc->bLength >= sizeof(*union_desc))
return union_desc;
dev_err(&intf->dev,
"Union descriptor to short (%d vs %zd\n)",
union_desc->bLength, sizeof(*union_desc));
return NULL;
}
buflen -= union_desc->bLength;
buf += union_desc->bLength;
}
dev_err(&intf->dev, "Missing CDC union descriptor\n");
return NULL; | 1 | CVE-2017-16645 | 2,819 | vulnerable |
CWE-362 | void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag)
{
unsigned long addr;
/* do a global flush by default */
unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
preempt_disable();
if (current->active_mm != mm)
goto out;
if (!current->mm) {
leave_mm(smp_processor_id());
goto out;
}
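/* The paired version that follows inserts smp_mb() on both early-out
 * paths above to synchronize with switch_mm(); without the barriers a
 * racing context switch can observe stale state and skip a needed
 * flush (the CWE-362 race of CVE-2016-2069). */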
if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
base_pages_to_flush = (end - start) >> PAGE_SHIFT;
if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
base_pages_to_flush = TLB_FLUSH_ALL;
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
} else {
/* flush range by one by one 'invlpg' */
for (addr = start; addr < end; addr += PAGE_SIZE) {
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
}
trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
if (base_pages_to_flush == TLB_FLUSH_ALL) {
start = 0UL;
end = TLB_FLUSH_ALL;
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, start, end);
preempt_enable();
} | 0 | CVE-2016-2069 | 2,409 | benign |
CWE-362 | void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag)
{
unsigned long addr;
/* do a global flush by default */
unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
preempt_disable();
if (current->active_mm != mm) {
/* Synchronize with switch_mm. */
smp_mb();
goto out;
}
if (!current->mm) {
leave_mm(smp_processor_id());
/* Synchronize with switch_mm. */
smp_mb();
goto out;
}
if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
base_pages_to_flush = (end - start) >> PAGE_SHIFT;
/*
* Both branches below are implicit full barriers (MOV to CR or
* INVLPG) that synchronize with switch_mm.
*/
if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
base_pages_to_flush = TLB_FLUSH_ALL;
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
} else {
/* flush range by one by one 'invlpg' */
for (addr = start; addr < end; addr += PAGE_SIZE) {
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
}
trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
if (base_pages_to_flush == TLB_FLUSH_ALL) {
start = 0UL;
end = TLB_FLUSH_ALL;
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, start, end);
preempt_enable();
} | 1 | CVE-2016-2069 | 2,409 | vulnerable |
CWE-119 | static int gemsafe_get_cert_len(sc_card_t *card)
{
int r;
u8 ibuf[GEMSAFE_MAX_OBJLEN];
u8 *iptr;
struct sc_path path;
struct sc_file *file;
size_t objlen, certlen;
unsigned int ind, i=0;
sc_format_path(GEMSAFE_PATH, &path);
r = sc_select_file(card, &path, &file);
if (r != SC_SUCCESS || !file)
return SC_ERROR_INTERNAL;
/* Initial read */
r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0);
if (r < 0)
return SC_ERROR_INTERNAL;
/* Actual stored object size is encoded in first 2 bytes
* (allocated EF space is much greater!)
*/
objlen = (((size_t) ibuf[0]) << 8) | ibuf[1];
sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u",
objlen);
if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) {
sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u",
objlen);
return SC_ERROR_INTERNAL;
}
/* It looks like the first thing in the block is a table of
* which keys are allocated. The table is small and is in the
* first 248 bytes. Example for a card with 10 key containers:
* 01 f0 00 03 03 b0 00 03 <= 1st key unallocated
* 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated
* 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated
* 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated
* 01 f0 00 07 03 b0 00 07 <= 5th key unallocated
* ...
* 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated
* For allocated keys, the fourth byte seems to indicate the
* default key and the fifth byte indicates the key_ref of
* the private key.
*/
ind = 2; /* skip length */
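/* The loop below writes gemsafe_prkeys[i] / gemsafe_cert[i] with no
 * upper bound on i, so attacker-controlled card data can run the index
 * past the fixed-size tables (CWE-119, CVE-2018-16391). The paired
 * version that follows caps the loop with i < gemsafe_cert_max. */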
while (ibuf[ind] == 0x01) {
if (ibuf[ind+1] == 0xFE) {
gemsafe_prkeys[i].ref = ibuf[ind+4];
sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d",
i+1, gemsafe_prkeys[i].ref);
ind += 9;
}
else {
gemsafe_prkeys[i].label = NULL;
gemsafe_cert[i].label = NULL;
sc_log(card->ctx, "Key container %d is unallocated", i+1);
ind += 8;
}
i++;
}
/* Delete additional key containers from the data structures if
* this card can't accommodate them.
*/
for (; i < gemsafe_cert_max; i++) {
gemsafe_prkeys[i].label = NULL;
gemsafe_cert[i].label = NULL;
}
/* Read entire file, then dissect in memory.
* Gemalto ClassicClient seems to do it the same way.
*/
iptr = ibuf + GEMSAFE_READ_QUANTUM;
while ((size_t)(iptr - ibuf) < objlen) {
r = sc_read_binary(card, iptr - ibuf, iptr,
MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0);
if (r < 0) {
sc_log(card->ctx, "Could not read cert object");
return SC_ERROR_INTERNAL;
}
iptr += GEMSAFE_READ_QUANTUM;
}
/* Search buffer for certificates, they start with 0x3082. */
i = 0;
while (ind < objlen - 1) {
if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) {
/* Find next allocated key container */
while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL)
i++;
if (i == gemsafe_cert_max) {
sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind);
return SC_SUCCESS;
}
/* DER cert len is encoded this way */
if (ind+3 >= sizeof ibuf)
return SC_ERROR_INVALID_DATA;
certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4;
sc_log(card->ctx,
"Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u",
i+1, ind, certlen);
gemsafe_cert[i].index = ind;
gemsafe_cert[i].count = certlen;
ind += certlen;
i++;
} else
ind++;
}
/* Delete additional key containers from the data structures if
* they're missing on the card.
*/
for (; i < gemsafe_cert_max; i++) {
if (gemsafe_cert[i].label) {
sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1);
gemsafe_prkeys[i].label = NULL;
gemsafe_cert[i].label = NULL;
}
}
return SC_SUCCESS;
} | 0 | CVE-2018-16391 | 2,490 | benign |
CWE-119 | static int gemsafe_get_cert_len(sc_card_t *card)
{
int r;
u8 ibuf[GEMSAFE_MAX_OBJLEN];
u8 *iptr;
struct sc_path path;
struct sc_file *file;
size_t objlen, certlen;
unsigned int ind, i=0;
sc_format_path(GEMSAFE_PATH, &path);
r = sc_select_file(card, &path, &file);
if (r != SC_SUCCESS || !file)
return SC_ERROR_INTERNAL;
/* Initial read */
r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0);
if (r < 0)
return SC_ERROR_INTERNAL;
/* Actual stored object size is encoded in first 2 bytes
* (allocated EF space is much greater!)
*/
objlen = (((size_t) ibuf[0]) << 8) | ibuf[1];
sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u",
objlen);
if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) {
sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u",
objlen);
return SC_ERROR_INTERNAL;
}
/* It looks like the first thing in the block is a table of
* which keys are allocated. The table is small and is in the
* first 248 bytes. Example for a card with 10 key containers:
* 01 f0 00 03 03 b0 00 03 <= 1st key unallocated
* 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated
* 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated
* 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated
* 01 f0 00 07 03 b0 00 07 <= 5th key unallocated
* ...
* 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated
* For allocated keys, the fourth byte seems to indicate the
* default key and the fifth byte indicates the key_ref of
* the private key.
*/
ind = 2; /* skip length */
while (ibuf[ind] == 0x01 && i < gemsafe_cert_max) {
if (ibuf[ind+1] == 0xFE) {
gemsafe_prkeys[i].ref = ibuf[ind+4];
sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d",
i+1, gemsafe_prkeys[i].ref);
ind += 9;
}
else {
gemsafe_prkeys[i].label = NULL;
gemsafe_cert[i].label = NULL;
sc_log(card->ctx, "Key container %d is unallocated", i+1);
ind += 8;
}
i++;
}
/* Delete additional key containers from the data structures if
* this card can't accommodate them.
*/
for (; i < gemsafe_cert_max; i++) {
gemsafe_prkeys[i].label = NULL;
gemsafe_cert[i].label = NULL;
}
/* Read entire file, then dissect in memory.
* Gemalto ClassicClient seems to do it the same way.
*/
iptr = ibuf + GEMSAFE_READ_QUANTUM;
while ((size_t)(iptr - ibuf) < objlen) {
r = sc_read_binary(card, iptr - ibuf, iptr,
MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0);
if (r < 0) {
sc_log(card->ctx, "Could not read cert object");
return SC_ERROR_INTERNAL;
}
iptr += GEMSAFE_READ_QUANTUM;
}
/* Search buffer for certificates, they start with 0x3082. */
i = 0;
while (ind < objlen - 1) {
if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) {
/* Find next allocated key container */
while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL)
i++;
if (i == gemsafe_cert_max) {
sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind);
return SC_SUCCESS;
}
/* DER cert len is encoded this way */
if (ind+3 >= sizeof ibuf)
return SC_ERROR_INVALID_DATA;
certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4;
sc_log(card->ctx,
"Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u",
i+1, ind, certlen);
gemsafe_cert[i].index = ind;
gemsafe_cert[i].count = certlen;
ind += certlen;
i++;
} else
ind++;
}
/* Delete additional key containers from the data structures if
* they're missing on the card.
*/
for (; i < gemsafe_cert_max; i++) {
if (gemsafe_cert[i].label) {
sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1);
gemsafe_prkeys[i].label = NULL;
gemsafe_cert[i].label = NULL;
}
}
return SC_SUCCESS;
} | 1 | CVE-2018-16391 | 2,490 | vulnerable |
CWE-119 | static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
u32 val, ktime_t *abs_time, u32 bitset,
int clockrt, u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2;
struct futex_q q;
int res, ret;
if (!bitset)
return -EINVAL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
/*
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
key2 = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
goto out;
q.pi_state = NULL;
q.bitset = bitset;
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out_key2;
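/* The paired version that follows documents the key refcount flow at
 * this point: futex_wait_setup() takes a reference on q.key (key1) and
 * futex_requeue() later trades it for a key2 reference. That accounting
 * is the subtlety behind the CVE-2014-0205 reference-count bug. */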
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code.
*/
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current,
fshared);
spin_unlock(q.lock_ptr);
}
} else {
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
WARN_ON(!&q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr2, fshared, &q, !ret);
/*
* If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
/*
* If fixup_pi_state_owner() faulted and was unable to handle the
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
if (rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
* it would detect that the user space "val" changed and return
* -EWOULDBLOCK. Save the overhead of the restart and return
* -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
out_put_keys:
put_futex_key(fshared, &q.key);
out_key2:
put_futex_key(fshared, &key2);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
} | 0 | CVE-2014-0205 | 256 | benign |
CWE-119 | static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
u32 val, ktime_t *abs_time, u32 bitset,
int clockrt, u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2;
struct futex_q q;
int res, ret;
if (!bitset)
return -EINVAL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
/*
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
key2 = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
goto out;
q.pi_state = NULL;
q.bitset = bitset;
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
/*
* Prepare to wait on uaddr. On success, increments q.key (key1) ref
* count.
*/
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out_key2;
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code. The
* futex_requeue dropped our key1 reference and incremented our key2
* reference count.
*/
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current,
fshared);
spin_unlock(q.lock_ptr);
}
} else {
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
WARN_ON(!&q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr2, fshared, &q, !ret);
/*
* If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
/*
* If fixup_pi_state_owner() faulted and was unable to handle the
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
if (rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
* it would detect that the user space "val" changed and return
* -EWOULDBLOCK. Save the overhead of the restart and return
* -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
out_put_keys:
put_futex_key(fshared, &q.key);
out_key2:
put_futex_key(fshared, &key2);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
} | 1 | CVE-2014-0205 | 256 | vulnerable |
CWE-125 | void ntlm_print_message_fields(NTLM_MESSAGE_FIELDS* fields, const char* name)
{
WLog_DBG(TAG, "%s (Len: %"PRIu16" MaxLen: %"PRIu16" BufferOffset: %"PRIu32")",
name, fields->Len, fields->MaxLen, fields->BufferOffset);
if (fields->Len > 0)
winpr_HexDump(TAG, WLOG_DEBUG, fields->Buffer, fields->Len);
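/* The hex dump trusts fields->Len as parsed from the wire. The paired
 * version that follows differs here only in internal linkage (static);
 * the wider CVE-2018-8789 fixes added length validation in the NTLM
 * message readers that fill these fields. */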
} | 0 | CVE-2018-8789 | 1,495 | benign |
CWE-125 | static void ntlm_print_message_fields(NTLM_MESSAGE_FIELDS* fields, const char* name)
{
WLog_DBG(TAG, "%s (Len: %"PRIu16" MaxLen: %"PRIu16" BufferOffset: %"PRIu32")",
name, fields->Len, fields->MaxLen, fields->BufferOffset);
if (fields->Len > 0)
winpr_HexDump(TAG, WLOG_DEBUG, fields->Buffer, fields->Len);
} | 1 | CVE-2018-8789 | 1,495 | vulnerable |
CWE-119 | bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
char url_address[256], port[6];
int url_len, port_len = 0;
*sockaddr_url = url;
url_begin = strstr(url, "//");
if (!url_begin)
url_begin = url;
else
url_begin += 2;
/* Look for numeric ipv6 entries */
ipv6_begin = strstr(url_begin, "[");
ipv6_end = strstr(url_begin, "]");
if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
url_end = strstr(ipv6_end, ":");
else
url_end = strstr(url_begin, ":");
if (url_end) {
url_len = url_end - url_begin;
port_len = strlen(url_begin) - url_len - 1;
if (port_len < 1)
return false;
port_start = url_end + 1;
} else
url_len = strlen(url_begin);
if (url_len < 1)
return false;
sprintf(url_address, "%.*s", url_len, url_begin);
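/* sprintf() writes url_len bytes plus a NUL into a 256-byte stack
 * buffer with no cap, so an oversized URL overflows url_address
 * (CWE-119, CVE-2014-4501). The paired version that follows switches to
 * snprintf() with an explicit 254-byte limit. */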
if (port_len) {
char *slash;
snprintf(port, 6, "%.*s", port_len, port_start);
slash = strchr(port, '/');
if (slash)
*slash = '\0';
} else
strcpy(port, "80");
*sockaddr_port = strdup(port);
*sockaddr_url = strdup(url_address);
return true;
} | 0 | CVE-2014-4501 | 511 | benign |
CWE-119 | bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
char url_address[256], port[6];
int url_len, port_len = 0;
*sockaddr_url = url;
url_begin = strstr(url, "//");
if (!url_begin)
url_begin = url;
else
url_begin += 2;
/* Look for numeric ipv6 entries */
ipv6_begin = strstr(url_begin, "[");
ipv6_end = strstr(url_begin, "]");
if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
url_end = strstr(ipv6_end, ":");
else
url_end = strstr(url_begin, ":");
if (url_end) {
url_len = url_end - url_begin;
port_len = strlen(url_begin) - url_len - 1;
if (port_len < 1)
return false;
port_start = url_end + 1;
} else
url_len = strlen(url_begin);
if (url_len < 1)
return false;
snprintf(url_address, 254, "%.*s", url_len, url_begin);
if (port_len) {
char *slash;
snprintf(port, 6, "%.*s", port_len, port_start);
slash = strchr(port, '/');
if (slash)
*slash = '\0';
} else
strcpy(port, "80");
*sockaddr_port = strdup(port);
*sockaddr_url = strdup(url_address);
return true;
} | 1 | CVE-2014-4501 | 511 | vulnerable |
CWE-119 | static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
unsigned long size)
{
gfn_t end_gfn;
pfn_t pfn;
pfn = gfn_to_pfn_memslot(slot, gfn);
end_gfn = gfn + (size >> PAGE_SHIFT);
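/* Here the page count is derived from a byte size; the paired version
 * that follows takes an explicit npages instead. A mismatch between the
 * pages pinned here and the count later unpinned is the CWE-119 issue
 * tracked as CVE-2014-8369. */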
gfn += 1;
if (is_error_noslot_pfn(pfn))
return pfn;
while (gfn < end_gfn)
gfn_to_pfn_memslot(slot, gfn++);
return pfn;
} | 0 | CVE-2014-8369 | 2,885 | benign |
CWE-119 | static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
unsigned long npages)
{
gfn_t end_gfn;
pfn_t pfn;
pfn = gfn_to_pfn_memslot(slot, gfn);
end_gfn = gfn + npages;
gfn += 1;
if (is_error_noslot_pfn(pfn))
return pfn;
while (gfn < end_gfn)
gfn_to_pfn_memslot(slot, gfn++);
return pfn;
} | 1 | CVE-2014-8369 | 2,885 | vulnerable |
CWE-119 | static void scsi_read_data(SCSIRequest *req)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
if (r->sector_count == (uint32_t)-1) {
DPRINTF("Read buf_len=%zd\n", r->iov.iov_len);
r->sector_count = 0;
scsi_req_data(&r->req, r->iov.iov_len);
return;
}
DPRINTF("Read sector_count=%d\n", r->sector_count);
if (r->sector_count == 0) {
/* This also clears the sense buffer for REQUEST SENSE. */
scsi_req_complete(&r->req, GOOD);
return;
}
/* No data transfer may already be in progress */
assert(r->req.aiocb == NULL);
if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
DPRINTF("Data transfer direction invalid\n");
scsi_read_complete(r, -EINVAL);
return;
}
n = r->sector_count;
if (n > SCSI_DMA_BUF_SIZE / 512)
n = SCSI_DMA_BUF_SIZE / 512;
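/* The transfer length is clamped against the DMA bounce buffer by hand
 * here, and r->iov is sized below; the paired version that follows
 * delegates that sizing to scsi_init_iovec() as part of the rework
 * around the CWE-119 issue this row tracks (CVE-2011-3346). */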
if (s->tray_open) {
scsi_read_complete(r, -ENOMEDIUM);
}
r->iov.iov_len = n * 512;
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n,
scsi_read_complete, r);
if (r->req.aiocb == NULL) {
scsi_read_complete(r, -EIO);
}
} | 0 | CVE-2011-3346 | 440 | benign |
CWE-119 | static void scsi_read_data(SCSIRequest *req)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
if (r->sector_count == (uint32_t)-1) {
DPRINTF("Read buf_len=%zd\n", r->iov.iov_len);
r->sector_count = 0;
scsi_req_data(&r->req, r->iov.iov_len);
return;
}
DPRINTF("Read sector_count=%d\n", r->sector_count);
if (r->sector_count == 0) {
/* This also clears the sense buffer for REQUEST SENSE. */
scsi_req_complete(&r->req, GOOD);
return;
}
/* No data transfer may already be in progress */
assert(r->req.aiocb == NULL);
if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
DPRINTF("Data transfer direction invalid\n");
scsi_read_complete(r, -EINVAL);
return;
}
if (s->tray_open) {
scsi_read_complete(r, -ENOMEDIUM);
}
n = scsi_init_iovec(r);
bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n,
scsi_read_complete, r);
if (r->req.aiocb == NULL) {
scsi_read_complete(r, -EIO);
}
} | 1 | CVE-2011-3346 | 440 | vulnerable |
CWE-787 | void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint32_t *dec_len,
char *result_str, char *s_shareG2, char *pub_keyB, uint8_t _t, uint8_t _n,
uint8_t ind) {
LOG_INFO(__FUNCTION__);
INIT_ERROR_STATE
uint32_t enc_len;
int status;
CHECK_STATE(encrypted_skey);
CHECK_STATE(result_str);
CHECK_STATE(s_shareG2);
CHECK_STATE(pub_keyB);
LOG_DEBUG(__FUNCTION__);
SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);
SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN);
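/* Several scratch buffers below (skey, common_key, s_share, cypher) are
 * sized ECDSA_SKEY_LEN while the decrypt/derive helpers are handed data
 * that can be longer; the paired version that follows widens them all
 * to BUF_LEN and the length out-param to 64 bits (CWE-787,
 * CVE-2021-36218). */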
trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len, pub_key_x, pub_key_y);
CHECK_STATUS("trustedGenerateEcdsaKeyAES failed");
status = AES_decrypt(encrypted_skey, enc_len, skey, ECDSA_SKEY_LEN);
skey[ECDSA_SKEY_LEN - 1] = 0;
CHECK_STATUS2("AES_decrypt failed (in trustedGetEncryptedSecretShareAES) with status %d");
*dec_len = enc_len;
SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN);
status = gen_session_key(skey, pub_keyB, common_key);
CHECK_STATUS("gen_session_key failed")
SAFE_CHAR_BUF(s_share, ECDSA_SKEY_LEN);
status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind);
CHECK_STATUS("calc secret share failed")
status = calc_secret_shareG2(s_share, s_shareG2);
CHECK_STATUS("invalid decr secret share");
SAFE_CHAR_BUF(cypher, ECDSA_SKEY_LEN);
status=xor_encrypt(common_key, s_share, cypher);
CHECK_STATUS("xor_encrypt failed")
strncpy(result_str, cypher, strlen(cypher));
strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x));
strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y));
SET_SUCCESS
clean:
;
LOG_INFO(__FUNCTION__ );
LOG_INFO("SGX call completed");
} | 0 | CVE-2021-36218 | 330 | benign |
CWE-787 | void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint64_t *dec_len,
char *result_str, char *s_shareG2, char *pub_keyB, uint8_t _t, uint8_t _n,
uint8_t ind) {
LOG_INFO(__FUNCTION__);
INIT_ERROR_STATE
uint64_t enc_len;
int status;
CHECK_STATE(encrypted_skey);
CHECK_STATE(result_str);
CHECK_STATE(s_shareG2);
CHECK_STATE(pub_keyB);
LOG_DEBUG(__FUNCTION__);
SAFE_CHAR_BUF(skey, BUF_LEN);
SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN);
trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len, pub_key_x, pub_key_y);
CHECK_STATUS("trustedGenerateEcdsaKeyAES failed");
status = AES_decrypt(encrypted_skey, enc_len, skey, BUF_LEN);
skey[ECDSA_SKEY_LEN - 1] = 0;
CHECK_STATUS2("AES_decrypt failed (in trustedGetEncryptedSecretShareAES) with status %d");
*dec_len = enc_len;
SAFE_CHAR_BUF(common_key, BUF_LEN);
status = gen_session_key(skey, pub_keyB, common_key);
CHECK_STATUS("gen_session_key failed")
SAFE_CHAR_BUF(s_share, BUF_LEN);
status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind);
CHECK_STATUS("calc secret share failed")
status = calc_secret_shareG2(s_share, s_shareG2);
CHECK_STATUS("invalid decr secret share");
SAFE_CHAR_BUF(cypher, BUF_LEN);
status=xor_encrypt(common_key, s_share, cypher);
CHECK_STATUS("xor_encrypt failed")
strncpy(result_str, cypher, strlen(cypher));
strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x));
strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y));
SET_SUCCESS
clean:
;
LOG_INFO(__FUNCTION__ );
LOG_INFO("SGX call completed");
} | 1 | CVE-2021-36218 | 330 | vulnerable |
CWE-125 | parse_elements(netdissect_options *ndo,
struct mgmt_body_t *pbody, const u_char *p, int offset,
u_int length)
{
u_int elementlen;
struct ssid_t ssid;
struct challenge_t challenge;
struct rates_t rates;
struct ds_t ds;
struct cf_t cf;
struct tim_t tim;
/*
* We haven't seen any elements yet.
*/
pbody->challenge_present = 0;
pbody->ssid_present = 0;
pbody->rates_present = 0;
pbody->ds_present = 0;
pbody->cf_present = 0;
pbody->tim_present = 0;
while (length != 0) {
/* Make sure we at least have the element ID and length. */
if (!ND_TTEST2(*(p + offset), 2))
return 0;
if (length < 2)
return 0;
elementlen = *(p + offset + 1);
/* Make sure we have the entire element. */
if (!ND_TTEST2(*(p + offset + 2), elementlen))
return 0;
if (length < elementlen + 2)
return 0;
switch (*(p + offset)) {
case E_SSID:
memcpy(&ssid, p + offset, 2);
offset += 2;
length -= 2;
if (ssid.length != 0) {
if (ssid.length > sizeof(ssid.ssid) - 1)
return 0;
if (!ND_TTEST2(*(p + offset), ssid.length))
return 0;
if (length < ssid.length)
return 0;
memcpy(&ssid.ssid, p + offset, ssid.length);
offset += ssid.length;
length -= ssid.length;
}
ssid.ssid[ssid.length] = '\0';
/*
* Present and not truncated.
*
* If we haven't already seen an SSID IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->ssid_present) {
pbody->ssid = ssid;
pbody->ssid_present = 1;
}
break;
case E_CHALLENGE:
memcpy(&challenge, p + offset, 2);
offset += 2;
length -= 2;
if (challenge.length != 0) {
if (challenge.length >
sizeof(challenge.text) - 1)
return 0;
if (!ND_TTEST2(*(p + offset), challenge.length))
return 0;
if (length < challenge.length)
return 0;
memcpy(&challenge.text, p + offset,
challenge.length);
offset += challenge.length;
length -= challenge.length;
}
challenge.text[challenge.length] = '\0';
/*
* Present and not truncated.
*
* If we haven't already seen a challenge IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->challenge_present) {
pbody->challenge = challenge;
pbody->challenge_present = 1;
}
break;
case E_RATES:
memcpy(&rates, p + offset, 2);
offset += 2;
length -= 2;
if (rates.length != 0) {
if (rates.length > sizeof rates.rate)
return 0;
if (!ND_TTEST2(*(p + offset), rates.length))
return 0;
if (length < rates.length)
return 0;
memcpy(&rates.rate, p + offset, rates.length);
offset += rates.length;
length -= rates.length;
}
/*
* Present and not truncated.
*
* If we haven't already seen a rates IE,
* copy this one if it's not zero-length,
* otherwise ignore this one, so we later
* report the first one we saw.
*
* We ignore zero-length rates IEs as some
* devices seem to put a zero-length rates
* IE, followed by an SSID IE, followed by
* a non-zero-length rates IE into frames,
* even though IEEE Std 802.11-2007 doesn't
* seem to indicate that a zero-length rates
* IE is valid.
*/
if (!pbody->rates_present && rates.length != 0) {
pbody->rates = rates;
pbody->rates_present = 1;
}
break;
case E_DS:
memcpy(&ds, p + offset, 2);
offset += 2;
length -= 2;
if (ds.length != 1) {
offset += ds.length;
length -= ds.length;
break;
}
ds.channel = *(p + offset);
offset += 1;
length -= 1;
/*
* Present and not truncated.
*
* If we haven't already seen a DS IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->ds_present) {
pbody->ds = ds;
pbody->ds_present = 1;
}
break;
case E_CF:
memcpy(&cf, p + offset, 2);
offset += 2;
length -= 2;
if (cf.length != 6) {
offset += cf.length;
length -= cf.length;
break;
}
memcpy(&cf.count, p + offset, 6);
offset += 6;
length -= 6;
/*
* Present and not truncated.
*
* If we haven't already seen a CF IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->cf_present) {
pbody->cf = cf;
pbody->cf_present = 1;
}
break;
case E_TIM:
memcpy(&tim, p + offset, 2);
offset += 2;
length -= 2;
if (tim.length <= 3) {
offset += tim.length;
length -= tim.length;
break;
}
if (tim.length - 3 > (int)sizeof tim.bitmap)
return 0;
memcpy(&tim.count, p + offset, 3);
offset += 3;
length -= 3;
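/* offset was already advanced past the 3 count bytes above, so copying
 * from p + offset + 3 starts the bitmap 3 bytes late and can read past
 * the element (CWE-125, CVE-2017-13008). The paired version that
 * follows copies from p + offset. */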
memcpy(tim.bitmap, p + offset + 3, tim.length - 3);
offset += tim.length - 3;
length -= tim.length - 3;
/*
* Present and not truncated.
*
* If we haven't already seen a TIM IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->tim_present) {
pbody->tim = tim;
pbody->tim_present = 1;
}
break;
default:
#if 0
ND_PRINT((ndo, "(1) unhandled element_id (%d) ",
*(p + offset)));
#endif
offset += 2 + elementlen;
length -= 2 + elementlen;
break;
}
}
/* No problems found. */
return 1;
} | 0 | CVE-2017-13008 | 1,064 | benign |
CWE-125 | parse_elements(netdissect_options *ndo,
struct mgmt_body_t *pbody, const u_char *p, int offset,
u_int length)
{
u_int elementlen;
struct ssid_t ssid;
struct challenge_t challenge;
struct rates_t rates;
struct ds_t ds;
struct cf_t cf;
struct tim_t tim;
/*
* We haven't seen any elements yet.
*/
pbody->challenge_present = 0;
pbody->ssid_present = 0;
pbody->rates_present = 0;
pbody->ds_present = 0;
pbody->cf_present = 0;
pbody->tim_present = 0;
while (length != 0) {
/* Make sure we at least have the element ID and length. */
if (!ND_TTEST2(*(p + offset), 2))
return 0;
if (length < 2)
return 0;
elementlen = *(p + offset + 1);
/* Make sure we have the entire element. */
if (!ND_TTEST2(*(p + offset + 2), elementlen))
return 0;
if (length < elementlen + 2)
return 0;
switch (*(p + offset)) {
case E_SSID:
memcpy(&ssid, p + offset, 2);
offset += 2;
length -= 2;
if (ssid.length != 0) {
if (ssid.length > sizeof(ssid.ssid) - 1)
return 0;
memcpy(&ssid.ssid, p + offset, ssid.length);
offset += ssid.length;
length -= ssid.length;
}
ssid.ssid[ssid.length] = '\0';
/*
* Present and not truncated.
*
* If we haven't already seen an SSID IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->ssid_present) {
pbody->ssid = ssid;
pbody->ssid_present = 1;
}
break;
case E_CHALLENGE:
memcpy(&challenge, p + offset, 2);
offset += 2;
length -= 2;
if (challenge.length != 0) {
if (challenge.length >
sizeof(challenge.text) - 1)
return 0;
memcpy(&challenge.text, p + offset,
challenge.length);
offset += challenge.length;
length -= challenge.length;
}
challenge.text[challenge.length] = '\0';
/*
* Present and not truncated.
*
* If we haven't already seen a challenge IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->challenge_present) {
pbody->challenge = challenge;
pbody->challenge_present = 1;
}
break;
case E_RATES:
memcpy(&rates, p + offset, 2);
offset += 2;
length -= 2;
if (rates.length != 0) {
if (rates.length > sizeof rates.rate)
return 0;
memcpy(&rates.rate, p + offset, rates.length);
offset += rates.length;
length -= rates.length;
}
/*
* Present and not truncated.
*
* If we haven't already seen a rates IE,
* copy this one if it's not zero-length,
* otherwise ignore this one, so we later
* report the first one we saw.
*
* We ignore zero-length rates IEs as some
* devices seem to put a zero-length rates
* IE, followed by an SSID IE, followed by
* a non-zero-length rates IE into frames,
* even though IEEE Std 802.11-2007 doesn't
* seem to indicate that a zero-length rates
* IE is valid.
*/
if (!pbody->rates_present && rates.length != 0) {
pbody->rates = rates;
pbody->rates_present = 1;
}
break;
case E_DS:
memcpy(&ds, p + offset, 2);
offset += 2;
length -= 2;
if (ds.length != 1) {
offset += ds.length;
length -= ds.length;
break;
}
ds.channel = *(p + offset);
offset += 1;
length -= 1;
/*
* Present and not truncated.
*
* If we haven't already seen a DS IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->ds_present) {
pbody->ds = ds;
pbody->ds_present = 1;
}
break;
case E_CF:
memcpy(&cf, p + offset, 2);
offset += 2;
length -= 2;
if (cf.length != 6) {
offset += cf.length;
length -= cf.length;
break;
}
memcpy(&cf.count, p + offset, 6);
offset += 6;
length -= 6;
/*
* Present and not truncated.
*
* If we haven't already seen a CF IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->cf_present) {
pbody->cf = cf;
pbody->cf_present = 1;
}
break;
case E_TIM:
memcpy(&tim, p + offset, 2);
offset += 2;
length -= 2;
if (tim.length <= 3) {
offset += tim.length;
length -= tim.length;
break;
}
if (tim.length - 3 > (int)sizeof tim.bitmap)
return 0;
memcpy(&tim.count, p + offset, 3);
offset += 3;
length -= 3;
memcpy(tim.bitmap, p + offset, tim.length - 3);
offset += tim.length - 3;
length -= tim.length - 3;
/*
* Present and not truncated.
*
* If we haven't already seen a TIM IE,
* copy this one, otherwise ignore this one,
* so we later report the first one we saw.
*/
if (!pbody->tim_present) {
pbody->tim = tim;
pbody->tim_present = 1;
}
break;
default:
#if 0
ND_PRINT((ndo, "(1) unhandled element_id (%d) ",
*(p + offset)));
#endif
offset += 2 + elementlen;
length -= 2 + elementlen;
break;
}
}
/* No problems found. */
return 1;
} | 1 | CVE-2017-13008 | 1,064 | vulnerable |
CWE-362 | static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->tread_sem);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
} | 0 | CVE-2016-2546 | 3,209 | benign |
CWE-362 | static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
} | 1 | CVE-2016-2546 | 3,209 | vulnerable |
CWE-20 | uint16_t enc624j600ReadPhyReg(NetInterface *interface, uint8_t address)
{
//Write the address of the PHY register to read from
enc624j600WriteReg(interface, ENC624J600_REG_MIREGADR, MIREGADR_R8 | address);
//Start read operation
enc624j600WriteReg(interface, ENC624J600_REG_MICMD, MICMD_MIIRD);
//Wait at least 25.6us before polling the BUSY bit
usleep(100);
//Wait for the read operation to complete
while((enc624j600ReadReg(interface, ENC624J600_REG_MISTAT) & MISTAT_BUSY) != 0)
{
}
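/* Busy-wait with an empty body until MISTAT_BUSY clears. The paired
 * version that follows restructures the poll as a do/while over a local
 * status word and reads renamed ENC624J600_* register constants; those
 * two driver revisions are what this CVE-2021-26788 row pairs. */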
//Clear command register
enc624j600WriteReg(interface, ENC624J600_REG_MICMD, 0x00);
//Return register contents
return enc624j600ReadReg(interface, ENC624J600_REG_MIRD);
} | 0 | CVE-2021-26788 | 1,901 | benign |
CWE-20 | uint16_t enc624j600ReadPhyReg(NetInterface *interface, uint8_t address)
{
uint16_t status;
//Write the address of the PHY register to read from
enc624j600WriteReg(interface, ENC624J600_MIREGADR,
ENC624J600_MIREGADR_R12_8_DEFAULT | address);
//Start read operation
enc624j600WriteReg(interface, ENC624J600_MICMD, ENC624J600_MICMD_MIIRD);
//Wait at least 25.6us before polling the BUSY bit
usleep(100);
//Wait for the read operation to complete
do
{
//Read MII Management Status register
status = enc624j600ReadReg(interface, ENC624J600_MISTAT);
//Check the value of the busy status bit
} while((status & ENC624J600_MISTAT_BUSY) != 0);
//Clear command register
enc624j600WriteReg(interface, ENC624J600_MICMD, 0x00);
//Return register contents
return enc624j600ReadReg(interface, ENC624J600_MIRD);
} | 1 | CVE-2021-26788 | 1,901 | vulnerable |
CWE-125 | static RList *r_bin_wasm_get_element_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
RList *ret = NULL;
RBinWasmElementEntry *ptr = NULL;
if (!(ret = r_list_newf ((RListFree)free))) {
return NULL;
}
ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
ut32 len = sec->payload_len;
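/* len comes straight from the section header and is never checked
 * against the actual bin->buf->length, so the consume_* readers can run
 * past the mapped file (CWE-125, CVE-2017-7854). The paired version
 * that follows derives a buflen from the real buffer size and bounds
 * the loop with it. */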
ut32 count = sec->count;
ut32 i = 0, r = 0;
while (i < len && r < count) {
if (!(ptr = R_NEW0 (RBinWasmElementEntry))) {
return ret;
}
if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) {
free (ptr);
return ret;
}
if (!(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) {
free (ptr);
return ret;
}
if (!(consume_u32 (buf + i, buf + len, &ptr->num_elem, &i))) {
free (ptr);
return ret;
}
ut32 j = 0;
while (i < len && j < ptr->num_elem ) {
// TODO: allocate space and fill entry
ut32 e;
if (!(consume_u32 (buf + i, buf + len, &e, &i))) {
free (ptr);
return ret;
}
}
r_list_append (ret, ptr);
r += 1;
}
return ret;
} | 0 | CVE-2017-7854 | 1,311 | benign |
CWE-125 | static RList *r_bin_wasm_get_element_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
RList *ret = NULL;
RBinWasmElementEntry *ptr = NULL;
if (!(ret = r_list_newf ((RListFree)free))) {
return NULL;
}
ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
int buflen = bin->buf->length - (ut32)sec->payload_data;
ut32 len = sec->payload_len;
ut32 count = sec->count;
ut32 i = 0, r = 0;
while (i < len && len < buflen && r < count) {
if (!(ptr = R_NEW0 (RBinWasmElementEntry))) {
return ret;
}
if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) {
goto beach;
}
if (!(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) {
goto beach;
}
if (!(consume_u32 (buf + i, buf + len, &ptr->num_elem, &i))) {
goto beach;
}
ut32 j = 0;
while (i < len && j < ptr->num_elem) {
// TODO: allocate space and fill entry
ut32 e;
if (!(consume_u32 (buf + i, buf + len, &e, &i))) {
free (ptr);
return ret;
}
}
r_list_append (ret, ptr);
r += 1;
}
return ret;
beach:
free (ptr);
return ret;
} | 1 | CVE-2017-7854 | 1,311 | vulnerable |
CWE-119 | static int http_buf_read(URLContext *h, uint8_t *buf, int size)
{
HTTPContext *s = h->priv_data;
int len;
/* read bytes from input buffer first */
len = s->buf_end - s->buf_ptr;
if (len > 0) {
if (len > size)
len = size;
memcpy(buf, s->buf_ptr, len);
s->buf_ptr += len;
} else {
int64_t target_end = s->end_off ? s->end_off : s->filesize;
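/* Offsets are signed here and chunked mode is detected via
 * chunksize < 0, so a server-supplied size can wrap negative and slip
 * past the EOF checks (CWE-119, CVE-2016-10190). The paired version
 * that follows moves everything to uint64_t with UINT64_MAX as the
 * chunked sentinel. */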
if ((!s->willclose || s->chunksize < 0) &&
target_end >= 0 && s->off >= target_end)
return AVERROR_EOF;
len = ffurl_read(s->hd, buf, size);
if (!len && (!s->willclose || s->chunksize < 0) &&
target_end >= 0 && s->off < target_end) {
av_log(h, AV_LOG_ERROR,
"Stream ends prematurely at %"PRId64", should be %"PRId64"\n",
s->off, target_end
);
return AVERROR(EIO);
}
}
if (len > 0) {
s->off += len;
if (s->chunksize > 0)
s->chunksize -= len;
}
return len;
} | 0 | CVE-2016-10190 | 2,655 | benign |
CWE-119 | static int http_buf_read(URLContext *h, uint8_t *buf, int size)
{
HTTPContext *s = h->priv_data;
int len;
/* read bytes from input buffer first */
len = s->buf_end - s->buf_ptr;
if (len > 0) {
if (len > size)
len = size;
memcpy(buf, s->buf_ptr, len);
s->buf_ptr += len;
} else {
uint64_t target_end = s->end_off ? s->end_off : s->filesize;
if ((!s->willclose || s->chunksize == UINT64_MAX) && s->off >= target_end)
return AVERROR_EOF;
len = ffurl_read(s->hd, buf, size);
if (!len && (!s->willclose || s->chunksize == UINT64_MAX) && s->off < target_end) {
av_log(h, AV_LOG_ERROR,
"Stream ends prematurely at %"PRIu64", should be %"PRIu64"\n",
s->off, target_end
);
return AVERROR(EIO);
}
}
if (len > 0) {
s->off += len;
if (s->chunksize > 0)
s->chunksize -= len;
}
return len;
} | 1 | CVE-2016-10190 | 2,655 | vulnerable |
CWE-125 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
bool adj_x = op_context.params->adj_x;
bool adj_y = op_context.params->adj_y;
const TfLiteTensor* lhs_data = GetInput(context, node, kInputLHSTensor);
const TfLiteTensor* rhs_data = GetInput(context, node, kInputRHSTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
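/* GetInput/GetOutput return raw pointers with no error path; if the
 * model's tensor indices are out of range these can be invalid, which
 * is the TFLite flatbuffer-indexing issue behind CVE-2020-15211. The
 * paired version that follows uses GetInputSafe/GetOutputSafe and
 * propagates the failure with TF_LITE_ENSURE_OK. */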
// Note that quantized inference requires that all tensors have their
// parameters set. This is usually done during quantized training.
if (lhs_data->type == kTfLiteInt8) {
double real_multiplier = 0.0;
TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
context, lhs_data, rhs_data, output, &real_multiplier));
int exponent;
QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent);
op_data->output_shift = exponent;
// BatchMatMul has no fused activation functions. Therefore, set
// output activation min and max to min and max of int8_t type,
// respectively.
op_data->output_activation_min = std::numeric_limits<int8_t>::min();
op_data->output_activation_max = std::numeric_limits<int8_t>::max();
}
TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 ||
lhs_data->type == kTfLiteInt8);
TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 ||
rhs_data->type == kTfLiteInt8);
// Support dimensions between 2 and 4, inclusive.
TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 4);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 4);
const int lhs_rank = NumDimensions(lhs_data);
const int rhs_rank = NumDimensions(rhs_data);
const int output_rank = std::max(lhs_rank, rhs_rank);
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data));
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data));
  // Ensure any batch dimensions obey broadcasting rules.
for (int i = 0; i < output_rank - 2; ++i) {
const int lhs_dim = extended_lhs_shape.Dims(i);
const int rhs_dim = extended_rhs_shape.Dims(i);
if (lhs_dim != rhs_dim) {
if (lhs_dim != 1) {
TF_LITE_ENSURE_EQ(context, rhs_dim, 1);
}
}
}
// Ensure other dimensions work for matrix multiplication.
int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2)
: extended_lhs_shape.Dims(output_rank - 1);
int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1)
: extended_rhs_shape.Dims(output_rank - 2);
TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs);
TfLiteStatus status =
ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x,
adj_y, output_rank, output);
return status;
} | 0 | CVE-2020-15211 | 2,565 | benign |
CWE-125 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
bool adj_x = op_context.params->adj_x;
bool adj_y = op_context.params->adj_y;
const TfLiteTensor* lhs_data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputLHSTensor, &lhs_data));
const TfLiteTensor* rhs_data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputRHSTensor, &rhs_data));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
// Note that quantized inference requires that all tensors have their
// parameters set. This is usually done during quantized training.
if (lhs_data->type == kTfLiteInt8) {
double real_multiplier = 0.0;
TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
context, lhs_data, rhs_data, output, &real_multiplier));
int exponent;
QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent);
op_data->output_shift = exponent;
// BatchMatMul has no fused activation functions. Therefore, set
// output activation min and max to min and max of int8_t type,
    // respectively.
op_data->output_activation_min = std::numeric_limits<int8_t>::min();
op_data->output_activation_max = std::numeric_limits<int8_t>::max();
}
TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 ||
lhs_data->type == kTfLiteInt8);
TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 ||
rhs_data->type == kTfLiteInt8);
// Support dimensions between 2 and 4, inclusive.
TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 4);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 4);
const int lhs_rank = NumDimensions(lhs_data);
const int rhs_rank = NumDimensions(rhs_data);
const int output_rank = std::max(lhs_rank, rhs_rank);
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data));
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data));
  // Ensure any batch dimensions obey broadcasting rules.
for (int i = 0; i < output_rank - 2; ++i) {
const int lhs_dim = extended_lhs_shape.Dims(i);
const int rhs_dim = extended_rhs_shape.Dims(i);
if (lhs_dim != rhs_dim) {
if (lhs_dim != 1) {
TF_LITE_ENSURE_EQ(context, rhs_dim, 1);
}
}
}
// Ensure other dimensions work for matrix multiplication.
int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2)
: extended_lhs_shape.Dims(output_rank - 1);
int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1)
: extended_rhs_shape.Dims(output_rank - 2);
TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs);
TfLiteStatus status =
ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x,
adj_y, output_rank, output);
return status;
} | 1 | CVE-2020-15211 | 2,565 | vulnerable |
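
The two Prepare variants above fetch tensors differently: one calls GetInput and uses the returned pointer directly, the other calls GetInputSafe and threads a status through TF_LITE_ENSURE_OK. A generic sketch of a status-returning accessor follows; the Tensor type and all names are hypothetical, not the real TFLite API.

#include <stddef.h>

typedef struct Tensor Tensor;                 /* opaque, hypothetical */
typedef enum { STATUS_OK, STATUS_ERROR } Status;

/* A missing or out-of-range tensor becomes an error the caller must
 * handle, rather than a NULL pointer dereferenced somewhere later. */
static Status get_input_safe(const Tensor *const *tensors, int count,
                             int index, const Tensor **out) {
    if (index < 0 || index >= count || tensors[index] == NULL) {
        return STATUS_ERROR;
    }
    *out = tensors[index];
    return STATUS_OK;
}

The caller then checks the status at the fetch site, which is what the TF_LITE_ENSURE_OK wrappers in the second variant express.
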
CWE-125 | void MACH0_(iterate_chained_fixups)(struct MACH0_(obj_t) *bin, ut64 limit_start, ut64 limit_end, ut32 event_mask, RFixupCallback callback, void * context) {
int i = 0;
for (; i < bin->nsegs; i++) {
if (!bin->chained_starts[i]) {
continue;
}
int page_size = bin->chained_starts[i]->page_size;
if (page_size < 1) {
page_size = 4096;
}
ut64 start = bin->segs[i].fileoff;
ut64 end = start + bin->segs[i].filesize;
if (end >= limit_start && start <= limit_end) {
ut64 page_idx = (R_MAX (start, limit_start) - start) / page_size;
ut64 page_end_idx = (R_MIN (limit_end, end) - start) / page_size;
for (; page_idx <= page_end_idx; page_idx++) {
if (page_idx >= bin->chained_starts[i]->page_count) {
break;
}
ut16 page_start = bin->chained_starts[i]->page_start[page_idx];
if (page_start == DYLD_CHAINED_PTR_START_NONE) {
continue;
}
ut64 cursor = start + page_idx * page_size + page_start;
while (cursor < limit_end && cursor < end) {
ut8 tmp[8];
bool previous_rebasing = bin->rebasing_buffer;
bin->rebasing_buffer = true;
if (r_buf_read_at (bin->b, cursor, tmp, 8) != 8) {
bin->rebasing_buffer = previous_rebasing;
break;
}
bin->rebasing_buffer = previous_rebasing;
ut64 raw_ptr = r_read_le64 (tmp);
ut64 ptr_value = raw_ptr;
ut64 delta, stride, addend;
ut16 pointer_format = bin->chained_starts[i]->pointer_format;
RFixupEvent event = R_FIXUP_EVENT_NONE;
ut8 key = 0, addr_div = 0;
ut16 diversity = 0;
ut32 ordinal = UT32_MAX;
if (pointer_format == DYLD_CHAINED_PTR_ARM64E) {
stride = 8;
bool is_auth = IS_PTR_AUTH (raw_ptr);
bool is_bind = IS_PTR_BIND (raw_ptr);
if (is_auth && is_bind) {
struct dyld_chained_ptr_arm64e_auth_bind *p =
(struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr;
event = R_FIXUP_EVENT_BIND_AUTH;
delta = p->next;
ordinal = p->ordinal;
key = p->key;
addr_div = p->addrDiv;
diversity = p->diversity;
} else if (!is_auth && is_bind) {
struct dyld_chained_ptr_arm64e_bind *p =
(struct dyld_chained_ptr_arm64e_bind *) &raw_ptr;
event = R_FIXUP_EVENT_BIND;
delta = p->next;
ordinal = p->ordinal;
addend = p->addend;
} else if (is_auth && !is_bind) {
struct dyld_chained_ptr_arm64e_auth_rebase *p =
(struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE_AUTH;
delta = p->next;
ptr_value = p->target + bin->baddr;
key = p->key;
addr_div = p->addrDiv;
diversity = p->diversity;
} else {
struct dyld_chained_ptr_arm64e_rebase *p =
(struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE;
delta = p->next;
ptr_value = ((ut64)p->high8 << 56) | p->target;
}
} else if (pointer_format == DYLD_CHAINED_PTR_ARM64E_USERLAND24) {
stride = 8;
struct dyld_chained_ptr_arm64e_bind24 *bind =
(struct dyld_chained_ptr_arm64e_bind24 *) &raw_ptr;
if (bind->bind) {
delta = bind->next;
if (bind->auth) {
struct dyld_chained_ptr_arm64e_auth_bind24 *p =
(struct dyld_chained_ptr_arm64e_auth_bind24 *) &raw_ptr;
event = R_FIXUP_EVENT_BIND_AUTH;
ordinal = p->ordinal;
key = p->key;
addr_div = p->addrDiv;
diversity = p->diversity;
} else {
event = R_FIXUP_EVENT_BIND;
ordinal = bind->ordinal;
addend = bind->addend;
}
} else {
if (bind->auth) {
struct dyld_chained_ptr_arm64e_auth_rebase *p =
(struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE_AUTH;
delta = p->next;
ptr_value = p->target + bin->baddr;
key = p->key;
addr_div = p->addrDiv;
diversity = p->diversity;
} else {
struct dyld_chained_ptr_arm64e_rebase *p =
(struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE;
delta = p->next;
ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target);
}
}
} else if (pointer_format == DYLD_CHAINED_PTR_64_OFFSET) {
stride = 4;
struct dyld_chained_ptr_64_bind *bind =
(struct dyld_chained_ptr_64_bind *) &raw_ptr;
if (bind->bind) {
event = R_FIXUP_EVENT_BIND;
delta = bind->next;
ordinal = bind->ordinal;
addend = bind->addend;
} else {
struct dyld_chained_ptr_64_rebase *p =
(struct dyld_chained_ptr_64_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE;
delta = p->next;
ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target);
}
} else {
eprintf ("Unsupported chained pointer format %d\n", pointer_format);
return;
}
if (cursor >= limit_start && cursor <= limit_end - 8 && (event & event_mask) != 0) {
bool carry_on;
switch (event) {
case R_FIXUP_EVENT_BIND: {
RFixupBindEventDetails event_details;
event_details.type = event;
event_details.bin = bin;
event_details.offset = cursor;
event_details.raw_ptr = raw_ptr;
event_details.ordinal = ordinal;
event_details.addend = addend;
carry_on = callback (context, (RFixupEventDetails *) &event_details);
break;
}
case R_FIXUP_EVENT_BIND_AUTH: {
RFixupBindAuthEventDetails event_details;
event_details.type = event;
event_details.bin = bin;
event_details.offset = cursor;
event_details.raw_ptr = raw_ptr;
event_details.ordinal = ordinal;
event_details.key = key;
event_details.addr_div = addr_div;
event_details.diversity = diversity;
carry_on = callback (context, (RFixupEventDetails *) &event_details);
break;
}
case R_FIXUP_EVENT_REBASE: {
RFixupRebaseEventDetails event_details;
event_details.type = event;
event_details.bin = bin;
event_details.offset = cursor;
event_details.raw_ptr = raw_ptr;
event_details.ptr_value = ptr_value;
carry_on = callback (context, (RFixupEventDetails *) &event_details);
break;
}
case R_FIXUP_EVENT_REBASE_AUTH: {
RFixupRebaseAuthEventDetails event_details;
event_details.type = event;
event_details.bin = bin;
event_details.offset = cursor;
event_details.raw_ptr = raw_ptr;
event_details.ptr_value = ptr_value;
event_details.key = key;
event_details.addr_div = addr_div;
event_details.diversity = diversity;
carry_on = callback (context, (RFixupEventDetails *) &event_details);
break;
}
default:
eprintf ("Unexpected event while iterating chained fixups\n");
carry_on = false;
}
if (!carry_on) {
return;
}
}
cursor += delta * stride;
if (!delta) {
break;
}
}
}
}
}
} | 0 | CVE-2022-1052 | 1,241 | benign |
CWE-125 | void MACH0_(iterate_chained_fixups)(struct MACH0_(obj_t) *bin, ut64 limit_start, ut64 limit_end, ut32 event_mask, RFixupCallback callback, void * context) {
int i = 0;
for (; i < bin->nsegs && i < bin->segs_count; i++) {
if (!bin->chained_starts[i]) {
continue;
}
int page_size = bin->chained_starts[i]->page_size;
if (page_size < 1) {
page_size = 4096;
}
ut64 start = bin->segs[i].fileoff;
ut64 end = start + bin->segs[i].filesize;
if (end >= limit_start && start <= limit_end) {
ut64 page_idx = (R_MAX (start, limit_start) - start) / page_size;
ut64 page_end_idx = (R_MIN (limit_end, end) - start) / page_size;
for (; page_idx <= page_end_idx; page_idx++) {
if (page_idx >= bin->chained_starts[i]->page_count) {
break;
}
ut16 page_start = bin->chained_starts[i]->page_start[page_idx];
if (page_start == DYLD_CHAINED_PTR_START_NONE) {
continue;
}
ut64 cursor = start + page_idx * page_size + page_start;
while (cursor < limit_end && cursor < end) {
ut8 tmp[8];
bool previous_rebasing = bin->rebasing_buffer;
bin->rebasing_buffer = true;
if (r_buf_read_at (bin->b, cursor, tmp, 8) != 8) {
bin->rebasing_buffer = previous_rebasing;
break;
}
bin->rebasing_buffer = previous_rebasing;
ut64 raw_ptr = r_read_le64 (tmp);
ut64 ptr_value = raw_ptr;
ut64 delta, stride, addend;
ut16 pointer_format = bin->chained_starts[i]->pointer_format;
RFixupEvent event = R_FIXUP_EVENT_NONE;
ut8 key = 0, addr_div = 0;
ut16 diversity = 0;
ut32 ordinal = UT32_MAX;
if (pointer_format == DYLD_CHAINED_PTR_ARM64E) {
stride = 8;
bool is_auth = IS_PTR_AUTH (raw_ptr);
bool is_bind = IS_PTR_BIND (raw_ptr);
if (is_auth && is_bind) {
struct dyld_chained_ptr_arm64e_auth_bind *p =
(struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr;
event = R_FIXUP_EVENT_BIND_AUTH;
delta = p->next;
ordinal = p->ordinal;
key = p->key;
addr_div = p->addrDiv;
diversity = p->diversity;
} else if (!is_auth && is_bind) {
struct dyld_chained_ptr_arm64e_bind *p =
(struct dyld_chained_ptr_arm64e_bind *) &raw_ptr;
event = R_FIXUP_EVENT_BIND;
delta = p->next;
ordinal = p->ordinal;
addend = p->addend;
} else if (is_auth && !is_bind) {
struct dyld_chained_ptr_arm64e_auth_rebase *p =
(struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE_AUTH;
delta = p->next;
ptr_value = p->target + bin->baddr;
key = p->key;
addr_div = p->addrDiv;
diversity = p->diversity;
} else {
struct dyld_chained_ptr_arm64e_rebase *p =
(struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE;
delta = p->next;
ptr_value = ((ut64)p->high8 << 56) | p->target;
}
} else if (pointer_format == DYLD_CHAINED_PTR_ARM64E_USERLAND24) {
stride = 8;
struct dyld_chained_ptr_arm64e_bind24 *bind =
(struct dyld_chained_ptr_arm64e_bind24 *) &raw_ptr;
if (bind->bind) {
delta = bind->next;
if (bind->auth) {
struct dyld_chained_ptr_arm64e_auth_bind24 *p =
(struct dyld_chained_ptr_arm64e_auth_bind24 *) &raw_ptr;
event = R_FIXUP_EVENT_BIND_AUTH;
ordinal = p->ordinal;
key = p->key;
addr_div = p->addrDiv;
diversity = p->diversity;
} else {
event = R_FIXUP_EVENT_BIND;
ordinal = bind->ordinal;
addend = bind->addend;
}
} else {
if (bind->auth) {
struct dyld_chained_ptr_arm64e_auth_rebase *p =
(struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE_AUTH;
delta = p->next;
ptr_value = p->target + bin->baddr;
key = p->key;
addr_div = p->addrDiv;
diversity = p->diversity;
} else {
struct dyld_chained_ptr_arm64e_rebase *p =
(struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE;
delta = p->next;
ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target);
}
}
} else if (pointer_format == DYLD_CHAINED_PTR_64_OFFSET) {
stride = 4;
struct dyld_chained_ptr_64_bind *bind =
(struct dyld_chained_ptr_64_bind *) &raw_ptr;
if (bind->bind) {
event = R_FIXUP_EVENT_BIND;
delta = bind->next;
ordinal = bind->ordinal;
addend = bind->addend;
} else {
struct dyld_chained_ptr_64_rebase *p =
(struct dyld_chained_ptr_64_rebase *) &raw_ptr;
event = R_FIXUP_EVENT_REBASE;
delta = p->next;
ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target);
}
} else {
eprintf ("Unsupported chained pointer format %d\n", pointer_format);
return;
}
if (cursor >= limit_start && cursor <= limit_end - 8 && (event & event_mask) != 0) {
bool carry_on;
switch (event) {
case R_FIXUP_EVENT_BIND: {
RFixupBindEventDetails event_details;
event_details.type = event;
event_details.bin = bin;
event_details.offset = cursor;
event_details.raw_ptr = raw_ptr;
event_details.ordinal = ordinal;
event_details.addend = addend;
carry_on = callback (context, (RFixupEventDetails *) &event_details);
break;
}
case R_FIXUP_EVENT_BIND_AUTH: {
RFixupBindAuthEventDetails event_details;
event_details.type = event;
event_details.bin = bin;
event_details.offset = cursor;
event_details.raw_ptr = raw_ptr;
event_details.ordinal = ordinal;
event_details.key = key;
event_details.addr_div = addr_div;
event_details.diversity = diversity;
carry_on = callback (context, (RFixupEventDetails *) &event_details);
break;
}
case R_FIXUP_EVENT_REBASE: {
RFixupRebaseEventDetails event_details;
event_details.type = event;
event_details.bin = bin;
event_details.offset = cursor;
event_details.raw_ptr = raw_ptr;
event_details.ptr_value = ptr_value;
carry_on = callback (context, (RFixupEventDetails *) &event_details);
break;
}
case R_FIXUP_EVENT_REBASE_AUTH: {
RFixupRebaseAuthEventDetails event_details;
event_details.type = event;
event_details.bin = bin;
event_details.offset = cursor;
event_details.raw_ptr = raw_ptr;
event_details.ptr_value = ptr_value;
event_details.key = key;
event_details.addr_div = addr_div;
event_details.diversity = diversity;
carry_on = callback (context, (RFixupEventDetails *) &event_details);
break;
}
default:
eprintf ("Unexpected event while iterating chained fixups\n");
carry_on = false;
}
if (!carry_on) {
return;
}
}
cursor += delta * stride;
if (!delta) {
break;
}
}
}
}
}
} | 1 | CVE-2022-1052 | 1,241 | vulnerable |
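
The two iterate_chained_fixups variants above differ in the outer loop bound: one iterates while i < bin->nsegs alone, the other additionally caps i at bin->segs_count. Reduced to a sketch with hypothetical names, the double bound looks like this:

typedef struct { unsigned long fileoff, filesize; } Segment; /* hypothetical */

/* Never index past what was actually allocated, even if a header
 * declares more segments than the allocation holds. */
static void walk_segments(const Segment *segs, int declared_nsegs,
                          int allocated_count) {
    for (int i = 0; i < declared_nsegs && i < allocated_count; i++) {
        (void)segs[i]; /* i is bounded by the allocation here */
    }
}
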
CWE-416 | CURLcode Curl_close(struct Curl_easy *data)
{
struct Curl_multi *m;
if(!data)
return CURLE_OK;
Curl_expire_clear(data); /* shut off timers */
m = data->multi;
if(m)
/* This handle is still part of a multi handle, take care of this first
and detach this handle from there. */
curl_multi_remove_handle(data->multi, data);
if(data->multi_easy)
/* when curl_easy_perform() is used, it creates its own multi handle to
use and this is the one */
curl_multi_cleanup(data->multi_easy);
/* Destroy the timeout list that is held in the easy handle. It is
/normally/ done by curl_multi_remove_handle() but this is "just in
case" */
Curl_llist_destroy(&data->state.timeoutlist, NULL);
data->magic = 0; /* force a clear AFTER the possibly enforced removal from
the multi handle, since that function uses the magic
field! */
if(data->state.rangestringalloc)
free(data->state.range);
/* freed here just in case DONE wasn't called */
Curl_free_request_state(data);
/* Close down all open SSL info and sessions */
Curl_ssl_close_all(data);
Curl_safefree(data->state.first_host);
Curl_safefree(data->state.scratch);
Curl_ssl_free_certinfo(data);
/* Cleanup possible redirect junk */
free(data->req.newurl);
data->req.newurl = NULL;
if(data->change.referer_alloc) {
Curl_safefree(data->change.referer);
data->change.referer_alloc = FALSE;
}
data->change.referer = NULL;
Curl_up_free(data);
Curl_safefree(data->state.buffer);
Curl_safefree(data->state.headerbuff);
Curl_safefree(data->state.ulbuf);
Curl_flush_cookies(data, 1);
Curl_digest_cleanup(data);
Curl_safefree(data->info.contenttype);
Curl_safefree(data->info.wouldredirect);
/* this destroys the channel and we cannot use it anymore after this */
Curl_resolver_cleanup(data->state.resolver);
Curl_http2_cleanup_dependencies(data);
Curl_convert_close(data);
/* No longer a dirty share, if it exists */
if(data->share) {
Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
data->share->dirty--;
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
}
/* destruct wildcard structures if it is needed */
Curl_wildcard_dtor(&data->wildcard);
Curl_freeset(data);
free(data);
return CURLE_OK;
} | 0 | CVE-2018-16840 | 2,307 | benign |
CWE-416 | CURLcode Curl_close(struct Curl_easy *data)
{
struct Curl_multi *m;
if(!data)
return CURLE_OK;
Curl_expire_clear(data); /* shut off timers */
m = data->multi;
if(m)
/* This handle is still part of a multi handle, take care of this first
and detach this handle from there. */
curl_multi_remove_handle(data->multi, data);
if(data->multi_easy) {
/* when curl_easy_perform() is used, it creates its own multi handle to
use and this is the one */
curl_multi_cleanup(data->multi_easy);
data->multi_easy = NULL;
}
/* Destroy the timeout list that is held in the easy handle. It is
/normally/ done by curl_multi_remove_handle() but this is "just in
case" */
Curl_llist_destroy(&data->state.timeoutlist, NULL);
data->magic = 0; /* force a clear AFTER the possibly enforced removal from
the multi handle, since that function uses the magic
field! */
if(data->state.rangestringalloc)
free(data->state.range);
/* freed here just in case DONE wasn't called */
Curl_free_request_state(data);
/* Close down all open SSL info and sessions */
Curl_ssl_close_all(data);
Curl_safefree(data->state.first_host);
Curl_safefree(data->state.scratch);
Curl_ssl_free_certinfo(data);
/* Cleanup possible redirect junk */
free(data->req.newurl);
data->req.newurl = NULL;
if(data->change.referer_alloc) {
Curl_safefree(data->change.referer);
data->change.referer_alloc = FALSE;
}
data->change.referer = NULL;
Curl_up_free(data);
Curl_safefree(data->state.buffer);
Curl_safefree(data->state.headerbuff);
Curl_safefree(data->state.ulbuf);
Curl_flush_cookies(data, 1);
Curl_digest_cleanup(data);
Curl_safefree(data->info.contenttype);
Curl_safefree(data->info.wouldredirect);
/* this destroys the channel and we cannot use it anymore after this */
Curl_resolver_cleanup(data->state.resolver);
Curl_http2_cleanup_dependencies(data);
Curl_convert_close(data);
/* No longer a dirty share, if it exists */
if(data->share) {
Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
data->share->dirty--;
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
}
/* destruct wildcard structures if it is needed */
Curl_wildcard_dtor(&data->wildcard);
Curl_freeset(data);
free(data);
return CURLE_OK;
} | 1 | CVE-2018-16840 | 2,307 | vulnerable |
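
The two Curl_close variants above differ in a single detail: whether data->multi_easy is reset to NULL after curl_multi_cleanup runs on it. The general clear-after-release idiom, reduced to a sketch with a hypothetical struct (the real field is released with curl_multi_cleanup, not free):

#include <stdlib.h>

typedef struct { void *multi_easy; } Handle;  /* hypothetical */

/* Clear the pointer once it is released, so any later cleanup pass
 * sees NULL instead of a stale handle (the CWE-416 reuse pattern). */
static void release_multi(Handle *h) {
    free(h->multi_easy);
    h->multi_easy = NULL;
}
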
CWE-20 | jas_matrix_t *jas_matrix_create(int numrows, int numcols)
{
jas_matrix_t *matrix;
int i;
size_t size;
matrix = 0;
if (numrows < 0 || numcols < 0) {
goto error;
}
if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) {
goto error;
}
matrix->flags_ = 0;
matrix->numrows_ = numrows;
matrix->numcols_ = numcols;
matrix->rows_ = 0;
matrix->maxrows_ = numrows;
matrix->data_ = 0;
matrix->datasize_ = 0;
// matrix->datasize_ = numrows * numcols;
if (!jas_safe_size_mul(numrows, numcols, &size)) {
goto error;
}
matrix->datasize_ = size;
if (matrix->maxrows_ > 0) {
if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_,
sizeof(jas_seqent_t *)))) {
goto error;
}
}
if (matrix->datasize_ > 0) {
if (!(matrix->data_ = jas_alloc2(matrix->datasize_,
sizeof(jas_seqent_t)))) {
goto error;
}
}
for (i = 0; i < numrows; ++i) {
matrix->rows_[i] = &matrix->data_[i * matrix->numcols_];
}
for (i = 0; i < matrix->datasize_; ++i) {
matrix->data_[i] = 0;
}
matrix->xstart_ = 0;
matrix->ystart_ = 0;
matrix->xend_ = matrix->numcols_;
matrix->yend_ = matrix->numrows_;
return matrix;
error:
if (matrix) {
jas_matrix_destroy(matrix);
}
return 0;
} | 0 | CVE-2016-9395 | 2,381 | benign |
CWE-20 | jas_matrix_t *jas_matrix_create(jas_matind_t numrows, jas_matind_t numcols)
{
jas_matrix_t *matrix;
jas_matind_t i;
size_t size;
matrix = 0;
if (numrows < 0 || numcols < 0) {
goto error;
}
if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) {
goto error;
}
matrix->flags_ = 0;
matrix->numrows_ = numrows;
matrix->numcols_ = numcols;
matrix->rows_ = 0;
matrix->maxrows_ = numrows;
matrix->data_ = 0;
matrix->datasize_ = 0;
// matrix->datasize_ = numrows * numcols;
if (!jas_safe_size_mul(numrows, numcols, &size)) {
goto error;
}
matrix->datasize_ = size;
if (matrix->maxrows_ > 0) {
if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_,
sizeof(jas_seqent_t *)))) {
goto error;
}
}
if (matrix->datasize_ > 0) {
if (!(matrix->data_ = jas_alloc2(matrix->datasize_,
sizeof(jas_seqent_t)))) {
goto error;
}
}
for (i = 0; i < numrows; ++i) {
matrix->rows_[i] = &matrix->data_[i * matrix->numcols_];
}
for (i = 0; i < matrix->datasize_; ++i) {
matrix->data_[i] = 0;
}
matrix->xstart_ = 0;
matrix->ystart_ = 0;
matrix->xend_ = matrix->numcols_;
matrix->yend_ = matrix->numrows_;
return matrix;
error:
if (matrix) {
jas_matrix_destroy(matrix);
}
return 0;
} | 1 | CVE-2016-9395 | 2,381 | vulnerable |
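
Both jas_matrix_create variants above push numrows * numcols through jas_safe_size_mul before allocating; they differ in whether the dimensions arrive as plain int or as the wider jas_matind_t. A sketch of an overflow-checked multiply of that shape, with the signature assumed rather than copied from JasPer:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Refuse a product that would wrap size_t, instead of allocating with
 * a silently truncated size. */
static bool safe_size_mul(size_t a, size_t b, size_t *result) {
    if (b != 0 && a > SIZE_MAX / b) {
        return false;
    }
    *result = a * b;
    return true;
}
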
CWE-119 | static void show_object(struct object *object, struct strbuf *path,
const char *last, void *data)
{
struct bitmap *base = data;
bitmap_set(base, find_object_pos(object->oid.hash));
mark_as_seen(object);
} | 0 | CVE-2016-2315 | 368 | benign |
CWE-119 | static void show_object(struct object *object, const char *name, void *data)
{
struct bitmap *base = data;
bitmap_set(base, find_object_pos(object->oid.hash));
mark_as_seen(object);
} | 1 | CVE-2016-2315 | 368 | vulnerable |
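
The two show_object variants above differ only in the callback's signature: one receives the object's path as a strbuf base plus a final component, the other as one precomposed name string. A sketch of joining the two-part form into a single heap string with explicit lengths (helper name hypothetical):

#include <stdlib.h>
#include <string.h>

/* Join a base path and its final component; the caller frees it. */
static char *compose_name(const char *base, const char *last) {
    size_t n1 = strlen(base), n2 = strlen(last);
    char *full = malloc(n1 + n2 + 1);
    if (!full) {
        return NULL;
    }
    memcpy(full, base, n1);
    memcpy(full + n1, last, n2 + 1); /* copies the trailing NUL too */
    return full;
}
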
CWE-125 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size,
params->stride));
const float* input_data = GetTensorData<float>(input);
const int64_t sample_count = input->dims->data[0];
const int64_t channel_count = input->dims->data[1];
const int64_t output_width = params->spectrogram->output_frequency_channels();
float* output_flat = GetTensorData<float>(output);
std::vector<float> input_for_channel(sample_count);
for (int64_t channel = 0; channel < channel_count; ++channel) {
float* output_slice =
output_flat + (channel * params->output_height * output_width);
for (int i = 0; i < sample_count; ++i) {
input_for_channel[i] = input_data[i * channel_count + channel];
}
std::vector<std::vector<float>> spectrogram_output;
TF_LITE_ENSURE(context,
params->spectrogram->ComputeSquaredMagnitudeSpectrogram(
input_for_channel, &spectrogram_output));
TF_LITE_ENSURE_EQ(context, spectrogram_output.size(),
params->output_height);
TF_LITE_ENSURE(context, spectrogram_output.empty() ||
(spectrogram_output[0].size() == output_width));
for (int row_index = 0; row_index < params->output_height; ++row_index) {
const std::vector<float>& spectrogram_row = spectrogram_output[row_index];
TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width);
float* output_row = output_slice + (row_index * output_width);
if (params->magnitude_squared) {
for (int i = 0; i < output_width; ++i) {
output_row[i] = spectrogram_row[i];
}
} else {
for (int i = 0; i < output_width; ++i) {
output_row[i] = sqrtf(spectrogram_row[i]);
}
}
}
}
return kTfLiteOk;
} | 0 | CVE-2020-15211 | 2,965 | benign |
CWE-125 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size,
params->stride));
const float* input_data = GetTensorData<float>(input);
const int64_t sample_count = input->dims->data[0];
const int64_t channel_count = input->dims->data[1];
const int64_t output_width = params->spectrogram->output_frequency_channels();
float* output_flat = GetTensorData<float>(output);
std::vector<float> input_for_channel(sample_count);
for (int64_t channel = 0; channel < channel_count; ++channel) {
float* output_slice =
output_flat + (channel * params->output_height * output_width);
for (int i = 0; i < sample_count; ++i) {
input_for_channel[i] = input_data[i * channel_count + channel];
}
std::vector<std::vector<float>> spectrogram_output;
TF_LITE_ENSURE(context,
params->spectrogram->ComputeSquaredMagnitudeSpectrogram(
input_for_channel, &spectrogram_output));
TF_LITE_ENSURE_EQ(context, spectrogram_output.size(),
params->output_height);
TF_LITE_ENSURE(context, spectrogram_output.empty() ||
(spectrogram_output[0].size() == output_width));
for (int row_index = 0; row_index < params->output_height; ++row_index) {
const std::vector<float>& spectrogram_row = spectrogram_output[row_index];
TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width);
float* output_row = output_slice + (row_index * output_width);
if (params->magnitude_squared) {
for (int i = 0; i < output_width; ++i) {
output_row[i] = spectrogram_row[i];
}
} else {
for (int i = 0; i < output_width; ++i) {
output_row[i] = sqrtf(spectrogram_row[i]);
}
}
}
}
return kTfLiteOk;
} | 1 | CVE-2020-15211 | 2,965 | vulnerable |
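
Both Eval variants above pull one channel at a time out of interleaved samples with the index i * channel_count + channel, and they differ, as in the Prepare pair earlier, only in GetInput versus GetInputSafe. The de-interleaving step in isolation:

/* Copy one channel out of interleaved float samples; out must have
 * room for sample_count values. */
static void extract_channel(const float *interleaved, long sample_count,
                            long channel_count, long channel, float *out) {
    for (long i = 0; i < sample_count; ++i) {
        out[i] = interleaved[i * channel_count + channel];
    }
}
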
CWE-362 | set<int> PipeSocketHandler::listen(const SocketEndpoint& endpoint) {
lock_guard<std::recursive_mutex> guard(globalMutex);
string pipePath = endpoint.name();
if (pipeServerSockets.find(pipePath) != pipeServerSockets.end()) {
throw runtime_error("Tried to listen twice on the same path");
}
sockaddr_un local;
int fd = socket(AF_UNIX, SOCK_STREAM, 0);
FATAL_FAIL(fd);
initServerSocket(fd);
local.sun_family = AF_UNIX; /* local is declared before socket() ^ */
strcpy(local.sun_path, pipePath.c_str());
unlink(local.sun_path);
FATAL_FAIL(::bind(fd, (struct sockaddr*)&local, sizeof(sockaddr_un)));
::listen(fd, 5);
#ifndef WIN32
FATAL_FAIL(::chmod(local.sun_path, S_IRUSR | S_IWUSR | S_IXUSR));
#endif
pipeServerSockets[pipePath] = set<int>({fd});
return pipeServerSockets[pipePath];
} | 0 | CVE-2022-24949 | 1,772 | benign |
CWE-362 | set<int> PipeSocketHandler::listen(const SocketEndpoint& endpoint) {
lock_guard<std::recursive_mutex> guard(globalMutex);
string pipePath = endpoint.name();
if (pipeServerSockets.find(pipePath) != pipeServerSockets.end()) {
throw runtime_error("Tried to listen twice on the same path");
}
sockaddr_un local;
int fd = socket(AF_UNIX, SOCK_STREAM, 0);
FATAL_FAIL(fd);
initServerSocket(fd);
local.sun_family = AF_UNIX; /* local is declared before socket() ^ */
strncpy(local.sun_path, pipePath.c_str(), sizeof(local.sun_path));
unlink(local.sun_path);
FATAL_FAIL(::bind(fd, (struct sockaddr*)&local, sizeof(sockaddr_un)));
::listen(fd, 5);
#ifndef WIN32
FATAL_FAIL(::chmod(local.sun_path, S_IRUSR | S_IWUSR | S_IXUSR));
#endif
pipeServerSockets[pipePath] = set<int>({fd});
return pipeServerSockets[pipePath];
} | 1 | CVE-2022-24949 | 1,772 | vulnerable |
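
The two listen variants above differ in how pipePath reaches sun_path: an unbounded strcpy versus a strncpy capped at sizeof(local.sun_path). Note that strncpy by itself leaves the buffer unterminated when the source fills it completely. A sketch that checks the length up front and keeps the terminator:

#include <string.h>
#include <sys/un.h>

/* Copy a filesystem path into sun_path only if it fits, NUL included. */
static int set_sun_path(struct sockaddr_un *sa, const char *path) {
    size_t n = strlen(path);
    if (n >= sizeof(sa->sun_path)) {
        return -1; /* would truncate or overflow */
    }
    memcpy(sa->sun_path, path, n + 1);
    return 0;
}
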